template<typename MatrixType>
int
generate_matrix_structure(const simple_mesh_description<typename MatrixType::GlobalOrdinalType>& mesh,
                          MatrixType& A)
{
  int myproc = 0;
#ifdef HAVE_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &myproc);
#endif

  int threw_exc = 0;
  try {

  typedef typename MatrixType::GlobalOrdinalType GlobalOrdinal;
  typedef typename MatrixType::LocalOrdinalType LocalOrdinal;

  int global_nodes_x = mesh.global_box[0][1]+1;
  int global_nodes_y = mesh.global_box[1][1]+1;
  int global_nodes_z = mesh.global_box[2][1]+1;
  int box[3][2];
  copy_box(mesh.local_box, box);

  //num-owned-nodes in each dimension is num-elems+1
  //only if num-elems > 0 in that dimension *and*
  //we are at the high end of the global range in that dimension:
  if (box[0][1] > box[0][0] && box[0][1] == mesh.global_box[0][1]) ++box[0][1];
  if (box[1][1] > box[1][0] && box[1][1] == mesh.global_box[1][1]) ++box[1][1];
  if (box[2][1] > box[2][0] && box[2][1] == mesh.global_box[2][1]) ++box[2][1];
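  //e.g. in 1-D with 4 global elements split into local element ranges [0,2) and
  //[2,4): the first box owns nodes 0..1, while the second box, which touches the
  //high end of the global range, is extended to own nodes 2..4 including node 4.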

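  //accumulate the global row count in GlobalOrdinal so the product of the three
  //node counts cannot overflow a 32-bit int on large meshes: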
  GlobalOrdinal global_nrows = global_nodes_x;
  global_nrows *= global_nodes_y*global_nodes_z;

  GlobalOrdinal nrows = get_num_ids<GlobalOrdinal>(box);
  try {
    A.rows.resize(nrows);
    A.row_offsets.resize(nrows+1);
    A.packed_cols.reserve(nrows*27);
    A.packed_coefs.reserve(nrows*27);
  }
  catch(std::exception& exc) {
    std::ostringstream osstr;
    osstr << "One of A.rows.resize, A.row_offsets.resize, A.packed_cols.reserve or A.packed_coefs.reserve threw: nrows=" <<nrows<<": ";
    osstr << exc.what();
    std::string str1 = osstr.str();
    throw std::runtime_error(str1);
  }

  std::vector<GlobalOrdinal> rows(nrows);
  std::vector<LocalOrdinal> row_offsets(nrows+1);
  std::vector<int> row_coords(nrows*3);

  //In the code below we reverse the sign of z-coordinates before
  //passing them to get_id because we want our id-numbering to start
  //at the origin, but our finite-element calculations require the
  //z-coordinates to extend in the negative direction along the
  //z-axis...

  unsigned roffset = 0;
  GlobalOrdinal nnz = 0;

  for(int iz=box[2][0]; iz<box[2][1]; ++iz)
   for(int iy=box[1][0]; iy<box[1][1]; ++iy)
    for(int ix=box[0][0]; ix<box[0][1]; ++ix) {
      GlobalOrdinal row_id =
          get_id<GlobalOrdinal>(global_nodes_x, global_nodes_y, global_nodes_z,
                                ix, iy, -iz);
      rows[roffset] = mesh.map_id_to_row(row_id);
      row_coords[roffset*3] = ix;
      row_coords[roffset*3+1] = iy;
      row_coords[roffset*3+2] = iz;
      row_offsets[roffset++] = nnz;

      for(int sz=-1; sz<=1; ++sz)
       for(int sy=-1; sy<=1; ++sy)
        for(int sx=-1; sx<=1; ++sx) {
          GlobalOrdinal col_id =
              get_id<GlobalOrdinal>(global_nodes_x, global_nodes_y, global_nodes_z,
                                   ix+sx, iy+sy, -iz-sz);
          if (col_id >= 0 && col_id < global_nrows) {
            ++nnz;
          }
        }
    }
  row_offsets[roffset] = nnz;
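  //the counting pass above determined nnz; size the packed storage here and let
  //init_matrix fill in the per-row structure from the data gathered above.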
  A.packed_cols.resize(nnz);
  A.packed_coefs.resize(nnz);
  init_matrix(A, rows, row_offsets, row_coords,
              global_nodes_x, global_nodes_y, global_nodes_z, global_nrows, mesh);
  }
  catch(...) {
    std::cout << "proc " << myproc << " threw an exception in generate_matrix_structure, probably due to running out of memory." << std::endl;
    threw_exc = 1;
  }
#ifdef HAVE_MPI
  int global_throw = 0;
  MPI_Allreduce(&threw_exc, &global_throw, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  threw_exc = global_throw;
#endif
  if (threw_exc) {
    return 1;
  }

  return 0;
}
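A minimal usage sketch (not part of the listing above), assuming the same simple_mesh_description type and a miniFE-style CSR matrix type that is default-constructible and has the typedefs and members used by generate_matrix_structure; the driver function name is hypothetical:

template<typename MatrixType>
int build_structure_example(const simple_mesh_description<typename MatrixType::GlobalOrdinalType>& mesh)
{
  MatrixType A;
  //a nonzero return means some MPI rank threw (typically std::bad_alloc):
  if (generate_matrix_structure(mesh, A) != 0) {
    return 1; //every rank sees the failure (via the Allreduce), so all can abort consistently
  }
  //A.rows, A.row_offsets, A.packed_cols and A.packed_coefs are now sized and
  //initialized via init_matrix; coefficient assembly can follow.
  return 0;
}
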
template<typename VectorType>
int
verify_solution(const simple_mesh_description<typename VectorType::GlobalOrdinalType>& mesh,
                const VectorType& x, double tolerance, bool verify_whole_domain = false)
{
  typedef typename VectorType::GlobalOrdinalType GlobalOrdinal;
  typedef typename VectorType::ScalarType Scalar;

  int global_nodes_x = mesh.global_box[0][1]+1;
  int global_nodes_y = mesh.global_box[1][1]+1;
  int global_nodes_z = mesh.global_box[2][1]+1;
  Box box;
  copy_box(mesh.local_box, box);

  //num-owned-nodes in each dimension is num-elems+1
  //only if num-elems > 0 in that dimension *and*
  //we are at the high end of the global range in that dimension:
  if (box[0][1] > box[0][0] && box[0][1] == mesh.global_box[0][1]) ++box[0][1];
  if (box[1][1] > box[1][0] && box[1][1] == mesh.global_box[1][1]) ++box[1][1];
  if (box[2][1] > box[2][0] && box[2][1] == mesh.global_box[2][1]) ++box[2][1];

  std::vector<GlobalOrdinal> rows;
  std::vector<Scalar> row_coords;

  int roffset = 0;
  for(int iz=box[2][0]; iz<box[2][1]; ++iz) {
   for(int iy=box[1][0]; iy<box[1][1]; ++iy) {
    for(int ix=box[0][0]; ix<box[0][1]; ++ix) {
      GlobalOrdinal row_id =
          get_id<GlobalOrdinal>(global_nodes_x, global_nodes_y, global_nodes_z,
                                ix, iy, iz);
      Scalar x, y, z;
      get_coords(row_id, global_nodes_x, global_nodes_y, global_nodes_z, x, y, z);

      bool verify_this_point = false;
      if (verify_whole_domain) verify_this_point = true;
      else if (std::abs(x - 0.5) < 0.05 && std::abs(y - 0.5) < 0.05 && std::abs(z - 0.5) < 0.05) {
        verify_this_point = true;
      }

      if (verify_this_point) {
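        //note: 'rows' stores the local offset of this node (the index into
        //x.coefs used below), not the global row id.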
        rows.push_back(roffset);
        row_coords.push_back(x);
        row_coords.push_back(y);
        row_coords.push_back(z);
      }

      ++roffset;
    }
   }
  }

  int return_code = 0;

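  //number of terms used when evaluating the truncated-series analytic solution
  //at interior points: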
  const int num_terms = 300;

  err_info<Scalar> max_error;
  max_error.err = 0.0;

  for(size_t i=0; i<rows.size(); ++i) {
    Scalar computed_soln = x.coefs[rows[i]];
    Scalar x = row_coords[i*3];
    Scalar y = row_coords[i*3+1];
    Scalar z = row_coords[i*3+2];
    Scalar analytic_soln = 0.0;
    //set exact boundary-conditions:
    if (x == 1.0) {
      //x==1 is first, we want soln to be 1 even around the edges
      //of the x==1 plane where y and/or z may be 0 or 1...
      analytic_soln = 1;
    }
    else if (x == 0.0 || y == 0.0 || z == 0.0) {
      analytic_soln = 0;
    }
    else if (y == 1.0 || z == 1.0) {
      analytic_soln = 0;
    }
    else {
      analytic_soln = soln(x, y, z, num_terms, num_terms);
    }

#ifdef MINIFE_DEBUG_VERBOSE
std::cout<<"("<<x<<","<<y<<","<<z<<") row "<<rows[i]<<": computed: "<<computed_soln<<",  analytic: "<<analytic_soln<<std::endl;
#endif
    Scalar err = std::abs(analytic_soln - computed_soln);
    if (err > max_error.err) {
      max_error.err = err;
      max_error.computed = computed_soln;
      max_error.analytic = analytic_soln;
      max_error.coords[0] = x;
      max_error.coords[1] = y;
      max_error.coords[2] = z;
    }
  }

  Scalar local_max_err = max_error.err;
  Scalar global_max_err = 0;
#ifdef HAVE_MPI
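  //note: MPI_DOUBLE assumes Scalar is double here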
  MPI_Allreduce(&local_max_err, &global_max_err, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
#else
  global_max_err = local_max_err;
#endif

  if (local_max_err == global_max_err) {
    if (max_error.err > tolerance) {
      std::cout << "max absolute error is "<<max_error.err<<":"<<std::endl;
      std::cout << "   at position ("<<max_error.coords[0]<<","<<max_error.coords[1]<<","<<max_error.coords[2]<<"), "<<std::endl;
      std::cout << "   computed solution: "<<max_error.computed<<",  analytic solution: "<<max_error.analytic<<std::endl;
    }
    else {
      std::cout << "solution matches analytic solution to within "<<tolerance<<" or better."<<std::endl;
    }
  }

  if (global_max_err > tolerance) {
    return_code = 1;
  }

  return return_code;
}
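A similarly hedged usage sketch: after the linear solve, a driver might check the computed solution like this. The vector type requirements (GlobalOrdinalType, ScalarType, a coefs array indexed by local row) are taken from the listing above; the wrapper name and tolerance value are illustrative.

template<typename VectorType>
bool solution_is_acceptable(const simple_mesh_description<typename VectorType::GlobalOrdinalType>& mesh,
                            const VectorType& x)
{
  const double tolerance = 0.06;    //illustrative bound on the max absolute error
  const bool whole_domain = false;  //only check nodes near the center (0.5,0.5,0.5)
  return verify_solution(mesh, x, tolerance, whole_domain) == 0;
}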