Example 1
void test_read_parallel(int num_verts)
{
  Core moab;
  Interface& mb = moab;
  EntityHandle file_set;
  ErrorCode rval;
  rval = mb.create_meshset(MESHSET_SET, file_set);
  CHECK_ERR(rval);

  std::string opt = std::string("PARALLEL=READ_PART;PARTITION=;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_SHARED_ENTS") +
      partition_method;
  rval = mb.load_file(example, &file_set, opt.c_str());
  CHECK_ERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);

  rval = pcomm->check_all_shared_handles();
  CHECK_ERR(rval);

  // Get the total number of owned vertices
  Range verts;
  rval = mb.get_entities_by_type(0, MBVERTEX, verts);
  CHECK_ERR(rval);
  rval = pcomm->filter_pstatus(verts, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);
  int my_num = (int)verts.size(), total_verts = 0;
  MPI_Reduce(&my_num, &total_verts, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  
  if (0 == pcomm->proc_config().proc_rank()) CHECK_EQUAL(total_verts, num_verts);
}
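This test relies on scaffolding from the MOAB test suite that is not shown: the mesh path `example`, the `partition_method` option fragment, and the `CHECK_ERR`/`CHECK_EQUAL` macros (normally provided by MOAB's TestUtil.hpp). A minimal sketch of that scaffolding, with placeholder values that are assumptions rather than the original test's settings, could look like this:

#include <mpi.h>
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "TestUtil.hpp" // provides CHECK_ERR and CHECK_EQUAL in MOAB's test suite

using namespace moab;

// Placeholder mesh path and partition method; the real test supplies these from
// its build configuration. The leading ';' lets the fragment be appended directly
// to the option string built in test_read_parallel.
static const char example[] = "64bricks_512hex.h5m";
static const std::string partition_method(";PARTITION_METHOD=TRIVIAL");

void test_read_parallel(int num_verts); // defined above

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  test_read_parallel(4096); // expected owned-vertex total is mesh-specific (placeholder)
  MPI_Finalize();
  return 0;
}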
Example 2
ErrorCode test_read(const char *filename, const char *option) 
{
  Core mb_instance;
  Interface& moab = mb_instance;
  ErrorCode rval;

  rval = moab.load_file( filename, 0, option);
  CHKERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&moab, 0);

  rval = pcomm->check_all_shared_handles();
  CHKERR(rval);

  return MB_SUCCESS;
}
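Unlike the first example, this variant returns an ErrorCode instead of aborting, and uses a CHKERR macro rather than CHECK_ERR. CHKERR is whatever error-checking macro the original test defines; a minimal stand-in (placed before test_read) and a hypothetical call site, assuming a read with trivial partitioning, might be:

// Minimal stand-in for the test's error macro: return the failing code to the caller.
#define CHKERR(rval)            \
  do {                          \
    if (MB_SUCCESS != (rval))   \
      return (rval);            \
  } while (false)

// Hypothetical call site (file name and option string are placeholders):
// ErrorCode rc = test_read("64bricks_512hex.h5m",
//                          "PARALLEL=READ_PART;PARTITION=;PARTITION_DISTRIBUTE;"
//                          "PARALLEL_RESOLVE_SHARED_ENTS;PARTITION_METHOD=TRIVIAL");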
Example 3
void read_mesh_parallel(bool rcbzoltan)
{
  Core moab;
  Interface& mb = moab;

  read_options = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";
  if (rcbzoltan)
    read_options = "PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";

  ErrorCode rval = mb.load_file(example, NULL, read_options.c_str());
  CHECK_ERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
  int procs = pcomm->proc_config().proc_size();
  int rank = pcomm->proc_config().proc_rank();

  rval = pcomm->check_all_shared_handles();
  CHECK_ERR(rval);

  // Get local vertices
  Range local_verts;
  rval = mb.get_entities_by_type(0, MBVERTEX, local_verts);
  CHECK_ERR(rval);

  int verts_num = local_verts.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(684, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(691, verts_num); // Not owned vertices included
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(687, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(688, verts_num); // Not owned vertices included
    }
  }

  rval = pcomm->filter_pstatus(local_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);

  verts_num = local_verts.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(684, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(596, verts_num); // Not owned vertices excluded
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(687, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(593, verts_num); // Not owned vertices excluded
    }
  }

  // Get local edges
  Range local_edges;
  rval = mb.get_entities_by_type(0, MBEDGE, local_edges);
  CHECK_ERR(rval);

  int edges_num = local_edges.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(1002, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(1013, edges_num); // Not owned edges included
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(1007, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(1008, edges_num); // Not owned edges included
    }
  }

  rval = pcomm->filter_pstatus(local_edges, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);

  edges_num = local_edges.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(1002, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(918, edges_num); // Not owned edges excluded
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(1007, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(913, edges_num); // Not owned edges excluded
    }
  }

  // Get local cells
  Range local_cells;
  rval = mb.get_entities_by_type(0, MBPOLYGON, local_cells);
  CHECK_ERR(rval);
  // No mixed elements
  CHECK_EQUAL((size_t)1, local_cells.psize());

  int cells_num = local_cells.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(319, cells_num);
      else
        CHECK_EQUAL(323, cells_num);
    }
    else
      CHECK_EQUAL(321, cells_num);
  }

  rval = pcomm->filter_pstatus(local_cells, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);

  cells_num = local_cells.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(319, cells_num);
      else
        CHECK_EQUAL(323, cells_num);
    }
    else
      CHECK_EQUAL(321, cells_num);
  }

  std::cout << "proc: " << rank << " verts:" << verts_num << "\n";

  int total_verts_num;
  MPI_Reduce(&verts_num, &total_verts_num, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  if (0 == rank) {
    std::cout << "total vertices: " << total_verts_num << "\n";
    CHECK_EQUAL(1280, total_verts_num);
  }

  std::cout << "proc: " << rank << " edges:" << edges_num << "\n";

  int total_edges_num;
  MPI_Reduce(&edges_num, &total_edges_num, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  if (0 == rank) {
    std::cout << "total edges: " << total_edges_num << "\n";
    CHECK_EQUAL(1920, total_edges_num);
  }

  std::cout << "proc: " << rank << " cells:" << cells_num << "\n";

  int total_cells_num;
  MPI_Reduce(&cells_num, &total_cells_num, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  if (0 == rank) {
    std::cout << "total cells: " << total_cells_num << "\n";
    CHECK_EQUAL(642, total_cells_num);
  }

#ifdef MOAB_HAVE_HDF5_PARALLEL
  std::string write_options("PARALLEL=WRITE_PART;");

  std::string output_file = "test_gcrm";
  if (rcbzoltan)
    output_file += "_rcbzoltan";
  output_file += ".h5m";

  rval = mb.write_file(output_file.c_str(), NULL, write_options.c_str());
  CHECK_ERR(rval);
#endif
}
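Example 3 repeats the same pattern three times: gather the local entities of one type, count them, filter with filter_pstatus(..., PSTATUS_NOT_OWNED, PSTATUS_NOT) to keep only owned entities, count again, and reduce the owned counts to rank 0. A hypothetical helper that factors this pattern out (not part of the original test, but built on the same MOAB calls) could read:

// Hypothetical helper: local and global owned-entity counts for one entity type.
// The global count is only meaningful on rank 0, the reduction root.
#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"

using namespace moab;

ErrorCode count_owned(Interface& mb, ParallelComm* pcomm, EntityType type,
                      int& local_owned, int& global_owned)
{
  Range ents;
  ErrorCode rval = mb.get_entities_by_type(0, type, ents);
  if (MB_SUCCESS != rval) return rval;

  // Drop ghost and shared-but-not-owned copies, keeping only owned entities.
  rval = pcomm->filter_pstatus(ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  if (MB_SUCCESS != rval) return rval;

  local_owned = (int)ents.size();
  MPI_Reduce(&local_owned, &global_owned, 1, MPI_INT, MPI_SUM, 0,
             pcomm->proc_config().proc_comm());
  return MB_SUCCESS;
}

With a helper like this, the vertex check on rank 0 reduces to calling count_owned(mb, pcomm, MBVERTEX, verts_num, total_verts_num) and comparing total_verts_num against 1280.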
Example 4
int main(int argc, char **argv)
{

  MPI_Init(&argc, &argv);
  LONG_DESC << "This program simulates a transport problem on a sphere"
        " according to a benchmark from a Nair & Lauritzen paper.\n"
        << "It starts with a partitioned mesh on a sphere, add a tracer, and steps through.\n" <<
        "The flow reverses after half time, and it should return to original configuration, if the integration was exact. ";
  ProgOptions opts(LONG_DESC.str(), BRIEF_DESC);

  // Read a HOMME file, partitioned into 16 parts so far
  std::string fileN= TestDir + "/HN16.h5m";
  const char *filename_mesh1 = fileN.c_str();

  opts.addOpt<double>("gtolerance,g",
      "geometric absolute tolerance (used for point concidence on the sphere)", &gtol);

  std::string input_file;
  opts.addOpt<std::string>("input_file,i", "input mesh file, partitioned",
      &input_file);
  std::string extra_read_opts;
  opts.addOpt<std::string>("extra_read_options,O", "extra read options ",
        &extra_read_opts);
  //int field_type;
  opts.addOpt<int>("field_type,f",
        "field type--  1: quasi-smooth; 2: smooth; 3: slotted cylinders (non-smooth)", &field_type);

  opts.addOpt<int>("num_steps,n",
          "number of  steps ", &numSteps);

  //bool reorder = false;
  opts.addOpt<void>("write_debug_files,w", "write debugging files during simulation ",
        &writeFiles);

  opts.addOpt<void>("write_velocity_files,v", "Reorder mesh to group entities by partition",
     &velocity);

  opts.addOpt<void>("write_result_in_parallel,p", "write tracer result files",
     &parallelWrite);

  opts.parseCommandLine(argc, argv);

  if (!input_file.empty())
    filename_mesh1=input_file.c_str();

  // read in parallel, in the "euler_set", the initial mesh
  std::string optsRead = std::string("PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION")+
            std::string(";PARALLEL_RESOLVE_SHARED_ENTS")+extra_read_opts;
  Core moab;
  Interface & mb = moab;
  EntityHandle euler_set;
  ErrorCode rval;
  rval = mb.create_meshset(MESHSET_SET, euler_set);
  CHECK_ERR(rval);

  rval = mb.load_file(filename_mesh1, &euler_set, optsRead.c_str());
  CHECK_ERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);

  rval = pcomm->check_all_shared_handles();
  CHECK_ERR(rval);

  int rank = pcomm->proc_config().proc_rank();

  if (0==rank)
  {
    std::cout << " case 1: use -gtol " << gtol <<
        " -R " << radius << " -input " << filename_mesh1 <<  " -f " << field_type <<
        " numSteps: " << numSteps << "\n";
    std::cout<<" write debug results: " << (writeFiles ? "yes" : "no") << "\n";
    std::cout<< " write tracer in parallel: " << ( parallelWrite ? "yes" : "no") << "\n";
    std::cout <<" output velocity: " << (velocity? "yes" : "no") << "\n";
  }

  // tagTracer is the value at nodes
  Tag tagTracer = 0;
  std::string tag_name("Tracer");
  rval = mb.tag_get_handle(tag_name.c_str(), 1, MB_TYPE_DOUBLE, tagTracer, MB_TAG_DENSE | MB_TAG_CREAT);
  CHECK_ERR(rval);

  // tagElem is the average computed at each element, from nodal values
  Tag tagElem = 0;
  std::string tag_name2("TracerAverage");
  rval = mb.tag_get_handle(tag_name2.c_str(), 1, MB_TYPE_DOUBLE, tagElem, MB_TAG_DENSE | MB_TAG_CREAT);
  CHECK_ERR(rval);

  // The area of each Euler element is fixed; store it, since it is used to
  // recompute the averages at each time step
  Tag tagArea = 0;
  std::string tag_name4("Area");
  rval = mb.tag_get_handle(tag_name4.c_str(), 1, MB_TYPE_DOUBLE, tagArea, MB_TAG_DENSE | MB_TAG_CREAT);
  CHECK_ERR(rval);

  // add a field value, quasi smooth first
  rval = add_field_value(&mb, euler_set, rank, tagTracer, tagElem, tagArea);
  CHECK_ERR(rval);

  // iniVals are used for 1-norm error computation
  Range redEls;
  rval = mb.get_entities_by_dimension(euler_set, 2, redEls);
  CHECK_ERR(rval);
  std::vector<double> iniVals(redEls.size());
  rval = mb.tag_get_data(tagElem, redEls, &iniVals[0]);
  CHECK_ERR(rval);

  Tag tagh = 0;
  std::string tag_name3("Case1");
  rval = mb.tag_get_handle(tag_name3.c_str(), 3, MB_TYPE_DOUBLE, tagh, MB_TAG_DENSE | MB_TAG_CREAT);
  CHECK_ERR(rval);
  EntityHandle out_set, lagr_set;
  rval = mb.create_meshset(MESHSET_SET, out_set);
  CHECK_ERR(rval);
  rval = mb.create_meshset(MESHSET_SET, lagr_set);
  CHECK_ERR(rval);
  // Copy the initial mesh into the Lagrangian set;
  // initial vertices will be at the same positions as the Euler ones.

  rval = create_lagr_mesh(&mb, euler_set, lagr_set);
  CHECK_ERR(rval);

  Intx2MeshOnSphere worker(&mb);
  worker.SetRadius(radius);

  worker.SetErrorTolerance(gtol);

  Range local_verts;
  rval = worker.build_processor_euler_boxes(euler_set, local_verts); // also outputs local_verts
  CHECK_ERR(rval);
  // The processor boxes stay fixed for one run; other intersection inputs, such as the
  // blue (departure) set, may change between steps, so a method to clean up memory is also needed.

  for (int i=1; i<numSteps+1; i++)
  {
    // Time depends on i: t = i*T/numSteps (0, T/numSteps, 2*T/numSteps, ..., T).
    // The velocity output is only for plotting; compute_tracer_case1 below computes
    // the departure point positions itself.
    if (velocity)
    {
      rval = compute_velocity_case1(&mb, euler_set, tagh, rank, i);
      CHECK_ERR(rval);
    }

    // Compute the concentrations at time step i, using the current concentrations
    rval = compute_tracer_case1(&mb, worker, euler_set, lagr_set, out_set,
        tagElem, tagArea, rank, i, local_verts);
    CHECK_ERR(rval);

  }

  // Final values and 1-norm error
  Range::iterator iter = redEls.begin();
  double norm1 = 0.;
  int count = 0;
  void* data;
  int j = 0; // index in iniVals
  while (iter != redEls.end())
  {
    rval = mb.tag_iterate(tagElem, iter, redEls.end(), count, data);
    CHECK_ERR(rval);
    double * ptrTracer=(double*)data;

    rval = mb.tag_iterate(tagArea, iter, redEls.end(), count, data);
    CHECK_ERR(rval);
    double * ptrArea=(double*)data;
    for (int i=0; i<count; i++, iter++, ptrTracer++, ptrArea++, j++)
    {
      //double area = *ptrArea;
      norm1+=fabs(*ptrTracer - iniVals[j])* (*ptrArea);
    }
  }

  double total_norm1=0;
  int mpi_err = MPI_Reduce(&norm1, &total_norm1, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  if (MPI_SUCCESS != mpi_err) return 1;
  if (0==rank)
    std::cout << " numSteps:" << numSteps << " 1-norm:" << total_norm1 << "\n";
  MPI_Finalize();
  return 0;
}
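The driver above also depends on file-scope state and helpers that are not reproduced here: the variables bound by ProgOptions, the description streams, and the routines add_field_value, create_lagr_mesh, compute_velocity_case1 and compute_tracer_case1 from the intersection example code. A sketch of plausible declarations, with placeholder defaults that are assumptions rather than the original values, would be:

#include <sstream>
#include <string>

// Assumed file-scope declarations (placeholder defaults, not the original values).
std::ostringstream LONG_DESC;
std::string BRIEF_DESC("Transport benchmark on a sphere");

double gtol = 1.0e-9;       // geometric tolerance on the sphere
double radius = 1.0;        // sphere radius
int field_type = 1;         // 1: quasi-smooth, 2: smooth, 3: slotted cylinders
int numSteps = 50;          // number of time steps
bool writeFiles = false;    // write debugging files
bool velocity = false;      // write velocity files
bool parallelWrite = false; // write tracer results in parallel
std::string TestDir(".");   // directory holding HN16.h5m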