Example #1
void test_read_parallel(int num_verts)
{
  Core moab;
  Interface& mb = moab;
  EntityHandle file_set;
  ErrorCode rval;
  rval = mb.create_meshset(MESHSET_SET, file_set);
  CHECK_ERR(rval);

  // partition_method is a file-scope string expected to start with ";PARTITION_METHOD=..."
  // so that the concatenation below yields a valid option list
  std::string opt = std::string("PARALLEL=READ_PART;PARTITION=;PARTITION_DISTRIBUTE;PARALLEL_RESOLVE_SHARED_ENTS") +
      partition_method;
  rval = mb.load_file(example, &file_set, opt.c_str());
  CHECK_ERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);

  rval = pcomm->check_all_shared_handles();
  CHECK_ERR(rval);

  // Get the total number of owned vertices
  Range verts;
  rval = mb.get_entities_by_type(0, MBVERTEX, verts);
  CHECK_ERR(rval);
  rval = pcomm->filter_pstatus(verts, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);
  int my_num = verts.size(), total_verts;
  MPI_Reduce(&my_num, &total_verts, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  
  if (0 == pcomm->proc_config().proc_rank()) CHECK_EQUAL(total_verts, num_verts);
}
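
The test above depends on file-scope fixtures that this listing omits (`example`, `partition_method`, and the CHECK_* macros from MOAB's TestUtil.hpp). A minimal driver sketch, assuming `example` names the input mesh and `partition_method` carries the leading ";PARTITION_METHOD=..." fragment (illustrative values, not the original harness):

#include "moab/Core.hpp"
#include <mpi.h>
#include <string>

// Assumed fixtures; names and values are illustrative
std::string example = "mesh.nc";                            // input mesh path
std::string partition_method = ";PARTITION_METHOD=TRIVIAL"; // appended to the read options

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  test_read_parallel(1280); // expected global owned-vertex count for this sample mesh
  MPI_Finalize();
  return 0;
}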
Example #2
void gather_one_cell_var(int gather_set_rank)
{
  Core moab;
  Interface& mb = moab;

  EntityHandle file_set;
  ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
  CHECK_ERR(rval);

  read_options = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS";
  std::ostringstream gather_set_option;
  gather_set_option << ";GATHER_SET=" << gather_set_rank;
  read_options += gather_set_option.str();

  rval = mb.load_file(example, &file_set, read_options.c_str());
  CHECK_ERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
  int procs = pcomm->proc_config().proc_size();
  int rank = pcomm->proc_config().proc_rank();

  // Make sure gather_set_rank is valid
  if (gather_set_rank < 0 || gather_set_rank >= procs)
    return;

  Range cells, cells_owned;
  rval = mb.get_entities_by_type(file_set, MBPOLYGON, cells);
  CHECK_ERR(rval);

  // Get local owned cells
  rval = pcomm->filter_pstatus(cells, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &cells_owned);
  CHECK_ERR(rval);

  EntityHandle gather_set = 0;
  if (gather_set_rank == rank) {
    // Get gather set
    ReadUtilIface* readUtilIface;
    mb.query_interface(readUtilIface);
    rval = readUtilIface->get_gather_set(gather_set);
    CHECK_ERR(rval);
    assert(gather_set != 0);
  }

  Tag vorticity_tag0, gid_tag;
  rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0, MB_TAG_DENSE);
  CHECK_ERR(rval);

  rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
  CHECK_ERR(rval);

  rval = pcomm->gather_data(cells_owned, vorticity_tag0, gid_tag, gather_set, gather_set_rank);
  CHECK_ERR(rval);

  if (gather_set_rank == rank) {
    // Get gather set cells
    Range gather_set_cells;
    rval = mb.get_entities_by_type(gather_set, MBPOLYGON, gather_set_cells);
    CHECK_ERR(rval);
    CHECK_EQUAL((size_t)642, gather_set_cells.size());
    CHECK_EQUAL((size_t)1, gather_set_cells.psize());

    // Check vorticity0 tag values on 4 gather set cells: the first cell, two middle cells, and the last cell
    EntityHandle cell_ents[] = {gather_set_cells[0], gather_set_cells[320],
                                gather_set_cells[321], gather_set_cells[641]};
    double vorticity0_val[4 * layers];
    rval = mb.tag_get_data(vorticity_tag0, &cell_ents[0], 4, vorticity0_val);
    CHECK_ERR(rval);

    // Only check first two layers
    // Layer 0
    CHECK_REAL_EQUAL(3.629994, vorticity0_val[0 * layers], eps);
    CHECK_REAL_EQUAL(0.131688, vorticity0_val[1 * layers], eps);
    CHECK_REAL_EQUAL(-0.554888, vorticity0_val[2 * layers], eps);
    CHECK_REAL_EQUAL(-0.554888, vorticity0_val[3 * layers], eps);
    // Layer 1
    CHECK_REAL_EQUAL(3.629944, vorticity0_val[0 * layers + 1], eps);
    CHECK_REAL_EQUAL(0.131686, vorticity0_val[1 * layers + 1], eps);
    CHECK_REAL_EQUAL(-0.554881, vorticity0_val[2 * layers + 1], eps);
    CHECK_REAL_EQUAL(-0.554881, vorticity0_val[3 * layers + 1], eps);
  }
}
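
Examples #2 through #5 also read their fixtures from file scope. A sketch of the assumed declarations (illustrative values; the real test files set them for their specific sample meshes):

// Assumed file-scope fixtures for the gather-set tests; values are illustrative
const int layers = 3;     // vertical layers per cell variable
const int levels = 3;     // vertical levels per vertex variable (Example #5)
const double eps = 1e-6;  // tolerance for CHECK_REAL_EQUAL
std::string example;      // path to the sample mesh file
std::string read_options; // rebuilt by each test before load_file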
Example #3
void read_mesh_parallel(bool rcbzoltan)
{
  Core moab;
  Interface& mb = moab;

  read_options = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";
  if (rcbzoltan)
    read_options = "PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;PARALLEL_RESOLVE_SHARED_ENTS;VARIABLE=";

  ErrorCode rval = mb.load_file(example, NULL, read_options.c_str());
  CHECK_ERR(rval);

  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
  int procs = pcomm->proc_config().proc_size();
  int rank = pcomm->proc_config().proc_rank();

  rval = pcomm->check_all_shared_handles();
  CHECK_ERR(rval);

  // Get local vertices
  Range local_verts;
  rval = mb.get_entities_by_type(0, MBVERTEX, local_verts);
  CHECK_ERR(rval);

  int verts_num = local_verts.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(684, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(691, verts_num); // Not owned vertices included
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(687, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(688, verts_num); // Not owned vertices included
    }
  }

  rval = pcomm->filter_pstatus(local_verts, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);

  verts_num = local_verts.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(684, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(596, verts_num); // Not owned vertices excluded
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(687, verts_num);
      else if (1 == rank)
        CHECK_EQUAL(593, verts_num); // Not owned vertices excluded
    }
  }

  // Get local edges
  Range local_edges;
  rval = mb.get_entities_by_type(0, MBEDGE, local_edges);
  CHECK_ERR(rval);

  int edges_num = local_edges.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(1002, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(1013, edges_num); // Not owned edges included
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(1007, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(1008, edges_num); // Not owned edges included
    }
  }

  rval = pcomm->filter_pstatus(local_edges, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);

  edges_num = local_edges.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(1002, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(918, edges_num); // Not owned edges excluded
    }
    else {
      if (0 == rank)
        CHECK_EQUAL(1007, edges_num);
      else if (1 == rank)
        CHECK_EQUAL(913, edges_num); // Not owned edges excluded
    }
  }

  // Get local cells
  Range local_cells;
  rval = mb.get_entities_by_type(0, MBPOLYGON, local_cells);
  CHECK_ERR(rval);
  // No mixed elements
  CHECK_EQUAL((size_t)1, local_cells.psize());

  int cells_num = local_cells.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(319, cells_num);
      else
        CHECK_EQUAL(323, cells_num);
    }
    else
      CHECK_EQUAL(321, cells_num);
  }

  rval = pcomm->filter_pstatus(local_cells, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  CHECK_ERR(rval);

  cells_num = local_cells.size();
  if (2 == procs) {
    if (rcbzoltan) {
      if (0 == rank)
        CHECK_EQUAL(319, cells_num);
      else
        CHECK_EQUAL(323, cells_num);
    }
    else
      CHECK_EQUAL(321, cells_num);
  }

  std::cout << "proc: " << rank << " verts:" << verts_num << "\n";

  int total_verts_num;
  MPI_Reduce(&verts_num, &total_verts_num, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  if (0 == rank) {
    std::cout << "total vertices: " << total_verts_num << "\n";
    CHECK_EQUAL(1280, total_verts_num);
  }

  std::cout << "proc: " << rank << " edges:" << edges_num << "\n";

  int total_edges_num;
  MPI_Reduce(&edges_num, &total_edges_num, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  if (0 == rank) {
    std::cout << "total edges: " << total_edges_num << "\n";
    CHECK_EQUAL(1920, total_edges_num);
  }

  std::cout << "proc: " << rank << " cells:" << cells_num << "\n";

  int total_cells_num;
  MPI_Reduce(&cells_num, &total_cells_num, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  if (0 == rank) {
    std::cout << "total cells: " << total_cells_num << "\n";
    CHECK_EQUAL(642, total_cells_num);
  }

#ifdef MOAB_HAVE_HDF5_PARALLEL
  std::string write_options("PARALLEL=WRITE_PART;");

  std::string output_file = "test_gcrm";
  if (rcbzoltan)
    output_file += "_rcbzoltan";
  output_file += ".h5m";

  rval = mb.write_file(output_file.c_str(), NULL, write_options.c_str());
  CHECK_ERR(rval);
#endif
}
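
The count-filter-reduce pattern above repeats for vertices, edges, and cells. A small helper capturing it might read as follows (a sketch; error checking elided for brevity):

// Sketch: count the entities of one type owned by this rank and sum the counts on rank 0
int total_owned(Interface& mb, ParallelComm* pcomm, EntityType type)
{
  Range ents;
  mb.get_entities_by_type(0, type, ents);
  // Keep only the entities this rank owns, so nothing is counted twice across ranks
  pcomm->filter_pstatus(ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  int local = (int)ents.size(), total = 0;
  MPI_Reduce(&local, &total, 1, MPI_INT, MPI_SUM, 0, pcomm->proc_config().proc_comm());
  return total; // meaningful only on rank 0
}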
Example #4
void test_gather_onevar()
{
  Core moab;
  Interface& mb = moab;

  EntityHandle file_set;
  ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
  CHECK_ERR(rval);

  std::string opts;
  get_options(opts);

  // Read cell variable vorticity and create gather set on processor 0
  opts += ";VARIABLE=vorticity;GATHER_SET=0";
  rval = mb.load_file(example, &file_set, opts.c_str());
  CHECK_ERR(rval);

#ifdef MOAB_HAVE_MPI
  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
  int rank = pcomm->proc_config().proc_rank();

  Range cells, cells_owned;
  rval = mb.get_entities_by_type(file_set, MBPOLYGON, cells);
  CHECK_ERR(rval);

  // Get local owned cells
  rval = pcomm->filter_pstatus(cells, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &cells_owned);
  CHECK_ERR(rval);

  EntityHandle gather_set = 0;
  if (0 == rank) {
    // Get gather set
    ReadUtilIface* readUtilIface;
    mb.query_interface(readUtilIface);
    rval = readUtilIface->get_gather_set(gather_set);
    CHECK_ERR(rval);
    assert(gather_set != 0);
  }

  Tag vorticity_tag0, gid_tag;
  rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0, MB_TAG_DENSE);
  CHECK_ERR(rval);

  rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
  CHECK_ERR(rval);

  rval = pcomm->gather_data(cells_owned, vorticity_tag0, gid_tag, gather_set, 0);
  CHECK_ERR(rval);

  if (0 == rank) {
    // Get gather set cells
    Range gather_set_cells;
    rval = mb.get_entities_by_type(gather_set, MBPOLYGON, gather_set_cells);
    CHECK_ERR(rval);
    CHECK_EQUAL((size_t)642, gather_set_cells.size());
    CHECK_EQUAL((size_t)1, gather_set_cells.psize());

    // Check vorticity0 tag values on 4 gather set cells: the first cell, two middle cells, and the last cell
    EntityHandle cell_ents[] = {gather_set_cells[0], gather_set_cells[320],
                                gather_set_cells[321], gather_set_cells[641]};
    double vorticity0_val[4 * layers];
    rval = mb.tag_get_data(vorticity_tag0, cell_ents, 4, vorticity0_val);
    CHECK_ERR(rval);

    // Only check first two layers
    // Layer 0
    CHECK_REAL_EQUAL(3.629994, vorticity0_val[0 * layers], eps);
    CHECK_REAL_EQUAL(0.131688, vorticity0_val[1 * layers], eps);
    CHECK_REAL_EQUAL(-0.554888, vorticity0_val[2 * layers], eps);
    CHECK_REAL_EQUAL(-0.554888, vorticity0_val[3 * layers], eps);
    // Layer 1
    CHECK_REAL_EQUAL(3.629944, vorticity0_val[0 * layers + 1], eps);
    CHECK_REAL_EQUAL(0.131686, vorticity0_val[1 * layers + 1], eps);
    CHECK_REAL_EQUAL(-0.554881, vorticity0_val[2 * layers + 1], eps);
    CHECK_REAL_EQUAL(-0.554881, vorticity0_val[3 * layers + 1], eps);
  }
#endif
}
Example #5
void test_gather_onevar()
{
  Core moab;
  Interface& mb = moab;

  EntityHandle file_set;
  ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
  CHECK_ERR(rval);

  std::string opts;
  get_options(opts);

  // Read vertex variable T and create gather set on processor 0
  opts += ";VARIABLE=T;GATHER_SET=0";
#ifdef MOAB_HAVE_MPI
  opts += ";PARALLEL_RESOLVE_SHARED_ENTS";
#endif
  rval = mb.load_file(example, &file_set, opts.c_str());
  CHECK_ERR(rval);

#ifdef MOAB_HAVE_MPI
  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
  int rank = pcomm->proc_config().proc_rank();

  Range verts, verts_owned;
  rval = mb.get_entities_by_type(file_set, MBVERTEX, verts);
  CHECK_ERR(rval);

  // Get local owned vertices
  rval = pcomm->filter_pstatus(verts, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &verts_owned);
  CHECK_ERR(rval);

  EntityHandle gather_set = 0;
  if (0 == rank) {
    // Get gather set
    ReadUtilIface* readUtilIface;
    mb.query_interface(readUtilIface);
    rval = readUtilIface->get_gather_set(gather_set);
    CHECK_ERR(rval);
    assert(gather_set != 0);
  }

  Tag Ttag0, gid_tag;
  rval = mb.tag_get_handle("T0", levels, MB_TYPE_DOUBLE, Ttag0, MB_TAG_DENSE);
  CHECK_ERR(rval);

  rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
  CHECK_ERR(rval);

  rval = pcomm->gather_data(verts_owned, Ttag0, gid_tag, gather_set, 0);
  CHECK_ERR(rval);

  if (0 == rank) {
    // Get gather set vertices
    Range gather_set_verts;
    rval = mb.get_entities_by_type(gather_set, MBVERTEX, gather_set_verts);
    CHECK_ERR(rval);
    CHECK_EQUAL((size_t)3458, gather_set_verts.size());

    // Get T0 tag values on 4 strategically selected gather set vertices
    double T0_val[4 * levels];
    EntityHandle vert_ents[] = {gather_set_verts[0], gather_set_verts[1728],
                                gather_set_verts[1729], gather_set_verts[3457]};
    rval = mb.tag_get_data(Ttag0, vert_ents, 4, T0_val);
    CHECK_ERR(rval);

    const double eps = 0.001;

    // Check first level values
    CHECK_REAL_EQUAL(233.1136, T0_val[0 * levels], eps); // First vert
    CHECK_REAL_EQUAL(236.1505, T0_val[1 * levels], eps); // Median vert
    CHECK_REAL_EQUAL(235.7722, T0_val[2 * levels], eps); // Median vert
    CHECK_REAL_EQUAL(234.0416, T0_val[3 * levels], eps); // Last vert
  }
#endif
}
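
The get_options helper called by Examples #4 and #5 is defined elsewhere in the test files. Judging from the option strings the other examples pass to load_file, a plausible shape is (an assumption, not the original definition):

void get_options(std::string& opts)
{
#ifdef MOAB_HAVE_MPI
  // Parallel read: each rank reads its part of a trivial partition
  opts = "PARALLEL=READ_PART;PARTITION_METHOD=TRIVIAL";
#else
  opts = ""; // a serial read needs no special options
#endif
}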
Example #6
File: umr.cpp Project: obmun/moab
ErrorCode get_max_volume(Core &mb,  EntityHandle fileset, int dim, double &vmax)
{
  ErrorCode error;
  VerdictWrapper vw(&mb);
  QualityType q;

  switch (dim) {
    case 1: q = MB_LENGTH; break;
    case 2: q = MB_AREA; break;
    case 3: q = MB_VOLUME; break;
    default: return MB_FAILURE;
  }

  // Get all entities of the highest dimension, which is passed as a command line argument
  Range allents, owned;
  error = mb.get_entities_by_handle(fileset, allents);MB_CHK_ERR(error);
  // Note: subset_by_dimension returns a Range, not an ErrorCode, so there is nothing to check here
  owned = allents.subset_by_dimension(dim);

  // Get all owned entities
#ifdef MOAB_HAVE_MPI
  int size = 1;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  if (size > 1)
    {
      // filter the entities not owned, so we do not process them more than once
      ParallelComm* pcomm = moab::ParallelComm::get_pcomm(&mb, 0);
      Range current = owned;
      owned.clear();
      error = pcomm->filter_pstatus(current, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned);
      if (error != MB_SUCCESS)
        {
          MPI_Finalize();
          return MB_FAILURE;
        }
    }
#endif

  double vmax_local = 0;
  // Find the maximum volume of an entity in the owned mesh
  for (Range::iterator it = owned.begin(); it != owned.end(); ++it)
    {
      double volume;
      error = vw.quality_measure(*it, q, volume);MB_CHK_ERR(error);
      if (volume > vmax_local)
        vmax_local = volume;
    }

  // Get the global maximum
  double vmax_global = vmax_local;
#ifdef MOAB_HAVE_MPI
  int mpi_err = MPI_Reduce(&vmax_local, &vmax_global, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  if (mpi_err)
      {
        MPI_Finalize();
        return MB_FAILURE;
      }
#endif

  vmax = vmax_global;

  return MB_SUCCESS;
}
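
A usage fragment for the helper above (assumes `mb` is a Core with a mesh already loaded into `fileset`; after the MPI_Reduce the result is meaningful on rank 0):

double vmax = 0.0;
ErrorCode err = get_max_volume(mb, fileset, 3, vmax); // dim = 3 for a volume mesh
if (MB_SUCCESS == err)
  std::cout << "global max element volume (valid on rank 0): " << vmax << "\n";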
Example #7
int main(int argc, char **argv)
{
#ifdef MOAB_HAVE_MPI
  MPI_Init(&argc, &argv);

  string options;

  // Need option handling here for input filename
  if (argc > 1) {
    // User has input a mesh file
    test_file_name = argv[1];
  }  

  int nbComms = 1;
  if (argc > 2)
    nbComms = atoi(argv[2]);

  options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";

  // Get MOAB instance
  Interface* mb = new (std::nothrow) Core;
  if (NULL == mb)
    return 1;

  MPI_Comm comm;
  int global_rank, global_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &global_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &global_size);

  int color = global_rank % nbComms; // A different color for each group of ranks
  if (nbComms > 1) {
    // Split the communicator, into ngroups = nbComms
    MPI_Comm_split(MPI_COMM_WORLD, color, global_rank, &comm);
  }
  else
    comm = MPI_COMM_WORLD;

  // Get the ParallelComm instance
  ParallelComm* pcomm = new ParallelComm(mb, comm);
  int nprocs = pcomm->proc_config().proc_size();
  int rank = pcomm->proc_config().proc_rank();
#ifndef NDEBUG
  MPI_Comm rcomm = pcomm->proc_config().proc_comm();
  assert(rcomm == comm);
#endif
  if (0 == global_rank || 1 == global_rank)
    cout << " global rank:" << global_rank << " color:" << color << " rank:" << rank << " of " << nprocs << " processors\n";

  MPI_Barrier(MPI_COMM_WORLD);

  if (0 == global_rank)
    cout << "Reading file " << test_file_name << "\n with options: " << options <<
         "\n on " << nprocs << " processors on " << nbComms << " communicator(s)\n";

  // Read the file with the specified options
  ErrorCode rval = mb->load_file(test_file_name.c_str(), 0, options.c_str());MB_CHK_ERR(rval);

  Range shared_ents;
  // Get entities shared with all other processors
  rval = pcomm->get_shared_entities(-1, shared_ents);MB_CHK_ERR(rval);

  // Filter the shared entities, keeping those that are not not_owned, i.e., the ones owned by this rank
  Range owned_entities;
  rval = pcomm->filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities);MB_CHK_ERR(rval);

  int nums[4] = {0}; // owned entity counts per dimension; int to match MPI_INT in the gathers below
  for (int i = 0; i < 4; i++)
    nums[i] = (int)owned_entities.num_of_dimension(i);
  vector<int> rbuf(nprocs*4, 0);
  MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
  // Print the stats gathered:
  if (0 == global_rank) {
    for (int i = 0; i < nprocs; i++)
      cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
          rbuf[4*i + 1] << " edges, " << rbuf[4*i + 2] << " faces, " << rbuf[4*i + 3] << " elements" << endl;
  }

  // Now exchange 1 layer of ghost elements, using vertices as bridge
  // (We could have done this as part of the reading process, using the PARALLEL_GHOSTS read option)
  rval = pcomm->exchange_ghost_cells(3, // int ghost_dim
                                     0, // int bridge_dim
                                     1, // int num_layers
                                     0, // int addl_ents
                                     true);MB_CHK_ERR(rval); // bool store_remote_handles

  // Repeat the reports, after ghost exchange
  shared_ents.clear();
  owned_entities.clear();
  rval = pcomm->get_shared_entities(-1, shared_ents);MB_CHK_ERR(rval);
  rval = pcomm->filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities);MB_CHK_ERR(rval);

  // Find out how many shared entities of each dimension are owned on this processor
  for (int i = 0; i < 4; i++)
    nums[i] = (int)owned_entities.num_of_dimension(i);

  // Gather the statistics on processor 0
  MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
  if (0 == global_rank) {
    cout << " \n\n After exchanging one ghost layer: \n";
    for (int i = 0; i < nprocs; i++) {
      cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
          rbuf[4*i + 1] << " edges, " << rbuf[4*i + 2] << " faces, " << rbuf[4*i + 3] << " elements" << endl;
    }
  }

  delete mb;

  MPI_Finalize();
#else
  std::cout << "Compile MOAB with MPI and HDF5 for this example to work\n";

#endif
  return 0;
}