Code Example #1
    // Copy range_in to range_out, skipping the points whose corresponding
    // bit in mark_map (looked up by ring id) is set.
    static inline void apply(Range const& range_in, ring_identifier id,
                    Range& range_out, MarkMap const& mark_map)
    {
        typename MarkMap::const_iterator mit = mark_map.find(id);
        if (mit == mark_map.end())
        {
            range_out = range_in;
            return;
        }
        typedef typename MarkMap::mapped_type bit_vector_type;

        if (boost::size(range_in) != boost::size(mit->second))
        {
            throw std::runtime_error("ERROR in size of mark_map");
        }

        range_out.clear();

        typename boost::range_iterator<bit_vector_type const>::type bit = boost::begin(mit->second);
        for (typename boost::range_iterator<Range const>::type it = boost::begin(range_in);
            it != boost::end(range_in); ++it, ++bit)
        {
            bool const& marked = *bit;
            if (! marked)
            {
                range_out.push_back(*it);
            }
        }
    }
Code Example #2
File: convert.cpp Project: obmun/moab
void remove_entities_from_sets( Interface* gMB, Range& dead_entities, Range& empty_sets )
{
  empty_sets.clear();
  Range sets;
  gMB->get_entities_by_type( 0, MBENTITYSET, sets );
  for (Range::iterator i = sets.begin(); i != sets.end(); ++i) {
    Range set_contents;
    gMB->get_entities_by_handle( *i, set_contents, false );
    set_contents = intersect( set_contents, dead_entities );
    gMB->remove_entities( *i, set_contents );
    set_contents.clear();
    gMB->get_entities_by_handle( *i, set_contents, false );
    if (set_contents.empty())
      empty_sets.insert( *i );
  }
}
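A minimal usage sketch (not from convert.cpp; the file name "input.h5m" and the choice of 1D entities are assumptions) showing how the helper above could be driven from a standalone program:

// Hypothetical driver for remove_entities_from_sets(): load a mesh, treat all
// edges as dead entities, and collect the sets that become empty.
#include "moab/Core.hpp"
using namespace moab;

int main()
{
  Core core;
  Interface* gMB = &core;
  if (MB_SUCCESS != gMB->load_file("input.h5m"))
    return 1;

  Range dead_entities, empty_sets;
  gMB->get_entities_by_dimension(0, 1, dead_entities);   // all 1D entities
  remove_entities_from_sets(gMB, dead_entities, empty_sets);
  return 0;
}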
Code Example #3
File: ParallelMergeMesh.cpp Project: obmun/moab
  //Sets mySkinEnts with all of the skin entities on the processor
  ErrorCode ParallelMergeMesh::PopulateMySkinEnts(const EntityHandle meshset, int dim, bool skip_local_merge)
  {
    /*Merge Mesh Locally*/
    //Get all dim dimensional entities
    Range ents;
    ErrorCode rval = myMB->get_entities_by_dimension(meshset,dim,ents);MB_CHK_ERR(rval);

    if (ents.empty() && dim==3)
    {
      dim--;
      rval =  myMB->get_entities_by_dimension(meshset,dim,ents);MB_CHK_ERR(rval);// maybe dimension 2
    }

    //Merge Mesh Locally
    if (!skip_local_merge)
      {
        MergeMesh merger(myMB, false);
        rval = merger.merge_entities(ents, myEps);
        //We can return if there is only 1 proc
        if(rval != MB_SUCCESS || myPcomm->size() == 1){
            return rval;
          }

        //Rebuild the ents range
        ents.clear();
        rval = myMB->get_entities_by_dimension(meshset,dim,ents);MB_CHK_ERR(rval);
      }

    /*Get Skin
      -Get Range of all dimensional entities
      -skinEnts[i] is the skin entities of dimension i*/  
    Skinner skinner(myMB);
    for(int skin_dim = dim; skin_dim >= 0; skin_dim--){
      rval = skinner.find_skin(meshset,ents,skin_dim,mySkinEnts[skin_dim]);MB_CHK_ERR(rval);
    }
    return MB_SUCCESS;
  }
Code Example #4
File: h5varlen.cpp Project: obmun/moab
void test_var_length_handle_tag()
{
  ErrorCode rval;
  Core moab1, moab2;
  Interface &mb1 = moab1, &mb2 = moab2;
  Tag tag;
  Range::const_iterator  i;
  
  create_mesh( mb1 );
  rval = mb1.tag_get_handle( "test_tag", 0, MB_TYPE_HANDLE, tag, 
                             MB_TAG_SPARSE|MB_TAG_VARLEN|MB_TAG_EXCL );
  CHECK_ERR( rval );
  
    // Get all entities
  Range range;
  rval = mb1.get_entities_by_handle( 0, range );
  CHECK_ERR(rval);

    // For each entity, if it is a vertex store its own handle
    // in its tag.  Otherwise store the element connectivity list
    // in the tag.  Skip entity sets.
  size_t num_tagged_entities = 0;
  for (i = range.begin(); i != range.end(); ++i) {
    EntityHandle h = *i;
    EntityType type = mb1.type_from_handle( h );
    if (type == MBVERTEX) {
      const int size = 1;
      const void* ptr = &h;
      rval = mb1.tag_set_by_ptr( tag, &h, 1, &ptr, &size );
      CHECK_ERR(rval);
      ++num_tagged_entities;
    }
    else if (type != MBENTITYSET) {
      int size = 0;
      const EntityHandle* conn = 0;
      rval = mb1.get_connectivity( h, conn, size );
      CHECK_ERR(rval);
      const void* ptr = conn;
      rval = mb1.tag_set_by_ptr( tag, &h, 1, &ptr, &size );
      CHECK_ERR(rval);
      ++num_tagged_entities;
    }
  }
  
  read_write( "test_var_length_handle_tag.h5m", mb1, mb2 );
  compare_tags( "test_tag", mb1, mb2 );
  
    // check number of tagged entities
  rval = mb2.tag_get_handle( "test_tag", 0, MB_TYPE_HANDLE, tag );
  CHECK_ERR(rval);
  range.clear();
  for (EntityType t = MBVERTEX; t != MBENTITYSET; ++t) {
    rval = mb2.get_entities_by_type_and_tag( 0, t, &tag, 0, 1, range, Interface::UNION );
    CHECK_ERR(rval);
  }
  CHECK_EQUAL( num_tagged_entities, range.size() );
  
    // check tag values
  for (i = range.begin(); i != range.end(); ++i) {
    EntityHandle h = *i;
    
    const void* ptr;
    int size;
    rval = mb2.tag_get_by_ptr( tag, &h, 1, &ptr, &size );
    CHECK_ERR(rval);
    
    const EntityHandle* handles = reinterpret_cast<const EntityHandle*>(ptr);
    
    if (mb2.type_from_handle(h) == MBVERTEX) {
      CHECK_EQUAL( 1, size );
      CHECK_EQUAL( h, *handles );
    }
    else {
      int len;
      const EntityHandle* conn;
      rval = mb2.get_connectivity( h, conn, len );
      CHECK_ERR(rval);
      CHECK_EQUAL( len, size );
      for (int j = 0; j < len; ++j)
        CHECK_EQUAL( conn[j], handles[j] );
    }
  }
}
Code Example #5
File: h5varlen.cpp Project: obmun/moab
void test_var_length_big_data()
{
  ErrorCode rval;
  Core moab1, moab2;
  Interface &mb1 = moab1, &mb2 = moab2;
  Tag tag;
  
  create_mesh( mb1 );
  rval = mb1.tag_get_handle( "test_tag", 0, MB_TYPE_DOUBLE, tag, MB_TAG_SPARSE|MB_TAG_VARLEN|MB_TAG_EXCL );
  CHECK_ERR( rval );
  
    // choose 3 vertices upon which to set data
  Range range;
  rval = mb1.get_entities_by_type( 0, MBVERTEX, range );
  CHECK_ERR(rval);
  EntityHandle verts[3] = { range.front(),
                            *(range.begin() += range.size()/3),
                            *(range.begin() += 2*range.size()/3) };
  
    // set 1-million-value tag data on three vertices
  std::vector<double> data(1000000);
  for (int i = 0; i < 3; ++i) {
    calculate_big_value( mb1, verts[i], data.size(), &data[0] );
    const void* ptr = &data[0];
    const int size = data.size();
    rval = mb1.tag_set_by_ptr( tag, verts + i, 1, &ptr, &size );
    CHECK_ERR(rval);
  }
  
  read_write( "test_var_length_big_data.h5m", mb1, mb2 );
  compare_tags( "test_tag", mb1, mb2 );
  
    // check 3 tagged vertices
  rval = mb2.tag_get_handle( "test_tag", 0, MB_TYPE_DOUBLE, tag );
  CHECK_ERR(rval);
  range.clear();
  rval = mb2.get_entities_by_type_and_tag( 0, MBVERTEX, &tag, 0, 1, range, Interface::UNION );
  CHECK_ERR(rval);
  CHECK_EQUAL( (size_t)3, range.size() );
  
    // check tag values
  for (Range::const_iterator i = range.begin(); i != range.end(); ++i) {
      // calculate expected value
    const EntityHandle h = *i;
    calculate_big_value( mb2, h, data.size(), &data[0] );
    
      // get actual value
    const void* ptr;
    int size;
    rval = mb2.tag_get_by_ptr( tag, &h, 1, &ptr, &size );
    CHECK_ERR(rval);
    CHECK_EQUAL( data.size(), (size_t)size );
    
      // compare values
    const double* act_data = reinterpret_cast<const double*>(ptr);
    int wrong_count = 0;
    for (size_t j = 0; j < data.size(); ++j)
      if (act_data[j] != data[j])
        ++wrong_count;
    CHECK_EQUAL( 0, wrong_count );
  }    
}  
Code Example #6
File: h5varlen.cpp Project: obmun/moab
void test_var_length_data_common( const char* filename, Interface& mb1, bool opaque = false )
{
    // create tag
  ErrorCode rval;
  Tag tag;
  DataType type = opaque ? MB_TYPE_OPAQUE : MB_TYPE_INTEGER;
  rval = mb1.tag_get_handle( "test_tag", 0, type, tag, MB_TAG_EXCL|MB_TAG_VARLEN|MB_TAG_SPARSE );
  CHECK_ERR( rval );
  
    // get all entities
  Range entities;
  rval = mb1.get_entities_by_handle( 0, entities );
  CHECK_ERR(rval);
  
    // Set tag data.
    // Tag data will be list of integer data as follows:
    //   number of values (counting this value)
    //   step, 2*step, 3*step, ...
  for (Range::const_iterator i = entities.begin(); i != entities.end(); ++i) {
    EntityHandle h = *i;
      // generate some data to write
    int num_values = h % 6 + 1;
    EntityType etype = mb1.type_from_handle(h);
    int step = (h%2) ? 1+(int)etype : -1-(int)etype;
    std::vector<int> tag_data( num_values, num_values );
    for (int j = 1; j < num_values; ++j)
      tag_data[j] = j*step;
      // set tag data
    const void* ptrarr[]= { &tag_data[0] };
    if (opaque) num_values *= sizeof(int);
    rval = mb1.tag_set_by_ptr(tag, &h,1, ptrarr, &num_values );
    CHECK_ERR(rval);
  }
  
    // write and read tag data
  Core moab;
  Interface &mb2 = moab;
  read_write( filename, mb1, mb2 );
  compare_tags( "test_tag", mb1, mb2 );
  
    // get new tag handle
  tag = 0;
  rval = mb2.tag_get_handle( "test_tag", 0, type, tag );
  CHECK_ERR(rval);
  
    // check consistency of tag values
  entities.clear();
  mb2.get_entities_by_handle( 0, entities );
    // remove sets created during read/write process
  Range sets;
  mb2.get_entities_by_type( 0, MBENTITYSET, sets );
  entities = subtract( entities, sets);
  for (Range::const_iterator i = entities.begin(); i != entities.end(); ++i) {
      // get data
    const void* ptrarr[] = { NULL };
    int size;
    rval = mb2.tag_get_by_ptr( tag, &*i, 1, ptrarr, &size );
    CHECK_ERR(rval);
    if (opaque) {
      CHECK_EQUAL( (size_t)0, size % sizeof(int) );
      size /= sizeof(int);
    }
    const int* dataptr = reinterpret_cast<const int*>(ptrarr[0]);
    CHECK( NULL != dataptr );
      // check size 
    CHECK( size > 0 );
    CHECK_EQUAL( size, dataptr[0] );
      // check other values
    if (size > 2) {
      int step = dataptr[1];
      for (int j = 2; j < size; ++j)
        CHECK_EQUAL( j*step, dataptr[j] );
    }
  }
}
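The variable-length tag pattern exercised above (a pointer array plus a size array, with sizes counted in values for typed tags and in bytes for opaque ones) can be reduced to a small helper. A minimal sketch, not part of the test file; the tag name and the helper itself are hypothetical:

#include "moab/Core.hpp"
using namespace moab;

// Hypothetical helper: store three integers in a variable-length sparse tag on
// one entity and read them back by pointer.
ErrorCode roundtrip_varlen_tag(Interface& mb, EntityHandle h)
{
  Tag vtag;
  ErrorCode rv = mb.tag_get_handle("example_varlen", 0, MB_TYPE_INTEGER, vtag,
                                   MB_TAG_SPARSE | MB_TAG_VARLEN | MB_TAG_CREAT);
  if (MB_SUCCESS != rv) return rv;

  const int values[] = { 3, 10, 20 };   // first entry holds the count, as above
  const void* in_ptr = values;
  const int in_size = 3;                // counted in values, not bytes
  rv = mb.tag_set_by_ptr(vtag, &h, 1, &in_ptr, &in_size);
  if (MB_SUCCESS != rv) return rv;

  const void* out_ptr = 0;
  int out_size = 0;
  rv = mb.tag_get_by_ptr(vtag, &h, 1, &out_ptr, &out_size);
  // On success out_size == 3 and static_cast<const int*>(out_ptr)[0] == 3.
  return rv;
}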
Code Example #7
File: convert.cpp Project: obmun/moab
int main(int argc, char* argv[])
{
  int proc_id = 0;
#ifdef MOAB_HAVE_MPI
  MPI_Init(&argc,&argv);
  MPI_Comm_rank( MPI_COMM_WORLD, &proc_id );
#endif

  Core core;
  Interface* gMB = &core;
  ErrorCode result;
  Range range;

  bool append_rank = false;
  bool percent_rank_subst = false;      
  int i, dim;
  std::list< std::string >::iterator j;
  bool dims[4] = {false, false, false, false};
  const char* format = NULL; // output file format
  std::list< std::string > in; // input file name list
  std::string out;   // output file name
  bool verbose = false;
  std::set<int> geom[4], mesh[4];       // user-specified IDs
  std::vector<EntityHandle> set_list; // list of user-specified sets to write
  std::vector<std::string> write_opts, read_opts;
  const char* const mesh_tag_names[] = { DIRICHLET_SET_TAG_NAME,
                                         NEUMANN_SET_TAG_NAME,
                                         MATERIAL_SET_TAG_NAME,
                                         PARALLEL_PARTITION_TAG_NAME };
  const char* const geom_names[] = { "VERTEX",
                                     "CURVE",
                                     "SURFACE",
                                     "VOLUME" };
  
    // scan arguments
  bool do_flag = true;
  bool print_times = false;
  bool generate[] = { false, false, false };
  bool pval;
  bool parallel = false, resolve_shared = false, exchange_ghosts = false;
  for (i = 1; i < argc; i++)
  {
    if (!argv[i][0])
      usage_error(argv[0]);
      
    if (do_flag && argv[i][0] == '-')
    {
      if (!argv[i][1] || (argv[i][1] != 'M' && argv[i][2]))
        usage_error(argv[0]);
      
      switch ( argv[i][1] )
      {
          // do flag arguments:
        case '-': do_flag = false;       break;
        case 'g': verbose = true;        break;
        case 't': print_times = true;    break;
        case 'A':                        break;
        case 'h': 
        case 'H': print_help( argv[0] ); break;
        case 'l': list_formats( gMB );   break;
#ifdef MOAB_HAVE_MPI
        case 'P': append_rank = true;    break;
        case 'p': percent_rank_subst = true; break;
        case 'M':
            parallel = true;
            if (argv[i][2] == '1' || argv[i][2] == '2') resolve_shared = true;
            if (argv[i][2] == '2') exchange_ghosts = true;
            break;
#endif
        case '1': case '2': case '3':
          dims[argv[i][1] - '0'] = true; break;
          // do options that require additional args:
        default: 
          ++i;
          if (i == argc || argv[i][0] == '-') {
            std::cerr << "Expected argument following " << argv[i-1] << std::endl;
            usage_error(argv[0]);
          }
          if (argv[i-1][1] == 'I') {
            dim = atoi( argv[i] );
            if (dim < 1 || dim > 2) {
              std::cerr << "Invalid dimension value following -I" << std::endl;
              usage_error(argv[0]);
            }
            generate[dim] = true;
            continue;
          }
          pval = false;
          switch ( argv[i-1][1] )
          {
            case 'a': 
              read_opts.push_back( std::string("SAT_FILE=") + argv[i] );
              pval = true;
              break;
            case 'f': format = argv[i]; pval = true;              break;
            case 'o': write_opts.push_back(argv[i]); pval = true; break;
            case 'O':  read_opts.push_back(argv[i]); pval = true; break;
            case 'v': pval = parse_id_list( argv[i], geom[3] ); break;
            case 's': pval = parse_id_list( argv[i], geom[2] ); break;
            case 'c': pval = parse_id_list( argv[i], geom[1] ); break;
            case 'V': pval = parse_id_list( argv[i], geom[0] ); break;
            case 'D': pval = parse_id_list( argv[i], mesh[3] ); break;
            case 'm': pval = parse_id_list( argv[i], mesh[2] ); break;
            case 'n': pval = parse_id_list( argv[i], mesh[1] ); break;
            case 'd': pval = parse_id_list( argv[i], mesh[0] ); break;
            default: std::cerr << "Invalid option: " << argv[i] << std::endl;
          }
          
          if (!pval) {
            std::cerr << "Invalid flag or flag value: " << argv[i-1] << " " << argv[i] << std::endl;
            usage_error(argv[0]);
          }
      }
    }
      // do file names
    else {
      in.push_back( argv[i] );
    }
  }
  if (in.size() < 2) {
    std::cerr << "No output file name specified." << std::endl;
    usage_error(argv[0]);
  }
    // output file name is the last one specified
  out = in.back();
  in.pop_back();
    
  if (append_rank) {
    std::ostringstream mod;
    mod << out << "." << proc_id;
    out = mod.str();
  }
  
  if (percent_rank_subst) {
    for (j = in.begin(); j != in.end(); ++j) 
      *j = percent_subst( *j , proc_id );
    out = percent_subst( out, proc_id );
  }
 
    // construct options string from individual options
  std::string read_options, write_options;
  if (parallel) {
    read_opts.push_back("PARALLEL=READ_PART");
    read_opts.push_back("PARTITION=PARALLEL_PARTITION");
    if (!append_rank && !percent_rank_subst)
      write_opts.push_back("PARALLEL=WRITE_PART");
  }
  if (resolve_shared) read_opts.push_back("PARALLEL_RESOLVE_SHARED_ENTS");
  if (exchange_ghosts) read_opts.push_back("PARALLEL_GHOSTS=3.0.1");
  
  if (!make_opts_string(  read_opts,  read_options ) ||
      !make_opts_string( write_opts, write_options )) 
  {
#ifdef MOAB_HAVE_MPI
    MPI_Finalize();
#endif
    return USAGE_ERROR;
  }
  
    // Read the input file.
  for (j = in.begin(); j != in.end(); ++j) {
    reset_times();
    result = gMB->load_file( j->c_str(), 0, read_options.c_str() );
    if (MB_SUCCESS != result)
    { 
      std::cerr << "Failed to load \"" << *j << "\"." << std::endl;
      std::cerr  << "Error code: " << gMB->get_error_string(result) << " (" << result << ")" << std::endl;
      std::string message;
      if (MB_SUCCESS == gMB->get_last_error(message) && !message.empty())
        std::cerr << "Error message: " << message << std::endl;
  #ifdef MOAB_HAVE_MPI
      MPI_Finalize();
  #endif
      return READ_ERROR;
    }
    if (!proc_id) std::cerr << "Read \"" << *j << "\"" << std::endl;
    if (print_times && !proc_id) write_times( std::cout );
  }
  
    // Determine if the user has specified any geometry sets to write
  bool have_geom = false;
  for (dim = 0; dim <= 3; ++dim)
  {
    if (!geom[dim].empty())
      have_geom = true;
    if (verbose)
      print_id_list( geom_names[dim], std::cout, geom[dim] );
  }
  
    // True if the user has specified any sets to write
  bool have_sets = have_geom;
  
    // Get geometry tags
  Tag dim_tag, id_tag;
  if (have_geom) 
  {
    result = gMB->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, id_tag );
    if (MB_SUCCESS != result) 
    {
      std::cerr << "No ID tag defined."  << std::endl;
      have_geom = false;
    }
    result = gMB->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, dim_tag );
    if (MB_SUCCESS != result) 
    {
      std::cerr << "No geometry tag defined."  << std::endl;
      have_geom = false;
    }
  }
  
    // Get geometry sets
  if ( have_geom ) 
  {
    int id_val;
    Tag tags[] = { id_tag, dim_tag };
    const void* vals[] = { &id_val, &dim };
    for (dim = 0; dim <= 3; ++dim) 
    {
      int init_count = set_list.size();
      for (std::set<int>::iterator iter = geom[dim].begin(); iter != geom[dim].end(); ++iter) 
      {
        id_val = *iter;
        range.clear();
        result = gMB->get_entities_by_type_and_tag( 0, MBENTITYSET, tags, vals, 2, range );
        if (MB_SUCCESS != result || range.empty()) 
        {
          range.clear();
          std::cerr << geom_names[dim] << " " << id_val << " not found.\n";
        }
        std::copy( range.begin(), range.end(), std::back_inserter(set_list) );
      }
      
      if (verbose)
        std::cout << "Found " << (set_list.size()-init_count) << ' '
                  << geom_names[dim] << " sets" << std::endl;
    }
  }
  
    // Get mesh groupings
  for (i = 0; i < 4; ++i)
  {
    if (verbose)
      print_id_list( mesh_tag_names[i], std::cout, mesh[i] );
    
    if (mesh[i].empty())
      continue;
    have_sets = true;
    
      // Get tag
    Tag tag;
    result = gMB->tag_get_handle( mesh_tag_names[i], 1, MB_TYPE_INTEGER, tag );
    if (MB_SUCCESS != result) 
    {
      std::cerr << "Tag not found: " << mesh_tag_names[i] << std::endl;
      continue;
    }

      // get entity sets
    int init_count = set_list.size();
    for (std::set<int>::iterator iter = mesh[i].begin(); iter != mesh[i].end(); ++iter) 
    {
      range.clear();
      const void* vals[] = { &*iter };
      result = gMB->get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, vals, 1, range );
      if (MB_SUCCESS != result || range.empty()) 
      {
        range.clear();
        std::cerr << mesh_tag_names[i] << " " << *iter << " not found.\n";
      }
      std::copy( range.begin(), range.end(), std::back_inserter(set_list) );
    }
      
    if (verbose)
      std::cout << "Found " << (set_list.size()-init_count) << ' '
                << mesh_tag_names[i] << " sets" << std::endl;
  }
  
    // Check if output is limited to certain dimensions of elements
  bool bydim = false;
  for (dim = 1; dim < 4; ++dim)
    if (dims[dim])
      bydim = true;
  
    // Check conflicting input
  if (bydim) {
    if (generate[1] && !dims[1]) {
      std::cerr << "Warning: Request to generate 1D internal entities but not export them." << std::endl;
      generate[1] = false;
    }
    if (generate[2] && !dims[2]) {
      std::cerr << "Warning: Request to generate 2D internal entities but not export them." << std::endl;
      generate[2] = false;
    }
  }
 
    // Generate any internal entities
  if (generate[1] || generate[2]) {
    EntityHandle all_mesh = 0;
    const EntityHandle* sets = &all_mesh;
    int num_sets = 1;
    if (have_sets) {
      num_sets = set_list.size();
      sets = &set_list[0];
    }
    for (i = 0; i < num_sets; ++i) {
      Range dim3, dim2, adj;
      gMB->get_entities_by_dimension( sets[i], 3, dim3, true );
      if (generate[1]) {
        gMB->get_entities_by_dimension( sets[i], 2, dim2, true );
        gMB->get_adjacencies( dim3, 1, true, adj, Interface::UNION );
        gMB->get_adjacencies( dim2, 1, true, adj, Interface::UNION );
      }
      if (generate[2]) {
        gMB->get_adjacencies( dim3, 2, true, adj, Interface::UNION );
      }
      if (sets[i])
        gMB->add_entities( sets[i], adj );
    }
  }
      
    // Delete any entities not of the dimensions to be exported
  if (bydim) {
      // Get list of dead elements
    Range dead_entities, tmp_range;
    for (dim = 1; dim <= 3; ++dim) {
      if (dims[dim])
        continue;
      gMB->get_entities_by_dimension(0, dim, tmp_range );
      dead_entities.merge( tmp_range );
    }
      // Remove dead entities from all sets, and add all 
      // empty sets to list of dead entities.
    Range empty_sets;
    remove_entities_from_sets( gMB, dead_entities, empty_sets );
    while (!empty_sets.empty()) {
      if (!set_list.empty())
        remove_from_vector( set_list, empty_sets );
      dead_entities.merge( empty_sets );
      tmp_range.clear();
      remove_entities_from_sets( gMB, empty_sets, tmp_range );
      empty_sets = subtract( tmp_range,  dead_entities );
    }
      // Destroy dead entities
    gMB->delete_entities( dead_entities );
  }
  
    // If user specified sets to write, but none were found, exit.
  if (have_sets && set_list.empty())
  {
    std::cerr << "Nothing to write." << std::endl;
#ifdef MOAB_HAVE_MPI
    MPI_Finalize();
#endif
    return ENT_NOT_FOUND;
  }
  
  if (verbose)
  {
    if (have_sets)
      std::cout << "Found " << set_list.size() 
              << " specified sets to write (total)." << std::endl;  
    else
      std::cout << "No sets specifed.  Writing entire mesh." << std::endl; 
  }  
  
    // Write the output file
  reset_times();
  if (have_sets) 
    result = gMB->write_file( out.c_str(), format, write_options.c_str(), &set_list[0], set_list.size() );
  else
    result = gMB->write_file( out.c_str(), format, write_options.c_str() );
  if (MB_SUCCESS != result)
  { 
    std::cerr << "Failed to write \"" << out << "\"." << std::endl; 
    std::cerr  << "Error code: " << gMB->get_error_string(result) << " (" << result << ")" << std::endl;
    std::string message;
    if (MB_SUCCESS == gMB->get_last_error(message) && !message.empty())
      std::cerr << "Error message: " << message << std::endl;
#ifdef MOAB_HAVE_MPI
    MPI_Finalize();
#endif
    return WRITE_ERROR;
  }
  
  if (!proc_id) std::cerr << "Wrote \"" << out << "\"" << std::endl;
  if (print_times && !proc_id) write_times( std::cout );

#ifdef MOAB_HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}
Code Example #8
File: ReadHDF5VarLen.cpp Project: obmun/moab
/*
ErrorCode ReadHDF5VarLen::read_offsets( ReadHDF5Dataset& data_set,
                                        const Range& file_ids,
                                        EntityHandle start_file_id,
                                        unsigned num_columns,
                                        const unsigned indices[],
                                        EntityHandle nudge,
                                        Range offsets_out[],
                                        std::vector<unsigned> counts_out[],
                                        Range* ranged_file_ids = 0 )
{
  const int local_index = 1;

    // sanity check
  const unsigned max_cols = ranged_file_ids ? data_set.columns() - 1 : data_set.columns()
  for (unsigned i = 0; i < num_columns; ++i) {
    assert(indices[i] >= max_cols);
    if (indices[i] >= max_cols)    
      return MB_FAILURE;
 }
  
    // Use hints to make sure insertion into ranges is O(1)
  std::vector<Range::iterator> hints;
  if (ranged_file_ids) {
    hints.resize( num_columns + 1 );
    hints.back() = ranged_file_ids->begin();
  }
  else {
    hints.resize( num_columns );
  }
  for (unsigned i = 0; i < num_columns; ++i)
    offsets_out[i].clear();
    counts_out[i].clear();
    counts_out[i].reserve( file_ids.size() );
    hints[i] = offsets_out[i].begin();
  }

    // If we only need one column from a multi-column data set,
    // then read only that column.
  if (num_columns == 1 && data_set.columns() > 1 && !ranged_file_ids) {
    data_set.set_column( indices[0] );
    indices = &local_index;
  }
  else if (ranged_file_ids && data_set.columns() > 1 && 0 == num_columns) {
    data_set.set_column( data_set.columns() - 1 );
  }
    // NOTE: do not move this above the previous block.  
    //       The previous block changes the results of data_set.columns()!
  const size_t table_columns = data_set.columns();

    // Calculate which rows we need to read from the offsets table
  Range rows;
  Range::iterator hint = rows.begin();
  Range::const_pair_iterator pair = file_ids.const_pair_begin();
    // special case if reading first entity in dataset, because
    // there is no previous end value.
  if (pair != file_ids.const_pair_end() && pair->first == start_file_id) 
    hint = rows.insert( nudge, pair->second - start_file_id + nudge );
  while (pair != file_ids.const_pair_end()) {
    hint = rows.insert( hint,
                        pair->first + nudge - 1 - start_file_id, 
                        pair->second + nudge - start_file_id );
    ++pair;
  }
    
    // set up read of offsets dataset
  hsize_t buffer_size = bufferSize / (sizeof(hssize_t) * data_set.columns());
  hssize_t* buffer = reinterpret_cast<hssize_t*>(dataBuffer);
  data_set.set_file_ids( rows, nudge, buffer_size, H5T_NATIVE_HSSIZE );
  std::vector<hssize_t> prev_end;
    // If we're reading the first row of the table, then the 
    // previous end is implicitly -1.
  if (!file_ids.empty() && file_ids.front() == start_file_id) 
    prev_end.resize(num_columns,-1);
  
    // read offset table
  size_t count, offset;
  Range::const_iterator fiter = file_ids.begin();
  while (!data_set.done()) {
    try {
      data_set.read( buffer, count );
    }
    catch (ReadHDF5Dataset::Exception e) {
      return MB_FAILURE;
    }
    if (!count) // might have been NULL read for collective IO
      continue;
    
      // If the previous end values were read in the previous iteration,
      // then they're stored in prev_end.  
    size_t offset = 0;
    if (!prev_end.empty()) {
       for (unsigned i = 0; i < num_columns; ++i) {
        counts_out[i].push_back( buffer[indices[i]] - prev_end[i] );
        hints[i] = offsets_out[i].insert( hints[i],
                                          prev_end[i] + 1 + nudge,
                                          buffer[indices[i]] + nudge );
      }
      if (ranged_file_ids && (buffer[table_columns-1] & mhdf_SET_RANGE_BIT))
        hints.back() = ranged_file_ids->insert( hints.back(), *fiter );
      ++fiter;
      offset = 1;
      prev_end.clear();
    }

    while (offset < count) {
      assert(fiter != file_ids.end());
        // whenever we get to a gap between blocks we need to 
        // advance one step because we read an extra end id 
        // preceding each block
      if (fiter == fiter.start_of_block()) {
        if (offset == count-1) 
          break;
        ++offset;
      }
      
      for (unsigned i = 0; i < num_columns; ++i) {
        size_t s = buffer[(offset-1)*table_columns+indices[i]] + 1;
        size_t e = buffer[ offset   *table_columns+indices[i]];
        counts_out.push_back( e - s + 1 );
        hints[i] = offsets_out.insert( hints[i], s, e );
      }
      if (ranged_file_ids && (buffer[offset*table_columns+table_columns-1] & mhdf_SET_RANGE_BIT))
        hints.back() = ranged_file_ids->insert( hints.back(), *fiter );
      
      ++fiter;
      ++offset;
    }
    
      // If we did not end on the boundary between two blocks,
      // then we need to save the end indices for the final entry
      // for use in the next iteration.  Similarly, if we ended
      // with extra values that were read with the express intention
      // of getting the previous end values for a block, we need to
      // save them.  This case only arises if we hit the break in
      // the above loop.
    if (fiter != fiter.start_of_block() || offset < count) {
      assert(prev_end.empty());
      if (offset == count) {
        --offset;
        assert(fiter != fiter.start_of_block());
      }
      else {
        assert(offset+1 == count);
        assert(fiter == fiter.start_of_block());
      }
      for (unsigned i = 0; i < num_columns; ++i) 
        prev_end.push_back(buffer[offset*table_columns+indices[i]]);
    }
  }
  assert(prev_end.empty());
  assert(fiter == file_ids.end());
  
  return MB_SUCCESS;
}
*/
ErrorCode ReadHDF5VarLen::read_offsets( ReadHDF5Dataset& data_set,
                                        const Range& file_ids,
                                        EntityHandle start_file_id,
                                        EntityHandle nudge,
                                        Range& offsets_out,
                                        std::vector<unsigned>& counts_out )
{
  
    // Use hints to make sure insertion into ranges is O(1)
  offsets_out.clear();
  counts_out.clear();
  counts_out.reserve( file_ids.size() );
  Range::iterator hint;

    // Calculate which rows we need to read from the offsets table
  Range rows;
  hint = rows.begin();
  Range::const_pair_iterator pair = file_ids.const_pair_begin();
    // special case if reading first entity in dataset, because
    // there is no previous end value.
  if (pair != file_ids.const_pair_end() && pair->first == start_file_id) {
    hint = rows.insert( nudge, pair->second - start_file_id + nudge );
    ++pair;
  }
  while (pair != file_ids.const_pair_end()) {
    hint = rows.insert( hint,
                        pair->first  - start_file_id + nudge - 1, 
                        pair->second - start_file_id + nudge );
    ++pair;
  }
    
    // set up read of offsets dataset
  hsize_t buffer_size = bufferSize / sizeof(hssize_t);
  hssize_t* buffer = reinterpret_cast<hssize_t*>(dataBuffer);
  data_set.set_file_ids( rows, nudge, buffer_size, H5T_NATIVE_HSSIZE );
  hssize_t prev_end;
  bool have_prev_end = false;
    // If we're reading the first row of the table, then the 
    // previous end is implicitly -1.
  if (!file_ids.empty() && file_ids.front() == start_file_id)  {
    prev_end = -1;
    have_prev_end = true;
  }
  
  dbgOut.printf( 3, "Reading %s in %lu chunks\n", data_set.get_debug_desc(), data_set.get_read_count() );
  
    // read offset table
  size_t count, offset;
  Range::const_iterator fiter = file_ids.begin();
  hint = offsets_out.begin();
  int nn = 0;
  while (!data_set.done()) {
    dbgOut.printf( 3, "Reading chunk %d of %s\n", ++nn, data_set.get_debug_desc() );
    try {
      data_set.read( buffer, count );
    }
    catch (ReadHDF5Dataset::Exception& ) {
      return MB_FAILURE;
    }
    if (!count) // might have been NULL read for collective IO
      continue;
    
      // If the previous end values were read in the previous iteration,
      // then they're stored in prev_end.  
    offset = 0;
    if (have_prev_end) {
      counts_out.push_back( buffer[0] - prev_end );
      hint = offsets_out.insert( hint,
                                 prev_end + 1 + nudge,
                                 buffer[0] + nudge );
      ++fiter;
      offset = 1;
      have_prev_end = false;
    }

    while (offset < count) {
      assert(fiter != file_ids.end());
        // whenever we get to a gap between blocks we need to 
        // advance one step because we read an extra end id 
        // preceding each block
      if (fiter == fiter.start_of_block()) {
        if (offset == count-1) 
          break;
        ++offset;
      }
      
      size_t s = buffer[offset-1] + 1;
      size_t e = buffer[offset];
      counts_out.push_back( e - s + 1 );
      hint = offsets_out.insert( hint, s + nudge, e + nudge );
      
      ++fiter;
      ++offset;
    }
    
      // If we did not end on the boundary between two blocks,
      // then we need to save the end indices for the final entry
      // for use in the next iteration.  Similarly, if we ended
      // with extra values that were read with the express intention
      // of getting the previous end values for a block, we need to
      // save them.  This case only arises if we hit the break in
      // the above loop.
    if (fiter != fiter.start_of_block() || offset < count) {
      assert(!have_prev_end);
      if (offset == count) {
        --offset;
        assert(fiter != fiter.start_of_block());
      }
      else {
        assert(offset+1 == count);
        assert(fiter == fiter.start_of_block());
      }
      have_prev_end = true;
      prev_end = buffer[offset];
    }
  }
  assert(!have_prev_end);
  assert(fiter == file_ids.end());
  
  return MB_SUCCESS;
}
Code Example #9
File: h5file_test.cpp Project: chrismullins/moab
bool compare()
{
  Range range;
  EntityHandle hex[2];
  EntityHandle dod[2];
  Tag elemtag;
  Range::iterator iter;
  
  // get tag
  if (MB_SUCCESS != iface->tag_get_handle( bitname, 2, MB_TYPE_BIT, elemtag ))
    moab_error( "tag_get_handle" );
  
  // get two hexes
  char two = '\002';
  const void* tarray[] = { &two };
  if (MB_SUCCESS != iface->
      get_entities_by_type_and_tag( 0, MBHEX, &elemtag, tarray, 1, range ))
    moab_error( "get_entities_by_type_and_tag" );
  if (range.size() != 2)
  {
    fprintf( stderr, "Expected 2 Hexes.  Got %lu\n", (unsigned long)range.size() );
    exit( 1 );
  }
  iter = range.begin();
  hex[0] = *iter;
  hex[1] = *++iter;
  
  // get two polyhedra
  range.clear();
  char one = '\001';
  const void* oarray[] = { &one };
  if (MB_SUCCESS != iface->
      get_entities_by_type_and_tag( 0, MBPOLYHEDRON, &elemtag, oarray, 1, range ))
    moab_error( "get_entities_by_type_and_tag" );
  if (range.size() != 2)
  {
    fprintf( stderr, "Expected 2 Polyhedra.  Got %lu\n", (unsigned long)range.size() );
    exit( 1 );
  }
  iter = range.begin();
  dod[0] = *iter;
  dod[1] = *++iter;
  
  // compare hexes
  std::vector<EntityHandle> conn[2];
  if (MB_SUCCESS != iface->get_connectivity( hex  , 1, conn[0] ) ||
      MB_SUCCESS != iface->get_connectivity( hex+1, 1, conn[1] ))
    moab_error( "get_connectivity" );
  if (!compare_conn( conn[0], conn[1] ))
    return false;
  
  // compare polyhedra
  
  std::vector<EntityHandle> face[2];
  conn[0].clear(); conn[1].clear();
  if (MB_SUCCESS != iface->get_connectivity( dod  , 1, conn[0], false ) ||
      MB_SUCCESS != iface->get_connectivity( dod+1, 1, conn[1], false ))
    moab_error( "get_connectivity" );
  if (conn[0].size() != 12 || conn[1].size() != 12)
  {
    fprintf(stderr, "Expected two dodecahedrons.  Got polyhedrons with "
                    "%lu and %lu faces respectively.\n", 
                    (unsigned long)conn[0].size(), (unsigned long)conn[1].size() );
    return false;
  }
  
  for (int i = 0; i < 12; ++i )
  {
    face[0].clear(); face[1].clear();
    if (MB_SUCCESS != iface->get_connectivity( &conn[0][i], 1, face[0], false) ||
        MB_SUCCESS != iface->get_connectivity( &conn[1][i], 1, face[1], false))
      moab_error( "get_connectivity" );
    if (!compare_conn( face[0], face[1] ))
      return false;
  }
  
  // compare sets
  
  if (!compare_sets( VERTEX_SET_ID, tagname ) ||
      !compare_sets( FACE_SET_ID ) ||
      !compare_sets( REGION_SET_ID ) ||
      !compare_sets( EMPTY_SET_ID ) ||
      !compare_sets( SET_SET_ID, GLOBAL_ID_TAG_NAME ))
    return false;
    
  // check tags
  if (!compare_tags( dod ))
    return false;
  
  return true;
}
Code Example #10
File: MetisPartitioner.cpp Project: obmun/moab
ErrorCode MetisPartitioner::assemble_graph(const int dimension,
                                               std::vector<double> &coords,
                                               std::vector<int> &moab_ids,
                                               std::vector<int> &adjacencies, 
                                               std::vector<int> &length,
                                               Range &elems) 
{
  length.push_back(0);
    // assemble a graph with vertices equal to elements of specified dimension, edges
    // signified by list of other elements to which an element is connected

    // get the elements of that dimension
  ErrorCode result = mbImpl->get_entities_by_dimension(0, dimension, elems);
  if (MB_SUCCESS != result || elems.empty()) return result;
  
#ifdef MOAB_HAVE_MPI
    // assign global ids
  result = mbpc->assign_global_ids(0, dimension, 0); 
#endif

    // now assemble the graph, calling MeshTopoUtil to get bridge adjacencies through d-1 dimensional
    // neighbors
  MeshTopoUtil mtu(mbImpl);
  Range adjs;
    // can use a fixed-size array 'cuz the number of lower-dimensional neighbors is limited
    // by MBCN
  int neighbors[5*MAX_SUB_ENTITIES];
  double avg_position[3];
  int moab_id;
  
    // get the global id tag handle
  Tag gid;
  result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
                                  gid, MB_TAG_DENSE|MB_TAG_CREAT);MB_CHK_ERR(result);
  
  for (Range::iterator rit = elems.begin(); rit != elems.end(); rit++) {

      // get bridge adjacencies
    adjs.clear();
    result = mtu.get_bridge_adjacencies(*rit, (dimension > 0 ? dimension-1 : 3), 
                                        dimension, adjs);MB_CHK_ERR(result);
    
      // get the graph vertex ids of those
    if (!adjs.empty()) {
      assert(adjs.size() < 5*MAX_SUB_ENTITIES);
      result = mbImpl->tag_get_data(gid, adjs, neighbors);MB_CHK_ERR(result);
    }

      // copy those into adjacencies vector
    length.push_back(length.back()+(int)adjs.size());
    std::copy(neighbors, neighbors+adjs.size(), std::back_inserter(adjacencies));

      // get average position of vertices
    result = mtu.get_average_position(*rit, avg_position);MB_CHK_ERR(result);
    
      // get the graph vertex id for this element
    result = mbImpl->tag_get_data(gid, &(*rit), 1, &moab_id);MB_CHK_ERR(result);

      // copy those into coords vector
    moab_ids.push_back(moab_id);
    std::copy(avg_position, avg_position+3, std::back_inserter(coords));
  }

  if (debug) {
    std::cout << "Length vector: " << std::endl;
    std::copy(length.begin(), length.end(), std::ostream_iterator<int>(std::cout, ", "));
    std::cout << std::endl;
    std::cout << "Adjacencies vector: " << std::endl;
    std::copy(adjacencies.begin(), adjacencies.end(), std::ostream_iterator<int>(std::cout, ", "));
    std::cout << std::endl;
    std::cout << "Moab_ids vector: " << std::endl;
    std::copy(moab_ids.begin(), moab_ids.end(), std::ostream_iterator<int>(std::cout, ", "));
    std::cout << std::endl;
    std::cout << "Coords vector: " << std::endl;
    std::copy(coords.begin(), coords.end(), std::ostream_iterator<double>(std::cout, ", "));
    std::cout << std::endl;
  }

  return MB_SUCCESS;
}
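The `length` and `adjacencies` vectors filled above describe the element graph in CSR form: `length` plays the role of an offset (xadj-style) array with one more entry than there are elements, and `adjacencies` holds the concatenated neighbor IDs. A minimal sketch, not part of MetisPartitioner, that walks that structure:

#include <iostream>
#include <vector>

// Hypothetical helper: print each element's graph neighbors from the
// CSR-style arrays produced by assemble_graph().
void print_csr_graph(const std::vector<int>& moab_ids,
                     const std::vector<int>& length,       // offsets, size = #elements + 1
                     const std::vector<int>& adjacencies)  // concatenated neighbor IDs
{
  for (size_t v = 0; v + 1 < length.size(); ++v) {
    std::cout << "element " << moab_ids[v] << " ->";
    for (int j = length[v]; j < length[v + 1]; ++j)
      std::cout << ' ' << adjacencies[j];
    std::cout << '\n';
  }
}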
Code Example #11
File: MetisPartitioner.cpp Project: obmun/moab
ErrorCode MetisPartitioner::assemble_taggedents_graph(const int dimension,
                                                          std::vector<double> &coords,
                                                          std::vector<int> &moab_ids,
                                                          std::vector<int> &adjacencies, 
                                                          std::vector<int> &length,
                                                          Range &elems,
                                                          const char *aggregating_tag)
{
  Tag partSetTag;
  ErrorCode result = mbImpl->tag_get_handle(aggregating_tag, 1, MB_TYPE_INTEGER, partSetTag);
  if (MB_SUCCESS != result) return result;
 
  Range allSubElems;
  result = mbImpl->get_entities_by_dimension(0, dimension, allSubElems);
  if (MB_SUCCESS != result || allSubElems.empty()) return result;
  int partSet;
  std::map<int, Range> aggloElems;
  for (Range::iterator rit = allSubElems.begin(); rit != allSubElems.end(); rit++) 
  {
    EntityHandle entity = *rit;
    result = mbImpl->tag_get_data(partSetTag,&entity,1,&partSet);
    if (MB_SUCCESS != result) return result;
    if (partSet >= 0)
      aggloElems[partSet].insert(entity);
  }
  // clear aggregating tag data
  TagType type;
  result = mbImpl->tag_get_type(partSetTag, type);
  if (type == MB_TAG_DENSE)
  {
    // clear tag on ents and sets
    result = mbImpl->tag_delete(partSetTag); 
    if (MB_SUCCESS != result) return result;
  }
  if (type == MB_TAG_SPARSE)
  {
    // clear tag on ents
    result = mbImpl->tag_delete_data(partSetTag, allSubElems); 
    if (MB_SUCCESS != result) return result;
    // clear tag on sets
    result = mbImpl->get_entities_by_type_and_tag(0 , MBENTITYSET, &partSetTag, 0, 1, elems);
    if (MB_SUCCESS != result) return result;
    result = mbImpl->tag_delete_data(partSetTag, elems); 
    if (MB_SUCCESS != result) return result;
    elems.clear();
  }
  result = mbImpl->tag_get_handle("PARALLEL_PARTITION", 1, MB_TYPE_INTEGER,
                                  partSetTag, MB_TAG_SPARSE|MB_TAG_CREAT); 
  if (MB_SUCCESS != result) return result;
  
  for (std::map<int, Range>::iterator mit = aggloElems.begin(); mit != aggloElems.end(); mit++) 
  {
    EntityHandle new_set;
    result = mbImpl->create_meshset(MESHSET_SET, new_set);
    if (MB_SUCCESS != result) return result;
    result = mbImpl->add_entities(new_set, mit->second);
    if (MB_SUCCESS != result) return result;
    result = mbImpl->tag_set_data (partSetTag, &new_set, 1, &mit->first);
    if (MB_SUCCESS != result) return result;
  }

  result = assemble_taggedsets_graph(dimension, coords, moab_ids, adjacencies, length, elems, &(*aggregating_tag));
  return result;
}
Code Example #12
File: coupling.cpp Project: tpeterka/cian2
//
// prints mesh statistics (for debugging)
//
void PrintMeshStats(Interface *mbint,        // moab interface
                    EntityHandle *mesh_set,  // moab mesh set
                    ParallelComm *mbpc)      // moab parallel communicator
{
    Range range;
    ErrorCode rval;
    static int mesh_num = 0;                 // counts how many time this function is called
    float loc_verts = 0.0;                   // num local verts (fractional for shared verts)
    float glo_verts;                         // global number of verts
    float loc_share_verts = 0.0;             // num local shared verts (fractional)
    float glo_share_verts;                   // global number of shared verts
    int loc_cells, glo_cells;                // local and global number of cells (no sharing)
    int rank;

    MPI_Comm_rank(mbpc->comm(), &rank);

    // get local quantities
    range.clear();
    rval = mbint->get_entities_by_dimension(*mesh_set, 0, range); ERR;

    // compute fractional contribution of shared vertices attributed to this proc
    int ps[MAX_SHARING_PROCS];               // sharing procs for a vert
    EntityHandle hs[MAX_SHARING_PROCS];      // handles of shared vert on sharing procs
    int num_ps = 0;                          // number of sharing procs, returned by moab
    unsigned char pstat;                     // pstatus, returned by moab
    for (Range::iterator verts_it = range.begin(); verts_it != range.end();
         verts_it++)
    {
        rval = mbpc->get_sharing_data(*verts_it, ps, hs, pstat, num_ps); ERR;
        if (num_ps == 0)
            loc_verts++;
        else if (num_ps == 1)                // when 2 procs, moab lists only one (the other)
        {
            loc_verts += 0.5;
            loc_share_verts += 0.5;
        }
        else
        {
            loc_verts += (1.0 / (float)num_ps);
            loc_share_verts += (1.0 / (float)num_ps);
        }
        // debug
        // if (rank == 0) {
        // fprintf(stderr, "num_ps = %d: ", num_ps);
        // for (int i = 0; i < num_ps; i++)
        // 	fprintf(stderr, "%d ", ps[i]);
        // fprintf(stderr, "\n");
        // fprintf(stderr, "loc_verts = %.3f\n", loc_verts);
        // }
    }

    range.clear();
    rval = mbint->get_entities_by_dimension(*mesh_set, 3, range); ERR;
    loc_cells = (int)range.size();

    // add totals for global quantities
    MPI_Reduce(&loc_verts, &glo_verts, 1, MPI_FLOAT, MPI_SUM, 0,
               mbpc->comm());
    MPI_Reduce(&loc_share_verts, &glo_share_verts, 1, MPI_FLOAT, MPI_SUM, 0,
               mbpc->comm());
    MPI_Reduce(&loc_cells, &glo_cells, 1, MPI_INT, MPI_SUM, 0, mbpc->comm());

    // report results
    if (rank == 0)
    {
        fprintf(stderr, "----------------- Mesh %d statistics -----------------\n",
                mesh_num);
        fprintf(stderr, "Total number of verts = %.0f of which %.0f "
                "are shared\n", glo_verts, glo_share_verts);
        fprintf(stderr, "Total number of cells = %d\n", glo_cells);
        fprintf(stderr, "------------------------------------------------------\n");
    }

    mesh_num = (mesh_num + 1) % 2;
}
Code Example #13
File: h5sets_test.cpp Project: obmun/moab
// Test to reproduce a bug reported by Brandon Smith on 2011-3-7
// and test other possible issues with the somewhat inconsistent
// meshset creation flags.  Bug was fixed in SVN revision 4548.
void test_set_flags()
{
  const char filename2[] = "test_set_flags.h5m";
  ErrorCode rval;
  Core core;
  Interface& mb = core;

  // create a bunch of vertices so we have something to put in sets
  const int nverts = 20;
  double coords[3*nverts] = {0.0};
  Range verts;
  rval = mb.create_vertices( coords, nverts, verts );
  CHECK_ERR(rval);
  
  // Assign IDs to things so that we can identify them in the
  // data we read back in.
  Tag tag;
  rval = mb.tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, tag ); CHECK_ERR(rval);
  int ids[nverts];
  for (int i = 0; i < nverts; ++i)
    ids[i] = i+1;
  rval = mb.tag_set_data( tag, verts, ids ); CHECK_ERR(rval);
  
  // define two lists of vertex ids corresponding to the
  // vertices that we are going to put into different sets
  const int set_verts1[] = { 1, 2, 3, 4, 8, 13, 14, 15 };
  const int set_verts2[] = { 3, 9, 10, 11, 12, 13, 14, 15, 16, 17 };
  const int num_verts1 = sizeof(set_verts1)/sizeof(set_verts1[0]);
  const int num_verts2 = sizeof(set_verts2)/sizeof(set_verts2[0]);
  
  // convert to handle lists
  EntityHandle set_handles1[num_verts1], set_handles2[num_verts2];
  for (int i = 0; i < num_verts1; ++i)
    set_handles1[i] = *(verts.begin() + set_verts1[i] - 1);
  for (int i = 0; i < num_verts2; ++i)
    set_handles2[i] = *(verts.begin() + set_verts2[i] - 1);
  
  // now create some sets with different flag combinations
  EntityHandle sets[6];
  rval = mb.create_meshset( 0, sets[0] );
  rval = mb.create_meshset( MESHSET_TRACK_OWNER, sets[1] );
  rval = mb.create_meshset( MESHSET_SET, sets[2] );
  rval = mb.create_meshset( MESHSET_SET|MESHSET_TRACK_OWNER, sets[3] );
  rval = mb.create_meshset( MESHSET_ORDERED, sets[4] );
  rval = mb.create_meshset( MESHSET_ORDERED|MESHSET_TRACK_OWNER, sets[5] );
  
  // assign IDs to sets so that we can identify them later
  rval = mb.tag_set_data( tag, sets, 6, ids ); CHECK_ERR(rval);
  // add entities to sets
  rval = mb.add_entities( sets[0], set_handles1, num_verts1 ); CHECK_ERR(rval);
  rval = mb.add_entities( sets[1], set_handles2, num_verts2 ); CHECK_ERR(rval);
  rval = mb.add_entities( sets[2], set_handles1, num_verts1 ); CHECK_ERR(rval);
  rval = mb.add_entities( sets[3], set_handles2, num_verts2 ); CHECK_ERR(rval);
  rval = mb.add_entities( sets[4], set_handles1, num_verts1 ); CHECK_ERR(rval);
  rval = mb.add_entities( sets[5], set_handles2, num_verts2 ); CHECK_ERR(rval);
  
  // now write the file and read it back in
  rval = mb.write_file( filename2, 0, "BUFFER_SIZE=1024;DEBUG_BINIO" ); CHECK_ERR(rval);
  mb.delete_mesh();
  rval = mb.load_file( filename2 );
  if (!keep_file)
    remove( filename2 );
  CHECK_ERR(rval);
  rval = mb.tag_get_handle( "GLOBAL_ID", 1, MB_TYPE_INTEGER, tag ); CHECK_ERR(rval);
  
  // find our sets
  Range tmp;
  for (int i = 0; i < 6; ++i) {
    int id = i+1;
    tmp.clear();
    const void* vals[] = {&id};
    rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, vals, 1, tmp ); CHECK_ERR(rval);
    CHECK_EQUAL( 1u, (unsigned)tmp.size() );
    sets[i] = tmp.front();
  }
  
  // check that sets have correct flags
  unsigned opts;
  rval = mb.get_meshset_options( sets[0], opts ); CHECK_ERR(rval);
  CHECK_EQUAL( 0u, opts );
  rval = mb.get_meshset_options( sets[1], opts ); CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)MESHSET_TRACK_OWNER, opts );
  rval = mb.get_meshset_options( sets[2], opts ); CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)MESHSET_SET, opts );
  rval = mb.get_meshset_options( sets[3], opts ); CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)(MESHSET_SET|MESHSET_TRACK_OWNER), opts );
  rval = mb.get_meshset_options( sets[4], opts ); CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)MESHSET_ORDERED, opts );
  rval = mb.get_meshset_options( sets[5], opts ); CHECK_ERR(rval);
  CHECK_EQUAL( (unsigned)(MESHSET_ORDERED|MESHSET_TRACK_OWNER), opts );
  
  // check that sets have correct contents
  int set_ids1[num_verts1], set_ids2[num_verts2];
  
  tmp.clear();
  rval = mb.get_entities_by_handle( sets[0], tmp ); CHECK_ERR(rval);
  CHECK_EQUAL( num_verts1, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids1 ); CHECK_ERR(rval);
  std::sort( set_ids1, set_ids1+num_verts1 );
  CHECK_ARRAYS_EQUAL( set_verts1, num_verts1, set_ids1, num_verts1 );
  
  tmp.clear();
  rval = mb.get_entities_by_handle( sets[1], tmp ); CHECK_ERR(rval);
  CHECK_EQUAL( num_verts2, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids2 ); CHECK_ERR(rval);
  std::sort( set_ids2, set_ids2+num_verts2 );
  CHECK_ARRAYS_EQUAL( set_verts2, num_verts2, set_ids2, num_verts2 );
  
  tmp.clear();
  rval = mb.get_entities_by_handle( sets[2], tmp ); CHECK_ERR(rval);
  CHECK_EQUAL( num_verts1, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids1 ); CHECK_ERR(rval);
  std::sort( set_ids1, set_ids1+num_verts1 );
  CHECK_ARRAYS_EQUAL( set_verts1, num_verts1, set_ids1, num_verts1 );
  
  tmp.clear();
  rval = mb.get_entities_by_handle( sets[3], tmp ); CHECK_ERR(rval);
  CHECK_EQUAL( num_verts2, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids2 ); CHECK_ERR(rval);
  std::sort( set_ids2, set_ids2+num_verts2 );
  CHECK_ARRAYS_EQUAL( set_verts2, num_verts2, set_ids2, num_verts2 );
  
  tmp.clear();
  rval = mb.get_entities_by_handle( sets[4], tmp ); CHECK_ERR(rval);
  CHECK_EQUAL( num_verts1, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids1 ); CHECK_ERR(rval);
  std::sort( set_ids1, set_ids1+num_verts1 );
  CHECK_ARRAYS_EQUAL( set_verts1, num_verts1, set_ids1, num_verts1 );
  
  tmp.clear();
  rval = mb.get_entities_by_handle( sets[5], tmp ); CHECK_ERR(rval);
  CHECK_EQUAL( num_verts2, (int)tmp.size() );
  rval = mb.tag_get_data( tag, tmp, set_ids2 ); CHECK_ERR(rval);
  std::sort( set_ids2, set_ids2+num_verts2 );
  CHECK_ARRAYS_EQUAL( set_verts2, num_verts2, set_ids2, num_verts2 );
}
Code Example #14
File: WriteVtk.cpp Project: vijaysm/MOAB
ErrorCode WriteVtk::gather_mesh(const EntityHandle* set_list,
                                int num_sets,
                                Range& nodes,
                                Range& elems)
{
  ErrorCode rval;
  int e;

  if (!set_list || !num_sets) {
    Range a;
    rval = mbImpl->get_entities_by_handle(0, a);
    if (MB_SUCCESS != rval)
      return rval;

    Range::const_iterator node_i, elem_i, set_i;
    node_i = a.lower_bound(a.begin(), a.end(), CREATE_HANDLE(   MBVERTEX, 0, e));
    elem_i = a.lower_bound(   node_i, a.end(), CREATE_HANDLE(     MBEDGE, 0, e));
    set_i  = a.lower_bound(   elem_i, a.end(), CREATE_HANDLE(MBENTITYSET, 0, e));
    nodes.merge(node_i, elem_i);
    elems.merge(elem_i, set_i);

    // Filter out unsupported element types
    EntityType et = MBEDGE;
    for (et++; et < MBENTITYSET; et++) {
      if (VtkUtil::get_vtk_type(et, CN::VerticesPerEntity(et)))
        continue;
      Range::iterator
        eit = elems.lower_bound(elems.begin(), elems.end(), CREATE_HANDLE(et, 0, e)),
        ep1it = elems.lower_bound(elems.begin(), elems.end(), CREATE_HANDLE(et + 1, 0, e));
      elems.erase(eit, ep1it);
    }
  }
  else {
    std::set<EntityHandle> visited;
    std::vector<EntityHandle> sets;
    sets.reserve(num_sets);
    std::copy(set_list, set_list + num_sets, std::back_inserter(sets));
    while (!sets.empty()) {
      // Get next set
      EntityHandle set = sets.back();
      sets.pop_back();
      // Skip sets we've already done
      if (!visited.insert(set).second)
        continue;

      Range a;
      rval = mbImpl->get_entities_by_handle(set, a);
      if (MB_SUCCESS != rval)
        return rval;

      Range::const_iterator node_i, elem_i, set_i;
      node_i = a.lower_bound(a.begin(), a.end(), CREATE_HANDLE(   MBVERTEX, 0, e));
      elem_i = a.lower_bound(   node_i, a.end(), CREATE_HANDLE(     MBEDGE, 0, e));
      set_i  = a.lower_bound(   elem_i, a.end(), CREATE_HANDLE(MBENTITYSET, 0, e));
      nodes.merge(node_i, elem_i);
      elems.merge(elem_i, set_i);
      std::copy(set_i, a.end(), std::back_inserter(sets));

      a.clear();
      rval = mbImpl->get_child_meshsets(set, a);
      std::copy(a.begin(), a.end(), std::back_inserter(sets));
    }

    for (Range::const_iterator ei = elems.begin(); ei != elems.end(); ++ei) {
      std::vector<EntityHandle> connect;
      rval = mbImpl->get_connectivity(&(*ei), 1, connect);
      if (MB_SUCCESS != rval)
        return rval;

      for (unsigned int i = 0; i < connect.size(); ++i)
        nodes.insert(connect[i]);
    }
  }

  if (nodes.empty()) {
    MB_SET_ERR(MB_ENTITY_NOT_FOUND, "Nothing to write");
  }

  return MB_SUCCESS;
}