Example 1
void GearsFixture::communicate_model_fields()
{
  // Copy the field data to the aura nodes

  std::vector< const FieldBase *> fields;

  fields.push_back(& cartesian_coord_field);
  fields.push_back(& translation_field);
  fields.push_back(& cylindrical_coord_field);
  fields.push_back(& displacement_field.field_of_state(stk::mesh::StateNew));
  fields.push_back(& displacement_field.field_of_state(stk::mesh::StateOld));

  // Parallel collective call:
  communicate_field_data(bulk_data.shared_aura(), fields);
}
Example 2
/** Copy data for the given fields, from owned entities to shared-but-not-owned entities.
 * I.e., shared-but-not-owned entities get an update of the field-data from the owned entity.
*/
inline
void copy_owned_to_shared( const BulkData& mesh,
                           const std::vector< const FieldBase *> & fields )
{
  communicate_field_data(*mesh.ghostings()[0], fields);
}
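
A call site for copy_owned_to_shared builds its field list the same way Example 1 does. The sketch below is not taken from the STK sources: the helper name sync_two_fields and its field arguments are hypothetical, and it only assumes the stk::mesh types and the free function shown above.

#include <vector>
// Assumes the stk_mesh headers declaring BulkData, FieldBase, and
// copy_owned_to_shared are included.

// Hypothetical helper (not from the STK sources): collect the fields whose
// owned values should overwrite the shared-but-not-owned copies, then make
// the collective call.
void sync_two_fields( const stk::mesh::BulkData  & mesh ,
                      const stk::mesh::FieldBase & field_a ,
                      const stk::mesh::FieldBase & field_b )
{
  std::vector< const stk::mesh::FieldBase * > fields;
  fields.push_back( & field_a );
  fields.push_back( & field_b );

  // Parallel collective call: every rank must participate.
  copy_owned_to_shared( mesh , fields );
}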
Example 3
void comm_mesh_rebalance( BulkData & M ,
                          const CoordinateField & node_coord_field ,
                          const WeightField  * const elem_weight_field ,
                          std::vector<OctTreeKey> & cut_keys )
{
  const MetaData & mesh_meta_data  = M.mesh_meta_data();
  Part * const uses_part = & mesh_meta_data.locally_used_part();
  Part * const owns_part = & mesh_meta_data.locally_owned_part();

  const unsigned p_size = M.parallel_size();
  const unsigned p_rank = M.parallel_rank();

  //--------------------------------------------------------------------
  // The node_coord_field must be up to date on all processors
  // so that the element oct tree keys are parallel consistent.
  // It is assumed that the shared node_coord_field values are
  // already consistent.
  {
    const FieldBase * const ptr = & node_coord_field ;
    std::vector< const FieldBase *> tmp ;
    tmp.push_back( ptr );
    const std::vector<EntityProc> & aura_domain = M.ghost_source();
    const std::vector<EntityProc> & aura_range  = M.ghost_destination();
    communicate_field_data( M , aura_domain , aura_range , tmp , false );
  }
  //--------------------------------------------------------------------
  // Generate global oct-tree keys for local element centroids
  // and cuts for the global element centroids.

  double bounds[4] ;

  global_coordinate_bounds( M , node_coord_field , bounds );

  cut_keys.assign( p_size , OctTreeKey() );

  OctTreeKey * const cut_begin = & cut_keys[0] ;
  OctTreeKey * const cut_first = cut_begin + 1 ;
  OctTreeKey * const cut_end   = cut_begin + p_size ;

  global_element_cuts( M , bounds , node_coord_field ,
                                    elem_weight_field , cut_begin );

  //--------------------------------------------------------------------
  // Map *all* elements, including the aura elements,
  // to their load-balanced processors.
  // This requires that the node coordinates on the aura
  // elements be up to date.

  {
    std::vector< const FieldBase * > tmp ;
    const FieldBase * const tmp_coord = & node_coord_field ;
    tmp.push_back( tmp_coord );

    const std::vector<EntityProc> & d = M.ghost_source();
    const std::vector<EntityProc> & r = M.ghost_destination();

    communicate_field_data( M , d , r , tmp , false );
  }

  {
    const EntitySet & elem_set = M.entities( Element );
    const EntitySet::iterator i_end = elem_set.end();
          EntitySet::iterator i     = elem_set.begin();
    while ( i != i_end ) {
      Entity & elem = *i ; ++i ;

      const OctTreeKey k = elem_key( bounds , node_coord_field , elem );

      const unsigned p = std::upper_bound(cut_first, cut_end, k) - cut_first ;

      M.change_entity_owner( elem , p );
    }
  }
  //--------------------------------------------------------------------
  // Fill 'rebal' with the rebalancing processor for every 'uses' (locally used) entity.

  std::vector<EntityProc> rebal ;

  rebal_elem_entities( M , Node , rebal );
  rebal_elem_entities( M , Edge , rebal );
  rebal_elem_entities( M , Face , rebal );

  {
    const Part & part_uses = * uses_part ;

    const KernelSet & elem_kernels = M.kernels( Element );
    const KernelSet::const_iterator i_end = elem_kernels.end();
          KernelSet::const_iterator i     = elem_kernels.begin();

    while ( i != i_end ) {
      const Kernel & kernel = *i ; ++i ;

      if ( kernel.has_superset( part_uses ) ) {

        const Kernel::iterator j_end = kernel.end();
              Kernel::iterator j     = kernel.begin();

        while ( j != j_end ) {
          Entity * const entity = *j ; ++j ;
          const unsigned p = entity->owner_rank();
          EntityProc tmp( entity , p );
          rebal.push_back( tmp );
        }
      }
    }
  }

  // The 'other' entities (Particle, Constraint) rebalance based upon the
  // entities that they use.  This may introduce additional sharing,
  // so 'rebal' is both input and updated output here.

  rebal_other_entities( M , Particle , rebal );
  rebal_other_entities( M , Constraint , rebal );

  // 'rebal' now contains the rebalancing (entity,processor) pairs
  // for every non-aura entity.  Can now delete the aura entities.

  remove_aura( M );

  // Copy entities to new processors according to 'rebal'.
  // Only send the owned entities.
  // Include all processors associated with the entity in 'rebal'.
  // Unpack all nodes, then all edges, then all faces, then all elements, 
  // from each processor.
  // The owner of a shared entity is the max-rank processor.
  // Add received entities to shared if more than one processor.

  {
    const RebalanceComm manager ;
    std::vector<EntityProc> recv_rebal ;

    communicate_entities( manager , M , M , rebal , recv_rebal , false );

    // Destroy the entities that are not retained; they have already been packed.
    // Remove the corresponding entries from 'rebal'.

    destroy_not_retained( M , rebal );

    rebal.insert( rebal.end() , recv_rebal.begin() , recv_rebal.end() );

    sort_unique( rebal );
  }

  // The 'rebal' should contain a reference to every non-aura entity
  // on the local processor.  These references include every
  // processor on which the entity now resides, including the
  // local processor.

  { // Set parallel ownership and sharing parts.

    std::vector<EntityProc>::iterator ish ;

    for ( ish = rebal.begin() ; ish != rebal.end() ; ) {
      Entity & e = * ish->first ;

      for ( ; ish != rebal.end() && ish->first == & e ; ++ish );

      const bool is_owned = p_rank == e.owner_rank() ;

      // Change ownership.

      std::vector<Part*> add_parts ;
      std::vector<Part*> remove_parts ;

      if ( is_owned ) { add_parts.push_back( owns_part ); }
      else            { remove_parts.push_back( owns_part ); }

      M.change_entity_parts( e , add_parts , remove_parts );
    }

    // Remove references to the local processor;
    // the remaining entries define the sharing.

    for ( ish = rebal.end() ; ish != rebal.begin() ; ) {
      --ish ;
      if ( p_rank == ish->second ) { ish = rebal.erase( ish ); }
    }

    M.set_shares( rebal );
  }

  // Establish new aura

  comm_mesh_regenerate_aura( M );
}
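
The processor assignment in Example 3 hinges on the std::upper_bound lookup over the sorted cut keys: processor p is assigned every key in the half-open range [cut_keys[p], cut_keys[p+1]). The stand-alone sketch below reproduces only that lookup, with unsigned integers standing in for OctTreeKey; it is an illustration, not part of the STK sources.

#include <algorithm>
#include <cassert>
#include <vector>

// Stand-alone illustration of the cut-key lookup used above, with unsigned
// integers standing in for OctTreeKey.  cuts[p] is the smallest key owned by
// processor p, so cuts must be sorted and cuts[0] is the global minimum.
unsigned owning_processor( const std::vector<unsigned> & cuts , unsigned key )
{
  const unsigned * const cut_begin = & cuts[0] ;
  const unsigned * const cut_first = cut_begin + 1 ;        // skip processor 0's cut
  const unsigned * const cut_end   = cut_begin + cuts.size();

  // The number of cuts in [cut_first, cut_end) that are <= key
  // equals the rank of the owning processor.
  return std::upper_bound( cut_first , cut_end , key ) - cut_first ;
}

int main()
{
  const std::vector<unsigned> cuts = { 0 , 100 , 200 , 300 };  // four "processors"

  assert( owning_processor( cuts ,  50 ) == 0 );
  assert( owning_processor( cuts , 100 ) == 1 );
  assert( owning_processor( cuts , 250 ) == 2 );
  assert( owning_processor( cuts , 999 ) == 3 );

  return 0;
}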
Example 4
void FieldGameofLife::communicate_data()
{
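    // Parallel collective call: update m_lifeField on shared and ghosted
    // entities from the owning processors (cf. the doc comment in Example 2).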
    communicate_field_data(m_bulkData, {&m_lifeField});
}