Example #1
void communicate_field_data_verify_read( CommAll & sparse )
{
  std::ostringstream msg ;
  int error = 0 ;
  for ( unsigned p = 0 ; p < sparse.parallel_size() ; ++p ) {
    if ( sparse.recv_buffer( p ).remaining() ) {
      msg << "P" << sparse.parallel_rank()
          << " Unread data from P" << p << std::endl ;
      error = 1 ;
    }
  }
  all_reduce( sparse.parallel() , ReduceSum<1>( & error ) );
  ThrowErrorMsgIf( error, msg.str() );
}
Example #2
void communicate_field_data_verify_read( CommAll & sparse )
{
  std::ostringstream msg ;
  int flag = 0 ;
  for ( unsigned p = 0 ; p < sparse.parallel_size() ; ++p ) {
    if ( sparse.recv_buffer( p ).remaining() ) {
      msg << "P" << sparse.parallel_rank()
          << " Unread data from P" << p << std::endl ;
      flag = 1 ;
    }
  }
  all_reduce( sparse.parallel() , ReduceSum<1>( & flag ) );
  if ( flag ) {
    throw std::runtime_error( msg.str() );
  }
}
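The two variants above differ only in error reporting: the first uses the ThrowErrorMsgIf macro, the second throws std::runtime_error directly. Below is a minimal, hypothetical driver sketching the calling pattern this check closes out; the function name and the one-int-per-processor exchange are illustrative assumptions, using only the CommAll calls that appear in the examples on this page.

// Hypothetical driver (sketch): size, allocate, pack, communicate,
// unpack, then verify that every receive buffer was fully read.
void exchange_one_int_per_proc( ParallelMachine comm ,
                                const std::vector<int> & to_each_proc )
{
  const unsigned p_size = parallel_machine_size( comm );
  std::vector<unsigned> sizes( p_size , (unsigned) sizeof(int) );

  CommAll all ;
  all.allocate_buffers( comm , p_size / 4 , & sizes[0] , & sizes[0] );

  for ( unsigned p = 0 ; p < p_size ; ++p ) {
    all.send_buffer( p ).pack<int>( to_each_proc[ p ] );
  }

  all.communicate();

  for ( unsigned p = 0 ; p < p_size ; ++p ) {
    int value ;
    all.recv_buffer( p ).unpack<int>( value );
  }

  // Every byte has been consumed, so this should not throw:
  communicate_field_data_verify_read( all );
}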
Example #3
template <typename T>
inline void reserve_for_recv_buffer( const CommAll& all, const DistributedIndex::ProcType& comm_size, std::vector<T>& v)
{
  unsigned num_remote = 0;
  for (DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    num_remote += buf.remaining() / sizeof(T);
  }
  v.reserve(v.size() + num_remote);
}
Example #4
template <typename T>
inline void unpack_recv_buffer( const CommAll& all, const DistributedIndex::ProcType& comm_size, std::vector<T>& v)
{
  reserve_for_recv_buffer(all, comm_size, v);
  for (DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    while ( buf.remaining() ) {
      T kp;
      buf.unpack( kp );
      v.push_back( kp );
    }
  }
}
Example #5
template <typename T>
inline void unpack_with_proc_recv_buffer( const CommAll& all, const DistributedIndex::ProcType& comm_size, std::vector<std::pair<T,DistributedIndex::ProcType> >& v)
{
  reserve_for_recv_buffer(all, comm_size, v);
  for ( DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    std::pair<T,DistributedIndex::ProcType> kp;
    kp.second = p;
    while ( buf.remaining() ) {
      buf.unpack( kp.first );
      v.push_back( kp );
    }
  }
}
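A short usage sketch for the three helpers above. The wrapper function is hypothetical, and DistributedIndex::KeyType is assumed to be a trivially copyable key type; note that unpacking consumes the receive buffers, so only one unpack variant runs per exchange.

// Hypothetical: collect every received key, tagged with its sending
// processor, after a CommAll exchange has completed.
void collect_remote_keys(
  const CommAll & all ,
  const DistributedIndex::ProcType comm_size ,
  std::vector< std::pair< DistributedIndex::KeyType ,
                          DistributedIndex::ProcType > > & tagged )
{
  // reserve_for_recv_buffer is called internally by the unpack helper.
  unpack_with_proc_recv_buffer( all , comm_size , tagged );
}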
Example #6
template <typename Vector>
inline void unpack_recv_buffer( const CommAll& all, const DistributedIndex::ProcType& comm_size, Vector & v)
{
  typedef typename Vector::value_type value_type;
  reserve_for_recv_buffer(all, comm_size, v);
  for (DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    while ( buf.remaining() ) {
      value_type kp;
      buf.unpack( kp );
      v.push_back( kp );
    }
  }
}
Example #7
template <typename VectorProcPair>
inline void unpack_with_proc_recv_buffer( const CommAll& all, const DistributedIndex::ProcType& comm_size, VectorProcPair & v)
{
  typedef typename VectorProcPair::value_type pair_type;
  reserve_for_recv_buffer(all, comm_size, v);
  for ( DistributedIndex::ProcType p = 0 ; p < comm_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer( p );
    pair_type kp;
    kp.second = p;
    while ( buf.remaining() ) {
      buf.unpack( kp.first );
      v.push_back( kp );
    }
  }
}
Example #8
void communicate_field_data(
  const BulkData & mesh ,
  const unsigned field_count ,
  const FieldBase * fields[] ,
  CommAll & sparse )
{
  const std::vector<Entity*> & entity_comm = mesh.entity_comm();

  const unsigned parallel_size = mesh.parallel_size();

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> msg_size( parallel_size , zero );

  size_t j = 0;

  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        for ( PairIterEntityComm
              ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          msg_size[ ec->proc ] += size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:

  {
    // Shared-entity communication is symmetric, so the same sizing
    // array is passed for both sends and receives:
    const unsigned * const s_size = & msg_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, s_size);
  }

  // Pack for send:

  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        unsigned char * ptr =
          reinterpret_cast<unsigned char *>(field_data( f , e ));
        for ( PairIterEntityComm
              ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          CommBuffer & b = sparse.send_buffer( ec->proc );
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();
}
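Example #8 returns right after communicate(), leaving the receive buffers full for the caller to drain. A hypothetical caller (sketch only; the unpack loop is elided) pairs it with the verify helper from Example #1:

void exchange_shared_field_data( const BulkData & mesh ,
                                 const unsigned field_count ,
                                 const FieldBase * fields[] )
{
  CommAll sparse ;
  communicate_field_data( mesh , field_count , fields , sparse );

  // ... unpack each sparse.recv_buffer(p) into the non-owned copies ...

  // Confirm that every received byte was consumed:
  communicate_field_data_verify_read( sparse );
}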
Example #9
void communicate_field_data(
  const Ghosting                        & ghosts ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }

  const BulkData & mesh = BulkData::get(ghosts);
  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
        std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  for ( std::vector<Entity*>::const_iterator
        i =  mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    const bool owned = e.owner_rank() == parallel_rank ;

    unsigned e_size = 0 ;
    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;
      e_size += field_data_size( f , e );
    }

    for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {
      if ( ghosts.ordinal() == ec->ghost_id ) {
        if ( owned ) {
          send_size[ ec->proc ] += e_size ;
        }
        else {
          recv_size[ ec->proc ] += e_size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, r_size);
  }

  // Send packing:

  for ( std::vector<Entity*>::const_iterator
        i =  mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() == parallel_rank ) {

      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));

          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {

            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.send_buffer( ec->proc );
              b.pack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv:

  for ( std::vector<Entity*>::const_iterator
        i =  mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() != parallel_rank ) {

      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));

          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {

            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.recv_buffer( ec->proc );
              b.unpack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }
}
Example #10
void communicate_field_data(
  ParallelMachine machine,
  const std::vector<EntityProc> & domain ,
  const std::vector<EntityProc> & range ,
  const std::vector<const FieldBase *> & fields)
{
  if ( fields.empty() ) { return; }

  const unsigned parallel_size = parallel_machine_size( machine );
  const unsigned parallel_rank = parallel_machine_rank( machine );
  // Passing the same vector for domain and range selects the symmetric,
  // ownership-filtered path; distinct vectors give an asymmetric exchange.
  const bool     asymmetric    = & domain != & range ;

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
        std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  std::vector<EntityProc>::const_iterator i ;

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      send_size[ p ] += e_size ;
    }
  }

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || p == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      recv_size[ p ] += e_size ;
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    sparse.allocate_buffers( machine, parallel_size / 4 , s_size, r_size);
  }

  // Pack for send:

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      CommBuffer & b = sparse.send_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv:

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity       & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || p == e.owner_rank() ) {
      CommBuffer & b = sparse.recv_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.unpack<unsigned char>( ptr , size );
        }
      }
    }
  }
}
Example #11
void test_comm_bounds( ParallelMachine comm , std::istream & )
{
  static const char method[] = "phdmesh::test_comm_bounds" ;

  const unsigned u_zero = 0 ;

  const unsigned p_size = parallel_machine_size( comm );
  const unsigned p_rank = parallel_machine_rank( comm );

  std::vector<unsigned> send_size( p_size , u_zero );
  std::vector<unsigned> recv_size( p_size , u_zero );

  const unsigned * const ptr_send_size = & send_size[0] ;
  const unsigned * const ptr_recv_size = & recv_size[0] ;

  if ( 1 < p_size ) {

    // Send one point-to-point message, doubling its size until it breaks:
    // send one message to the next processor,
    // receive one message from the previous processor.

    const unsigned p_send = ( p_rank + 1 )          % p_size ;
    const unsigned p_recv = ( p_rank + p_size - 1 ) % p_size ;

    int error = 0 ;

    const unsigned msg_max = 0x8000000 ; // 0x8000000 bytes = 128 MB maximum test size
    unsigned msg_size = 0x1000 ; // start at 4 KB

    while ( ! error && msg_size <= msg_max ) {

      send_size[ p_send ] = msg_size ;
      recv_size[ p_recv ] = msg_size ;

      try {
        CommAll all ;

        all.allocate_buffers( comm , p_size , ptr_send_size , ptr_recv_size );

        // Fill send buffer with predefined values

        CommBuffer & send_buf = all.send_buffer( p_send );

        const unsigned n = msg_size / sizeof(unsigned);
        for ( unsigned i = 0 ; i < n ; ++i ) {
          send_buf.pack<unsigned>( msg_size );
        }

        all.communicate();

        // unpack the receive buffer, verify contents

        CommBuffer & recv_buf = all.recv_buffer( p_recv );

        for ( unsigned i = 0 ; i < n ; ++i ) {
          unsigned tmp ;
          recv_buf.unpack<unsigned>( tmp );
          if ( tmp != msg_size ) { error = 1 ; }
        }
      }
      catch ( const std::exception & x ) {
        if ( p_rank == 0 ) {
          std::cout << method << " : " << x.what() << std::endl ;
        }
        error = 2 ;
      }
      catch ( ... ) {
        error = 2 ;
      }

      all_reduce( comm , Max<1>( & error ) );

      if ( ! error ) {
        if ( p_rank == 0 ) {
          std::cout << "OK Message size = "
                    << msg_size << std::endl ;
        }
        msg_size <<= 1 ;
      }
    }

    if ( p_rank == 0 ) {
      if ( error == 1 ) {
        std::cout << method << " BAD message at "
                  << msg_size << " bytes" << std::endl ;
      }
    }
  }
}
Example #12
//----------------------------------------------------------------------
void communicate_field_data(
    const Ghosting                        & ghosts ,
    const std::vector< const FieldBase *> & fields )
{
    if ( fields.empty() ) {
        return;
    }

    const BulkData & mesh = ghosts.mesh();
    const int parallel_size = mesh.parallel_size();
    const int parallel_rank = mesh.parallel_rank();
    const unsigned ghost_id = ghosts.ordinal();

    const std::vector<const FieldBase *>::const_iterator fe = fields.end();
    const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
    std::vector<const FieldBase *>::const_iterator fi ;

    // Sizing for send and receive

    const unsigned zero = 0 ;
    std::vector<unsigned> send_size( parallel_size , zero );
    std::vector<unsigned> recv_size( parallel_size , zero );

    for ( EntityCommListInfoVector::const_iterator
            i =  mesh.internal_comm_list().begin() , iend = mesh.internal_comm_list().end(); i != iend ; ++i ) {
        Entity e = i->entity;
        const MeshIndex meshIdx = mesh.mesh_index(e);
        const unsigned bucketId = meshIdx.bucket->bucket_id();

        const bool owned = i->owner == parallel_rank ;

        unsigned e_size = 0 ;
        for ( fi = fb ; fi != fe ; ++fi ) {
            const FieldBase & f = **fi ;

            if(is_matching_rank(f, *meshIdx.bucket)) {
                e_size += field_bytes_per_entity( f , bucketId );
            }
        }

        if (e_size == 0) {
            continue;
        }

        const EntityCommInfoVector& infovec = i->entity_comm->comm_map;
        PairIterEntityComm ec(infovec.begin(), infovec.end());
        if ( owned ) {
            for ( ; ! ec.empty() ; ++ec ) {
                if (ec->ghost_id == ghost_id) {
                    send_size[ ec->proc ] += e_size ;
                }
            }
        }
        else {
            for ( ; ! ec.empty() ; ++ec ) {
                if (ec->ghost_id == ghost_id) {
                    recv_size[ i->owner ] += e_size ;
                    break; // only one message is received, from the single owning processor
                }
            }
        }
    }

    // Allocate send and receive buffers:

    CommAll sparse ;

    {
        const unsigned * const snd_size = send_size.data() ;
        const unsigned * const rcv_size = recv_size.data() ;
        sparse.allocate_buffers( mesh.parallel(), snd_size, rcv_size);
    }

    // Two phases: phase 0 packs and sends, phase 1 receives and unpacks:

    for (int phase = 0; phase < 2; ++phase) {

        for ( EntityCommListInfoVector::const_iterator i =  mesh.internal_comm_list().begin(), iend = mesh.internal_comm_list().end() ; i != iend ; ++i ) {
            if ( (i->owner == parallel_rank && phase == 0) || (i->owner != parallel_rank && phase == 1) ) {
                Entity e = i->entity;
                const MeshIndex meshIdx = mesh.mesh_index(e);
                const unsigned bucketId = meshIdx.bucket->bucket_id();

                for ( fi = fb ; fi != fe ; ++fi ) {
                    const FieldBase & f = **fi ;

                    if(!is_matching_rank(f, e)) continue;

                    const unsigned size = field_bytes_per_entity( f , e );

                    if ( size ) {
                        unsigned char * ptr =
                            reinterpret_cast<unsigned char *>(stk::mesh::field_data( f , bucketId, meshIdx.bucket_ordinal, size ));

                        const EntityCommInfoVector& infovec = i->entity_comm->comm_map;
                        PairIterEntityComm ec(infovec.begin(), infovec.end());
                        if (phase == 0) { // send
                            for ( ; !ec.empty() ; ++ec ) {
                                if (ec->ghost_id == ghost_id) {
                                    CommBuffer & b = sparse.send_buffer( ec->proc );
                                    b.pack<unsigned char>( ptr , size );
                                }
                            }
                        }
                        else { //recv
                            for ( ; !ec.empty(); ++ec ) {
                                if (ec->ghost_id == ghost_id) {
                                    CommBuffer & b = sparse.recv_buffer( i->owner );
                                    b.unpack<unsigned char>( ptr , size );
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
        if (phase == 0) {
            sparse.communicate();
        }
    }
}
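Example #12 folds packing and unpacking into one traversal driven by a phase flag: phase 0 packs owned entities into send buffers and then communicates, phase 1 unpacks ghosted entities from each owner's receive buffer. A self-contained sketch of just that control flow, where Item, Exchange, pack_to, and unpack_from are hypothetical stand-ins:

#include <cstddef>
#include <vector>

struct Item { bool owned ; int proc ; };

// Generic two-phase driver: one communication between the pack pass
// and the unpack pass, exactly as in Example #12.
template <class Exchange>
void two_phase( Exchange & sparse , const std::vector<Item> & items )
{
  for ( int phase = 0 ; phase < 2 ; ++phase ) {
    for ( std::size_t i = 0 ; i < items.size() ; ++i ) {
      if ( items[i].owned && phase == 0 ) {
        sparse.pack_to( items[i].proc );      // owned: fill send buffers
      }
      else if ( ! items[i].owned && phase == 1 ) {
        sparse.unpack_from( items[i].proc );  // ghosted: drain receive buffers
      }
    }
    if ( phase == 0 ) { sparse.communicate(); }
  }
}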
Example #13
inline void parallel_sum_including_ghosts(
  const BulkData                        & mesh ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }

  const int parallel_size = mesh.parallel_size();
  const int parallel_rank = mesh.parallel_rank();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
        std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  const EntityCommListInfoVector& comm_info_vec = mesh.internal_comm_list();
  size_t comm_info_vec_size = comm_info_vec.size();
  for ( fi = fb ; fi != fe ; ++fi ) {
    const FieldBase & f = **fi ;

    for (size_t i=0; i<comm_info_vec_size; ++i) {
      ThrowAssertMsg(mesh.is_valid(comm_info_vec[i].entity),
                     "parallel_sum_including_ghosts found invalid entity");
      const Bucket* bucket = comm_info_vec[i].bucket;

      unsigned e_size = 0 ;
      if(is_matching_rank(f, *bucket)) {
        const unsigned bucketId = bucket->bucket_id();
        e_size += field_bytes_per_entity( f , bucketId );
      }

      if (e_size == 0) {
        continue;
      }

      const bool owned = comm_info_vec[i].owner == parallel_rank ;

      if ( !owned ) {
         send_size[ comm_info_vec[i].owner ] += e_size ;
      }
      else {
          const EntityCommInfoVector& infovec = comm_info_vec[i].entity_comm->comm_map;
          size_t info_vec_size = infovec.size();
          for (size_t j=0; j<info_vec_size; ++j ) {
              recv_size[ infovec[j].proc ] += e_size ;
          }
      }
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const snd_size = & send_size[0] ;
    const unsigned * const rcv_size = & recv_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), snd_size, rcv_size);
  }

  // Two phases: phase 0 packs and sends, phase 1 receives and unpacks:

  for (int phase = 0; phase < 2; ++phase) {

    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;

      for (size_t i=0; i<comm_info_vec_size; ++i) {
        const bool owned = comm_info_vec[i].owner == parallel_rank;
        if ( (!owned && phase == 0) || (owned && phase == 1) )
        {
            const Bucket* bucket = comm_info_vec[i].bucket;

            if(!is_matching_rank(f, *bucket)) continue;

            const unsigned bucketId = bucket->bucket_id();
            const size_t bucket_ordinal = comm_info_vec[i].bucket_ordinal;
            const unsigned scalars_per_entity = field_scalars_per_entity(f, bucketId);

            if ( scalars_per_entity > 0 ) {
              int owner = comm_info_vec[i].owner;

              if (f.data_traits().is_floating_point && f.data_traits().size_of == 8)
              {
                  send_or_recv_field_data_for_assembly<double>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
              }
              else if (f.data_traits().is_floating_point && f.data_traits().size_of == 4)
              {
                  send_or_recv_field_data_for_assembly<float>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
              }
              else if (f.data_traits().is_integral && f.data_traits().size_of == 4 && f.data_traits().is_unsigned)
              {
                  send_or_recv_field_data_for_assembly<unsigned>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
              }
              else if (f.data_traits().is_integral && f.data_traits().size_of == 4 && f.data_traits().is_signed)
              {
                  send_or_recv_field_data_for_assembly<int>(sparse, phase, f, owner, comm_info_vec[i].entity_comm->comm_map, scalars_per_entity, bucketId, bucket_ordinal);
              }
              else
              {
                  ThrowRequireMsg(false,"Unsupported field type in parallel_sum_including_ghosts");
              }
            }
        }
      }
    }

    if (phase == 0) { sparse.communicate(); }
  }

  copy_from_owned(mesh, fields);
}
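The typed worker send_or_recv_field_data_for_assembly<T> is not shown in this listing; only its trait-driven selection is. Below, the same dispatch pattern is isolated in a self-contained sketch, with a hypothetical Traits struct and a trivial typed worker standing in for the real ones:

// Hypothetical stand-in for FieldBase::data_traits():
struct Traits {
  bool is_floating_point ;
  bool is_integral ;
  bool is_unsigned ;
  bool is_signed ;
  unsigned size_of ; // bytes per scalar
};

template <typename T> void assemble_as() { /* operate on T-typed scalars */ }

inline void dispatch_on_traits( const Traits & t )
{
  if      ( t.is_floating_point && t.size_of == 8 )            { assemble_as<double>(); }
  else if ( t.is_floating_point && t.size_of == 4 )            { assemble_as<float>(); }
  else if ( t.is_integral && t.size_of == 4 && t.is_unsigned ) { assemble_as<unsigned>(); }
  else if ( t.is_integral && t.size_of == 4 && t.is_signed )   { assemble_as<int>(); }
  // Any other combination is unsupported, mirroring ThrowRequireMsg above.
}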