Example #1
unsigned int Ioss::ParallelUtils::global_minmax(unsigned int local_minmax, Ioss::ParallelUtils::MinMax which) const
{
  unsigned int minmax = local_minmax;

#ifdef HAVE_MPI
  if (parallel_size() > 1) {
    if (Ioss::SerializeIO::isEnabled() && Ioss::SerializeIO::inBarrier()) {
      std::ostringstream errmsg;
      errmsg << "Attempting mpi while in barrier owned by " << Ioss::SerializeIO::getOwner();
      IOSS_ERROR(errmsg);
    }
    unsigned int inbuf[1], outbuf[1]; // single-element buffers for the reduction
    inbuf[0] = local_minmax;

    MPI_Op oper;
    if (which == DO_MAX)
      oper = MPI_MAX;
    else
      oper = MPI_MIN;

    const int success = MPI_Allreduce((void*)&inbuf[0], &outbuf[0], 1,
                                      MPI_UNSIGNED, oper, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::global_minmax - MPI_Allreduce failed";
      IOSS_ERROR(errmsg);
    }
    minmax = outbuf[0];
  }
#endif
  return minmax;
}
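
The wrapper reduces one scalar across all ranks with MPI_Allreduce, picking MPI_MAX or MPI_MIN from the 'which' flag, and is a no-op on a single rank. A minimal standalone sketch of the same pattern outside of Ioss (the helper name global_max and the driver are illustrative, not library code):

#include <mpi.h>
#include <cstdio>

// Illustrative helper: reduce one unsigned int to the global maximum.
unsigned int global_max(unsigned int local, MPI_Comm comm)
{
  unsigned int result = local;
  // Every rank contributes 'local'; every rank receives the same maximum.
  MPI_Allreduce(&local, &result, 1, MPI_UNSIGNED, MPI_MAX, comm);
  return result;
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  unsigned int mine = 10u + static_cast<unsigned int>(rank);
  std::printf("rank %d: local %u, global max %u\n", rank, mine,
              global_max(mine, MPI_COMM_WORLD));
  MPI_Finalize();
  return 0;
}

Built with mpicxx and run under mpiexec, every rank prints the same global maximum.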
Example #2
void Ioss::ParallelUtils::global_array_minmax(double *local_minmax, size_t count, Ioss::ParallelUtils::MinMax which) const
{
#ifdef HAVE_MPI
  if (parallel_size() > 1 && count > 0) {
    if (Ioss::SerializeIO::isEnabled() && Ioss::SerializeIO::inBarrier()) {
      std::ostringstream errmsg;
      errmsg << "Attempting mpi while in barrier owned by " << Ioss::SerializeIO::getOwner();
      IOSS_ERROR(errmsg);
    }

    std::vector<double> maxout(count);

    MPI_Op oper;
    if (which == DO_MAX)
      oper = MPI_MAX;
    else
      oper = MPI_MIN;

    const int success = MPI_Allreduce((void*)&local_minmax[0], &maxout[0],
                                      static_cast<int>(count),
                                      MPI_DOUBLE, oper, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::global_array_minmax - MPI_Allreduce failed";
      IOSS_ERROR(errmsg);
    }
    // Now copy back into passed in array...
    for (size_t i=0; i < count; i++) {
      local_minmax[i] = maxout[i];
    }
  }
#endif
}
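
Since the result is copied straight back into local_minmax, the scratch vector and copy-back loop can be avoided with MPI_IN_PLACE, which tells MPI to use one buffer for both input and output. A sketch of that variant (the function name is illustrative):

#include <mpi.h>
#include <cstddef>

// Illustrative: element-wise global minimum of an array, computed in place.
void global_array_min(double *values, size_t count, MPI_Comm comm)
{
  // MPI_IN_PLACE: 'values' serves as both send and receive buffer,
  // so no scratch vector or copy-back loop is needed.
  MPI_Allreduce(MPI_IN_PLACE, values, static_cast<int>(count),
                MPI_DOUBLE, MPI_MIN, comm);
}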
Example #3
void Ioss::ParallelUtils::global_count(const Int64Vector &local_counts, Int64Vector &global_counts) const
{
  // Vector 'local_counts' contains the number of objects
  // local to this processor.  On exit, 'global_counts'
  // contains the total number of objects over all processors.
  // Assumes that the ordering is the same on all processors.
  global_counts.resize(local_counts.size());
#ifdef HAVE_MPI
  if (local_counts.size() > 0 && parallel_size() > 1) {
    if (Ioss::SerializeIO::isEnabled() && Ioss::SerializeIO::inBarrier()) {
      std::ostringstream errmsg;
      errmsg << "Attempting mpi while in barrier owned by " << Ioss::SerializeIO::getOwner();
      IOSS_ERROR(errmsg);
    }
    const int success = MPI_Allreduce((void*)&local_counts[0], &global_counts[0],
                                      static_cast<int>(local_counts.size()),
                                      MPI_LONG_LONG_INT, MPI_SUM, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::global_count - MPI_Allreduce failed";
      IOSS_ERROR(errmsg);
    }
  } else {
    // Serial run, just copy local to global...
    std::copy(local_counts.begin(), local_counts.end(), global_counts.begin());
  }
#else
  std::copy(local_counts.begin(), local_counts.end(), global_counts.begin());
#endif
}
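
The same MPI_SUM reduction over 64-bit integers applies whenever each rank holds per-category tallies in an agreed-upon order. A self-contained sketch mirroring global_count (names are illustrative; modern MPI accepts the const send buffer):

#include <mpi.h>
#include <cstdint>
#include <vector>

// Illustrative: element-wise sum of per-rank counts.
std::vector<int64_t> sum_counts(const std::vector<int64_t> &local, MPI_Comm comm)
{
  std::vector<int64_t> global(local.size(), 0);
  // Entry i of 'local' must mean the same category on every rank.
  MPI_Allreduce(local.data(), global.data(),
                static_cast<int>(local.size()),
                MPI_LONG_LONG_INT, MPI_SUM, comm);
  return global;
}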
Example #4
void Ioss::ParallelUtils::attribute_reduction( const int length , char buffer[]) const
{
#ifdef HAVE_MPI
  if ( 1 < parallel_size() ) {
    assert( sizeof(char) == 1 );

    // Use a vector so the scratch buffer is released even if IOSS_ERROR throws.
    std::vector<char> recv_buf( length , 0 );

    const int success =
      MPI_Allreduce( buffer , &recv_buf[0] , length , MPI_BYTE , MPI_BOR , communicator_);

    if ( MPI_SUCCESS != success ) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::attribute_reduction - MPI_Allreduce failed";
      IOSS_ERROR(errmsg);
    }

    std::memcpy( buffer , &recv_buf[0] , length );
  }
#endif
}
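
MPI_BOR over MPI_BYTE computes a bitwise OR of every rank's buffer, so a bit is set in the result exactly when some rank set it locally. That makes this reduction a cheap way to union bitmask-style attribute flags. A small sketch of the idea (the flag values are invented for illustration):

#include <mpi.h>
#include <stdint.h>

// Illustrative flag bits; any rank may set any combination.
enum { SAW_WARNING = 1, SAW_ERROR = 2 };

// Returns the union of 'flags' over all ranks.
uint8_t global_flags(uint8_t flags, MPI_Comm comm)
{
  uint8_t merged = 0;
  // Bitwise OR across ranks: a bit survives if any rank set it.
  MPI_Allreduce(&flags, &merged, 1, MPI_BYTE, MPI_BOR, comm);
  return merged;
}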
Example #5
bool BulkData::modification_begin()
{
  Trace_("stk_classic::mesh::BulkData::modification_begin");

  parallel_machine_barrier( m_parallel_machine );

  ThrowRequireMsg( m_mesh_finalized == false, "Unable to modify, BulkData has been finalized.");

  if ( m_sync_state == MODIFIABLE && m_mesh_finalized == false ) return false ;

  if ( ! m_meta_data_verified ) {
    require_metadata_committed();

    if (parallel_size() > 1) {
      verify_parallel_consistency( m_mesh_meta_data , m_parallel_machine );
    }

    m_meta_data_verified = true ;

    m_bucket_repository.declare_nil_bucket();
  }
  else {
    ++m_sync_count ;

    // Clear out the previous transaction information
    // m_transaction_log.flush();

    m_entity_repo.clean_changes();
  }

  m_sync_state = MODIFIABLE ;

  return true ;
}
Example #6
void BulkData::destroy_relation( Entity & e_from , Entity & e_to )
{
  static const char method[]= "stk::mesh::BulkData::destroy_relation" ;

  assert_ok_to_modify( method );

  assert_valid_relation( method , *this , e_from , e_to );


  //------------------------------
  // When removing a relationship we may need to
  // remove part memberships and set field relation pointers to NULL.

  PartVector del , keep ;

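  // Deduce the parts induced on e_to by its *other* upward relations;
  // memberships induced by surviving relations must be kept.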
  for ( PairIterRelation i = e_to.relations() ; i.first != i.second ; ++(i.first) ) {

    if ( !( i.first->entity() == & e_from )  &&
        ( e_to.entity_rank() < i.first->entity_rank() ) )
    {
      induced_part_membership( * i.first->entity(), del, e_to.entity_rank(),
                               i.first->identifier(), keep );
    }
  }

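  // Parts induced solely through the e_from -> e_to relation being
  // destroyed become candidates for removal (unless also in 'keep').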
  for ( PairIterRelation i = e_from.relations() ; i.first != i.second ; ++(i.first) ) {
    if ( i.first->entity() == & e_to ) {

      induced_part_membership( e_from, keep, e_to.entity_rank(),
                               i.first->identifier(), del );

      clear_field_relations( e_from , e_to.entity_rank() ,
                             i.first->identifier() );
    }
  }

  //------------------------------
  // 'keep' contains the parts deduced from kept relations
  // 'del'  contains the parts deduced from deleted relations
  //        that are not in 'keep'
  // Only remove these part memberships if the entity is not shared.
  // If the entity is shared then wait until modification_end_synchronize.
  //------------------------------

  if ( ! del.empty() && (parallel_size() < 2 || e_to.sharing().empty()) ) {
    PartVector add ;
    internal_change_entity_parts( e_to , add , del );
  }

  //delete relations from the entities
  m_entity_repo.destroy_relation( e_from, e_to);

}
Example #7
void Ioss::ParallelUtils::gather(int64_t my_value, std::vector<int64_t> &result) const
{
  if (parallel_rank() == 0) {
    result.resize(parallel_size());
  }
#ifdef HAVE_MPI
  if (parallel_size() > 1) {
    const int success = MPI_Gather((void*)&my_value,  1, MPI_LONG_LONG_INT,
                                   (void*)&result[0], 1, MPI_LONG_LONG_INT,
                                   0, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::gather - MPI_Gather failed";
      IOSS_ERROR(errmsg);
    }
  } else {
    result[0] = my_value;
  }
#else
  result[0] = my_value;
#endif
}
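
Note that 'result' is resized only on rank 0; MPI_Gather ignores the receive arguments on non-root ranks, but those ranks must not read 'result' afterwards (the TOPTR macro used in the next example presumably exists to map an empty vector to a safe null pointer in just this situation). A standalone sketch of the same root-only gather (names are illustrative):

#include <mpi.h>
#include <cstdint>
#include <vector>

// Illustrative: gather one int64_t from every rank onto rank 0.
std::vector<int64_t> gather_on_root(int64_t mine, MPI_Comm comm)
{
  int rank = 0, size = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  std::vector<int64_t> all;
  if (rank == 0)
    all.resize(size);  // only the root needs receive space
  MPI_Gather(&mine, 1, MPI_LONG_LONG_INT,
             all.data(), 1, MPI_LONG_LONG_INT,  // receive args ignored on non-root ranks
             0, comm);
  return all;          // empty on non-root ranks
}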
Example #8
void Ioss::ParallelUtils::gather(std::vector<int64_t> &my_values, std::vector<int64_t> &result) const
{
  size_t count = my_values.size();
  if (parallel_rank() == 0) {
    result.resize(count * parallel_size());
  }
#ifdef HAVE_MPI
  if (parallel_size() > 1) {
    const int success = MPI_Gather((void*)TOPTR(my_values), static_cast<int>(count), MPI_LONG_LONG_INT,
                                   (void*)TOPTR(result),    static_cast<int>(count), MPI_LONG_LONG_INT,
                                   0, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::gather - MPI_Gather failed";
      IOSS_ERROR(errmsg);
    }
  } else {
    std::copy(my_values.begin(), my_values.end(), result.begin());
  }
#else
  std::copy(my_values.begin(), my_values.end(), result.begin());
#endif
}
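
This overload assumes my_values.size() is identical on every rank, since MPI_Gather uses one fixed count. When ranks contribute different amounts, MPI_Gatherv is the usual tool; a sketch under that assumption (all names are illustrative):

#include <mpi.h>
#include <cstdint>
#include <numeric>
#include <vector>

// Illustrative: gather variable-length int64_t vectors onto rank 0.
std::vector<int64_t> gatherv_on_root(const std::vector<int64_t> &mine, MPI_Comm comm)
{
  int rank = 0, size = 0;
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);

  // First gather each rank's count so the root can size its buffers.
  int my_count = static_cast<int>(mine.size());
  std::vector<int> counts(rank == 0 ? size : 0);
  MPI_Gather(&my_count, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, comm);

  std::vector<int> displs;
  std::vector<int64_t> all;
  if (rank == 0) {
    displs.assign(size, 0);  // displs[i] = sum of counts[0..i-1]
    std::partial_sum(counts.begin(), counts.end() - 1, displs.begin() + 1);
    all.resize(static_cast<size_t>(displs.back()) + counts.back());
  }
  MPI_Gatherv(mine.data(), my_count, MPI_LONG_LONG_INT,
              all.data(), counts.data(), displs.data(), MPI_LONG_LONG_INT,
              0, comm);
  return all;  // empty on non-root ranks
}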
Example #9
std::string Ioss::ParallelUtils::decode_filename(const std::string &filename, bool is_parallel) const
{
  std::string decoded_filename(filename);

  if (is_parallel) {
    // Running in parallel; assume a nemesis-style decomposition and decode
    // what the filename should be for this processor.
    int processor = parallel_rank();
    int num_processors = parallel_size();

    decoded_filename = Ioss::Utils::decode_filename(filename, processor, num_processors);
  }
  return decoded_filename;
}
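
Ioss::Utils::decode_filename turns the base name into a per-processor name following the nemesis convention, appending the processor count and a padded rank. A rough standalone approximation of that naming scheme (the exact padding rules live in Ioss::Utils::decode_filename; this sketch is only illustrative):

#include <cstdio>
#include <string>

// Illustrative: "mesh.e" on 16 ranks becomes "mesh.e.16.00" ... "mesh.e.16.15".
std::string nemesis_style_name(const std::string &base, int rank, int nproc)
{
  int width = static_cast<int>(std::to_string(nproc).size());  // pad ranks to equal width
  char suffix[32];
  std::snprintf(suffix, sizeof(suffix), ".%d.%0*d", nproc, width, rank);
  return base + suffix;
}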
Example #10
void BulkData::internal_propagate_part_changes(
  Entity           & entity ,
  const PartVector & removed )
{
  const unsigned etype = entity.entity_rank();

  PairIterRelation rel = entity.relations();

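  // Walk every relation of 'entity': lower-ranked neighbors ('to' entities)
  // may gain or lose induced part memberships, while higher-ranked ones
  // ('from' entities) only need their field relations refreshed.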
  for ( ; ! rel.empty() ; ++rel ) {
    const unsigned rel_type  = rel->entity_rank();
    const unsigned rel_ident = rel->identifier();

    if ( rel_type < etype ) { // a 'to' entity

      Entity & e_to = * rel->entity();

      PartVector to_del , to_add , empty ;

      // Induce part membership from this relationship to
      // pick up any additions.
      induced_part_membership( entity, empty,
                               rel_type, rel_ident, to_add );

      if ( ! removed.empty() ) {
        // Something was removed from the 'from' entity,
        // deduce what may have to be removed from the 'to' entity.

        // Deduce parts for 'e_to' from all upward relations.
        // Any non-parallel part that I removed that is not deduced for
        // 'e_to' must be removed from 'e_to'

        for ( PairIterRelation
              to_rel = e_to.relations(); ! to_rel.empty() ; ++to_rel ) {
          if ( e_to.entity_rank() < to_rel->entity_rank() &&
               & entity != to_rel->entity() /* Already did this entity */ ) {
            // Relation from to_rel->entity() to e_to
            induced_part_membership( * to_rel->entity(), empty,
                                     e_to.entity_rank(),
                                     to_rel->identifier(),
                                     to_add );
          }
        }

        for ( PartVector::const_iterator
              j = removed.begin() ; j != removed.end() ; ++j ) {
          if ( ! contain( to_add , **j ) ) {
            induced_part_membership( **j, etype, rel_type, rel_ident, to_del );
          }
        }
      }

      if ( parallel_size() < 2 || e_to.sharing().empty() ) {
        // Entirely local, ok to remove memberships now
        internal_change_entity_parts( e_to , to_add , to_del );
      }
      else {
        // Shared, do not remove memberships now.
        // Wait until modification_end.
        internal_change_entity_parts( e_to , to_add , empty );
      }

      set_field_relations( entity, e_to, rel_ident );
    }
    else if ( etype < rel_type ) { // a 'from' entity
      Entity & e_from = * rel->entity();

      set_field_relations( e_from, entity, rel_ident );
    }
  }
}
Example #11
void BulkDataTester::check_sharing_comm_maps()
{
    stk::CommSparse comm(parallel());

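    // Two identical passes over the packing code: phase 0 only measures
    // message sizes so allocate_buffers() can size the send buffers;
    // phase 1 packs the data for real and communicates it.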
    for(int phase = 0; phase < 2; ++phase)
    {
        for(stk::mesh::EntityCommListInfoVector::const_iterator i = this->my_internal_comm_list().begin(); i != this->my_internal_comm_list().end(); ++i)
        {
            for(stk::mesh::PairIterEntityComm ec = this->internal_entity_comm_map(i->key); !ec.empty(); ++ec)
            {
                int type = ec->ghost_id;
                if ( type == 0 )
                {
                    std::vector<int> sharingProcs;
                    this->comm_shared_procs(i->key, sharingProcs);
                    // pack shared info
                    int owner = -1;
                    if(bucket_ptr(i->entity) != 0)
                    {
                        owner = parallel_owner_rank(i->entity);
                    }

                    comm.send_buffer(ec->proc).pack<stk::mesh::EntityKey>(i->key).pack<int>(type).pack<int>(owner);
                    comm.send_buffer(ec->proc).pack<size_t>(sharingProcs.size());
                    for (size_t proc=0;proc<sharingProcs.size();++proc)
                    {
                        comm.send_buffer(ec->proc).pack<int>(sharingProcs[proc]);
                    }
                }
            }
        }

        if(phase == 0)
        {
            comm.allocate_buffers();
        }
        else
        {
            comm.communicate();
        }
    }

    // unpack

    std::ostringstream os;
    bool anyErrors = false;

    for(int i = 0; i < parallel_size(); ++i)
    {
        if ( i != parallel_rank() )
        {
            stk::mesh::EntityKey key;
            int type = -1;
            int from = -1;
            int owner = -1;
            while(comm.recv_buffer(i).remaining())
            {
                from = i;
                comm.recv_buffer(from).unpack<stk::mesh::EntityKey>(key).unpack<int>(type).unpack<int>(owner);

                size_t numSharingProcs = 0;
                comm.recv_buffer(from).unpack<size_t>(numSharingProcs);
                std::vector<int> sharingProcs(numSharingProcs);
                for (size_t proc=0;proc<numSharingProcs;++proc)
                {
                    comm.recv_buffer(from).unpack<int>(sharingProcs[proc]);
                }

                std::vector<int> localSharingProcs;
                this->comm_shared_procs(key, localSharingProcs);

                std::sort(localSharingProcs.begin(), localSharingProcs.end());
                std::sort(sharingProcs.begin(), sharingProcs.end());
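                // The symmetric difference holds the procs that appear in exactly
                // one of the two sharing lists -- each entry is a mismatch.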
                size_t maxNum = localSharingProcs.size() + sharingProcs.size();
                std::vector<int> unsharedProcs(maxNum);
                std::vector<int>::iterator iter = std::set_symmetric_difference( localSharingProcs.begin(), localSharingProcs.end(),
                        sharingProcs.begin(), sharingProcs.end(), unsharedProcs.begin());

                size_t numUnshared = iter - unsharedProcs.begin();
                unsharedProcs.resize(numUnshared);

                int counter = 0;
                {
                    for (size_t j=0;j<numUnshared;++j)
                    {
                        std::vector<int>::iterator localIter = std::find(sharingProcs.begin(), sharingProcs.end(), unsharedProcs[j]);
                        if ( localIter != sharingProcs.end() && *localIter != parallel_rank() )
                        {
                            if ( counter == 0 )
                            {
                                os << "Error in sharing between procs for entity " << key.id() << " with rank " << key.rank()  << "  between procs: " << this->parallel_rank() << " and " << from << std::endl;
                                counter++;
                            }
                            os << "\tEntity " << key << " is shared with proc " << unsharedProcs[j] << " from other proc: "
                                    << from << " but not from this proc: " << parallel_rank() << std::endl;
                            anyErrors = true;
                        }

                        localIter = std::find(localSharingProcs.begin(), localSharingProcs.end(), unsharedProcs[j]);
                        if ( localIter != localSharingProcs.end() && *localIter != from )
                        {
                            if ( counter == 0 )
                            {
                                os << "Error in sharing between procs for entity " << key.id() << " with rank " << key.rank()  << "  between procs: " << this->parallel_rank() << " and " << from << std::endl;
                                counter++;
                            }
                            os << "\tEntity " << key << " is shared with proc " << unsharedProcs[j] << " from this proc: "
                                    << parallel_rank() << " but not from other proc: " << from << std::endl;
                            anyErrors = true;
                        }
                    }
                }
            }
        }
    }

    ThrowRequireMsg(!anyErrors, os.str());
}
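
The function above is also a compact demonstration of the stk::CommSparse sizing idiom: identical packing code runs twice, with allocate_buffers() between the passes. A reduced sketch using only the CommSparse calls that appear above (the header path is my assumption; the int payload is illustrative):

#include <mpi.h>
#include <stk_util/parallel/CommSparse.hpp>

// Illustrative: send one int to every other rank with the two-phase idiom.
void exchange_one_int(MPI_Comm mpi_comm, int payload)
{
  int rank = 0, size = 0;
  MPI_Comm_rank(mpi_comm, &rank);
  MPI_Comm_size(mpi_comm, &size);

  stk::CommSparse comm(mpi_comm);
  for (int phase = 0; phase < 2; ++phase) {
    for (int p = 0; p < size; ++p) {
      if (p != rank)
        comm.send_buffer(p).pack<int>(payload);  // phase 0 counts bytes; phase 1 stores them
    }
    if (phase == 0)
      comm.allocate_buffers();  // size the buffers from the phase-0 dry run
    else
      comm.communicate();       // exchange the packed data
  }

  for (int p = 0; p < size; ++p) {
    if (p == rank) continue;
    while (comm.recv_buffer(p).remaining()) {
      int received = 0;
      comm.recv_buffer(p).unpack<int>(received);
      // ... consume 'received' ...
    }
  }
}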