Example #1
void abort() {
  stk::EnvData &env_data = stk::EnvData::instance();

  // Cannot be sure of parallel synchronization status; therefore, no communications can
  // occur.  Grab and dump all pending output buffers to 'std::cerr'.
  std::cerr << std::endl
            << "*** SIERRA ABORT on P" << stk::EnvData::instance().m_parallelRank << " ***"
            << std::endl
            << "*** check " << get_param("output-log")
            << " file for more information ***"
            << std::endl ;

  if (!env_data.m_output.str().empty()) {
    std::cerr << "Buffer contents of deferred output stream on processor " << parallel_rank()
              << std::endl ;
    std::cerr << env_data.m_output.str();
  }

  std::cerr.flush();
  std::cout.flush();

  ::sleep(1);                                   // Give the other processors a chance to
                                                // catch up; seems to help with hangs.
#if defined(STK_HAS_MPI)
  MPI_Abort(env_data.m_parallelComm, MPI_ERR_OTHER);    // First try to die
#endif
  std::exit( EXIT_FAILURE );                    // Second try to die
}
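A minimal caller sketch for the routine above. The surrounding namespace and header are not shown in the snippet, so treat the include path and the helper name as assumptions; only the call to abort() itself comes from the source.

#include <iostream>
// plus the header that declares the abort() above, e.g. stk_util's Env.hpp

// Hypothetical helper: abort the run when a local consistency check fails.
void require_or_abort(bool condition, const char *what)
{
  if (!condition) {
    std::cerr << "Fatal: " << what << std::endl;
    abort();   // the abort() above: flushes deferred output, MPI_Abort, exit
  }
}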
Example #2
//--------------------------------------------------------------------------
//-------- set_log_file_stream ---------------------------------------------
//--------------------------------------------------------------------------
void
NaluEnv::set_log_file_stream(const std::string &naluLogName, bool pprint)
{
  if ( pRank_ == 0 ) {
    naluStreamBuffer_.open(naluLogName.c_str(), std::ios::out);
    naluLogStream_->rdbuf(&naluStreamBuffer_);
  }
  else {
    naluLogStream_->rdbuf(&naluEmptyStreamBuffer_);
  }

  // default to an empty stream buffer for parallel unless pprint is set
  parallelLog_ = pprint;
  if (parallelLog_) {
    // width needed to zero-pad the largest rank; guard pSize_ == 1, where
    // std::log10(0) would be undefined
    const int numPlaces = (pSize_ > 1) ? static_cast<int>(std::log10(pSize_ - 1) + 1) : 1;

    std::stringstream paddedRank;
    paddedRank << std::setw(numPlaces) << std::setfill('0') << parallel_rank();

    // inputname.log -> inputname.log.16.02 for the rank 2 proc of a 16 proc job
    std::string parallelLogName =
        naluLogName + "." + std::to_string(pSize_) + "." + paddedRank.str();
    naluParallelStreamBuffer_.open(parallelLogName.c_str(), std::ios::out);
    naluParallelStream_->rdbuf(&naluParallelStreamBuffer_);
  }
  else {
    naluParallelStream_->rdbuf(stdoutStream_);
  }
}
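The rank-padded log-name scheme above can be shown standalone. This sketch has no Nalu dependencies; the function name is invented for illustration, and the size == 1 guard mirrors the fix noted in the code.

#include <cmath>
#include <iomanip>
#include <sstream>
#include <string>

// Reproduces the naming scheme: base "input.log" with 16 ranks and rank 2
// yields "input.log.16.02" (two digits, since the largest rank is 15).
std::string parallel_log_name(const std::string &base, int size, int rank)
{
  const int places = (size > 1) ? static_cast<int>(std::log10(size - 1) + 1) : 1;
  std::ostringstream padded;
  padded << std::setw(places) << std::setfill('0') << rank;
  return base + "." + std::to_string(size) + "." + padded.str();
}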
Example #3
std::string Ioss::ParallelUtils::decode_filename(const std::string &filename,
                                                 bool is_parallel) const
{
  std::string decoded_filename(filename);

  if (is_parallel) {
    // Running in parallel; assume a nemesis-style file-per-processor
    // decomposition and decode what the filename should be for this
    // processor.
    int processor = parallel_rank();
    int num_processors = parallel_size();

    decoded_filename = Ioss::Utils::decode_filename(filename, processor, num_processors);
  }
  return decoded_filename;
}
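For context, an illustrative stand-in for the nemesis naming the comment refers to. The real decoding is done by Ioss::Utils::decode_filename; the exact padding width used there is an assumption here.

#include <iomanip>
#include <sstream>
#include <string>

// Appends ".<nproc>.<zero-padded rank>"; the pad width is taken from the
// digit count of nproc, e.g. nemesis_style_name("mesh.g", 2, 16) -> "mesh.g.16.02".
std::string nemesis_style_name(const std::string &file, int rank, int nproc)
{
  const int width = static_cast<int>(std::to_string(nproc).size());
  std::ostringstream os;
  os << file << "." << nproc << "." << std::setw(width) << std::setfill('0') << rank;
  return os.str();
}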
Example #4
void Ioss::ParallelUtils::gather(int64_t my_value, std::vector<int64_t> &result) const
{
  if (parallel_rank() == 0) {
    result.resize(parallel_size());
  }
#ifdef HAVE_MPI
  if (parallel_size() > 1) {
    // result.data() is safe even on non-root ranks where 'result' is empty;
    // &result[0] on an empty vector would be undefined behavior.
    const int success = MPI_Gather((void*)&my_value, 1, MPI_LONG_LONG_INT,
                                   (void*)result.data(), 1, MPI_LONG_LONG_INT,
                                   0, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::gather - MPI_Gather failed";
      IOSS_ERROR(errmsg);
    }
  } else {
    result[0] = my_value;
  }
#else
  result[0] = my_value;
#endif
}
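A hedged usage sketch for the scalar gather. It assumes an Ioss::ParallelUtils instance constructed elsewhere from the run's communicator; the helper name is illustrative.

#include <cstdint>
#include <iostream>
#include <vector>
// plus Ioss_ParallelUtils.h from SEACAS/IOSS

// Gather each rank's local element count onto rank 0 and print them there.
void report_counts(const Ioss::ParallelUtils &util, int64_t local_count)
{
  std::vector<int64_t> counts;            // populated on rank 0 only
  util.gather(local_count, counts);
  if (util.parallel_rank() == 0) {
    for (size_t p = 0; p < counts.size(); ++p) {
      std::cout << "P" << p << ": " << counts[p] << " elements\n";
    }
  }
}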
Example #5
void Ioss::ParallelUtils::gather(std::vector<int64_t> &my_values, std::vector<int64_t> &result) const
{
  size_t count = my_values.size();
  if (parallel_rank() == 0) {
    result.resize(count * parallel_size());
  }
#ifdef HAVE_MPI
  if (parallel_size() > 1) {
    // MPI_Gather takes an 'int' count; cast the size_t explicitly.
    const int icount = static_cast<int>(count);
    const int success = MPI_Gather((void*)TOPTR(my_values), icount, MPI_LONG_LONG_INT,
                                   (void*)TOPTR(result), icount, MPI_LONG_LONG_INT,
                                   0, communicator_);
    if (success != MPI_SUCCESS) {
      std::ostringstream errmsg;
      errmsg << "Ioss::ParallelUtils::gather - MPI_Gather failed";
      IOSS_ERROR(errmsg);
    }
  } else {
    std::copy(my_values.begin(), my_values.end(), result.begin());
  }
#else
  std::copy(my_values.begin(), my_values.end(), result.begin());
#endif
}
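Note the fixed-count contract: every rank must pass a my_values of identical size, or MPI_Gather's behavior is undefined; variable-length contributions would require MPI_Gatherv instead. A usage sketch under the same assumed context as above (names are illustrative):

#include <cstdint>
#include <vector>

// Each rank contributes exactly two values; on rank 0, 'all' ends up with
// 2 * parallel_size() values ordered by rank.
void gather_id_range(const Ioss::ParallelUtils &util,
                     int64_t local_min_id, int64_t local_max_id)
{
  std::vector<int64_t> mine = {local_min_id, local_max_id};
  std::vector<int64_t> all;               // sized on rank 0 only
  util.gather(mine, all);
  if (util.parallel_rank() == 0) {
    // all[2*p] and all[2*p + 1] hold rank p's min and max ids
  }
}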
Example #6
void BulkDataTester::check_sharing_comm_maps()
{
    stk::CommSparse comm(parallel());

    // Two-phase CommSparse idiom: phase 0 packs to size the buffers,
    // phase 1 packs again and communicates.
    for(int phase = 0; phase < 2; ++phase)
    {
        for(stk::mesh::EntityCommListInfoVector::const_iterator i = this->my_internal_comm_list().begin(); i != this->my_internal_comm_list().end(); ++i)
        {
            for(stk::mesh::PairIterEntityComm ec = this->internal_entity_comm_map(i->key); !ec.empty(); ++ec)
            {
                int type = ec->ghost_id;
                if ( type == 0 ) // ghost_id 0 is the shared (non-ghosted) comm map
                {
                    std::vector<int> sharingProcs;
                    this->comm_shared_procs(i->key, sharingProcs);
                    // pack shared info
                    int owner = -1;
                    if(bucket_ptr(i->entity) != nullptr)
                    {
                        owner = parallel_owner_rank(i->entity);
                    }

                    comm.send_buffer(ec->proc).pack<stk::mesh::EntityKey>(i->key).pack<int>(type).pack<int>(owner);
                    comm.send_buffer(ec->proc).pack<size_t>(sharingProcs.size());
                    for (size_t proc=0;proc<sharingProcs.size();++proc)
                    {
                        comm.send_buffer(ec->proc).pack<int>(sharingProcs[proc]);
                    }
                }
            }
        }

        if(phase == 0)
        {
            comm.allocate_buffers();
        }
        else
        {
            comm.communicate();
        }
    }

    // unpack each remote proc's view of the sharing and compare with the local view

    std::ostringstream os;
    bool anyErrors = false;

    for(int i = 0; i < parallel_size(); ++i)
    {
        if ( i != parallel_rank() )
        {
            stk::mesh::EntityKey key;
            int type = -1;
            int from = -1;
            int owner = -1;
            while(comm.recv_buffer(i).remaining())
            {
                from = i;
                comm.recv_buffer(from).unpack<stk::mesh::EntityKey>(key).unpack<int>(type).unpack<int>(owner);

                size_t numSharingProcs = 0;
                comm.recv_buffer(from).unpack<size_t>(numSharingProcs);
                std::vector<int> sharingProcs(numSharingProcs);
                for (size_t proc=0;proc<numSharingProcs;++proc)
                {
                    comm.recv_buffer(from).unpack<int>(sharingProcs[proc]);
                }

                std::vector<int> localSharingProcs;
                this->comm_shared_procs(key, localSharingProcs);

                std::sort(localSharingProcs.begin(), localSharingProcs.end());
                std::sort(sharingProcs.begin(), sharingProcs.end());
                size_t maxNum = localSharingProcs.size() + sharingProcs.size();
                std::vector<int> unsharedProcs(maxNum);
                std::vector<int>::iterator iter = std::set_symmetric_difference( localSharingProcs.begin(), localSharingProcs.end(),
                        sharingProcs.begin(), sharingProcs.end(), unsharedProcs.begin());

                size_t numUnshared = iter - unsharedProcs.begin();
                unsharedProcs.resize(numUnshared);

                bool headerPrinted = false;
                for (size_t j=0;j<numUnshared;++j)
                {
                    std::vector<int>::iterator localIter = std::find(sharingProcs.begin(), sharingProcs.end(), unsharedProcs[j]);
                    if ( localIter != sharingProcs.end() && *localIter != parallel_rank() )
                    {
                        if ( !headerPrinted )
                        {
                            os << "Error in sharing between procs for entity " << key.id() << " with rank " << key.rank() << " between procs: " << this->parallel_rank() << " and " << from << std::endl;
                            headerPrinted = true;
                        }
                        os << "\tEntity " << key << " is shared with proc " << unsharedProcs[j] << " from other proc: "
                                << from << " but not from this proc: " << parallel_rank() << std::endl;
                        anyErrors = true;
                    }

                    localIter = std::find(localSharingProcs.begin(), localSharingProcs.end(), unsharedProcs[j]);
                    if ( localIter != localSharingProcs.end() && *localIter != from )
                    {
                        if ( !headerPrinted )
                        {
                            os << "Error in sharing between procs for entity " << key.id() << " with rank " << key.rank() << " between procs: " << this->parallel_rank() << " and " << from << std::endl;
                            headerPrinted = true;
                        }
                        os << "\tEntity " << key << " is shared with proc " << unsharedProcs[j] << " from this proc: "
                                << parallel_rank() << " but not from other proc: " << from << std::endl;
                        anyErrors = true;
                    }
                }
            }
        }
    }

    ThrowRequireMsg(!anyErrors, os.str());
}
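The checker above is built on the two-phase stk::CommSparse pattern. A minimal sketch of just that pattern, stripped of the mesh details; the header path follows stk_util's layout and is assumed available in your build.

#include <stk_util/parallel/CommSparse.hpp>

// Every rank sends its own rank number to every other rank.
// Phase 0 packs into sizing buffers, allocate_buffers() allocates them,
// phase 1 packs for real, and communicate() exchanges the data.
void exchange_ranks(stk::ParallelMachine mpiComm)
{
  stk::CommSparse comm(mpiComm);
  for (int phase = 0; phase < 2; ++phase) {
    for (int p = 0; p < comm.parallel_size(); ++p) {
      if (p != comm.parallel_rank()) {
        comm.send_buffer(p).pack<int>(comm.parallel_rank());
      }
    }
    if (phase == 0) { comm.allocate_buffers(); }
    else            { comm.communicate(); }
  }
  for (int p = 0; p < comm.parallel_size(); ++p) {
    while (comm.recv_buffer(p).remaining()) {
      int theirRank = -1;
      comm.recv_buffer(p).unpack<int>(theirRank);
      // theirRank == p for every message received from p
    }
  }
}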