int
cartesian_communicator::ndims() const {
  int n = -1;
  BOOST_MPI_CHECK_RESULT(MPI_Cartdim_get, 
                         (MPI_Comm(*this), &n));
  return n;
}
Example #2
status communicator::probe(int source, int tag) const
{
  status stat;
  BOOST_MPI_CHECK_RESULT(MPI_Probe,
                         (source, tag, MPI_Comm(*this), &stat.m_status));
  return stat;
}
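A minimal usage sketch of the wrapper above (assuming an initialized Boost.MPI environment; tag 0 and the payload type are illustrative): probe blocks until a message is pending, and the returned status reports which source and tag to receive from.
boost::mpi::communicator world;
boost::mpi::status s = world.probe(boost::mpi::any_source, 0);
std::string payload;                       // type must match what the sender shipped
world.recv(s.source(), s.tag(), payload);  // receive exactly the probed message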
Example #3
std::vector<int>
cartesian_communicator::coordinates(int rk) const {
  std::vector<int> cbuf(ndims());
  BOOST_MPI_CHECK_RESULT(MPI_Cart_coords,
                         (MPI_Comm(*this), rk, int(cbuf.size()), c_data(cbuf)));
  return cbuf;
}
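A hedged sketch of how coordinates() might be called; the 2x2 grid and periodicity flags are illustrative assumptions, not taken from the source above.
boost::mpi::communicator world;
boost::mpi::cartesian_topology topo({ boost::mpi::cartesian_dimension(2, true),
                                      boost::mpi::cartesian_dimension(2, false) });
boost::mpi::cartesian_communicator cart(world, topo);
std::vector<int> where = cart.coordinates(cart.rank());  // this process's grid position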
Example #4
    ParMetisGraph::ParMetisGraph(ParMetisMesh* parMesh,
                                 MPI::Intracomm* comm,
                                 int ncommonnodes)
      : parMetisMesh(parMesh)
    {
      FUNCNAME("ParMetisGraph::ParMetisGraph()");

      TEST_EXIT(parMesh)("No ParMetisMesh defined!\n");
      TEST_EXIT(comm)("No MPI communicator defined!\n");

      int numflag = 0;

      if (ncommonnodes == -1)
        ncommonnodes = parMetisMesh->getDim();

      MPI_Comm tmpComm = MPI_Comm(*comm);

      ParMETIS_V3_Mesh2Dual(parMetisMesh->getElementDist(),
                            parMetisMesh->getElementPtr(),
                            parMetisMesh->getElementInd(),
                            &numflag,
                            &ncommonnodes,
                            &xadj,
                            &adjncy,
                            &tmpComm);
    }
Example #5
communicator communicator::split(int color, int key) const
{
  MPI_Comm newcomm;
  BOOST_MPI_CHECK_RESULT(MPI_Comm_split,
                         (MPI_Comm(*this), color, key, &newcomm));
  return communicator(newcomm, comm_take_ownership);
}
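A short caller sketch (parity-based colors are just an example): each process passes a color and a key; processes sharing a color land in the same sub-communicator, ranked by key.
boost::mpi::communicator world;
boost::mpi::communicator half = world.split(world.rank() % 2, world.rank());
// half.rank() renumbers from 0 within each parity class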
Example #6
std::pair<int, int>
cartesian_communicator::shifted_ranks(int dim, int disp) const {
  std::pair<int, int> r(-1,-1);
  assert(0 <= dim && dim < ndims());
  BOOST_MPI_CHECK_RESULT(MPI_Cart_shift, 
                         (MPI_Comm(*this), dim, disp, &(r.first), &(r.second)));
  return r;
}
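Sketch of a typical use in a halo exchange (dimension 0 and displacement 1 are illustrative; cart is the communicator from the earlier sketch). On a non-periodic boundary either neighbor may come back as MPI_PROC_NULL, which point-to-point calls accept as a no-op target.
std::pair<int, int> nb = cart.shifted_ranks(0, 1);
int source_rank = nb.first;   // one step "behind" us in dimension 0
int dest_rank   = nb.second;  // one step "ahead" of us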
Example #7
status communicator::recv(int source, int tag) const
{
  status stat;
  BOOST_MPI_CHECK_RESULT(MPI_Recv,
                         (MPI_BOTTOM, 0, MPI_PACKED,
                          source, tag, MPI_Comm(*this), &stat.m_status));
  return stat;
}
Example #8
request communicator::isend(int dest, int tag) const
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Isend,
                         (MPI_BOTTOM, 0, MPI_PACKED,
                          dest, tag, MPI_Comm(*this), &req.m_requests[0]));
  return req;
}
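The empty overloads above send a zero-length MPI_PACKED message, useful as a pure notification. A hedged sketch assuming at least two ranks (destination rank 1 and tag 42 are illustrative):
boost::mpi::communicator world;
if (world.rank() == 0) {
  boost::mpi::request req = world.isend(1, 42);  // no payload, just a signal
  req.wait();
} else if (world.rank() == 1) {
  world.recv(0, 42);                             // matching empty receive
}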
Example #9
template<>
request communicator::isend<content>(int dest, int tag, const content& c) const
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Isend,
                         (MPI_BOTTOM, 1, c.get_mpi_datatype(),
                          dest, tag, MPI_Comm(*this), &req.m_requests[0]));
  return req;
}
Example #10
request communicator::irecv(int source, int tag) const
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                         (MPI_BOTTOM, 0, MPI_PACKED,
                          source, tag, MPI_Comm(*this), &req.m_requests[0]));
  return req;
}
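Sketch of pairing the empty irecv with its isend counterpart (the partner computation is a hypothetical even/odd pairing): posting the receive first and waiting on both requests avoids any assumption about completion order.
int partner = world.rank() ^ 1;  // illustrative neighbor choice
boost::mpi::request reqs[2];
reqs[0] = world.irecv(partner, 42);
reqs[1] = world.isend(partner, 42);
boost::mpi::wait_all(reqs, reqs + 2);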
Example #11
status communicator::probe(int source, int tag) const
{
  status stat;
  BOOST_MPI_CHECK_RESULT(MPI_Probe,
                         (source, tag, MPI_Comm(*this), &stat.m_status));
  return stat;
}
Example #12
template<typename T>
void
broadcast_impl(const communicator& comm, T* values, int n, int root,
               mpl::true_)
{
  BOOST_MPI_CHECK_RESULT(MPI_Bcast,
                         (values, n,
                          boost::mpi::get_mpi_datatype<T>(*values),
                          root, MPI_Comm(comm)));
}
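This is the fast path that boost::mpi::broadcast dispatches to when T maps to a native MPI datatype. A minimal caller sketch:
boost::mpi::communicator world;
int payload = (world.rank() == 0) ? 123 : 0;  // only the root's value matters
boost::mpi::broadcast(world, payload, 0);     // afterwards every rank holds 123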
Example #13
template<>
request
communicator::isend<packed_oarchive>(int dest, int tag,
                                     const packed_oarchive& ar) const
{
  request req;
  detail::packed_archive_isend(MPI_Comm(*this), dest, tag, ar,
                               &req.m_requests[0], 2);
  return req;
}
Example #14
template<>
status
communicator::recv<packed_iarchive>(int source, int tag,
                                    packed_iarchive& ar) const
{
  status stat;
  detail::packed_archive_recv(MPI_Comm(*this), source, tag, ar,
                              stat.m_status);
  return stat;
}
Example #15
template<>
status
communicator::recv<const content>(int source, int tag, const content& c) const
{
  status stat;
  BOOST_MPI_CHECK_RESULT(MPI_Recv,
                         (MPI_BOTTOM, 1, c.get_mpi_datatype(),
                          source, tag, MPI_Comm(*this), &stat.m_status));
  return stat;
}
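The content specializations above underpin Boost.MPI's skeleton/content idiom: ship a container's structure once, then transfer only the raw data on each subsequent exchange. A hedged sketch (ranks, tags, and the vector size are illustrative):
std::vector<int> v(100);
if (world.rank() == 0) {
  world.send(1, 0, boost::mpi::skeleton(v));     // structure: size and layout
  world.send(1, 1, boost::mpi::get_content(v));  // data only, via a derived MPI datatype
} else if (world.rank() == 1) {
  world.recv(0, 0, boost::mpi::skeleton(v));
  world.recv(0, 1, boost::mpi::get_content(v));
}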
Example #16
int
cartesian_communicator::rank(const std::vector<int>& coords ) const {
  int r = -1;
  assert(int(coords.size()) == ndims());
  BOOST_MPI_CHECK_RESULT(MPI_Cart_rank, 
                         (MPI_Comm(*this), c_data(const_cast<std::vector<int>&>(coords)), 
                          &r));
  return r;
}
Example #17
template<typename T>
void
all_gather_impl(const communicator& comm, const T* in_values, int n, 
                T* out_values, int const* sizes, int const* skips, mpl::false_)
{
  int nproc = comm.size();
  // First, gather the archive sizes; they can differ
  // from process to process.
  packed_oarchive oa(comm);
  for (int i = 0; i < n; ++i) {
    oa << in_values[i];
  }
  std::vector<int> oasizes(nproc);
  int oasize = oa.size();
  BOOST_MPI_CHECK_RESULT(MPI_Allgather,
                         (&oasize, 1, MPI_INT,
                          c_data(oasizes), 1, MPI_INT,
                          MPI_Comm(comm)));
  // Gather the archives, which can be of different sizes, so
  // we need to use allgatherv.
  // Everything is contiguous, so the offsets can be
  // deduced from the collected sizes.
  std::vector<int> offsets(nproc);
  sizes2offsets(oasizes, offsets);
  packed_iarchive::buffer_type recv_buffer(std::accumulate(oasizes.begin(), oasizes.end(), 0));
  BOOST_MPI_CHECK_RESULT(MPI_Allgatherv,
                         (const_cast<void*>(oa.address()), int(oa.size()), MPI_BYTE,
                          c_data(recv_buffer), c_data(oasizes), c_data(offsets), MPI_BYTE, 
                          MPI_Comm(comm)));
  for (int src = 0; src < nproc; ++src) {
    int nb   = sizes ? sizes[src] : n;
    int skip = skips ? skips[src] : 0;
    std::advance(out_values, skip);
    if (src == comm.rank()) { // this is our local data
      for (int i = 0; i < nb; ++i) {
        *out_values++ = *in_values++;
      }
    } else {
      packed_iarchive ia(comm,  recv_buffer, boost::archive::no_header, offsets[src]);
      for (int i = 0; i < nb; ++i) {
        ia >> *out_values++;
      }
    }
  }
}
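This serialized branch backs boost::mpi::all_gather for types without a native MPI datatype. A caller-side sketch:
boost::mpi::communicator world;
std::vector<std::string> all;
boost::mpi::all_gather(world, std::to_string(world.rank()), all);
// all[i] now holds the string contributed by rank i, on every process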
Example #18
template<>
request
communicator::irecv<const content>(int source, int tag,
                                   const content& c) const
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Irecv,
                         (MPI_BOTTOM, 1, c.get_mpi_datatype(),
                          source, tag, MPI_Comm(*this), &req.m_requests[0]));
  return req;
}
Example #19
optional<status> communicator::iprobe(int source, int tag) const
{
  typedef optional<status> result_type;

  status stat;
  int flag;
  BOOST_MPI_CHECK_RESULT(MPI_Iprobe,
                         (source, tag, MPI_Comm(*this), &flag,
                          &stat.m_status));
  if (flag) return stat;
  else return result_type();
}
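Unlike probe, iprobe returns immediately; the empty optional signals that nothing is pending. A polling sketch (tag 0 and the payload type are illustrative):
boost::mpi::communicator world;
while (boost::optional<boost::mpi::status> s = world.iprobe(boost::mpi::any_source, 0)) {
  std::string msg;
  world.recv(s->source(), s->tag(), msg);  // drain every pending message
}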
Example #20
template<typename T>
request
ibroadcast_impl(const communicator& comm, T* values, int n, int root,
                mpl::true_)
{
  request req;
  BOOST_MPI_CHECK_RESULT(MPI_Ibcast,
                         (values, n,
                          boost::mpi::get_mpi_datatype<T>(*values),
                          root,
                          MPI_Comm(comm),
                          &req.m_requests[0]));
  return req;
}
Example #21
void
cartesian_communicator::topology(cartesian_topology& topo,
                                 std::vector<int>& coords) const {
  int ndims = this->ndims();
  topo.resize(ndims);
  coords.resize(ndims);
  std::vector<int> cdims(ndims);
  std::vector<int> cperiods(ndims);
  BOOST_MPI_CHECK_RESULT(MPI_Cart_get,
                         (MPI_Comm(*this), ndims, c_data(cdims), c_data(cperiods), c_data(coords)));
  cartesian_topology res(cdims.begin(), cperiods.begin(), ndims);
  topo.swap(res);
}
Example #22
void communicator::abort(int errcode) const
{
  BOOST_MPI_CHECK_RESULT(MPI_Abort, (MPI_Comm(*this), errcode));
}
Example #23
int main(int argc, char* argv[]) {

    boost::mpi::environment  boostEnv(argc, argv);
    boost::mpi::communicator boostWorld;
    boost::timer::auto_cpu_timer boostTimer;
    REAL time0 = MPI_Wtime();

    if (boostWorld.rank() == 0) {
        if (argc != 2) {
            std::cout << "please specify data file in the form: paraEllip3d input.txt" << std::endl;
            boostWorld.abort(-1); // abort all ranks; a bare return on rank 0 alone would deadlock the others at the broadcast below
        }

        dem::debugInf.open("debugInf");
        if(!dem::debugInf) {
            std::cout << "stream error: main.cpp debugInf" << std::endl;
            exit(-1);
        }
        dem::debugInf.setf(std::ios::scientific, std::ios::floatfield);

        dem::Parameter::getSingleton().readIn(argv[1]);
        dem::Parameter::getSingleton().writeOut();
        int mpiProcX = static_cast<int> (dem::Parameter::getSingleton().parameter["mpiProcX"]);
        int mpiProcY = static_cast<int> (dem::Parameter::getSingleton().parameter["mpiProcY"]);
        int mpiProcZ = static_cast<int> (dem::Parameter::getSingleton().parameter["mpiProcZ"]);
        if (mpiProcX * mpiProcY * mpiProcZ != boostWorld.size() ) {
            std::cout << "number of MPI processes does not match grids in data file!" << std::endl;
            boostWorld.abort(-1); // again, abort all ranks rather than returning from rank 0 only
        }
    }
    broadcast(boostWorld, dem::Parameter::getSingleton(), 0); // broadcast from root process 0

    dem::Assembly assemb;
    assemb.setCommunicator(boostWorld);

    // parallel IO for overlap info
    MPI_File_open(MPI_Comm(boostWorld), "overlapInf", MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &dem::overlapInf);
    if(boostWorld.rank() == 0 && !dem::overlapInf) {
        std::cout << "stream error: main.cpp overlapInf" << std::endl;
        exit(-1);
    }

    int simuType = static_cast<int> (dem::Parameter::getSingleton().parameter["simuType"]);
    switch (simuType) {
    case 001: // proceed from preset state
        assemb.proceedFromPreset();
        break;
    case 002: // tune mass-percentage from number-percentage on size distribution curve by trial and error
        assemb.tuneMassPercent();
        break;
    case 003: // trim particles
        assemb.trimOnly();
        break;
    case 004: // remove particles
        assemb.removeBySphere();
        break;
    case 005: // calculate mass percentage
        assemb.calcMassPercent();
        break;
    case 101: // deposit spatially scattered particles into a rigid container
        assemb.depositIntoContainer();
        break;
    case 102: // resume deposition using specified data file of particles and boundaries
        assemb.resumeDepositIntoContainer();
        break;
    case 201: // isotropic type 1 - create an initial state with low confining pressure
        assemb.isotropic();
        break;
    case 202: // isotropic type 2 - increase confining pressure from sigmaInit to sigmaEnd
        assemb.isotropic();
        break;
    case 203: // isotropic type 3 - conduct loading-unloading-reloading path
        assemb.isotropic();
        break;
    case 301: // odometer type 1 - increase loading pressure
        assemb.odometer();
        break;
    case 302: // odometer type 2 - loading-unloading-reloading
        assemb.odometer();
        break;
    case 401: // triaxial type 1 - constant confining pressure
        assemb.triaxial();
        break;
    case 402: // triaxial type 2 - loading-unloading-reloading
        assemb.triaxial();
        break;
    case 411: // plane strain type 1 - in x direction
        assemb.planeStrain();
        break;
    case 412: // plane strain type 2 - loading-unloading-reloading
        assemb.planeStrain();
        break;
    case 501: // true triaxial 1 - create confining stress state
        assemb.trueTriaxial();
        break;
    case 502: // true triaxial 2 - increase stress in one direction
        assemb.trueTriaxial();
        break;
    case 601: // expand particles inside a virtual cavity and see what occurs
        assemb.expandCavityParticle();
        break;
    case 602: // resume expanding particles inside a virtual cavity and see what occurs
        assemb.resumeExpandCavityParticle();
        break;
    case 701: // couple with gas flow, bottom "left" part, R-H conditions
        assemb.coupleWithGas();
        break;
    case 702: // couple with gas flow, bottom "left" part
        assemb.coupleWithGas();
        break;
    case 703: // couple with gas flow, rectangular "left" part
        assemb.coupleWithGas();
        break;
    case 704: // couple with gas flow, spherical "left" part
        assemb.coupleWithGas();
        break;
    case 705: // couple with gas flow, rectangular "left" part with a zone below
        assemb.coupleWithGas();
        break;
    }

    if (boostWorld.rank() == 0) {
        dem::debugInf << std::endl << "MPI_Wtime: " << MPI_Wtime() - time0 << " seconds" << std::endl;
        dem::debugInf.close();
    }
    MPI_File_close(&dem::overlapInf);
    return 0;
}
Example #24
void communicator::send(int dest, int tag) const
{
  BOOST_MPI_CHECK_RESULT(MPI_Send,
                         (MPI_BOTTOM, 0, MPI_PACKED,
                          dest, tag, MPI_Comm(*this)));
}
Example #25
int communicator::size() const
{
  int size_;
  BOOST_MPI_CHECK_RESULT(MPI_Comm_size, (MPI_Comm(*this), &size_));
  return size_;
}
Example #26
void (communicator::barrier)() const
{
  BOOST_MPI_CHECK_RESULT(MPI_Barrier, (MPI_Comm(*this)));
}
Example #27
int communicator::rank() const
{
  int rank_;
  BOOST_MPI_CHECK_RESULT(MPI_Comm_rank, (MPI_Comm(*this), &rank_));
  return rank_;
}
Example #28
template<>
void
communicator::send<packed_oarchive>(int dest, int tag,
                                    const packed_oarchive& ar) const
{
  detail::packed_archive_send(MPI_Comm(*this), dest, tag, ar);
}
Example #29
template<>
void communicator::send<content>(int dest, int tag, const content& c) const
{
  BOOST_MPI_CHECK_RESULT(MPI_Send,
                         (MPI_BOTTOM, 1, c.get_mpi_datatype(),
                          dest, tag, MPI_Comm(*this)));
}
Example #30
    bool ParMetisPartitioner::partition(map<int, double>& elemWeights,
                                        PartitionMode mode)
    {
      FUNCNAME("ParMetisPartitioner::partition()");

      int mpiSize = mpiComm->Get_size();


      // === Create parmetis mesh ===

      if (parMetisMesh)
        delete parMetisMesh;

      TEST_EXIT_DBG(elementInRank.size() != 0)("Should not happen!\n");

      parMetisMesh = new ParMetisMesh(mesh, mpiComm, elementInRank, mapLocalGlobal);

      int nElements = parMetisMesh->getNumElements();


      // === Create weight array ===

      vector<int> wgts(nElements);
      vector<float> floatWgts(nElements);
      unsigned int floatWgtsPos = 0;
      float maxWgt = 0.0;

      TraverseStack stack;
      ElInfo* elInfo = stack.traverseFirst(mesh, 0, Mesh::CALL_EL_LEVEL);
      while (elInfo)
      {
        int index = elInfo->getElement()->getIndex();

        if (elementInRank[index])
        {
          // get weight
          float wgt = static_cast<float>(elemWeights[index]);
          maxWgt = std::max(wgt, maxWgt);

          // write float weight
          TEST_EXIT_DBG(floatWgtsPos < floatWgts.size())("Should not happen!\n");
          floatWgts[floatWgtsPos++] = wgt;
        }
        elInfo = stack.traverseNext(elInfo);
      }

      TEST_EXIT_DBG(floatWgtsPos == floatWgts.size())("Should not happen!\n");

      float tmp;
      mpiComm->Allreduce(&maxWgt, &tmp, 1, MPI_FLOAT, MPI_MAX);
      maxWgt = tmp;


      // === Create dual graph ===

      ParMetisGraph parMetisGraph(parMetisMesh, mpiComm);


      // === Partitioning of dual graph ===

      int wgtflag = 2; // weights at vertices only!
      int numflag = 0; // c numbering style!
      int ncon = 1; // one weight at each vertex!
      int nparts = mpiSize; // number of partitions

      vector<double> tpwgts(mpiSize);
      double ubvec = 1.05;
      int options[4] = {0, 0, 15, PARMETIS_PSR_COUPLED}; // default options
      int edgecut = -1;
      vector<int> part(nElements);

      // set tpwgts
      for (int i = 0; i < mpiSize; i++)
        tpwgts[i] = 1.0 / static_cast<double>(nparts);

      //     float scale = 10000.0 / maxWgt;
      for (int i = 0; i < nElements; i++)
        wgts[i] = static_cast<int>(floatWgts[i]);
      //      wgts[i] = static_cast<int>(floatWgts[i] * scale);


      // === Start ParMETIS. ===

      MPI_Comm tmpComm = MPI_Comm(*mpiComm);

      switch (mode)
      {
      case INITIAL:
        ParMETIS_V3_PartKway(parMetisMesh->getElementDist(),
                             parMetisGraph.getXAdj(),
                             parMetisGraph.getAdjncy(),
                             &(wgts[0]),
                             NULL,
                             &wgtflag,
                             &numflag,
                             &ncon,
                             &nparts,
                             &(tpwgts[0]),
                             &ubvec,
                             options,
                             &edgecut,
                             &(part[0]),
                             &tmpComm);
        break;
      case ADAPTIVE_REPART:
      {
        vector<int> vsize(nElements);
        for (int i = 0; i < nElements; i++)
          vsize[i] = static_cast<int>(floatWgts[i]);

        ParMETIS_V3_AdaptiveRepart(parMetisMesh->getElementDist(),
                                   parMetisGraph.getXAdj(),
                                   parMetisGraph.getAdjncy(),
                                   &(wgts[0]),
                                   NULL,
                                   &(vsize[0]),
                                   &wgtflag,
                                   &numflag,
                                   &ncon,
                                   &nparts,
                                   &(tpwgts[0]),
                                   &ubvec,
                                   &itr,
                                   options,
                                   &edgecut,
                                   &(part[0]),
                                   &tmpComm);
      }
      break;
      case REFINE_PART:
        ParMETIS_V3_RefineKway(parMetisMesh->getElementDist(),
                               parMetisGraph.getXAdj(),
                               parMetisGraph.getAdjncy(),
                               &(wgts[0]),
                               NULL,
                               &wgtflag,
                               &numflag,
                               &ncon,
                               &nparts,
                               &(tpwgts[0]),
                               &ubvec,
                               options,
                               &edgecut,
                               &(part[0]),
                               &tmpComm);

        break;
      default:
        ERROR_EXIT("unknown partitioning mode\n");
      }


      // === Distribute new partition data. ===

      return distributePartitioning(&(part[0]));
    }