Example #1
0
template<class T>
int Allgatherv(MPI_Comm comm,
               std::vector<T>& sendbuf,
               std::vector<int>& recvLengths,
               std::vector<T>& recvbuf)
{
#ifdef FEI_SER
  //If we're in serial mode, just copy sendbuf to recvbuf and return.

  recvbuf = sendbuf;
  recvLengths.resize(1);
  recvLengths[0] = sendbuf.size();
#else
  int numProcs = 1;
  MPI_Comm_size(comm, &numProcs);

  try {

  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  std::vector<int> tmpInt(numProcs, 0);

  int len = sendbuf.size();
  int* tmpBuf = &tmpInt[0];

  recvLengths.resize(numProcs);
  int* recvLenPtr = &recvLengths[0];

  CHK_MPI( MPI_Allgather(&len, 1, MPI_INT, recvLenPtr, 1, MPI_INT, comm) );

  int displ = 0;
  for(int i=0; i<numProcs; i++) {
    tmpBuf[i] = displ;
    displ += recvLenPtr[i];
  }

  if (displ == 0) {
    recvbuf.resize(0);
    return(0);
  }

  recvbuf.resize(displ);

  T* sendbufPtr = sendbuf.size()>0 ? &sendbuf[0] : NULL;
  
  CHK_MPI( MPI_Allgatherv(sendbufPtr, len, mpi_dtype,
			&recvbuf[0], &recvLengths[0], tmpBuf,
			mpi_dtype, comm) );

  }
  catch(std::runtime_error& exc) {
    fei::console_out() << exc.what() << FEI_ENDL;
    return(-1);
  }
#endif

  return(0);
}
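A minimal usage sketch (hypothetical, not part of the library): each rank contributes a different number of ids and receives the concatenation from every rank. It assumes the routine is visible as fei::Allgatherv, as the other fei:: calls in these examples suggest, and that MPI has already been initialized.

void example_allgatherv(MPI_Comm comm)
{
  int me = fei::localProc(comm);
  std::vector<int> myIds(me+1, me);   //rank r contributes r+1 copies of r
  std::vector<int> lengths, allIds;
  if (fei::Allgatherv(comm, myIds, lengths, allIds) != 0) {
    fei::console_out() << "Allgatherv failed" << FEI_ENDL;
  }
  //lengths[p] is rank p's contribution size; allIds is the concatenation,
  //ordered by rank.
}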
Example #2
0
//------------------------------------------------------------------------
int mirrorProcs(MPI_Comm comm, std::vector<int>& toProcs, std::vector<int>& fromProcs)
{
  fromProcs.resize(0);
#ifdef FEI_SER
  fromProcs.push_back(0);
  return(0);
#else
  int num_procs = fei::numProcs(comm);
  std::vector<int> tmpIntData(num_procs*3, 0);

  int* buf = &tmpIntData[0];
  int* recvbuf = buf+num_procs;

  for(unsigned i=0; i<toProcs.size(); ++i) {
    buf[toProcs[i]] = 1;
  }

  for(int ii=2*num_procs; ii<3*num_procs; ++ii) {
    buf[ii] = 1;
  }

  CHK_MPI( MPI_Reduce_scatter(buf, &(buf[num_procs]), &(buf[2*num_procs]),
                              MPI_INT, MPI_SUM, comm) );

  int numRecvProcs = buf[num_procs];

  int tag = 11116;
  std::vector<MPI_Request> mpiReqs(numRecvProcs);

  int offset = 0;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    CHK_MPI( MPI_Irecv(&(recvbuf[ii]), 1, MPI_INT, MPI_ANY_SOURCE, tag,
                       comm, &(mpiReqs[offset++])) );
  }

  for(unsigned i=0; i<toProcs.size(); ++i) {
    CHK_MPI( MPI_Send(&(toProcs[i]), 1, MPI_INT, toProcs[i], tag, comm) );
  }

  MPI_Status status;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
    fromProcs.push_back(status.MPI_SOURCE);
  }

  std::sort(fromProcs.begin(), fromProcs.end());

  return(0);
#endif
}
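A sketch of how the mirror operation might be called (hypothetical): each rank names the procs it will send to and learns which procs will send to it. The ring pattern and the fei:: qualification are assumptions for illustration.

void example_mirrorProcs(MPI_Comm comm)
{
  int me = fei::localProc(comm);
  int np = fei::numProcs(comm);
  std::vector<int> toProcs(1, (me+1)%np);   //send to the next rank in a ring
  std::vector<int> fromProcs;
  if (fei::mirrorProcs(comm, toProcs, fromProcs) != 0) return;
  //fromProcs now holds (me+np-1)%np, the rank whose toProcs names this rank.
}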
Example #3
0
//------------------------------------------------------------------------
int exchangeIntData(MPI_Comm comm,
                    const std::vector<int>& sendProcs,
                    std::vector<int>& sendData,
                    const std::vector<int>& recvProcs,
                    std::vector<int>& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  recvData.resize(recvProcs.size());
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  int tag = 11114;
  MPI_Datatype mpi_dtype = MPI_INT;

  //launch Irecv's for recvData:

  int localProc = fei::localProc(comm);
  int numRecvProcs = recvProcs.size();
  int req_offset = 0;
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) {--numRecvProcs; continue; }

    CHK_MPI( MPI_Irecv(&(recvData[i]), 1, mpi_dtype, recvProcs[i], tag,
                       comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    CHK_MPI( MPI_Send(&(sendData[i]), 1, mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecv's:

  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}
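A sketch pairing exchangeIntData with mirrorProcs (hypothetical): one int travels to each send-proc and one arrives from each recv-proc, e.g. to trade message lengths before a larger exchange. The ring pattern and fei:: qualification are assumptions; run with two or more ranks for a real transfer.

void example_exchangeIntData(MPI_Comm comm)
{
  int me = fei::localProc(comm), np = fei::numProcs(comm);
  std::vector<int> sendProcs(1, (me+1)%np);      //ring neighbor
  std::vector<int> sendData(1, 42+me);           //one int per send-proc
  std::vector<int> recvProcs;
  fei::mirrorProcs(comm, sendProcs, recvProcs);  //who will send to me?
  std::vector<int> recvData;                     //resized inside the call
  fei::exchangeIntData(comm, sendProcs, sendData, recvProcs, recvData);
  //with np > 1, recvData[0] == 42 + (me+np-1)%np
}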
Example #4
0
template<class T>
int Bcast(MPI_Comm comm, std::vector<T>& sendbuf, int sourceProc)
{
#ifndef FEI_SER
  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  CHK_MPI(MPI_Bcast(&sendbuf[0], sendbuf.size(), mpi_dtype,
                    sourceProc, comm) );
#endif
  return(0);
}
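The broadcast overwrites sendbuf in place on every rank, and the buffer must already have the same length everywhere since no size is communicated. A hypothetical sketch, with the fei:: qualification assumed:

void example_bcast(MPI_Comm comm)
{
  std::vector<double> buf(10, 0.0);          //same length on every rank
  if (fei::localProc(comm) == 0) {
    for(unsigned i=0; i<buf.size(); ++i) buf[i] = 1.0*i;
  }
  fei::Bcast(comm, buf, 0);                  //now all ranks hold rank 0's data
}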
Example #5
0
template<class T>
int GlobalSum(MPI_Comm comm, T local, T& global)
{
#ifdef FEI_SER
  global = local;
#else
  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  CHK_MPI( MPI_Allreduce(&local, &global, 1, mpi_dtype, MPI_SUM, comm) );
#endif
  return(0);
}
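A sketch of the scalar reduction (hypothetical): every rank contributes a local count and all ranks receive the sum. The fei:: qualification is assumed.

void example_globalSum(MPI_Comm comm)
{
  int localEqns = 100;                       //e.g. locally owned equations
  int globalEqns = 0;
  fei::GlobalSum(comm, localEqns, globalEqns);
  //globalEqns == 100 * fei::numProcs(comm) on every rank
}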
Example #6
0
template<class T>
int GlobalSum(MPI_Comm comm, std::vector<T>& local, std::vector<T>& global)
{
#ifdef FEI_SER
  global = local;
#else
  global.resize(local.size());

  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  CHK_MPI( MPI_Allreduce(&(local[0]), &(global[0]),
                      local.size(), mpi_dtype, MPI_SUM, comm) );
#endif
  return(0);
}
Example #7
0
//------------------------------------------------------------------------
int Allreduce(MPI_Comm comm, bool localBool, bool& globalBool)
{
#ifndef FEI_SER
  int localInt = localBool ? 1 : 0;
  int globalInt = 0;

  CHK_MPI( MPI_Allreduce(&localInt, &globalInt, 1, MPI_INT, MPI_MAX, comm) );

  globalBool = globalInt==1 ? true : false;
#else
  globalBool = localBool;
#endif

  return(0);
}
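Because the reduction applies MPI_MAX to 0/1 values, the result is a logical OR across ranks: globalBool is true if any rank's localBool was true. A hypothetical sketch, with the fei:: qualification assumed:

void example_allreduce_bool(MPI_Comm comm)
{
  bool localError  = (fei::localProc(comm) == 0);  //pretend rank 0 hit an error
  bool globalError = false;
  fei::Allreduce(comm, localError, globalError);
  //every rank now agrees: globalError == true
}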
Example #8
0
template<class T>
int GlobalMax(MPI_Comm comm, std::vector<T>& local, std::vector<T>& global)
{
#ifdef FEI_SER
  global = local;
#else

  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  try {
    global.resize(local.size());
  }
  catch(std::runtime_error& exc) {
    fei::console_out() << exc.what()<<FEI_ENDL;
    return(-1);
  }

  CHK_MPI( MPI_Allreduce(&(local[0]), &(global[0]),
			 local.size(), mpi_dtype, MPI_MAX, comm) );
#endif

  return(0);
}
Example #9
0
int MatrixReducer::writeToFile(const char* filename,
                               bool matrixMarketFormat)
{
  static char mmbanner[] = "%%MatrixMarket matrix coordinate real general";
  std::vector<int>& localrows = reducer_->getLocalReducedEqns();
  int localNumRows = localrows.size();

  int globalNNZ = 0;
  int localNNZ = 0;

  for(int i=0; i<localNumRows; ++i) {
    int len;
    CHK_ERR( target_->getRowLength(localrows[i], len) );
    localNNZ += len;
  }

  MPI_Comm comm = getMatrixGraph()->getRowSpace()->getCommunicator();

  CHK_MPI( fei::GlobalSum(comm, localNNZ, globalNNZ) );
  int globalNumRows = 0;
  CHK_MPI( fei::GlobalSum(comm, localNumRows, globalNumRows) );

  int globalNumCols = globalNumRows;

  for(int p=0; p<fei::numProcs(comm); ++p) {
    fei::Barrier(comm);
    if (p != fei::localProc(comm)) continue;

    FEI_OFSTREAM* outFile = NULL;
    if (p==0) {
      outFile = new FEI_OFSTREAM(filename, IOS_OUT);
      FEI_OFSTREAM& ofs = *outFile;
      if (matrixMarketFormat) {
        ofs << mmbanner << FEI_ENDL;
        ofs <<globalNumRows<< " " <<globalNumCols<< " " <<globalNNZ<<FEI_ENDL;
      }
      else {
        ofs <<globalNumRows<< " " <<globalNumCols<<FEI_ENDL;
      }
    }
    else outFile = new FEI_OFSTREAM(filename, IOS_APP);

    outFile->setf(IOS_SCIENTIFIC, IOS_FLOATFIELD);
    outFile->precision(13);
    FEI_OFSTREAM& ofs = *outFile;

    int rowLength;
    std::vector<int> work_indices;
    std::vector<double> work_data1D;

    for(int i=0; i<localNumRows; ++i) {
      int row = localrows[i];
      CHK_ERR( target_->getRowLength(row, rowLength) );

      work_indices.resize(rowLength);
      work_data1D.resize(rowLength);

      int* indPtr = &work_indices[0];
      double* coefPtr = &work_data1D[0];

      CHK_ERR( target_->copyOutRow(row, rowLength, coefPtr, indPtr) );

      for(int j=0; j<rowLength; ++j) {
        if (matrixMarketFormat) {
          ofs << row+1 <<" "<<indPtr[j]+1<<" "<<coefPtr[j]<<FEI_ENDL;
        }
        else {
          ofs << row <<" "<<indPtr[j]<<" "<<coefPtr[j]<<FEI_ENDL;
        }
      }
    }

    delete outFile;
  }

  return(0);
}
Example #10
0
//------------------------------------------------------------------------
int mirrorCommPattern(MPI_Comm comm, comm_map* inPattern, comm_map*& outPattern)
{
#ifdef FEI_SER
  (void)inPattern;
  (void)outPattern;
#else
  int localP = localProc(comm);
  int numP  = numProcs(comm);

  if (numP < 2) return(0);

  std::vector<int> buf(numP*2, 0);

  int numInProcs = inPattern->getMap().size();
  std::vector<int> inProcs(numInProcs);
  fei::copyKeysToVector(inPattern->getMap(), inProcs);

  std::vector<int> outProcs;

  int err = mirrorProcs(comm, inProcs, outProcs);
  if (err != 0) ERReturn(-1);

  std::vector<int> recvbuf(outProcs.size(), 0);

  outPattern = new comm_map(0,1);

  MPI_Datatype mpi_ttype = fei::mpiTraits<int>::mpi_type();

  //now receive a length (the contents of buf[i]) from each "out-proc"; that
  //length tells us how much equation data will also be received from that
  //proc.
  std::vector<MPI_Request> mpiReqs(outProcs.size());
  std::vector<MPI_Status> mpiStss(outProcs.size());
  MPI_Request* requests = &mpiReqs[0];
  MPI_Status* statuses = &mpiStss[0];

  int firsttag = 11117;
  int offset = 0;
  int* outProcsPtr = &outProcs[0];
  for(unsigned i=0; i<outProcs.size(); ++i) {
    if (MPI_Irecv(&(recvbuf[i]), 1, MPI_INT, outProcsPtr[i], firsttag,
                  comm, &requests[offset++]) != MPI_SUCCESS) ERReturn(-1);
  }

  comm_map::map_type& in_row_map = inPattern->getMap();
  comm_map::map_type::iterator
    in_iter = in_row_map.begin(),
    in_end  = in_row_map.end();
 
  int* inProcsPtr = &inProcs[0];
  for(int ii=0; in_iter!= in_end; ++in_iter, ++ii) {
    comm_map::row_type* in_row = in_iter->second;
    buf[ii] = in_row->size();
    if (MPI_Send(&(buf[ii]), 1, MPI_INT, inProcsPtr[ii], firsttag,
                 comm) != MPI_SUCCESS) ERReturn(-1);
  }

  int numOutProcs = outProcs.size();

  MPI_Waitall(numOutProcs, requests, statuses);
  std::vector<int> lengths(numOutProcs);
  int totalRecvLen = 0;
  //keep lengths[ii] aligned with outProcs[ii] (a zero simply means that
  //out-proc sends nothing), since both are indexed together below.
  for(int ii=0; ii<numOutProcs; ++ii) {
    lengths[ii] = recvbuf[ii];
    totalRecvLen += recvbuf[ii];
  }

  //now we need to create the space into which we'll receive the
  //lists that other procs send to us.
  std::vector<int> recvData(totalRecvLen, 999999);

  int tag2 = 11118;
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    CHK_MPI(MPI_Irecv(&(recvData[offset]), lengths[ii], mpi_ttype,
                      outProcs[ii], tag2, comm, &requests[ii]) );
    offset += lengths[ii];
  }

  std::vector<int> sendList;

  in_iter = in_row_map.begin();

  for(int ii=0; in_iter != in_end; ++in_iter,++ii) {
    if (inProcs[ii] == localP) {
      continue;
    }
    sendList.resize(in_iter->second->size());
    fei::copySetToArray(*(in_iter->second), sendList.size(), &sendList[0]);

    CHK_MPI(MPI_Send(&sendList[0], sendList.size(), mpi_ttype,
                     inProcs[ii], tag2, comm) );
  }

  //our final communication operation is to complete the Irecvs we started above.
  for(int ii=0; ii<numOutProcs; ++ii) {
    MPI_Wait(&requests[ii], &statuses[ii]);
  }

  //now we've completed all the communication, so we're ready to put the data
  //we received into the outPattern object.
  offset = 0;
  for(int ii=0; ii<numOutProcs; ii++) {
    outPattern->addIndices(outProcs[ii], lengths[ii],
                           &(recvData[offset]));
    offset += lengths[ii];
  }

#endif
  return(0);
}
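A sketch of calling the mirror operation (hypothetical), using only the comm_map operations that already appear above (the (0,1) constructor and addIndices); the ring pattern, the example_* name, and the fei:: qualification are assumptions. With fewer than two procs the call returns without allocating outPattern.

void example_mirrorCommPattern(MPI_Comm comm)
{
  int me = fei::localProc(comm), np = fei::numProcs(comm);

  comm_map* sendPattern = new comm_map(0,1);
  int ids[2] = {10*me, 10*me+1};
  sendPattern->addIndices((me+1)%np, 2, ids);  //send two ids to the ring neighbor

  comm_map* recvPattern = NULL;
  if (fei::mirrorCommPattern(comm, sendPattern, recvPattern) != 0) {
    fei::console_out() << "mirrorCommPattern failed" << FEI_ENDL;
  }
  //with np > 1, recvPattern maps (me+np-1)%np to the ids that rank sends here.
  delete sendPattern;
  delete recvPattern;
}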
Example #11
0
template<class T>
int exchangeData(MPI_Comm comm,
                 std::vector<int>& sendProcs,
                 std::vector<std::vector<T>*>& sendData,
                 std::vector<int>& recvProcs,
                 bool recvLengthsKnownOnEntry,
                 std::vector<std::vector<T>*>& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  int tag = 11115;
  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();
  std::vector<MPI_Request> mpiReqs;

  try {
  mpiReqs.resize(recvProcs.size());

  if (!recvLengthsKnownOnEntry) {
    std::vector<int> tmpIntData(sendData.size());
    std::vector<int> recvLens(recvProcs.size());
    for(unsigned i=0; i<sendData.size(); ++i) {
      tmpIntData[i] = (int)sendData[i]->size();
    }

    if (exchangeIntData(comm, sendProcs, tmpIntData, recvProcs, recvLens) != 0) {
      return(-1);
    }

    for(unsigned i=0; i<recvLens.size(); ++i) {
      recvData[i]->resize(recvLens[i]);
    }
  }
  }
  catch(std::runtime_error& exc) {
    fei::console_out() << exc.what() << FEI_ENDL;
    return(-1);
  }

  //launch Irecv's for recvData:

  size_t numRecvProcs = recvProcs.size();
  int req_offset = 0;
  int localProc = fei::localProc(comm);
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) {--numRecvProcs; continue;}

    size_t len = recvData[i]->size();
    std::vector<T>& rbuf = *recvData[i];

    CHK_MPI( MPI_Irecv(&rbuf[0], (int)len, mpi_dtype,
                       recvProcs[i], tag, comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    std::vector<T>& sbuf = *sendData[i];
    CHK_MPI( MPI_Send(&sbuf[0], (int)sbuf.size(), mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecv's; exactly numRecvProcs requests were posted above
  //(self-recvs were skipped there), so wait for that many completions:
  for(size_t i=0; i<numRecvProcs; ++i) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany((int)numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}
Example #12
0
template<class T>
int exchangeData(MPI_Comm comm,
                 std::vector<int>& sendProcs,
                 std::vector<std::vector<T> >& sendData,
                 std::vector<int>& recvProcs,
                 bool recvDataLengthsKnownOnEntry,
                 std::vector<std::vector<T> >& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  int tag = 11119;
  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  if (!recvDataLengthsKnownOnEntry) {
    std::vector<int> tmpIntData(sendData.size());
    std::vector<int> recvLengths(recvProcs.size());
    for(unsigned i=0; i<sendData.size(); ++i) {
      tmpIntData[i] = sendData[i].size();
    }

    if ( exchangeIntData(comm, sendProcs, tmpIntData, recvProcs, recvLengths) != 0) {
      return(-1);
    }
    for(unsigned i=0; i<recvProcs.size(); ++i) {
      recvData[i].resize(recvLengths[i]);
    }
  }

  //launch Irecv's for recvData:

  size_t numRecvProcs = recvProcs.size();
  int req_offset = 0;
  int localProc = fei::localProc(comm);
  for(size_t i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) {--numRecvProcs; continue; }

    int len = recvData[i].size();
    std::vector<T>& recv_vec = recvData[i];
    T* recv_buf = len > 0 ? &recv_vec[0] : NULL;

    CHK_MPI( MPI_Irecv(recv_buf, len, mpi_dtype, recvProcs[i],
                       tag, comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(size_t i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    std::vector<T>& send_buf = sendData[i];
    CHK_MPI( MPI_Send(&send_buf[0], sendData[i].size(), mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecvs; exactly numRecvProcs requests were posted above
  //(self-recvs were skipped there), so wait for that many completions:
  for(size_t i=0; i<numRecvProcs; ++i) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany((int)numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}
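A sketch of the vector-of-vectors overload with lengths not known in advance (hypothetical): recvData only needs one entry per recv-proc on entry, and the payload sizes are traded via exchangeIntData internally. The ring pattern and fei:: qualification are assumptions; run with two or more ranks for a real transfer.

void example_exchangeData(MPI_Comm comm)
{
  int me = fei::localProc(comm), np = fei::numProcs(comm);
  std::vector<int> sendProcs(1, (me+1)%np);
  std::vector<std::vector<double> > sendData(1, std::vector<double>(me+1, 1.0*me));
  std::vector<int> recvProcs;
  fei::mirrorProcs(comm, sendProcs, recvProcs);
  std::vector<std::vector<double> > recvData(recvProcs.size());

  fei::exchangeData(comm, sendProcs, sendData, recvProcs, false, recvData);
  //with np > 1, recvData[0] holds prev+1 copies of the value prev,
  //where prev = (me+np-1)%np
}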
Example #13
0
template<typename T>
int exchangeCommMapData(MPI_Comm comm,
                        const typename CommMap<T>::Type& sendCommMap,
                        typename CommMap<T>::Type& recvCommMap,
                        bool recvProcsKnownOnEntry = false,
                        bool recvLengthsKnownOnEntry = false)
{
  if (!recvProcsKnownOnEntry) {
    recvCommMap.clear();
  }

#ifndef FEI_SER
  int tag = 11120;
  MPI_Datatype mpi_dtype = fei::mpiTraits<T>::mpi_type();

  std::vector<int> sendProcs;
  fei::copyKeysToVector(sendCommMap, sendProcs);
  std::vector<int> recvProcs;

  if (recvProcsKnownOnEntry) {
    fei::copyKeysToVector(recvCommMap, recvProcs);
  }
  else {
    mirrorProcs(comm, sendProcs, recvProcs);
    for(size_t i=0; i<recvProcs.size(); ++i) {
      addItemsToCommMap<T>(recvProcs[i], 0, NULL, recvCommMap);
    }
  }

  if (!recvLengthsKnownOnEntry) {
    std::vector<int> tmpIntData(sendProcs.size());
    std::vector<int> recvLengths(recvProcs.size());
    
    typename fei::CommMap<T>::Type::const_iterator
      s_iter = sendCommMap.begin(), s_end = sendCommMap.end();

    for(size_t i=0; s_iter != s_end; ++s_iter, ++i) {
      tmpIntData[i] = s_iter->second.size();
    }

    if ( exchangeIntData(comm, sendProcs, tmpIntData, recvProcs, recvLengths) != 0) {
      return(-1);
    }
    for(size_t i=0; i<recvProcs.size(); ++i) {
      std::vector<T>& rdata = recvCommMap[recvProcs[i]];
      rdata.resize(recvLengths[i]);
    }
  }

  //launch Irecv's for recv-data:
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  typename fei::CommMap<T>::Type::iterator
    r_iter = recvCommMap.begin(), r_end = recvCommMap.end();

  size_t req_offset = 0;
  for(; r_iter != r_end; ++r_iter) {
    int rproc = r_iter->first;
    std::vector<T>& recv_vec = r_iter->second;
    int len = recv_vec.size();
    T* recv_buf = len > 0 ? &recv_vec[0] : NULL;

    CHK_MPI( MPI_Irecv(recv_buf, len, mpi_dtype, rproc,
                       tag, comm, &mpiReqs[req_offset++]) );
  }

  //send the send-data:

  typename fei::CommMap<T>::Type::const_iterator
    s_iter = sendCommMap.begin(), s_end = sendCommMap.end();

  for(; s_iter != s_end; ++s_iter) {
    int sproc = s_iter->first;
    const std::vector<T>& send_vec = s_iter->second;
    int len = send_vec.size();
    T* send_buf = len>0 ? const_cast<T*>(&send_vec[0]) : NULL;

    CHK_MPI( MPI_Send(send_buf, len, mpi_dtype, sproc, tag, comm) );
  }

  //complete the Irecvs:
  for(size_t i=0; i<mpiReqs.size(); ++i) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany(mpiReqs.size(), &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}
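Finally, a sketch of the CommMap-based exchange (hypothetical): the send map's keys are destination procs and its values are the items to send, and the receive map is discovered and filled by the call. It relies only on fei::CommMap<T>::Type and addItemsToCommMap as they are used above; the fei:: qualification and the ring pattern are assumptions, and two or more ranks are needed for a meaningful transfer.

void example_exchangeCommMapData(MPI_Comm comm)
{
  int me = fei::localProc(comm), np = fei::numProcs(comm);
  int items[2] = {me, me+100};

  fei::CommMap<int>::Type sendMap, recvMap;
  fei::addItemsToCommMap<int>((me+1)%np, 2, items, sendMap);

  fei::exchangeCommMapData<int>(comm, sendMap, recvMap);
  //with np > 1, recvMap[(me+np-1)%np] holds the two ints sent by that rank.
}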