Example #1
void SparseMsg::setSizes(UInt *sizes) {
  // First, set up send buffers
  UInt totalsize = 0;
  // Find total buffer size; round up to word boundaries
  // for each new buffer
  for (UInt i = 0; i < nsend; i++) {
    totalsize += round_to_dword(sizes[i]);
  }

  // Allocate send buffer.  Delete in case already done
  delete [] sendBuf;

  sendBuf = new UChar[totalsize+1];

  // Set up pointers into buffer
  UInt cur_loc = 0;
  for (UInt i = 0; i < nsend; i++) {
    UInt bsize = round_to_dword(sizes[i]);
    buffer &buf = outBuffers[i];
    buf.beg = outBuffers[i].cur = &sendBuf[cur_loc];
    buf.end = &sendBuf[cur_loc+bsize];
    buf.bsize = bsize;
    buf.msize = sizes[i];
    cur_loc += bsize;
    procToOutBuffer[buf.proc] = &buf;
  }
//std::cout << "last buf end:" << (int) outBuffers[nsend-1].end << ", sendbuf end:" << (int) &sendBuf[cur_loc] << std::endl;

  // Second, send sizes to receive

  // avoid allocating zero (add 1)
  std::vector<MPI_Request> request(num_incoming+1, MPI_REQUEST_NULL);
  std::vector<MPI_Status> status(num_incoming+1);

  // Post receives
  std::vector<int> inSizes(num_incoming+1);
  UInt tag0 = 0;

  UInt enD = num_incoming - (sendself ? 1 : 0);

  for (UInt i = 0; i < enD; i++) {
    MPI_Irecv(&inSizes[i], 1, MPI_INT, MPI_ANY_SOURCE, tag0, comm, &request[i]);
  }

  // Sends
  for (UInt i = 0; i < nsend; i++) {
    if (!sendself || i != self_idx) {
      buffer &buf = outBuffers[i];
      MPI_Send(&(buf.msize), 1, MPI_INT, buf.proc, tag0, comm);
    }
  }

  int ret;
  if (enD > 0) {
    ret = MPI_Waitall(enD, &request[0], &status[0]);
    if (ret != MPI_SUCCESS)
      throw("Bad MPI_Waitall in setSizes");
  }
  // Now set up true size
  if (sendself) inSizes[enD] = outBuffers[self_idx].msize;
 
  // Third, set up receive sizes
  if (num_incoming > 0) {
    inBuffers.resize(num_incoming);
    inProcs.resize(num_incoming);
  } else {
    // We need these to have zero sizes since users iterate through them
    inBuffers.clear();
    inProcs.clear();
  }
  
  totalsize = 0;
  for (UInt i = 0; i < enD; i++) {
    totalsize += round_to_dword(inSizes[i]);
//std::cout << "P:" << rank << ", from " << status[i].MPI_SOURCE << ", size:" << inSizes[i] << std::endl;
  }
  delete [] recvBuf;

  recvBuf = new UChar[totalsize+1]; // avoid zero allocation

  
  cur_loc = 0;

  // Set up pointers into the receive buffer for each incoming message
  // (the self message, if any, is handled separately below).
  for (UInt i = 0; i < enD; i++) {
    UInt bsize = round_to_dword(inSizes[i]);
    inBuffers[i].beg = inBuffers[i].cur = &recvBuf[cur_loc];
    inBuffers[i].end = &recvBuf[cur_loc+bsize];
    inBuffers[i].bsize = bsize;
    inBuffers[i].msize = inSizes[i];
    inBuffers[i].proc = status[i].MPI_SOURCE;
    procToInBuffer[status[i].MPI_SOURCE] = &inBuffers[i];
    inProcs[i] = status[i].MPI_SOURCE;
    cur_loc += bsize;
  }
  // Buffer enD is the special receive buffer from this proc.
  // Point it straight to the send buffer for this proc; filling the
  // send buffer for this rank then fills the receive buffer as well.
  if (sendself) {
    UInt bsize = round_to_dword(inSizes[enD]);
    inBuffers[enD].beg = inBuffers[enD].cur = outBuffers[self_idx].beg;
    inBuffers[enD].end = outBuffers[self_idx].end;
    inBuffers[enD].bsize = bsize;
    inBuffers[enD].msize = inSizes[enD];
    inBuffers[enD].proc = rank;
    procToInBuffer[rank] = &inBuffers[enD];
    inProcs[enD] = rank;
  }

  // Sort inProcs to be conformal with CommRel
  if (num_incoming > 0) std::sort(inProcs.begin(), inProcs.end());
//std::cout << "last buf end:" << (int) inBuffers[num_incoming-1].end << ", sendbuf end:" << (int) &recvBuf[cur_loc] << std::endl;

}
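
The core of this routine is a size-exchange handshake: post a wildcard MPI_Irecv for every expected incoming message, send each destination the size it should expect, wait for all receives, and read MPI_SOURCE from the statuses to learn who sent what. Below is a minimal, self-contained sketch of that pattern in plain MPI, independent of the SparseMsg members; the names exchangeSizes, destRanks, sendSizes and numIncoming are illustrative placeholders, not part of the class.

#include <mpi.h>

#include <cstddef>
#include <vector>

// Exchange message sizes: tell every destination how many bytes it will get,
// and learn the size and source of every message this rank will receive.
void exchangeSizes(MPI_Comm comm,
                   const std::vector<int> &destRanks,  // ranks we send to
                   const std::vector<int> &sendSizes,  // bytes per destination
                   int numIncoming,                    // messages we expect
                   std::vector<int> &srcRanks,         // out: sender of each message
                   std::vector<int> &recvSizes) {      // out: its size in bytes
  const int tag = 0;
  recvSizes.assign(numIncoming, 0);
  srcRanks.assign(numIncoming, -1);
  std::vector<MPI_Request> req(numIncoming, MPI_REQUEST_NULL);
  std::vector<MPI_Status> stat(numIncoming);

  // Post one wildcard receive per expected incoming size.
  for (int i = 0; i < numIncoming; ++i)
    MPI_Irecv(&recvSizes[i], 1, MPI_INT, MPI_ANY_SOURCE, tag, comm, &req[i]);

  // Send each destination the size it should expect (one int per destination,
  // mirroring the MPI_Send loop in the original code).
  for (std::size_t i = 0; i < destRanks.size(); ++i) {
    int size = sendSizes[i];
    MPI_Send(&size, 1, MPI_INT, destRanks[i], tag, comm);
  }

  // Wait for all sizes; MPI_SOURCE on each status identifies the sender.
  MPI_Waitall(numIncoming, req.data(), stat.data());
  for (int i = 0; i < numIncoming; ++i)
    srcRanks[i] = stat[i].MPI_SOURCE;
}

The class version layers two details on top of this: every buffer size is rounded to a dword boundary with round_to_dword, and the send/receive buffers are allocated as totalsize+1 bytes so a zero-size exchange never requests a zero-length allocation.
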
Example #2
//BOPI
// !IROUTINE:  setSizes - Establish the message sizes
//
// !INTERFACE:
void SparseMsgVM::setSizes(
//
// !RETURN VALUE:

//
// !ARGUMENTS:
  UInt *sizes)
//
// !DESCRIPTION:
//     Establish and share the size of a message between pet(i,j).  Also
//     sets aside memory for these messages in the form of send/receive
//     buffers.
//
//EOPI
// !REQUIREMENTS:  
//-----------------------------------------------------------------------------
{
  // Error checking
  if (obj_state != PATTERN) throw Ex() << "SparseMsgVM illegal transition from state:" << obj_state << " to SIZE";
  // First, set up send buffers
  UInt totalsize = 0;
  // Find total buffer size; round up to word boundaries
  // for each new buffer
  for (UInt i = 0; i < nsend; i++) {
    totalsize += round_to_dword(sizes[i]);
  }

  sendBuf = new UChar[totalsize+1];

  // Set up pointers into buffer
  UInt cur_loc = 0;
  for (UInt i = 0; i < nsend; i++) {
    UInt bsize = round_to_dword(sizes[i]);
    buffer &buf = outBuffers[i];
    buf.beg = outBuffers[i].cur = &sendBuf[cur_loc];
    buf.end = &sendBuf[cur_loc+bsize];
    buf.bsize = bsize;
    buf.msize = sizes[i];
    cur_loc += bsize;
    petToOutBuffer[buf.pet] = &buf;
  }
//std::cout << "last buf end:" << (int) outBuffers[nsend-1].end << ", sendbuf end:" << (int) &sendBuf[cur_loc] << std::endl;

  // Second, send sizes to receive

  // avoid allocating zero (add 1)
  //std::vector<MPI_Request> request(num_incoming+1, NULL);
  //std::vector<MPI_Status> status(num_incoming+1);
  std::vector<ESMCI::VMK::commhandle*> commhp(num_incoming+1);
  for (UInt i = 0; i < num_incoming; i++)
    commhp[i] = new ESMCI::VMK::commhandle;
  

  // Post receives
  std::vector<int> inSizes(num_incoming+1);
  UInt tag0 = 0;

  UInt enD = num_incoming - (sendself ? 1 : 0);

  for (UInt i = 0; i < enD; i++) {
    //MPI_Irecv(&inSizes[i], 1, MPI_INT, MPI_ANY_SOURCE, tag0, comm, &request[i]);
    vm.recv(static_cast<void*>(&inSizes[i]), sizeof(int), VM_ANY_SRC, &commhp[i], tag0);
  }

  // Sends
  for (UInt i = 0; i < nsend; i++) {
    if (!sendself || i != self_idx) {
      buffer &buf = outBuffers[i];
      //MPI_Send(&(buf.msize), 1, MPI_INT, buf.pet, tag0, comm);
      vm.send(static_cast<void*>(&(buf.msize)), sizeof(int), buf.pet, tag0);
    }
  }

  std::vector<ESMCI::VMK::status> stat(enD);
  if (num_incoming > 0) {
    for (UInt w = 0; w < enD; w++) {
      vm.commwait(&commhp[w], &stat[w]);
    }
    //ret = MPI_Waitall(enD, &request[0], &status[0]);
    //if (ret != MPI_SUCCESS) 
     // throw("Bad MPI_WaitAll in setSizes");
  }
  // Now set up true size
  if (sendself) inSizes[enD] = outBuffers[self_idx].msize;
 
  // Third, set up receive sizes
  if (num_incoming > 0) {
    inBuffers.resize(num_incoming);
    inPets.resize(num_incoming);
  } else {
    // We need these to have zero sizes since users iterate through them
    inBuffers.clear();
    inPets.clear();
  }
  
  totalsize = 0;
  for (UInt i = 0; i < enD; i++) {
    totalsize += round_to_dword(inSizes[i]);
//std::cout << "P:" << rank << ", from " << status[i].MPI_SOURCE << ", size:" << inSizes[i] << std::endl;
  }

  recvBuf = new UChar[totalsize+1]; // avoid zero allocation

  
  cur_loc = 0;

  for (UInt i = 0; i < enD; i++) {
    UInt bsize = round_to_dword(inSizes[i]);
    inBuffers[i].beg = inBuffers[i].cur = &recvBuf[cur_loc];
    inBuffers[i].end = &recvBuf[cur_loc+bsize];
    inBuffers[i].bsize = bsize;
    inBuffers[i].msize = inSizes[i];
    inBuffers[i].pet = stat[i].srcPet;
    petToInBuffer[stat[i].srcPet] = &inBuffers[i];
    inPets[i] = stat[i].srcPet;
    cur_loc += bsize;
  }

  // Buffer enD is the special receive buffer from this proc.
  // Point it straight to the send buffer for this proc.  In this
  // manner, when we copy into the send buffer, the communication is
  // done; the receive buffer has de facto been filled.
  if (sendself) {
    UInt bsize = round_to_dword(inSizes[enD]);
    inBuffers[enD].beg = inBuffers[enD].cur = outBuffers[self_idx].beg;
    inBuffers[enD].end = outBuffers[self_idx].end;
    inBuffers[enD].bsize = bsize;
    inBuffers[enD].msize = inSizes[enD];
    inBuffers[enD].pet = rank;
    petToInBuffer[rank] = &inBuffers[enD];
    inPets[enD] = rank;
  }

  // Sort inPets so unpacking can proceed in an expected manner.
  if (num_incoming > 0) std::sort(inPets.begin(), inPets.end());

  for (UInt i = 0; i < num_incoming; i++) delete commhp[i];

  obj_state = SIZE;
}
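
Both versions use the same trick for the message a rank sends to itself: instead of allocating a separate receive region, the self entry in inBuffers aliases the corresponding outBuffers slot, so packing the self-destined data is the delivery. A stripped-down illustration of that aliasing follows, using an illustrative Buffer struct rather than the real SparseMsg buffer type:

#include <cassert>
#include <cstring>
#include <vector>

// Illustrative buffer: start, pack cursor, and one-past-the-end pointers.
struct Buffer {
  unsigned char *beg = nullptr;
  unsigned char *cur = nullptr;
  unsigned char *end = nullptr;
};

int main() {
  std::vector<unsigned char> sendStorage(64);

  // Outgoing buffer for the message this rank addresses to itself.
  Buffer out;
  out.beg = out.cur = sendStorage.data();
  out.end = sendStorage.data() + sendStorage.size();

  // The self receive buffer owns no memory; it simply aliases the send buffer.
  Buffer selfIn;
  selfIn.beg = selfIn.cur = out.beg;
  selfIn.end = out.end;

  // Packing into the send buffer "delivers" the message: no copy, no MPI call.
  const char msg[] = "hello, self";
  std::memcpy(out.cur, msg, sizeof(msg));
  out.cur += sizeof(msg);

  // The receive side already sees the data through its aliased pointers.
  assert(std::memcmp(selfIn.beg, msg, sizeof(msg)) == 0);
  return 0;
}

Because the unpack side only ever walks beg/cur/end, it never needs to know whether the bytes arrived over MPI (or the VMK layer) or were simply written in place by the sender on the same rank.
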