Example #1
   /*
   * Broadcast a buffer.
   */
   void Buffer::bcast(MPI::Intracomm& comm, int source)
   {
      int comm_size = comm.Get_size();
      int myRank = comm.Get_rank();
      if (source > comm_size - 1 || source < 0) {
         UTIL_THROW("Source rank out of bounds");
      }

   int sendBytes;
   if (myRank == source) {
      // Source rank: broadcast the number of bytes in the send buffer,
      // then the buffer contents, and reset the send buffer.
      sendBytes = sendPtr_ - sendBufferBegin_;
      comm.Bcast(&sendBytes, 1, MPI::INT, source);
      comm.Bcast(sendBufferBegin_, sendBytes, MPI::CHAR, source);
      sendPtr_ = sendBufferBegin_;
      sendType_ = NONE;
   } else {
      // Other ranks: receive the byte count, then the data, into the receive buffer.
      comm.Bcast(&sendBytes, 1, MPI::INT, source);
      comm.Bcast(recvBufferBegin_, sendBytes, MPI::CHAR, source);
      recvPtr_ = recvBufferBegin_;
      recvType_ = NONE;
   }
   // Update the running maximum payload size handled by this buffer.
   if (sendBytes > maxSendLocal_) {
      maxSendLocal_ = sendBytes;
   }

   }
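The pattern above is the common two-phase broadcast: the source rank first broadcasts the payload length so receivers know how many bytes follow, then broadcasts the payload itself. Below is a minimal self-contained sketch of the same pattern, written against the MPI C API (MPI_Bcast) that the deprecated MPI::Intracomm::Bcast wrapper maps onto; the function and buffer names are illustrative and not part of the Buffer class above.

// Minimal sketch of the length-then-payload broadcast pattern.
// Assumes MPI_Init has already been called; names are illustrative.
#include <mpi.h>
#include <vector>

void bcastBytes(std::vector<char>& buffer, int source, MPI_Comm comm)
{
   int rank;
   MPI_Comm_rank(comm, &rank);

   // Phase 1: broadcast the byte count so every rank can size its buffer.
   int nBytes = (rank == source) ? static_cast<int>(buffer.size()) : 0;
   MPI_Bcast(&nBytes, 1, MPI_INT, source, comm);

   // Phase 2: receivers allocate, then all ranks take part in the payload broadcast.
   if (rank != source) {
      buffer.resize(nBytes);
   }
   MPI_Bcast(buffer.data(), nBytes, MPI_CHAR, source, comm);
}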
Example #2
/// Broadcast octree information to the other ranks.
void BCMOctree::broadcast(MPI::Intracomm& comm)
{
  assert(comm.Get_rank() == 0);
  rootGrid->broadcast(comm);

  // Broadcast the leaf-node count and the ordering type in one message.
  int numLeafNode = leafNodeArray.size();
  int ibuf[2];
  ibuf[0] = numLeafNode;
  ibuf[1] = ordering;
  comm.Bcast(ibuf, 2, MPI::INT, 0);

  // Pack the pedigree of every leaf node into one contiguous byte buffer.
  size_t size = Pedigree::GetSerializeSize();
  unsigned char* buf = new unsigned char[size * numLeafNode];

  size_t ip = 0;
  for (int id = 0; id < rootGrid->getSize(); id++) {
    packPedigrees(rootNodes[id], ip, buf);
  }

  // Broadcast the packed pedigrees as a single byte message.
  comm.Bcast(buf, size*numLeafNode, MPI::BYTE, 0);
  delete[] buf;
}
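Example #2 shows only the sending side: rank 0 packs the pedigrees of its leaf nodes into one byte buffer and issues two broadcasts, first the (leaf count, ordering) pair and then the packed bytes. Every other rank has to post the same two broadcasts to receive the data. The sketch below is a hypothetical receiver-side counterpart written against the MPI C API; the function name, the pedigreeSize parameter, and the unpack placeholder are assumptions, not the actual BCMOctree interface.

// Hypothetical receiver-side counterpart to the broadcast above.
// pedigreeSize stands in for Pedigree::GetSerializeSize(); unpacking is a placeholder.
#include <mpi.h>
#include <vector>

void receiveOctreeBroadcast(MPI_Comm comm, size_t pedigreeSize)
{
  // Matches comm.Bcast(ibuf, 2, MPI::INT, 0) on rank 0.
  int ibuf[2];
  MPI_Bcast(ibuf, 2, MPI_INT, 0, comm);
  int numLeafNode = ibuf[0];
  int ordering    = ibuf[1];

  // Matches comm.Bcast(buf, size*numLeafNode, MPI::BYTE, 0) on rank 0.
  std::vector<unsigned char> buf(pedigreeSize * numLeafNode);
  MPI_Bcast(buf.data(), static_cast<int>(buf.size()), MPI_BYTE, 0, comm);

  // ... unpack the pedigrees from buf and rebuild the local leaf-node array ...
  (void)ordering;
}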
Example #3
template <class IT, class NT>
ifstream& FullyDistSpVec<IT,NT>::ReadDistribute (ifstream& infile, int master)
{
	IT total_nnz;
	MPI::Intracomm World = commGrid->GetWorld();
	int neighs = World.Get_size();	// number of neighbors (including oneself)
	int buffperneigh = MEMORYINBYTES / (neighs * (sizeof(IT) + sizeof(NT)));

	int * displs = new int[neighs];
	for (int i=0; i<neighs; ++i)
		displs[i] = i*buffperneigh;

	int * curptrs = NULL; 
	int recvcount = 0;
	IT * inds = NULL; 
	NT * vals = NULL;
	int rank = World.Get_rank();	
	if(rank == master)	// 1 processor only
	{		
		inds = new IT [ buffperneigh * neighs ];
		vals = new NT [ buffperneigh * neighs ];
		curptrs = new int[neighs]; 
		fill_n(curptrs, neighs, 0);	// fill with zero
		if (infile.is_open())
		{
			infile.clear();
			infile.seekg(0);
			infile >> glen >> total_nnz;
			World.Bcast(&glen, 1, MPIType<IT>(), master);			
	
			IT tempind;
			NT tempval;
			double loadval;
			IT cnz = 0;
			while ( (!infile.eof()) && cnz < total_nnz)
			{
				infile >> tempind;
				//infile >> tempval;
				infile >> loadval;
				tempval = static_cast<NT>(loadval);
				tempind--;
				IT locind;
				int rec = Owner(tempind, locind);	// recipient (owner) processor
				inds[ rec * buffperneigh + curptrs[rec] ] = locind;
				vals[ rec * buffperneigh + curptrs[rec] ] = tempval;
				++ (curptrs[rec]);				

				if(curptrs[rec] == buffperneigh || (cnz == (total_nnz-1)) )		// one buffer is full, or file is done !
				{
					// first, send the receive counts ...
					World.Scatter(curptrs, 1, MPI::INT, &recvcount, 1, MPI::INT, master);

					// generate space for own recv data ... (use arrays because vector<bool> is crippled, if NT=bool)
					IT * tempinds = new IT[recvcount];
					NT * tempvals = new NT[recvcount];
					
					// then, send all buffers to their recipients ...
					World.Scatterv(inds, curptrs, displs, MPIType<IT>(), tempinds, recvcount,  MPIType<IT>(), master); 
					World.Scatterv(vals, curptrs, displs, MPIType<NT>(), tempvals, recvcount,  MPIType<NT>(), master); 
		
					// now push what is ours to tuples
					for(IT i=0; i< recvcount; ++i)
					{					
						ind.push_back( tempinds[i] );	// already offset'd by the sender
						num.push_back( tempvals[i] );
					}

					// reset current pointers so that we can reuse {inds,vals} buffers
					fill_n(curptrs, neighs, 0);
					DeleteAll(tempinds, tempvals);
				}
				++ cnz;
			}
			assert (cnz == total_nnz);
		
			// Signal the end of file to other processors along the diagonal
			fill_n(curptrs, neighs, numeric_limits<int>::max());	
			World.Scatter(curptrs, 1, MPI::INT, &recvcount, 1, MPI::INT, master);
		}