Example no. 1
0
// Issues each basic MPI collective once with throw-away data (e.g. to warm
// up the MPI runtime or to exercise tracing/profiling hooks).
// Collective: must be called by every rank of MPI_COMM_WORLD.
void dummy_operations::run_collective_dummy_operations() {
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        // Broadcast: root 0 sends one int to everyone.
        {
                int x = rank;  // reuse the rank queried above; no need to re-query
                MPI_Bcast(&x, 1, MPI_INT, 0, MPI_COMM_WORLD);
        }

        // Allgather: collect every rank id on every process.
        {
                int x = rank;
                std::vector<int> rcv(size);
                MPI_Allgather(&x, 1, MPI_INT, rcv.data(), 1, MPI_INT, MPI_COMM_WORLD);
        }

        // Allreduce: global sum of all rank ids.
        {
                int x = rank;
                int y = 0;
                MPI_Allreduce(&x, &y, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
        }

        // Inclusive prefix sum (scan) over the constant 1, so y == rank + 1.
        {
                int x = 1;
                int y = 0;
                MPI_Scan(&x, &y, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
        }

        // Alltoallv: every rank exchanges exactly one int with every rank.
        {
                std::vector<int> snd(size);
                std::vector<int> rcv(size);
                std::vector<int> scounts(size, 1);
                std::vector<int> rcounts(size, 1);
                std::vector<int> sdispls(size);
                std::vector<int> rdispls(size);
                for (int i = 0; i < size; ++i) {
                        sdispls[i] = rdispls[i] = i;  // one slot per peer
                }
                MPI_Alltoallv(snd.data(), scounts.data(), sdispls.data(), MPI_INT,
                              rcv.data(), rcounts.data(), rdispls.data(), MPI_INT, MPI_COMM_WORLD);
        }
}
Example no. 2
0
// Gathers, for each entry of `data`, the values contributed by every node
// in this node's layer, concatenated in rank order.
//
// Collective: every node of the layer communicator must call this with the
// same data.size(), otherwise the per-iteration collectives mismatch and
// the ranks deadlock.
//
// @param data  one int vector per variable; this node's local contribution.
// @return      one vector per variable holding all nodes' values.
vector<vector<int> > MPIDistribution::MPILayerGatherVariables(vector<vector<int> > data)
{
	MPI_Comm* comm = MPIGetCommLayer(); // communicator for nodes in layer
	vector<vector<int> > gatheredData(data.size());

	for(size_t i=0;i<data.size();i++)
	{
		// Pad with one artificial value so no node hands a zero-sized
		// buffer to MPI_Allgatherv; the padding is stripped again below.
		data[i].push_back(0);
		int sizeLocal = (int)data[i].size();

		// Gather every node's (padded) element count.
		// NOTE(review): assumes m_mpiSizeLocal equals the size of *comm,
		// as the original code did — verify against the class setup.
		vector<int> rcounts(m_mpiSizeLocal);
		vector<int> displs(m_mpiSizeLocal);
		MPI_Allgather(&sizeLocal, 1, MPI_INT, &rcounts[0], 1, MPI_INT, *comm);

		// Displacements and total size are derived locally from the
		// gathered counts; this replaces the redundant MPI_Allreduce
		// the original issued per variable.
		int totalSize = rcounts[0];
		for(int j=1;j<(int)rcounts.size();j++)
		{
			displs[j] = displs[j-1] + rcounts[j-1];
			totalSize += rcounts[j];
		}

		vector<int> totalIndexes(totalSize);
		MPI_Allgatherv(&data[i][0], sizeLocal, MPI_INT,
		               &totalIndexes[0], &rcounts[0], &displs[0], MPI_INT, *comm);

		// Strip each node's artificial padding value (the last element of
		// its block); iterate backwards so earlier indices stay valid.
		for(int j=(int)rcounts.size()-1;j>=0;j--)
		{
			totalIndexes.erase(totalIndexes.begin() + displs[j] + rcounts[j] - 1);
		}

		// swap instead of assign: avoids copying the gathered buffer.
		gatheredData[i].swap(totalIndexes);
	}

	return gatheredData;
}