Example #1
paracel::deque_type< paracel::coroutine<paracel::str_type> >
files_partition(paracel::list_type<paracel::str_type> & name_list,
                int np, const paracel::str_type & pattern = "",
                int blk_sz = BLK_SZ) {

    paracel::deque_type< paracel::coroutine<paracel::str_type> > func_loaders;
    // the line-split and feature-vector patterns use exactly one block per process
    if(pattern == "linesplit" || pattern == "fvec") blk_sz = 1;
    np = np * blk_sz; // total number of blocks to generate
    // prefix sum of file sizes: displs[i] is the global byte offset at which file i begins
    paracel::list_type<long> displs(name_list.size() + 1, 0);
    for(size_t i = 0; i < displs.size() - 1; ++i) {
        std::ifstream f(name_list[i], std::ios::binary | std::ios::ate);
        long tmp = f.tellg();
        displs[i + 1] = displs[i] + tmp;
    }
    long sz = displs[displs.size() - 1]; // total number of bytes across all files
    int nbk = np;
    long bk_sz = sz / (long)nbk; // nominal block size in bytes
    long e;
    // create one loader coroutine per block, covering byte range [s, e)
    for(int i = 0; i < nbk; ++i) {
        long s = (long)i * bk_sz;
        if(i == nbk - 1) {
            e = sz; // last block absorbs the remainder
        } else {
            e = (long)(i + 1) * bk_sz;
        }
        paracel::coroutine<paracel::str_type> files_load_lines(
            std::bind(paracel::files_load_lines_impl,
                      std::placeholders::_1, name_list, displs, s, e));
        func_loaders.push_back(std::move(files_load_lines));
    }
    return func_loaders;
}
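The partitioning arithmetic above can be exercised on its own. The sketch below is a minimal, standalone illustration (plain standard C++, with hypothetical file sizes standing in for the tellg() lookups; it is not part of the Paracel code) that prints the [start, end) byte ranges files_partition would hand to each loader.

#include <iostream>
#include <vector>

int main() {
    // hypothetical file sizes in bytes (stand-ins for the tellg() calls above)
    std::vector<long> file_sizes = {1000, 250, 4750};
    int np = 4, blk_sz = 1;

    // prefix sum of file sizes, as in files_partition
    std::vector<long> displs(file_sizes.size() + 1, 0);
    for(size_t i = 0; i < file_sizes.size(); ++i)
        displs[i + 1] = displs[i] + file_sizes[i];

    long sz = displs.back();       // total bytes across all files
    int nbk = np * blk_sz;         // number of blocks
    long bk_sz = sz / (long)nbk;   // nominal block size
    for(int i = 0; i < nbk; ++i) {
        long s = (long)i * bk_sz;
        long e = (i == nbk - 1) ? sz : (long)(i + 1) * bk_sz;
        std::cout << "block " << i << ": bytes [" << s << ", " << e << ")\n";
    }
    return 0;
}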
Example #2
std::vector<int> scatter_vector_block_decomp(std::vector<int>& global_vec, MPI_Comm comm)
{
    // get MPI Communicator properties
    int rank, p;
    MPI_Comm_size(comm, &p);
    MPI_Comm_rank(comm, &rank);

    // the local vector size (MPI restricts message sizes to `int`)
    int local_size;

    // init result
    std::vector<int> local_elements;

    if (rank == 0)
    {
        /* I am the root process */

        // get size of global array and bcast
        unsigned long long n = global_vec.size();
        MPI_Bcast(&n, 1, MPI_UNSIGNED_LONG_LONG, 0, comm);
        local_size = block_decompose(n, p, rank);

        // scatter-v the actual data
        std::vector<int> counts(p);
        std::vector<int> displs(p);
        displs[0] = 0;
        for (int i = 0; i < p; ++i)
        {
            counts[i] = block_decompose(n, p, i);
            if (i > 0)
                displs[i] = displs[i-1] + counts[i-1];
        }
        local_elements.resize(local_size);
        MPI_Scatterv(&global_vec[0], &counts[0], &displs[0], MPI_INT,
                     &local_elements[0], local_size, MPI_INT, 0, comm);
    }
    else
    {
        /* I am NOT the root process */

        // receive the size of my local array
        unsigned long long n;
        MPI_Bcast(&n, 1, MPI_UNSIGNED_LONG_LONG, 0, comm);
        local_size = block_decompose(n, p, rank);

        // resize result buffer
        local_elements.resize(local_size);
        // actually receive all the data
        MPI_Scatterv(NULL, NULL, NULL, MPI_INT,
                     &local_elements[0], local_size, MPI_INT, 0, comm);
    }

    // return local array
    return local_elements;
}
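The scatter routine relies on a block_decompose helper that is not shown in this listing. A common formulation of such a helper, given here only as an assumption about what the original looks like, assigns n / p elements to every rank and one extra element to each of the first n % p ranks:

// assumed block decomposition helper (not part of the original listing):
// rank `rank` gets n/p elements, plus one more if it is among the
// first n % p ranks
int block_decompose(unsigned long long n, int p, int rank)
{
    return (int)(n / p) + ((unsigned long long)rank < n % p ? 1 : 0);
}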
Example #3
std::vector<int> gather_vectors(std::vector<int>& local_vec, MPI_Comm comm)
{
    // get MPI parameters
    int rank;
    int p;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &p);

    // get local size
    int local_size = local_vec.size();

    // init result
    std::vector<int> result;

    // master process: receive results
    if (rank == 0)
    {
        // gather local array sizes (MPI restricts counts to `int`,
        // so `int` is sufficient here)
        std::vector<int> local_sizes(p);
        MPI_Gather(&local_size, 1, MPI_INT, &local_sizes[0], 1, MPI_INT, 0, comm);

        // gather-v to collect all the elements
        int total_size = std::accumulate(local_sizes.begin(), local_sizes.end(), 0);
        result.resize(total_size);

        // get receive displacements
        std::vector<int> displs(p, 0);
        for (int i = 1; i < p; ++i)
            displs[i] = displs[i-1] + local_sizes[i-1];

        // gather-v the vector data to the root
        MPI_Gatherv(&local_vec[0], local_size, MPI_INT,
                    &result[0], &local_sizes[0], &displs[0], MPI_INT, 0, comm);
    }
    // else: send results
    else {
        // gather local array sizes
        MPI_Gather(&local_size, 1, MPI_INT, NULL, 1, MPI_INT, 0, comm);

        // send the actual data
        MPI_Gatherv(&local_vec[0], local_size, MPI_INT,
                    NULL, NULL, NULL, MPI_INT, 0, comm);
    }
    return result;
}
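A minimal driver, sketched under the assumption that the scatter and gather routines above (plus a block_decompose helper) are visible in the same translation unit, might round-trip a vector through the two functions like this:

#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // only the root needs to hold the global data
    std::vector<int> global_vec;
    if (rank == 0)
        for (int i = 0; i < 10; ++i) global_vec.push_back(i);

    // block-distribute, then collect again on the root
    // (run with at most 10 processes so every rank receives at least one element)
    std::vector<int> local = scatter_vector_block_decomp(global_vec, MPI_COMM_WORLD);
    std::vector<int> gathered = gather_vectors(local, MPI_COMM_WORLD);

    if (rank == 0)
        std::printf("gathered %zu elements\n", gathered.size());

    MPI_Finalize();
    return 0;
}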
Example #4
vector<vector<int> > MPIDistribution::MPILayerGatherVariables(vector<vector<int> > data)
{
	MPI_Comm* comm = MPIGetCommLayer(); // communicator for nodes in layer
	vector<vector<int> > gatheredData(data.size());
	
	for(size_t i = 0; i < data.size(); i++)
	{
		// pad with a sentinel element so no rank contributes a zero-sized
		// buffer to the allgather; the sentinels are stripped again below
		data[i].push_back(0);
		int sizeLocal = data[i].size();
		int totalSize;
		MPI_Allreduce(&sizeLocal, &totalSize, 1, MPI_INT, MPI_SUM, *comm);

		vector<int> totalIndexes(totalSize);

		vector<int> rcounts(m_mpiSizeLocal);
		vector<int> displs(m_mpiSizeLocal);

		// counts from each node
		MPI_Allgather(&sizeLocal, 1, MPI_INT, &rcounts[0], 1, MPI_INT, *comm);

		// receive displacements: prefix sum of the counts
		for(size_t j = 1; j < rcounts.size(); j++)
		{
			displs[j] = rcounts[j-1] + displs[j-1];
		}

		MPI_Allgatherv(&data[i][0], sizeLocal, MPI_INT,
		               &totalIndexes[0], &rcounts[0], &displs[0], MPI_INT, *comm);

		// remove the artificial sentinel values, walking backwards so the
		// earlier displacements stay valid after each erase
		for(int j = (int)rcounts.size() - 1; j >= 0; j--)
		{
			totalIndexes.erase(totalIndexes.begin() + displs[j] + rcounts[j] - 1);
		}

		gatheredData[i] = totalIndexes;
	}

	return gatheredData;
}
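The sentinel padding above works around the fact that &data[i][0] is not valid for an empty vector. An alternative, shown below as a standalone sketch outside the MPIDistribution class and not part of the original code, passes the true counts together with a null-safe send pointer, so no cleanup pass is needed afterwards:

#include <mpi.h>
#include <vector>
#include <numeric>

// gather variable-length vectors from all ranks onto all ranks without
// padding: an empty local vector sends a count of zero from a dummy address
std::vector<int> allgather_variable(std::vector<int>& local, MPI_Comm comm)
{
    int p;
    MPI_Comm_size(comm, &p);

    // exchange the per-rank counts
    int local_size = (int)local.size();
    std::vector<int> rcounts(p);
    MPI_Allgather(&local_size, 1, MPI_INT, &rcounts[0], 1, MPI_INT, comm);

    // displacements are the prefix sum of the counts
    std::vector<int> displs(p, 0);
    for (int j = 1; j < p; ++j)
        displs[j] = displs[j-1] + rcounts[j-1];

    int total = std::accumulate(rcounts.begin(), rcounts.end(), 0);
    std::vector<int> result(total > 0 ? total : 1);

    // use a dummy address when the local vector is empty; with a send
    // count of 0, MPI never dereferences it
    int dummy = 0;
    int* sendbuf = local.empty() ? &dummy : &local[0];

    MPI_Allgatherv(sendbuf, local_size, MPI_INT,
                   &result[0], &rcounts[0], &displs[0], MPI_INT, comm);

    result.resize(total);
    return result;
}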