Пример #1
0
   void invoke() {

    // Gather (or all-gather) the elements of laz.ref into lhs.
    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi gather of array into a non contiguous view";

    auto comm = laz.c;
    int sendcount = laz.ref.domain().number_of_elements();
    auto D = mpi::mpi_datatype<typename A::value_type>();

    // For an all-gather every rank holds the result; for a plain gather only
    // the root does (the receive arguments are ignored on the other ranks).
    auto dom = laz.domain();
    if (laz.all || (laz.c.rank() == laz.root)) resize_or_check_if_view(lhs, dom.lengths());

    void *recv_p = lhs.data_start();
    const void *send_p = laz.ref.data_start();

    // Step 1: exchange the per-rank element counts.
    auto recvcounts = std::vector<int>(comm.size());
    auto displs = std::vector<int>(comm.size() + 1, 0);
    auto int_ty = mpi::mpi_datatype<int>();
    if (laz.all)
     MPI_Allgather(&sendcount, 1, int_ty, recvcounts.data(), 1, int_ty, comm.get());
    else
     MPI_Gather(&sendcount, 1, int_ty, recvcounts.data(), 1, int_ty, laz.root, comm.get());

    // Step 2: displacements are the exclusive prefix sum of the counts.
    for (int r = 0; r < comm.size(); ++r) displs[r + 1] = displs[r] + recvcounts[r];

    // Step 3: move the data.
    if (laz.all)
     MPI_Allgatherv((void *)send_p, sendcount, D, recv_p, recvcounts.data(), displs.data(), D, comm.get());
    else
     MPI_Gatherv((void *)send_p, sendcount, D, recv_p, recvcounts.data(), displs.data(), D, laz.root, comm.get());
   }
Пример #2
0
    //---------------------------------
    void _invoke(triqs::mpi::tag::allgather) {
        lhs.resize(laz.domain());

        // Same preparation as gather, except that every rank needs the
        // receive counts, hence Allgather instead of Gather.
        auto comm = laz.c;
        int n_ranks = comm.size();
        int sendcount = laz.ref.domain().number_of_elements();
        auto recvcounts = std::vector<int>(n_ranks);
        auto displs = std::vector<int>(n_ranks + 1, 0);

        auto int_ty = mpi::mpi_datatype<int>::invoke();
        MPI_Allgather(&sendcount, 1, int_ty, recvcounts.data(), 1, int_ty, comm.get());

        // Displacements: exclusive prefix sum of the counts.
        for (int r = 0; r < n_ranks; ++r) displs[r + 1] = displs[r] + recvcounts[r];

        MPI_Allgatherv((void *)laz.ref.data_start(), sendcount, D(), (void *)lhs.data_start(), recvcounts.data(),
                       displs.data(), D(), comm.get());
    }
Пример #3
0
    //---------------------------------
    void _invoke(triqs::mpi::tag::gather) {
        // NOTE: laz.domain() is evaluated unconditionally on every rank;
        // only the root resizes, since only it receives the gathered data.
        auto dom = laz.domain();
        if (laz.c.rank() == laz.root) lhs.resize(dom);

        auto comm = laz.c;
        int n_ranks = comm.size();
        int sendcount = laz.ref.domain().number_of_elements();
        auto recvcounts = std::vector<int>(n_ranks);
        auto displs = std::vector<int>(n_ranks + 1, 0);

        // Collect the per-rank counts on the root, then prefix-sum them into
        // displacements (both arrays are ignored by MPI on non-root ranks).
        auto int_ty = mpi::mpi_datatype<int>::invoke();
        MPI_Gather(&sendcount, 1, int_ty, recvcounts.data(), 1, int_ty, laz.root, comm.get());
        for (int r = 0; r < n_ranks; ++r) displs[r + 1] = displs[r] + recvcounts[r];

        MPI_Gatherv((void *)laz.ref.data_start(), sendcount, D(), (void *)lhs.data_start(), recvcounts.data(),
                    displs.data(), D(), laz.root, comm.get());
    }
Пример #4
0
    //---------------------------------
    void _invoke(triqs::mpi::tag::scatter) {
        lhs.resize(laz.domain());

        auto comm = laz.c;
        int n_ranks = comm.size();
        auto slow_size = first_dim(laz.ref);
        auto slow_stride = laz.ref.indexmap().strides()[0];

        // The array is chunked along its first (slowest) dimension: rank r
        // gets slice_length(...) slow indices, each worth slow_stride elements.
        auto sendcounts = std::vector<int>(n_ranks);
        auto displs = std::vector<int>(n_ranks + 1, 0);
        for (int r = 0; r < n_ranks; ++r) {
            sendcounts[r] = mpi::slice_length(slow_size - 1, n_ranks, r) * slow_stride;
            displs[r + 1] = displs[r] + sendcounts[r];
        }
        int recvcount = mpi::slice_length(slow_size - 1, n_ranks, comm.rank()) * slow_stride;

        MPI_Scatterv((void *)laz.ref.data_start(), sendcounts.data(), displs.data(), D(), (void *)lhs.data_start(),
                     recvcount, D(), laz.root, comm.get());
    }
Пример #5
0
   void invoke() {

    // Scatter laz.ref from the root, chunked along the first (slowest) dimension.
    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi scatter of array into a non contiguous view";

    resize_or_check_if_view(lhs, laz.domain().lengths());

    auto comm = laz.c;
    int n_ranks = comm.size();
    auto slow_size = first_dim(laz.ref);
    auto slow_stride = laz.ref.indexmap().strides()[0];
    auto D = mpi::mpi_datatype<typename A::value_type>();

    // Rank r is sent slice_length(...) slow indices, each worth slow_stride
    // elements; displacements are the running sum of those counts.
    auto sendcounts = std::vector<int>(n_ranks);
    auto displs = std::vector<int>(n_ranks + 1, 0);
    for (int r = 0; r < n_ranks; ++r) {
     sendcounts[r] = mpi::slice_length(slow_size - 1, n_ranks, r) * slow_stride;
     displs[r + 1] = displs[r] + sendcounts[r];
    }
    int recvcount = mpi::slice_length(slow_size - 1, n_ranks, comm.rank()) * slow_stride;

    MPI_Scatterv((void *)laz.ref.data_start(), sendcounts.data(), displs.data(), D, (void *)lhs.data_start(),
                 recvcount, D, laz.root, comm.get());
   }
Пример #6
0
   void invoke() {

    // (All-)reduce laz.ref into lhs with MPI_SUM. Works both out-of-place and
    // in place (when lhs and laz.ref share the same data pointer).
    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi reduction of array into a non contiguous view";

    auto rhs_n_elem = laz.ref.domain().number_of_elements();
    auto c = laz.c;
    auto root = laz.root;
    auto D = mpi::mpi_datatype<typename A::value_type>();

    bool in_place = (lhs.data_start() == laz.ref.data_start());

    // Sanity checks.
    if (in_place) {
     // Same storage must mean the same element count.
     if (rhs_n_elem != lhs.domain().number_of_elements())
      TRIQS_RUNTIME_ERROR << "mpi reduce of array : same pointer to data start, but different number of elements !";
    } else { // out of place: the buffers must not overlap
     // The target needs storage only where the result lands: on the root for a
     // plain reduce, on every rank for an all-reduce.
     if ((c.rank() == root) || laz.all) resize_or_check_if_view(lhs, laz.domain().lengths());
     if (std::abs(lhs.data_start() - laz.ref.data_start()) < rhs_n_elem)
      TRIQS_RUNTIME_ERROR << "mpi reduce of array : overlapping arrays !";
    }

    void *lhs_p = lhs.data_start();
    void *rhs_p = (void *)laz.ref.data_start();

    if (!laz.all) {
     if (in_place)
      // MPI_IN_PLACE is only valid (and only needed) on the root; the other
      // ranks pass their data as the send buffer.
      MPI_Reduce((c.rank() == root ? MPI_IN_PLACE : rhs_p), rhs_p, rhs_n_elem, D, MPI_SUM, root, c.get());
     else
      MPI_Reduce(rhs_p, lhs_p, rhs_n_elem, D, MPI_SUM, root, c.get());
    } else {
     if (in_place)
      MPI_Allreduce(MPI_IN_PLACE, rhs_p, rhs_n_elem, D, MPI_SUM, c.get());
     else
      MPI_Allreduce(rhs_p, lhs_p, rhs_n_elem, D, MPI_SUM, c.get());
    }
   }
Пример #7
0
 //---------------------------------
 void _invoke(triqs::mpi::tag::all_reduce) {
     // ADD debug check under macro that all nodes have same size
     lhs.resize(laz.domain());
     auto n_elem = laz.ref.domain().number_of_elements();
     void *send_p = (void *)laz.ref.data_start();
     void *recv_p = (void *)lhs.data_start();
     // Element-wise sum over all ranks; every rank receives the result.
     MPI_Allreduce(send_p, recv_p, n_elem, D(), MPI_SUM, laz.c.get());
 }
Пример #8
0
 //---------------------------------
 void _invoke(triqs::mpi::tag::reduce) {
     // Only the root receives the reduced data, so only it allocates storage;
     // the receive buffer is ignored by MPI on the other ranks.
     bool am_root = (laz.c.rank() == laz.root);
     if (am_root) lhs.resize(laz.domain());
     auto n_elem = laz.ref.domain().number_of_elements();
     MPI_Reduce((void *)laz.ref.data_start(), (void *)lhs.data_start(), n_elem, D(), MPI_SUM, laz.root, laz.c.get());
 }