Example #1
 template <typename A> REQUIRES_IS_ARRAY mpi_broadcast(A &a, mpi::communicator c = {}, int root = 0) {
  if (!has_contiguous_data(a)) TRIQS_RUNTIME_ERROR << "Non contiguous view in mpi_broadcast";
  // Broadcast the shape first, so that non-root ranks can resize their array to match.
  auto sh = a.shape();
  MPI_Bcast(&sh[0], sh.size(), mpi::mpi_datatype<typename decltype(sh)::value_type>(), root, c.get());
  if (c.rank() != root) resize_or_check_if_view(a, sh);
  // Then broadcast the contiguous data block itself.
  MPI_Bcast(a.data_start(), a.domain().number_of_elements(), mpi::mpi_datatype<typename A::value_type>(), root, c.get());
 }
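
For context, here is a hedged sketch of a call site for the function above: hypothetical caller code, with the array type, shape and communicator chosen purely for illustration (namespaces abbreviated as in the excerpt).

 // Hypothetical caller (not part of the library): broadcast a contiguous array from rank 0.
 mpi::communicator world;
 triqs::arrays::array<double, 2> a;
 if (world.rank() == 0) a = triqs::arrays::array<double, 2>(3, 4); // only root holds data initially
 mpi_broadcast(a, world, 0); // non-root ranks are resized to (3,4) and receive the data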
Example #2
   void invoke() {

    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi gather of array into a non contiguous view";

    auto c = laz.c;
    auto recvcounts = std::vector<int>(c.size());
    auto displs = std::vector<int>(c.size() + 1, 0);
    int sendcount = laz.ref.domain().number_of_elements();
    auto D = mpi::mpi_datatype<typename A::value_type>();

    // Resize the target only where the result will actually be used (root, or every rank for allgather).
    auto d = laz.domain();
    if (laz.all || (laz.c.rank() == laz.root)) resize_or_check_if_view(lhs, d.lengths());

    void *lhs_p = lhs.data_start();
    const void *rhs_p = laz.ref.data_start();

    // Collect the element count contributed by each rank.
    auto mpi_ty = mpi::mpi_datatype<int>();
    if (!laz.all)
     MPI_Gather(&sendcount, 1, mpi_ty, &recvcounts[0], 1, mpi_ty, laz.root, c.get());
    else
     MPI_Allgather(&sendcount, 1, mpi_ty, &recvcounts[0], 1, mpi_ty, c.get());

    // Displacements into the receive buffer are the prefix sums of the counts.
    for (int r = 0; r < c.size(); ++r) displs[r + 1] = recvcounts[r] + displs[r];

    if (!laz.all)
     MPI_Gatherv((void *)rhs_p, sendcount, D, lhs_p, &recvcounts[0], &displs[0], D, laz.root, c.get());
    else
     MPI_Allgatherv((void *)rhs_p, sendcount, D, lhs_p, &recvcounts[0], &displs[0], D, c.get());
   }
Example #3
   void invoke() {

    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi scatter of array into a non contiguous view";

    resize_or_check_if_view(lhs, laz.domain().lengths());

    auto c = laz.c;
    // The array is distributed along its first ("slow") index: each rank handles a block of slices.
    auto slow_size = first_dim(laz.ref);
    auto slow_stride = laz.ref.indexmap().strides()[0];
    auto sendcounts = std::vector<int>(c.size());
    auto displs = std::vector<int>(c.size() + 1, 0);
    int recvcount = mpi::slice_length(slow_size - 1, c.size(), c.rank()) * slow_stride;
    auto D = mpi::mpi_datatype<typename A::value_type>();

    // Element count and displacement of the block destined for each rank.
    for (int r = 0; r < c.size(); ++r) {
     sendcounts[r] = mpi::slice_length(slow_size - 1, c.size(), r) * slow_stride;
     displs[r + 1] = sendcounts[r] + displs[r];
    }

    MPI_Scatterv((void *)laz.ref.data_start(), &sendcounts[0], &displs[0], D, (void *)lhs.data_start(), recvcount, D, laz.root,
                 c.get());
   }
Example #4
   void invoke() {

    if (!has_contiguous_data(lhs)) TRIQS_RUNTIME_ERROR << "mpi reduction of array into a non contiguous view";

    auto rhs_n_elem = laz.ref.domain().number_of_elements();
    auto c = laz.c;
    auto root = laz.root;
    auto D = mpi::mpi_datatype<typename A::value_type>();

    bool in_place = (lhs.data_start() == laz.ref.data_start());

    // some checks.
    if (in_place) {
     if (rhs_n_elem != lhs.domain().number_of_elements())
      TRIQS_RUNTIME_ERROR << "mpi reduce of array : same pointer to data start, but differnet number of elements !";
    } else { // check no overlap
     if ((c.rank() == root) || laz.all) resize_or_check_if_view(lhs, laz.domain().lengths());
     if (std::abs(lhs.data_start() - laz.ref.data_start()) < rhs_n_elem)
      TRIQS_RUNTIME_ERROR << "mpi reduce of array : overlapping arrays !";
    }

    void *lhs_p = lhs.data_start();
    void *rhs_p = (void *)laz.ref.data_start();

    if (!laz.all) {
     if (in_place)
      MPI_Reduce((c.rank() == root ? MPI_IN_PLACE : rhs_p), rhs_p, rhs_n_elem, D, MPI_SUM, root, c.get());
     else
      MPI_Reduce(rhs_p, lhs_p, rhs_n_elem, D, MPI_SUM, root, c.get());
    } else {
     if (in_place)
      MPI_Allreduce(MPI_IN_PLACE, rhs_p, rhs_n_elem, D, MPI_SUM, c.get());
     else
      MPI_Allreduce(rhs_p, lhs_p, rhs_n_elem, D, MPI_SUM, c.get());
    }
   }
Example #5
 array_view<V, sizeof...(I), Opt, indexmaps::mem_layout::c_order(sizeof...(I))> reinterpret_array_view(array_view<V, R, Opt, To, B> const &a, I... index) {
  if (!has_contiguous_data(a)) TRIQS_RUNTIME_ERROR << "reinterpretation failure : data of the view are not contiguous";
  return { {make_shape(index...)}, a.storage() };
 }
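
As an illustration, a hedged usage sketch under assumed types and sizes: reshaping a contiguous one-dimensional view as a 2x3 view without copying. The call would throw the error above if the data were not contiguous.

 // Hypothetical caller: view 6 contiguous elements as a 2x3 array over the same storage.
 triqs::arrays::array<double, 1> a(6);
 auto v2 = reinterpret_array_view(a(), 2, 3); // a() is the full view of a; no data are copied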
Example #6
 template <typename A> REQUIRES_IS_ARRAY2(gather) mpi_gather (A &a, mpi::communicator c = {}, int root = 0, bool all = false) {
  if (!has_contiguous_data(a)) TRIQS_RUNTIME_ERROR << "Non contiguous view in mpi_gather";
  return {a, c, root, all};
 }
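
mpi_gather itself only checks contiguity and returns a lazy object ({a, c, root, all}); in TRIQS's lazy MPI scheme the actual communication (the invoke() of Example #2) runs when that object is consumed, typically by assigning it to an array. A hedged sketch of such a call site, with the array type and size assumed:

 // Hypothetical caller: every rank contributes its local block; rank 0 receives the concatenation.
 mpi::communicator world;
 triqs::arrays::array<double, 1> local(10), gathered;
 gathered = mpi_gather(local, world, 0); // evaluating the lazy object performs the MPI_Gatherv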
Example #7
 cache_impl(DataType const& x, ml_t ml_ = ml_t()) : ml(ml_), keeper(x) {
  need_copy = (!is_amv_value_or_view_class<DataType>::value) || need_copy_dynamic(x, is_amv_value_or_view_class<DataType>()) ||
              (!has_contiguous_data(x));
 }
Example #8
 det_and_inverse_worker(ViewType const &a) : V(a), dim(first_dim(a)), ipiv(dim), step(0) {
  if (first_dim(a) != second_dim(a))
   TRIQS_RUNTIME_ERROR << "Inverse/Det error : non-square matrix. Dimensions are : (" << first_dim(a) << "," << second_dim(a) << ")" << "\n  ";
  if (!(has_contiguous_data(a))) TRIQS_RUNTIME_ERROR << "det_and_inverse_worker only takes a contiguous view";
 }
Example #9
 static void check_is_contiguous(A const &a) {
  if (!has_contiguous_data(a)) TRIQS_RUNTIME_ERROR << "Non contiguous view in mpi_reduce_in_place";
 }
Example #10
 explicit const_qcache(A const & x): need_copy (!(has_contiguous_data(x))), keeper(x) {}