Example #1
void
all_to_all_test(const communicator& comm, Generator generator,
                const char* kind)
{
  typedef typename Generator::result_type value_type;

  using boost::mpi::all_to_all;

  std::vector<value_type> in_values;
  for (int p = 0; p < comm.size(); ++p)
    in_values.push_back(generator((p + 1) * (comm.rank() + 1)));

  if (comm.rank() == 0) {
    std::cout << "Performing all-to-all operation on " << kind << "...";
    std::cout.flush();
  }
  std::vector<value_type> out_values;
  all_to_all(comm, in_values, out_values);

  for (int p = 0; p < comm.size(); ++p) {
    BOOST_CHECK(out_values[p] == generator((p + 1) * (comm.rank() + 1)));
  }

  if (comm.rank() == 0) {
    std::cout << " done." << std::endl;
  }

  (comm.barrier)();
}
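For context, here is a minimal self-contained driver exercising the same boost::mpi::all_to_all call (a sketch with plain ints and assert standing in for the test suite's Generator and BOOST_CHECK):

#include <boost/mpi.hpp>
#include <cassert>
#include <vector>

int main(int argc, char* argv[])
{
  boost::mpi::environment env(argc, argv);  // initializes/finalizes MPI
  boost::mpi::communicator comm;            // defaults to MPI_COMM_WORLD

  // Each rank prepares one value per destination rank.
  std::vector<int> in(comm.size()), out;
  for (int p = 0; p < comm.size(); ++p)
    in[p] = (p + 1) * (comm.rank() + 1);

  boost::mpi::all_to_all(comm, in, out);

  // Slot p now holds the value that rank p computed for this rank.
  for (int p = 0; p < comm.size(); ++p)
    assert(out[p] == (p + 1) * (comm.rank() + 1));
  return 0;
}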
Example #2
    static void gather(const communicator& comm, const std::vector<T>& in, std::vector< std::vector<T> >& out, int root)
    {
      std::vector<int>  counts(comm.size());
      Collectives<int,void*>::gather(comm, (int) in.size(), counts, root);

      std::vector<int>  offsets(comm.size(), 0);
      for (unsigned i = 1; i < offsets.size(); ++i)
        offsets[i] = offsets[i-1] + counts[i-1];

      std::vector<T> buffer(offsets.back() + counts.back());
      MPI_Gatherv(Datatype::address(const_cast<T&>(in[0])),
                  in.size(),
                  Datatype::datatype(),
                  Datatype::address(buffer[0]),
                  &counts[0],
                  &offsets[0],
                  Datatype::datatype(),
                  root, comm);

      out.resize(comm.size());
      size_t cur = 0;
      for (unsigned i = 0; i < (unsigned)comm.size(); ++i)
      {
          out[i].reserve(counts[i]);
          for (unsigned j = 0; j < (unsigned)counts[i]; ++j)
              out[i].push_back(buffer[cur++]);
      }
    }
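The offsets loop above is just an exclusive prefix sum over the counts. For reference, the same displacement computation with C++17's <numeric> (a standalone sketch, independent of the Datatype/Collectives machinery used here):

#include <numeric>
#include <vector>

// offs[i] = counts[0] + ... + counts[i-1], with offs[0] = 0
std::vector<int> displacements(const std::vector<int>& counts)
{
  std::vector<int> offs(counts.size(), 0);
  std::exclusive_scan(counts.begin(), counts.end(), offs.begin(), 0);
  return offs;
}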
Example #3
void
gather_test(const communicator& comm, Generator generator,
            const char* kind, int root = -1)
{
  typedef typename Generator::result_type value_type;
  value_type value = generator(comm.rank());

  if (root == -1) {
    for (root = 0; root < comm.size(); ++root)
      gather_test(comm, generator, kind, root);
  } else {
    using boost::mpi::gather;

    std::vector<value_type> values;
    if (comm.rank() == root) {
      std::cout << "Gathering " << kind << " from root "
                << root << "..." << std::endl;
    }

    gather(comm, value, values, root);

    if (comm.rank() == root) {
      std::vector<value_type> expected_values;
      for (int p = 0; p < comm.size(); ++p)
        expected_values.push_back(generator(p));
      BOOST_CHECK(values == expected_values);
    } else {
      BOOST_CHECK(values.empty());
    }
  }

  (comm.barrier)();
}
Example #4
void
scatter_test(const communicator& comm, Generator generator,
            const char* kind, int root = -1)
{
  typedef typename Generator::result_type value_type;

  if (root == -1) {
    for (root = 0; root < comm.size(); ++root)
      scatter_test(comm, generator, kind, root);
  } else {
    using boost::mpi::scatter;

    value_type value;

    if (comm.rank() == root) {
      std::vector<value_type> values;

      for (int p = 0; p < comm.size(); ++p)
        values.push_back(generator(p));

      std::cout << "Scattering " << kind << " from root " << root << "...";
      std::cout.flush();

      scatter(comm, values, value, root);
    } else {
      scatter(comm, value, root);
    }

    BOOST_CHECK(value == generator(comm.rank()));
  }

  (comm.barrier)();
}
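Note the asymmetric API this test exercises: only the root passes the full input vector to scatter, while every other rank calls the overload without input values and receives just its own slice. The same split shows up in the Boost.Python wrapper of Example #6 below.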
Example #5
object all_to_all(const communicator& comm, object in_values)
{
  // Build input values
  std::vector<object> in_values_vec(comm.size());
  object iterator = object(handle<>(PyObject_GetIter(in_values.ptr())));
  for (int i = 0; i < comm.size(); ++i)
    in_values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr())));

  std::vector<object> out_values_vec(comm.size());
  boost::mpi::all_to_all(comm, in_values_vec, out_values_vec);

  boost::python::list l;
  for (int i = 0; i < comm.size(); ++i)
    l.append(out_values_vec[i]);
  return boost::python::tuple(l);
}
Example #6
object scatter(const communicator& comm, object values, int root)
{
  object result;

  if (comm.rank() == root) {
    std::vector<object> values_vec(comm.size());
    object iterator = object(handle<>(PyObject_GetIter(values.ptr())));
    for (int i = 0; i < comm.size(); ++i)
      values_vec[i] = object(handle<>(PyIter_Next(iterator.ptr())));

    boost::mpi::scatter(comm, values_vec, result, root);
  } else {
    boost::mpi::scatter(comm, result, root);
  }
  return result;
}
Example #7
void
broadcast_test(const communicator& comm, const T& bc_value,
               const char* kind, int root = -1)
{
  if (root == -1) {
    for (root = 0; root < comm.size(); ++root)
      broadcast_test(comm, bc_value, kind, root);
  } else {
    using boost::mpi::broadcast;

    T value;
    if (comm.rank() == root) {
      value = bc_value;
      std::cout << "Broadcasting " << kind << " from root " << root << "...";
      std::cout.flush();
    }

    broadcast(comm, value, root);
    BOOST_CHECK(value == bc_value);
    if (comm.rank() == root && value == bc_value)
      std::cout << "OK." << std::endl;
  }

  (comm.barrier)();
}
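Outside a test harness, the broadcast itself is a one-liner. A sketch broadcasting a std::string from rank 0 (note the serialization header, which non-trivial types need):

#include <boost/mpi.hpp>
#include <boost/serialization/string.hpp>
#include <string>

void broadcast_greeting(const boost::mpi::communicator& comm)
{
  std::string msg;
  if (comm.rank() == 0) msg = "hello";
  boost::mpi::broadcast(comm, msg, 0);
  // msg now equals "hello" on every rank
}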
Example #8
void
ring_array_test(const communicator& comm, const T* pass_values,
                int n, const char* kind, int root = 0)
{
  T* transferred_values = new T[n];
  int rank = comm.rank();
  int size = comm.size();

  if (rank == root) {

    std::cout << "Passing " << kind << " array around a ring from root "
              << root  << "...";
    comm.send((rank + 1) % size, 0, pass_values, n);
    comm.recv((rank + size - 1) % size, 0, transferred_values, n);
    bool okay = std::equal(pass_values, pass_values + n,
                           transferred_values);
    BOOST_CHECK(okay);
    if (okay) std::cout << " OK." << std::endl;
  } else {
    status stat = comm.probe(boost::mpi::any_source, 0);
    boost::optional<int> num_values = stat.template count<T>();
    if (boost::mpi::is_mpi_datatype<T>())
      BOOST_CHECK(num_values && *num_values == n);
    else
      BOOST_CHECK(!num_values || *num_values == n);     
    comm.recv(stat.source(), 0, transferred_values, n);
    BOOST_CHECK(std::equal(pass_values, pass_values + n,
                           transferred_values));
    comm.send((rank + 1) % size, 0, transferred_values, n);
  }
  (comm.barrier)();
  delete [] transferred_values;
}
Example #9
File: vector.hpp (project Titan-C/triqs)
 template <typename T> std::vector<T> mpi_scatter(std::vector<T> const &a, communicator c, int root, std::true_type) {
  auto slow_size = a.size();
  auto sendcounts = std::vector<int>(c.size());
  auto displs = std::vector<int>(c.size() + 1, 0);
  int recvcount = slice_length(slow_size - 1, c.size(), c.rank());
  std::vector<T> b(recvcount);

  for (int r = 0; r < c.size(); ++r) {
   sendcounts[r] = slice_length(slow_size - 1, c.size(), r);
   displs[r + 1] = sendcounts[r] + displs[r];
  }

  MPI_Scatterv((void *)a.data(), &sendcounts[0], &displs[0], mpi_datatype<T>(), (void *)b.data(), recvcount, mpi_datatype<T>(),
               root, c.get());
  return b;
 }
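slice_length is TRIQS's helper for an even block distribution. Its assumed behavior, spelled out as a standalone sketch (an illustration of the semantics, not the library's actual code): split imax + 1 items over n ranks, with the first (imax + 1) % n ranks getting one extra item.

// Sketch of the assumed semantics of slice_length(imax, n, rank).
inline int slice_length_sketch(int imax, int n, int rank)
{
  int total = imax + 1;   // items to distribute
  int base  = total / n;  // every rank gets at least this many
  int rem   = total % n;  // leftover items go to the first ranks
  return base + (rank < rem ? 1 : 0);
}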
Example #10
 void all_gather(const communicator& comm, const T& in_val, std::vector<T>& out_vals) {
     out_vals.resize(comm.size());
     MPI_Allgather((void*)&in_val, 1, detail::mpi_type<T>(),
                   &out_vals.front(), 1, detail::mpi_type<T>(),
                   comm);
 }
Example #11
object all_gather(const communicator& comm, object value)
{
  std::vector<object> values;
  boost::mpi::all_gather(comm, value, values);

  boost::python::list l;
  for (int i = 0; i < comm.size(); ++i)
    l.append(values[i]);
  return boost::python::tuple(l);
}
Example #12
void
gatherv_test(const communicator& comm, Generator generator,
             const char* kind, int root = -1)
{
  typedef typename Generator::result_type value_type;

  if (root == -1) {
    for (root = 0; root < comm.size(); ++root)
      gatherv_test(comm, generator, kind, root);
  } else {
    using boost::mpi::gatherv;

    int mysize = comm.rank() + 1;
    int nprocs = comm.size();

    // process p will send p+1 identical generator(p) elements
    std::vector<value_type> myvalues(mysize, generator(comm.rank()));

    if (comm.rank() == root) {
      std::vector<value_type> values((nprocs*(nprocs+1))/2);
      std::vector<int> sizes(comm.size());
      for (int p = 0; p < comm.size(); ++p)
        sizes[p] = p + 1;

      std::cout << "Gatheringv " << kind << " from root "
                << root << "..." << std::endl;

      gatherv(comm, myvalues, &values[0], sizes, root);

      std::vector<value_type> expected_values;
      for (int p = 0; p < comm.size(); ++p)
        for (int i = 0; i < p+1; ++i)
          expected_values.push_back(generator(p));

      BOOST_CHECK(values == expected_values);
    } else {
      gatherv(comm, myvalues, root);
    }
  }

  (comm.barrier)();
}
Example #13
File: gather.hpp (project Caraul/airgit)
void
gather(const communicator& comm, const T* in_values, int n, 
       std::vector<T>& out_values, int root)
{
  if (comm.rank() == root) {
    out_values.resize(comm.size() * n);
    ::boost::mpi::gather(comm, in_values, n, &out_values[0], root);
  } 
  else
    ::boost::mpi::gather(comm, in_values, n, root);
}
Example #14
 static void gather(const communicator& comm, const T& in, std::vector<T>& out, int root)
 {
   size_t s  = comm.size();
          s *= Datatype::count(in);
   out.resize(s);
   MPI_Gather(Datatype::address(const_cast<T&>(in)),
              Datatype::count(in),
              Datatype::datatype(),
              Datatype::address(out[0]),
              Datatype::count(in),
              Datatype::datatype(),
              root, comm);
 }
Example #15
File: gather.hpp (project Caraul/airgit)
  void
  gather_impl(const communicator& comm, const T* in_values, int n, 
              T* out_values, int root, mpl::false_)
  {
    int tag = environment::collectives_tag();
    int size = comm.size();

    for (int src = 0; src < size; ++src) {
      if (src == root)
        std::copy(in_values, in_values + n, out_values + n * src);
      else
        comm.recv(src, tag, out_values + n * src, n);
    }
  }
Example #16
File: vector.hpp (project Titan-C/triqs)
 template <typename T> std::vector<T> mpi_gather(std::vector<T> const &a, communicator c, int root, bool all, std::true_type) {
  long size = mpi_reduce(a.size(), c, root, all);
  std::vector<T> b((all || (c.rank() == root) ? size : 0));

  auto recvcounts = std::vector<int>(c.size());
  auto displs = std::vector<int>(c.size() + 1, 0);
  int sendcount = a.size();
  auto mpi_ty = mpi::mpi_datatype<int>();
  if (!all)
   MPI_Gather(&sendcount, 1, mpi_ty, &recvcounts[0], 1, mpi_ty, root, c.get());
  else
   MPI_Allgather(&sendcount, 1, mpi_ty, &recvcounts[0], 1, mpi_ty, c.get());

  for (int r = 0; r < c.size(); ++r) displs[r + 1] = recvcounts[r] + displs[r];

  if (!all)
   MPI_Gatherv((void *)a.data(), sendcount, mpi_datatype<T>(), (void *)b.data(), &recvcounts[0], &displs[0], mpi_datatype<T>(),
               root, c.get());
  else
   MPI_Allgatherv((void *)a.data(), sendcount, mpi_datatype<T>(), (void *)b.data(), &recvcounts[0], &displs[0], mpi_datatype<T>(),
                  c.get());

  return b;
 }
Example #17
object gather(const communicator& comm, object value, int root)
{
  if (comm.rank() == root) {
    std::vector<object> values;
    boost::mpi::gather(comm, value, values, root);

    boost::python::list l;
    for (int i = 0; i < comm.size(); ++i)
      l.append(values[i]);
    return boost::python::tuple(l);
  } else {
    boost::mpi::gather(comm, value, root);
    return object();
  }
}
Example #18
void
all_gather_impl(const communicator& comm, const T* in_values, int n, 
                T* out_values, int const* sizes, int const* skips, mpl::false_)
{
  int nproc = comm.size();
  // First, gather all the archive sizes; these can differ
  // from process to process.
  packed_oarchive oa(comm);
  for (int i = 0; i < n; ++i) {
    oa << in_values[i];
  }
  std::vector<int> oasizes(nproc);
  int oasize = oa.size();
  BOOST_MPI_CHECK_RESULT(MPI_Allgather,
                         (&oasize, 1, MPI_INTEGER,
                          c_data(oasizes), 1, MPI_INTEGER, 
                          MPI_Comm(comm)));
  // Gather the archives themselves. They can have different sizes, so
  // we need to use allgatherv. Everything is contiguous, so the
  // offsets can be deduced from the collected sizes.
  std::vector<int> offsets(nproc);
  sizes2offsets(oasizes, offsets);
  packed_iarchive::buffer_type recv_buffer(std::accumulate(oasizes.begin(), oasizes.end(), 0));
  BOOST_MPI_CHECK_RESULT(MPI_Allgatherv,
                         (const_cast<void*>(oa.address()), int(oa.size()), MPI_BYTE,
                          c_data(recv_buffer), c_data(oasizes), c_data(offsets), MPI_BYTE, 
                          MPI_Comm(comm)));
  for (int src = 0; src < nproc; ++src) {
    int nb   = sizes ? sizes[src] : n;
    int skip = skips ? skips[src] : 0;
    std::advance(out_values, skip);
    if (src == comm.rank()) { // this is our local data
      for (int i = 0; i < nb; ++i) {
        *out_values++ = *in_values++;
      }
    } else {
      packed_iarchive ia(comm,  recv_buffer, boost::archive::no_header, offsets[src]);
      for (int i = 0; i < nb; ++i) {
        ia >> *out_values++;
      }
    }
  }
}
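The pattern here is worth spelling out: serialized archives have rank-dependent lengths, so the code first all-gathers the per-rank archive sizes, derives byte offsets from them (sizes2offsets is the same exclusive prefix sum seen in Example #2), and only then calls MPI_Allgatherv on the raw bytes.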
Example #19
void
all_reduce_array_test(const communicator& comm, Generator generator,
                      const char* type_kind, Op op, const char* op_kind,
                      typename Generator::result_type init, bool in_place)
{
  typedef typename Generator::result_type value_type;
  value_type value = generator(comm.rank());
  std::vector<value_type> send(10, value);

  using boost::mpi::all_reduce;
  using boost::mpi::inplace;

  if (comm.rank() == 0) {
      char const* place = in_place ? "in place" : "out of place";
      std::cout << "Reducing (" << place << ") array to " << op_kind << " of " << type_kind << "...";
      std::cout.flush();
  }
  std::vector<value_type> result;
  if (in_place) {
    all_reduce(comm, inplace(&(send[0])), send.size(), op);
    result.swap(send);
  } else {
    std::vector<value_type> recv(10, value_type());
    all_reduce(comm, &(send[0]), send.size(), &(recv[0]), op);
    result.swap(recv);
  }

  // Compute expected result
  std::vector<value_type> generated_values;
  for (int p = 0; p < comm.size(); ++p)
    generated_values.push_back(generator(p));
  value_type expected_result = std::accumulate(generated_values.begin(),
                                               generated_values.end(),
                                               init, op);
  
  bool got_expected_result = (std::equal_range(result.begin(), result.end(), 
                                               expected_result)
                              == std::make_pair(result.begin(), result.end()));
  BOOST_CHECK(got_expected_result);
  if (got_expected_result && comm.rank() == 0)
      std::cout << "OK." << std::endl;

  (comm.barrier)();
}
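The equal_range comparison is a terse way of asserting that every element of result equals expected_result; it works only because a constant range is trivially sorted. A more direct formulation of the same check, as a sketch:

#include <algorithm>

template <typename Vec, typename T>
bool all_equal_to(const Vec& v, const T& x)
{
  return std::all_of(v.begin(), v.end(),
                     [&x](const T& e) { return e == x; });
}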
Example #20
  void
  scatter_impl(const communicator& comm, const T* in_values, T* out_values, 
               int n, int root, mpl::false_)
  {
    int tag = environment::collectives_tag();
    int size = comm.size();

    for (int dest = 0; dest < size; ++dest) {
      if (dest == root) {
        // Our own values will never be transmitted: just copy them.
        std::copy(in_values + dest * n, in_values + (dest + 1) * n, out_values);
      } else {
        // Send archive
        packed_oarchive oa(comm);
        for (int i = 0; i < n; ++i)
          oa << in_values[dest * n + i];
        detail::packed_archive_send(comm, dest, tag, oa);
      }
    }
  }
Example #21
void
scatterv_impl(const communicator& comm, const T* in_values, T* out_values, int out_size,
              int const* sizes, int const* displs, int root, mpl::false_)
{
  packed_oarchive::buffer_type sendbuf;
  bool is_root = comm.rank() == root;
  int nproc = comm.size();
  std::vector<int> archsizes;
  if (is_root) {
    assert(out_size == sizes[comm.rank()]);
    archsizes.resize(nproc);
    std::vector<int> skipped;
    if (displs) {
      skipped.resize(nproc);
      offsets2skipped(sizes, displs, c_data(skipped), nproc);
      displs = c_data(skipped);
    }
    fill_scatter_sendbuf(comm, in_values, sizes, displs, sendbuf, archsizes); // use the skip counts computed above
  }
  dispatch_scatter_sendbuf(comm, sendbuf, archsizes, (T const*)0, out_values, out_size, root);
}
Example #22
File: reduce.hpp (project 8573/anura)
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op op, int root, 
                   mpl::true_ /*is_commutative*/)
  {
    std::copy(in_values, in_values + n, out_values);

    int size = comm.size();
    int rank = comm.rank();

    // The computation tree we will use.
    detail::computation_tree tree(rank, size, root);

    int tag = environment::collectives_tag();

    MPI_Status status;
    int children = 0;
    for (int child = tree.child_begin();
         children < tree.branching_factor() && child != root;
         ++children, child = (child + 1) % size) {
      // Receive archive
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, child, tag, ia, status);

      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(out_values[i], incoming);
      }
    }

    // For non-roots, send the result to the parent.
    if (tree.parent() != rank) {
      packed_oarchive oa(comm);
      for (int i = 0; i < n; ++i)
        oa << out_values[i];
      detail::packed_archive_send(comm, tree.parent(), tag, oa);
    }
  }
Example #23
    /// compute the array domain of the target array
    domain_type domain() const {
        auto dims = ref.shape();
        long slow_size = first_dim(ref);

        // tag::reduce and tag::all_reduce: do nothing

        if (std::is_same<Tag, tag::scatter>::value) {
            mpi::broadcast(slow_size, c, root);
            dims[0] = mpi::slice_length(slow_size - 1, c.size(), c.rank());
        }

        if (std::is_same<Tag, tag::gather>::value) {
            auto s = mpi::reduce(slow_size, c, root);
            dims[0] = (c.rank()==root ? s : 1); // valid only on root
        }

        if (std::is_same<Tag, tag::allgather>::value) {
            dims[0] = mpi::all_reduce(slow_size, c, root); // in this case, it is valid on all nodes
        }

        return domain_type{dims};
    }
Example #24
File: reduce.hpp (project 8573/anura)
  void
  tree_reduce_impl(const communicator& comm, const T* in_values, int n,
                   T* out_values, Op op, int root, 
                   mpl::false_ /*is_commutative*/)
  {
    int tag = environment::collectives_tag();

    int left_child = root / 2;
    int right_child = (root + comm.size()) / 2;

    MPI_Status status;
    if (left_child != root) {
      // Receive value from the left child and merge it with the value
      // we had incoming.
      packed_iarchive ia(comm);
      detail::packed_archive_recv(comm, left_child, tag, ia, status);
      T incoming;
      for (int i = 0; i < n; ++i) {
        ia >> incoming;
        out_values[i] = op(incoming, in_values[i]);
      }
    } else {
      // ... (the rest of the non-commutative branch is truncated in this excerpt)
    }
  }
Example #25
void
all_reduce_one_test(const communicator& comm, Generator generator,
                    const char* type_kind, Op op, const char* op_kind,
                    typename Generator::result_type init, bool in_place)
{
  typedef typename Generator::result_type value_type;
  value_type value = generator(comm.rank());

  using boost::mpi::all_reduce;
  using boost::mpi::inplace;

  if (comm.rank() == 0) {
    std::cout << "Reducing to " << op_kind << " of " << type_kind << "...";
    std::cout.flush();
  }

  value_type result_value;
  if (in_place) {
    all_reduce(comm, inplace(value), op);
    result_value = value;
  } else {
    result_value = all_reduce(comm, value, op);
  }
  
  // Compute expected result
  std::vector<value_type> generated_values;
  for (int p = 0; p < comm.size(); ++p)
    generated_values.push_back(generator(p));
  value_type expected_result = std::accumulate(generated_values.begin(),
                                               generated_values.end(),
                                               init, op);
  BOOST_CHECK(result_value == expected_result);
  if (result_value == expected_result && comm.rank() == 0)
    std::cout << "OK." << std::endl;

  (comm.barrier)();
}
Example #26
void
ring_test(const communicator& comm, const T& pass_value, const char* kind,
          int root = 0)
{
  T transferred_value;

  int rank = comm.rank();
  int size = comm.size();

  if (rank == root) {
    std::cout << "Passing " << kind << " around a ring from root " << root
              << "...";
    comm.send((rank + 1) % size, 0, pass_value);
    comm.recv((rank + size - 1) % size, 0, transferred_value);
    BOOST_CHECK(transferred_value == pass_value);
    if (transferred_value == pass_value) std::cout << " OK." << std::endl;
  } else {
    comm.recv((rank + size - 1) % size, 0, transferred_value);
    BOOST_CHECK(transferred_value == pass_value);
    comm.send((rank + 1) % size, 0, transferred_value);
  }

  (comm.barrier)();
}
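The send/receive ordering is what keeps this ring deadlock-free: the root sends before it receives, and every other rank receives before it forwards, so at most one message is in flight at a time (with the usual caveat that a blocking send of a large payload may not complete until the matching receive is posted).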
Example #27
File: scan.hpp (project 0xDEC0DE8/mcsema)
 inline void
 scan_impl(const communicator& comm, const T* in_values, int n, T* out_values, 
           Op op, mpl::false_ /*is_mpi_op*/, mpl::false_/*is_mpi_datatype*/)
 {
   upper_lower_scan(comm, in_values, n, out_values, op, 0, comm.size());
 }
Example #28
File: mpi.hpp (project TRIQS/triqs)
 /**
   * Function to chunk a range, distributing it uniformly over all MPI ranks.
   *
   * @tparam T The type of the range
   *
   * @param range The range to chunk
   * @param comm The MPI communicator
   */
 template <typename T> auto chunk(T &&range, communicator comm = {}) {
   auto total_size           = std::distance(std::cbegin(range), std::cend(range));
   auto [start_idx, end_idx] = itertools::chunk_range(0, total_size, comm.size(), comm.rank());
   return itertools::slice(std::forward<T>(range), start_idx, end_idx);
 }
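A usage sketch (itertools::range comes from the same TRIQS itertools library; do_work is a hypothetical placeholder):

// Each rank iterates only over its own contiguous share of [0, 100).
void process_all(communicator comm)
{
  for (auto i : chunk(itertools::range(100), comm))
    do_work(i);  // do_work: hypothetical per-item function
}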
Example #29
void
nonblocking_test(const communicator& comm, const T* values, int num_values, 
                 const char* kind, method_kind method = mk_all)
{
  using boost::mpi::wait_any;
  using boost::mpi::test_any;
  using boost::mpi::wait_all;
  using boost::mpi::test_all;
  using boost::mpi::wait_some;
  using boost::mpi::test_some;

  if (method == mk_all || method == mk_all_except_test_all) {
    nonblocking_test(comm, values, num_values, kind, mk_wait_any);
    nonblocking_test(comm, values, num_values, kind, mk_test_any);
    nonblocking_test(comm, values, num_values, kind, mk_wait_all);
    nonblocking_test(comm, values, num_values, kind, mk_wait_all_keep);
    if (method == mk_all) {
      nonblocking_test(comm, values, num_values, kind, mk_test_all);
      nonblocking_test(comm, values, num_values, kind, mk_test_all_keep);
    }
    nonblocking_test(comm, values, num_values, kind, mk_wait_some);
    nonblocking_test(comm, values, num_values, kind, mk_wait_some_keep);
    nonblocking_test(comm, values, num_values, kind, mk_test_some);
    nonblocking_test(comm, values, num_values, kind, mk_test_some_keep);
  } else {
    if (comm.rank() == 0) {
      std::cout << "Testing " << method_kind_names[method] 
                << " with " << kind << "...";
      std::cout.flush();
    }

    typedef std::pair<status, std::vector<request>::iterator> 
      status_iterator_pair;

    T incoming_value;
    std::vector<T> incoming_values(num_values);

    std::vector<request> reqs;
    // Send/receive the first value
    reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 0, values[0]));
    reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(),
                              0, incoming_value));

    if (method != mk_wait_any && method != mk_test_any) {
#ifndef LAM_MPI
      // We've run into problems here (with 0-length messages) with
      // LAM/MPI on Mac OS X and x86-64 Linux. Will investigate
      // further at a later time, but the problem only seems to occur
      // when using shared memory, not TCP.

      // Send/receive an empty message
      reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 1));
      reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(),
                                1));
#endif

      // Send/receive an array
      reqs.push_back(comm.isend((comm.rank() + 1) % comm.size(), 2, values,
                                num_values));
      reqs.push_back(comm.irecv((comm.rank() + comm.size() - 1) % comm.size(),
                                2, &incoming_values.front(), num_values));
    }

    switch (method) {
    case mk_wait_any:
      if (wait_any(reqs.begin(), reqs.end()).second == reqs.begin())
        reqs[1].wait();
      else
        reqs[0].wait();
      break;

    case mk_test_any:
      {
        boost::optional<status_iterator_pair> result;
        do {
          result = test_any(reqs.begin(), reqs.end());
        } while (!result);
        if (result->second == reqs.begin())
          reqs[1].wait();
        else
          reqs[0].wait();
        break;
      }

    case mk_wait_all:
      wait_all(reqs.begin(), reqs.end());
      break;

    case mk_wait_all_keep:
      {
        std::vector<status> stats;
        wait_all(reqs.begin(), reqs.end(), std::back_inserter(stats));
      }
      break;

    case mk_test_all:
      while (!test_all(reqs.begin(), reqs.end())) { /* Busy wait */ }
      break;

    case mk_test_all_keep:
      {
        std::vector<status> stats;
        while (!test_all(reqs.begin(), reqs.end(), std::back_inserter(stats)))
          /* Busy wait */;
      }
      break;

    case mk_wait_some:
      {
        std::vector<request>::iterator pos = reqs.end();
        do {
          pos = wait_some(reqs.begin(), pos);
        } while (pos != reqs.begin());
      }
      break;

    case mk_wait_some_keep:
      {
        std::vector<status> stats;
        std::vector<request>::iterator pos = reqs.end();
        do {
          pos = wait_some(reqs.begin(), pos, std::back_inserter(stats)).second;
        } while (pos != reqs.begin());
      }
      break;

    case mk_test_some:
      {
        std::vector<request>::iterator pos = reqs.end();
        do {
          pos = test_some(reqs.begin(), pos);
        } while (pos != reqs.begin());
      }
      break;

    case mk_test_some_keep:
      {
        std::vector<status> stats;
        std::vector<request>::iterator pos = reqs.end();
        do {
          pos = test_some(reqs.begin(), pos, std::back_inserter(stats)).second;
        } while (pos != reqs.begin());
      }
      break;

    default:
      BOOST_CHECK(false);
    }

    if (comm.rank() == 0) {
      bool okay = true;

      if (!((incoming_value == values[0])))
        okay = false;

      if (method != mk_wait_any && method != mk_test_any
          && !std::equal(incoming_values.begin(), incoming_values.end(),
                         values))
        okay = false;

      if (okay)
        std::cout << "OK." << std::endl;
      else
        std::cerr << "ERROR!" << std::endl;
    }

    BOOST_CHECK(incoming_value == values[0]);

    if (method != mk_wait_any && method != mk_test_any)
      BOOST_CHECK(std::equal(incoming_values.begin(), incoming_values.end(),
                             values));
  }
}
Example #30
void
test_skeleton_and_content(const communicator& comm, int root = 0)
{
  using boost::mpi::content;
  using boost::mpi::get_content;
  using boost::make_counting_iterator;
  using boost::mpi::broadcast;

  typedef std::list<int>::iterator iterator;

  int list_size = comm.size() + 7;
  if (comm.rank() == root) {
    // Fill in the seed data
    std::list<int> original_list;
    for (int i = 0; i < list_size; ++i)
      original_list.push_back(i);

    // Build up the skeleton
    packed_skeleton_oarchive oa(comm);
    oa << original_list;

    // Broadcast the skeleton
    std::cout << "Broadcasting integer list skeleton from root " << root
              << "...";
    broadcast(comm, oa, root);
    std::cout << "OK." << std::endl;

    // Broadcast the content
    std::cout << "Broadcasting integer list content from root " << root
              << "...";
    {
      content c = get_content(original_list);
      broadcast(comm, c, root);
    }
    std::cout << "OK." << std::endl;

    // Reverse the list, broadcast the content again
    std::reverse(original_list.begin(), original_list.end());
    std::cout << "Broadcasting reversed integer list content from root "
              << root << "...";
    {
      content c = get_content(original_list);
      broadcast(comm, c, root);
    }
    std::cout << "OK." << std::endl;
  } else {
    // Allocate some useless data, to try to get the addresses of the
    // list<int>'s used later to be different across processes.
    std::list<int> junk_list(comm.rank() * 3 + 1, 17);

    // Receive the skeleton
    packed_skeleton_iarchive ia(comm);
    broadcast(comm, ia, root);

    // Build up a list to match the skeleton, and make sure it has the
    // right structure (we have no idea what the data will be).
    std::list<int> transferred_list;
    ia >> transferred_list;
    BOOST_CHECK((int)transferred_list.size() == list_size);

    // Receive the content and check it
    broadcast(comm, get_content(transferred_list), root);
    BOOST_CHECK(std::equal(make_counting_iterator(0),
                           make_counting_iterator(list_size),
                           transferred_list.begin()));

    // Receive the reversed content and check it
    broadcast(comm, get_content(transferred_list), root);
    BOOST_CHECK(std::equal(make_counting_iterator(0),
                           make_counting_iterator(list_size),
                           transferred_list.rbegin()));
  }

  (comm.barrier)();
}