Example No. 1
void mpi_process_group::pack_headers() const 
{
  for (process_id_type other = 0; other < num_processes(*this); ++other) {
    typedef std::vector<impl::message_header>::iterator iterator;

    impl::incoming_messages& incoming = impl_->incoming[other];

    buffer_type remaining_buffer;
    std::vector<impl::message_header> remaining_headers;

    iterator end = incoming.headers.end();
    for (iterator i = incoming.headers.begin(); i != end; ++i) {
      if (i->tag == -1)
        continue;

      // Hold on to this message until the next stage
      remaining_headers.push_back(*i);
      remaining_headers.back().offset = remaining_buffer.size();
      remaining_buffer.insert(remaining_buffer.end(),
                              &incoming.buffer[i->offset],
                              &incoming.buffer[i->offset] + i->bytes);
    }
    
    // Swap the remaining messages into the "incoming" set.
    incoming.headers.swap(remaining_headers);
    incoming.buffer.swap(remaining_buffer);

    // Set up the iterators pointing to the next header in each block
    for (std::size_t i = 0; i < incoming.next_header.size(); ++i)
      incoming.next_header[i] = incoming.headers.begin();
  }
}
Example No. 2
//-----------------------------------------------------------------------------
std::pair<std::size_t, std::size_t> dolfin::MPI::local_range(unsigned int process,
                                                             std::size_t N)
{
  if (process != 0 || num_processes() > 1)
  {
    dolfin_error("MPI.cpp",
                 "access local range for process",
                 "DOLFIN has not been configured with MPI support");
  }
  return std::make_pair(0, N);
}
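
A minimal illustrative call of the overload above in a single-process build (the value 100 is hypothetical): the error branch is skipped only when process is 0 and there is at most one process, so the whole index range comes back.

std::pair<std::size_t, std::size_t> r = dolfin::MPI::local_range(0, 100);
// r.first == 0, r.second == 100: the single process owns the half-open range [0, 100)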
Example No. 3
//-----------------------------------------------------------------------------
unsigned int dolfin::MPI::index_owner(std::size_t index, std::size_t N)
{
  dolfin_assert(index < N);

  // Get number of processes
  const unsigned int _num_processes = num_processes();

  // Compute number of items per process and remainder
  const std::size_t n = N / _num_processes;
  const std::size_t r = N % _num_processes;

  // First r processes own n + 1 indices
  if (index < r * (n + 1))
    return index / (n + 1);

  // Remaining processes own n indices
  return r + (index - r * (n + 1)) / n;
}
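
A quick worked check of this arithmetic with hypothetical sizes: for N = 10 and 3 processes, n = 10 / 3 = 3 and r = 10 % 3 = 1, so process 0 owns the four indices 0..3 while processes 1 and 2 own three indices each (4..6 and 7..9). For index 5 we have 5 >= r * (n + 1) = 4, so the owner is r + (5 - 4) / n = 1.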
Example No. 4
metis_distribution_mod::metis_distribution_mod(std::istream& in,
  boost::graph::distributed::mpi_process_group& pg)
  : in(in), my_id(process_id(pg)), 
    vertices(std::istream_iterator<process_id_type>(in),
             std::istream_iterator<process_id_type>())
{
    local_mapping.resize(vertices.size());
    for (int id = 0; id < num_processes(pg); id++)
    {
        size_type count_n = 0;
        int index = 0;
        for (std::vector<process_id_type>::const_iterator i=vertices.begin(); i < vertices.end(); ++i) {
            if (id == *i) {
                local_mapping[index] = count_n++;
            }
            index++;
        }
    }
}
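
The stream handed to this constructor is consumed through std::istream_iterator<process_id_type>, so it is simply a whitespace-separated list of owner ids, one per vertex, read until end of stream. A hypothetical input for six vertices spread over three processes could look like

0 0 1 2 1 0

which assigns vertices 0, 1 and 5 to process 0, vertices 2 and 4 to process 1, and vertex 3 to process 2.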
Example No. 5
template<typename ProcessGroup, typename T>
// where {LinearProcessGroup<ProcessGroup>, MessagingProcessGroup<ProcessGroup>}
void
inplace_all_to_all(ProcessGroup pg,
                   const std::vector<std::vector<T> >& outgoing,
                   std::vector<std::vector<T> >& incoming)
{
  typedef typename std::vector<T>::size_type size_type;

  typedef typename ProcessGroup::process_size_type process_size_type;
  typedef typename ProcessGroup::process_id_type process_id_type;

  process_size_type p = num_processes(pg);

  // Make sure there are no straggling messages
  synchronize(pg);

  // Send along the count (always) and the data (if count > 0)
  for (process_id_type dest = 0; dest < p; ++dest) {
    if (dest != process_id(pg)) {
      send(pg, dest, 0, outgoing[dest].size());
      if (!outgoing[dest].empty())
        send(pg, dest, 1, &outgoing[dest].front(), outgoing[dest].size());
    }
  }

  // Make sure all of the data gets transferred
  synchronize(pg);

  // Receive the sizes and data
  for (process_id_type source = 0; source < p; ++source) {
    if (source != process_id(pg)) {
      size_type size;
      receive(pg, source, 0, size);
      incoming[source].resize(size);
      if (size > 0)
        receive(pg, source, 1, &incoming[source].front(), size);
    } else if (&incoming != &outgoing) {
      incoming[source] = outgoing[source];
    }
  }
}
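
A minimal usage sketch of the routine above, assuming pg models the concepts named in the comment; the payload (each process sending its own rank to every peer) is purely illustrative:

std::vector<std::vector<int> > outgoing(num_processes(pg));
std::vector<std::vector<int> > incoming(num_processes(pg));
for (int dest = 0; dest < num_processes(pg); ++dest)
  outgoing[dest].push_back(process_id(pg)); // one value per destination
inplace_all_to_all(pg, outgoing, incoming);
// afterwards incoming[src] holds the values process src sent to us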
Example No. 6
void mpi_process_group::synchronize() const
{
  // Don't synchronize if we've already finished
  if (boost::mpi::environment::finalized()) 
    return;

#ifdef DEBUG
  std::cerr << "SYNC: " << process_id(*this) << std::endl;
#endif

  emit_on_synchronize();

  process_id_type id = process_id(*this);     // Our rank
  process_size_type p = num_processes(*this); // The number of processes

  // Pack the remaining incoming messages into the beginning of the
  // buffers, so that we can receive new messages in this
  // synchronization step without losing those messages that have not
  // yet been received.
  pack_headers();

  impl_->synchronizing_stage[id] = -1;
  int stage=-1;
  bool no_new_messages = false;
  while (true) {
      ++stage;
#ifdef DEBUG
      std::cerr << "SYNC: " << id << " starting stage " << (stage+1) << ".\n";
#endif

      // Tell everyone that we are synchronizing. Note: we use MPI_Isend since 
      // we absolutely cannot have any of these operations blocking.
      
      // increment the stage for the source
       ++impl_->synchronizing_stage[id];
       if (impl_->synchronizing_stage[id] != stage)
         std::cerr << "Expected stage " << stage << ", got " << impl_->synchronizing_stage[id] << std::endl;
       BOOST_ASSERT(impl_->synchronizing_stage[id]==stage);
      // record how many still have messages to be sent
      if (static_cast<int>(impl_->synchronizing_unfinished.size())<=stage) {
        BOOST_ASSERT(static_cast<int>(impl_->synchronizing_unfinished.size()) == stage);
        impl_->synchronizing_unfinished.push_back(no_new_messages ? 0 : 1);
      }
      else
        impl_->synchronizing_unfinished[stage]+=(no_new_messages ? 0 : 1);

      // record how many are in that stage
      if (static_cast<int>(impl_->processors_synchronizing_stage.size())<=stage) {
        BOOST_ASSERT(static_cast<int>(impl_->processors_synchronizing_stage.size()) == stage);
        impl_->processors_synchronizing_stage.push_back(1);
      }
      else
        ++impl_->processors_synchronizing_stage[stage];

      impl_->synchronizing = true;

      for (int dest = 0; dest < p; ++dest) {
        int sync_message = no_new_messages ? -1 : impl_->number_sent_batches[dest];
        if (dest != id) {
          impl_->number_sent_batches[dest]=0;       
          MPI_Request request;
          MPI_Isend(&sync_message, 1, MPI_INT, dest, msg_synchronizing, impl_->comm,&request);
          int done=0;
          do {
            poll();
            MPI_Test(&request,&done,MPI_STATUS_IGNORE);
          } while (!done);
        }
        else { // need to subtract how many messages I should have received
          impl_->number_received_batches[id] -=impl_->number_sent_batches[id];
          impl_->number_sent_batches[id]=0;
        }
      }

      // Keep handling out-of-band messages until everyone has gotten
      // to this point.
      while (impl_->processors_synchronizing_stage[stage] <p) {
        // with the trigger based solution we cannot easily pass true here 
        poll(/*wait=*/false, -1, true);

      }

      // check that everyone is at least here
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT(impl_->synchronizing_stage[source] >= stage);

      // receive any batches sent in the meantime
      // all have to be available already
      while (true) {
        bool done=true;
        for (int source=0; source<p ; ++source)
          if(impl_->number_received_batches[source] < 0)
            done = false;
        if (done)
          break;
        poll(false,-1,true);
      }
      
#ifndef NO_IMMEDIATE_PROCESSING
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT(impl_->number_received_batches[source] >= 0);
#endif

      impl_->synchronizing = false;
      
      // Flush out remaining messages
      if (impl_->synchronizing_unfinished[stage]==0)
        break;
#ifdef NO_IMMEDIATE_PROCESSING
      for (process_id_type dest = 0; dest < p; ++dest)
        process_batch(dest);
#endif

      no_new_messages = true;
      for (process_id_type dest = 0; dest < p; ++dest) {
        if (impl_->outgoing[dest].headers.size() || 
            impl_->number_sent_batches[dest]>0)
          no_new_messages = false;
        send_batch(dest);
      }
    }

  impl_->comm.barrier/*nomacro*/();
#if 0
  // set up for next synchronize call
  for (int source=0; source<p; ++source) {
    if (impl_->synchronizing_stage[source] != stage) {
      std::cerr << id << ": expecting stage " << stage << " from source "
                << source << ", got " << impl_->synchronizing_stage[source]
                << std::endl;
    }
    BOOST_ASSERT(impl_->synchronizing_stage[source]==stage);
  }
#endif
  std::fill(impl_->synchronizing_stage.begin(),
            impl_->synchronizing_stage.end(), -1);
            
  // get rid of the information regarding recorded numbers of processors
  // for the stages we just finished
  impl_->processors_synchronizing_stage.clear();
  impl_->synchronizing_unfinished.clear();

  for (process_id_type dest = 0; dest < p; ++dest)
    BOOST_ASSERT (impl_->outgoing[dest].headers.empty());
#ifndef NO_IMMEDIATE_PROCESSING
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT (impl_->number_received_batches[source] == 0);
#endif

  impl_->free_sent_batches();
#ifdef DEBUG
  std::cerr << "SYNC: " << process_id(*this) << " completed." << std::endl;
#endif
}
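
The protocol above exists to support the usual BSP-style superstep: messages posted through the process group are delivered to their receivers only once every process has entered synchronize(). A hedged sketch of that calling pattern, reusing the send/receive/synchronize interface already visible in Example No. 5 (the ring exchange itself is illustrative):

mpi_process_group pg;
const int id = process_id(pg);
const int p  = num_processes(pg);

int token = id;
send(pg, (id + 1) % p, 0, token);            // post a message to the next rank

synchronize(pg);                             // superstep boundary: messages delivered

int received;
receive(pg, (id + p - 1) % p, 0, received);  // the neighbour's token is now available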
Example No. 7
//-----------------------------------------------------------------------------
bool dolfin::MPI::is_broadcaster()
{
  // Always broadcast from processor number 0
  return num_processes() > 1 && process_number() == 0;
}
Example No. 8
//-----------------------------------------------------------------------------
std::pair<std::size_t, std::size_t> dolfin::MPI::local_range(unsigned int process,
                                                             std::size_t N)
{
  return local_range(process, N, num_processes());
}
Example No. 9
//-----------------------------------------------------------------------------
bool dolfin::MPI::is_receiver()
{
  // Always receive on processors with numbers > 0
  return num_processes() > 1 && process_number() > 0;
}
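
Together with is_broadcaster() from Example No. 7, this predicate supports the usual distribute-from-rank-0 idiom; a minimal sketch (the work inside the branches is not taken from the examples above):

if (dolfin::MPI::is_broadcaster())
{
  // process 0 prepares and sends out the data
}
else if (dolfin::MPI::is_receiver())
{
  // every other process receives it
}
// with a single process both predicates are false and nothing is exchanged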
Example No. 10
 shuffled_distribution(ProcessGroup const& pg, BaseDistribution const& base)
   : BaseDistribution(base)
   , n(num_processes(pg))
   , mapping_(make_counting_iterator(size_type(0)), make_counting_iterator(n))
   , reverse_mapping(mapping_)
 {}
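
Since mapping_ is filled from a pair of counting iterators, it starts out as the identity permutation over the n process ids (for a hypothetical 4-process group, {0, 1, 2, 3}), and reverse_mapping is initialized as a copy of it.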
Example No. 11
#include <stdio.h>
#include <stdlib.h>

/* num_processes() is assumed to be declared by a project header that is
   not part of this snippet. */

int main()
{
	fprintf(stderr, "Number of my processes is %d\n", num_processes());
	exit(0);
}