Example #1
optional<std::pair<mpi_process_group::process_id_type, int> >
mpi_process_group::probe() const
{
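  // Scan each source's pending headers for the first message whose tag is
  // addressed to this block and report (source, user tag) without consuming
  // the message; an empty optional means nothing is currently waiting.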
#ifdef DEBUG
  std::cerr << "PROBE: " << process_id(*this) << ", tag block = "
            << my_block_number() << std::endl;
#endif

  typedef std::pair<process_id_type, int> result_type;

  int tag_block = my_block_number();

  for (std::size_t source = 0; source < impl_->incoming.size(); ++source) {
    impl::incoming_messages& incoming = impl_->incoming[source];
    std::vector<impl::message_header>::iterator& i =
      incoming.next_header[tag_block];
    std::vector<impl::message_header>::iterator end =  incoming.headers.end();

    while (i != end) {
      if (i->tag != -1 && decode_tag(i->tag).first == my_block_number()) {
#ifdef DEBUG
        std::cerr << "PROBE: " << process_id(*this) << " <- " << source
                  << ", block = " << my_block_number() << ", tag = "
                  << decode_tag(i->tag).second << ", bytes = " << i->bytes
                  << std::endl;
#endif
        return result_type(source, decode_tag(i->tag).second);
      }
      ++i;
    }
  }
  return optional<result_type>();
}
Example #2
File: main.cpp  Project: CCJY/coliru
int main()
{
    process_id("A");
    process_id("B");
    process_id("C");
    process_id("D"); //will not be invoked!
}
Example #3
int node_id::compare(const node_id& other) const {
    if (this == &other || data_ == other.data_)
        return 0; // shortcut for comparing to self or identical instances
    if (! data_ != ! other.data_)
        return data_ ? 1 : -1; // invalid instances are always smaller
    int tmp = strncmp(reinterpret_cast<const char*>(host_id().data()),
                      reinterpret_cast<const char*>(other.host_id().data()),
                      host_id_size);
    return tmp != 0
           ? tmp
           : (process_id() < other.process_id()
              ? -1
              : (process_id() == other.process_id() ? 0 : 1));
}
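A minimal companion sketch (not part of the example above; these operator overloads are assumed, not taken from the library): a three-way compare() like this is typically surfaced through the relational operators:
inline bool operator==(const node_id& lhs, const node_id& rhs) {
    return lhs.compare(rhs) == 0;
}
inline bool operator!=(const node_id& lhs, const node_id& rhs) {
    return !(lhs == rhs);
}
inline bool operator<(const node_id& lhs, const node_id& rhs) {
    return lhs.compare(rhs) < 0;
}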
Example #4
bool Conference::setRecording()
{
    bool recordStatus = Recordable::recAudio_.isRecording();

    Recordable::recAudio_.setRecording();
    MainBuffer *mbuffer = Manager::instance().getMainBuffer();

    std::string process_id(Recordable::recorder_.getRecorderID());

    // start recording
    if (!recordStatus) {
        for (ParticipantSet::const_iterator iter = participants_.begin(); iter != participants_.end(); ++iter)
            mbuffer->bindHalfDuplexOut(process_id, *iter);

        mbuffer->bindHalfDuplexOut(process_id, MainBuffer::DEFAULT_ID);

        Recordable::recorder_.start();
    } else {
        for (ParticipantSet::const_iterator iter = participants_.begin(); iter != participants_.end(); ++iter)
            mbuffer->unBindHalfDuplexOut(process_id, *iter);

        mbuffer->unBindHalfDuplexOut(process_id, MainBuffer::DEFAULT_ID);
    }

    return recordStatus;
}
void default_actor_addressing::write(serializer* sink, const actor_ptr& ptr) {
    CPPA_REQUIRE(sink != nullptr);
    if (ptr == nullptr) {
        CPPA_LOGMF(CPPA_DEBUG, self, "serialized nullptr");
        sink->begin_object("@0");
        sink->end_object();
    }
    else {
        // local actor?
        if (!ptr->is_proxy()) {
            get_actor_registry()->put(ptr->id(), ptr);
        }
        auto pinf = m_pinf;
        if (ptr->is_proxy()) {
            auto dptr = ptr.downcast<default_actor_proxy>();
            if (dptr) pinf = dptr->process_info();
            else CPPA_LOGMF(CPPA_ERROR, self, "downcast failed");
        }
        sink->begin_object("@actor");
        sink->write_value(ptr->id());
        sink->write_value(pinf->process_id());
        sink->write_raw(process_information::node_id_size,
                        pinf->node_id().data());
        sink->end_object();
    }
}
actor_ptr default_actor_addressing::read(deserializer* source) {
    CPPA_REQUIRE(source != nullptr);
    auto cname = source->seek_object();
    if (cname == "@0") {
        CPPA_LOGMF(CPPA_DEBUG, self, "deserialized nullptr");
        source->begin_object("@0");
        source->end_object();
        return nullptr;
    }
    else if (cname == "@actor") {
        process_information::node_id_type nid;
        source->begin_object(cname);
        auto aid = source->read<uint32_t>();
        auto pid = source->read<uint32_t>();
        source->read_raw(process_information::node_id_size, nid.data());
        source->end_object();
        // local actor?
        auto pinf = process_information::get();
        if (pid == pinf->process_id() && nid == pinf->node_id()) {
            return get_actor_registry()->get(aid);
        }
        else {
            process_information tmp(pid, nid);
            return get_or_put(tmp, aid);
        }
    }
    else throw runtime_error("expected type name \"@0\" or \"@actor\"; "
                                 "found: " + cname);
}
Example #7
void
mpi_process_group::make_distributed_object()
{
  if (my_block_number() == 0) {
    allocate_block();

    for (std::size_t i = 0; i < impl_->incoming.size(); ++i) {
      if (my_block_number() >= (int)impl_->incoming[i].next_header.size()) {
        impl_->incoming[i].next_header
          .push_back(impl_->incoming[i].headers.begin());
      } else {
        impl_->incoming[i].next_header[my_block_number()] =
          impl_->incoming[i].headers.begin();
      }

#ifdef DEBUG
      if (process_id(*this) == 0) {
        std::cerr << "Allocated tag block " << my_block_number() << std::endl;
      }
#endif
    }
  } else {
    // Clear out the existing triggers
    std::vector<shared_ptr<trigger_base> >()
      .swap(impl_->blocks[my_block_number()]->triggers); 
  }

  // Clear out the receive handler
  impl_->blocks[my_block_number()]->on_receive = 0;
}
void statement(void)
{
	token tok = next_token();
	expr_rec source,target;

	switch (tok){
		case ID:
			/*<statement> ::= ID := <expression>; */
			match(ID);
			target = process_id();
			match(ASSIGNOP);
			expression(& source); 
			match(SEMICOLON);
			assign(target,source);
			break;

		case READ:
			/*<statement> ::= READ (<id list>); */
			match(READ); match(LPAREN);
			id_list(); match(RPAREN);
			match(SEMICOLON);
			break;

		case WRITE:
			/*<statement> ::= WRITE (<expr list>); */
			match(WRITE); match(LPAREN);
			expr_list(); match(RPAREN);
			match(SEMICOLON);
			break;

		default:
			syntax_error(tok);
			break;
	}
}
void primary(expr_rec * x)
{
	token tok = next_token();

	switch (tok){
		case LPAREN:
			/*<primary> ::= (<expression>) */
			match(LPAREN); 
            
			expression(x);
			match(RPAREN);
			break;

		case ID:
			/*<primary> ::= ID*/
			match(ID);
			expr_rec p;
			p=process_id();
			*x=p;
			break;

		case INTLITERAL:
			/*<primary> ::= INTLITERAL*/
			match(INTLITERAL);
			expr_rec y;
			y=process_literal();
			*x=y;
			break;

		default:
			syntax_error(tok);
			break;
	}
}
Example #10
File: Rnd.hpp  Project: JulianKunkel/siox
	inline ComponentID *component_id()
	{
		ComponentID *cid = new ComponentID;
		cid->pid = *process_id();
		cid->id = randr();
		
		return cid;
	}
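A hedged usage sketch (only ComponentID and component_id() are taken from the snippet above; the caller-side code is assumed): component_id() hands back a raw, heap-allocated object, so the caller owns it and may want to take ownership right away:
#include <memory>

std::unique_ptr<ComponentID> cid(component_id());
// use cid->pid and cid->id; the ComponentID is deleted automatically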
 inline void
 put(const local_property_map<ProcessGroup, GlobalMap, StorageMap>& pm, 
     typename local_property_map<ProcessGroup, GlobalMap, StorageMap>
                ::key_type const & key,
     typename local_property_map<ProcessGroup, GlobalMap, StorageMap>
                ::value_type const& v)
 {
   typename property_traits<GlobalMap>::value_type p = get(pm.global(), key);
   BOOST_ASSERT(p.first == process_id(pm.process_group()));
   put(pm.base(), p.second, v);
 }
		PROCESS_INFORMATION ScopedProcessInformation::Take() {
			PROCESS_INFORMATION process_information = {};
			process_information.hProcess = process_handle_.Take();
			process_information.hThread = thread_handle_.Take();
			process_information.dwProcessId = process_id();
			process_information.dwThreadId = thread_id();
			process_id_ = 0;
			thread_id_ = 0;

			return process_information;
		}
// where {LinearProcessGroup<ProcessGroup>, MessagingProcessGroup<ProcessGroup>}
void
inplace_all_to_all(ProcessGroup pg,
                   const std::vector<std::vector<T> >& outgoing,
                   std::vector<std::vector<T> >& incoming)
{
  typedef typename std::vector<T>::size_type size_type;

  typedef typename ProcessGroup::process_size_type process_size_type;
  typedef typename ProcessGroup::process_id_type process_id_type;

  process_size_type p = num_processes(pg);

  // Make sure there are no straggling messages
  synchronize(pg);

  // Send along the count (always) and the data (if count > 0)
  for (process_id_type dest = 0; dest < p; ++dest) {
    if (dest != process_id(pg)) {
      send(pg, dest, 0, outgoing[dest].size());
      if (!outgoing[dest].empty())
        send(pg, dest, 1, &outgoing[dest].front(), outgoing[dest].size());
    }
  }

  // Make sure all of the data gets transferred
  synchronize(pg);

  // Receive the sizes and data
  for (process_id_type source = 0; source < p; ++source) {
    if (source != process_id(pg)) {
      size_type size;
      receive(pg, source, 0, size);
      incoming[source].resize(size);
      if (size > 0)
        receive(pg, source, 1, &incoming[source].front(), size);
    } else if (&incoming != &outgoing) {
      incoming[source] = outgoing[source];
    }
  }
}
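A minimal usage sketch for inplace_all_to_all (only the signature above is taken from the snippet; the caller and its headers are assumed): each rank posts one value per destination and reads back one value per source:
template <typename ProcessGroup>
void exchange_ranks(ProcessGroup& pg)
{
  typedef typename ProcessGroup::process_id_type process_id_type;
  typedef typename ProcessGroup::process_size_type process_size_type;

  process_size_type p = num_processes(pg);
  std::vector<std::vector<int> > outgoing(p), incoming(p);

  for (process_id_type dest = 0; dest < p; ++dest)
    outgoing[dest].push_back(static_cast<int>(process_id(pg))); // our rank

  inplace_all_to_all(pg, outgoing, incoming);
  // incoming[src] now holds the single value sent by process src
}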
Example #14
int node_id::compare(const node_id& other) const {
  if (this == &other) {
    return 0; // shortcut for comparing to self
  }
  if (m_data == other.m_data) {
    return 0; // shortcut for identical instances
  }
  if ((m_data != nullptr) != (other.m_data != nullptr)) {
    return m_data ? 1 : -1; // invalid instances are always smaller
  }
  int tmp = strncmp(reinterpret_cast<const char*>(host_id().data()),
                    reinterpret_cast<const char*>(other.host_id().data()),
                    host_id_size);
  if (tmp == 0) {
    if (process_id() < other.process_id()) {
      return -1;
    } else if (process_id() == other.process_id()) {
      return 0;
    }
    return 1;
  }
  return tmp;
}
Example #15
Handle* ProcessDispatcher::GetHandleLocked(zx_handle_t handle_value,
                                           bool skip_policy) {
    auto handle = map_value_to_handle(handle_value, handle_rand_);
    if (handle && handle->process_id() == get_koid())
        return handle;

    // Handle lookup failed.  We potentially generate an exception,
    // depending on the job policy.  Note that we don't use the return
    // value from QueryBasicPolicy() here: ZX_POL_ACTION_ALLOW and
    // ZX_POL_ACTION_DENY are equivalent for ZX_POL_BAD_HANDLE.
    if (likely(!skip_policy))
        QueryBasicPolicy(ZX_POL_BAD_HANDLE);
    return nullptr;
}
Example #16
int node_id::compare(const node_id& other) const {
  if (this == &other || data_ == other.data_)
    return 0; // shortcut for comparing to self or identical instances
  if (!data_ != !other.data_)
    return data_ ? 1 : -1; // invalid instances are always smaller
  // use mismatch instead of strncmp because the
  // latter bails out on the first 0-byte
  auto last = host_id().end();
  auto ipair = std::mismatch(host_id().begin(), last, other.host_id().begin());
  if (ipair.first == last)
    return static_cast<int>(process_id())-static_cast<int>(other.process_id());
  else if (*ipair.first < *ipair.second)
    return -1;
  else
    return 1;
}
metis_distribution_mod::metis_distribution_mod(std::istream& in,
  boost::graph::distributed::mpi_process_group& pg)
  : in(in), my_id(process_id(pg)), 
    vertices(std::istream_iterator<process_id_type>(in),
             std::istream_iterator<process_id_type>())
{
    local_mapping.resize(vertices.size());
    for (int id = 0; id < num_processes(pg); id++)
    {
        size_type count_n = 0;
        int index = 0;
        for (std::vector<process_id_type>::const_iterator i=vertices.begin(); i < vertices.end(); ++i) {
            if (id == *i) {
                local_mapping[index] = count_n++;
            }
            index++;
        }
    }
}
actor_ptr default_actor_addressing::read(deserializer* source) {
    CPPA_REQUIRE(source != nullptr);
    process_information::node_id_type nid;
    auto aid = source->read<uint32_t>();
    auto pid = source->read<uint32_t>();
    source->read_raw(process_information::node_id_size, nid.data());
    // local actor?
    auto pinf = process_information::get();
    if (aid == 0 && pid == 0) {
        return nullptr;
    }
    else if (pid == pinf->process_id() && nid == pinf->node_id()) {
        return get_actor_registry()->get(aid);
    }
    else {
        process_information tmp{pid, nid};
        return get_or_put(tmp, aid);
    }
}
void actor_namespace::write(serializer* sink, const actor_addr& addr) {
  CAF_ASSERT(sink != nullptr);
  if (! addr) {
    node_id::host_id_type zero;
    std::fill(zero.begin(), zero.end(), 0);
    sink->write_value(static_cast<actor_id>(0));         // actor id
    sink->write_raw(node_id::host_id_size, zero.data()); // host id
    sink->write_value(static_cast<uint32_t>(0));         // process id
  } else {
    // register locally running actors to be able to deserialize them later
    if (! addr.is_remote()) {
      auto reg = detail::singletons::get_actor_registry();
      reg->put(addr.id(), actor_cast<abstract_actor_ptr>(addr));
    }
    auto pinf = addr.node();
    sink->write_value(addr.id());                                  // actor id
    sink->write_raw(node_id::host_id_size, pinf.host_id().data()); // host id
    sink->write_value(pinf.process_id());                          // process id
  }
}
Example #20
bool Conference::toggleRecording()
{
    const bool startRecording = Recordable::toggleRecording();
    std::string process_id(Recordable::recAudio_->getRecorderID());
    auto& rbPool = Manager::instance().getRingBufferPool();

    // start recording
    if (startRecording) {
        for (const auto &item : participants_)
            rbPool.bindHalfDuplexOut(process_id, item);

        rbPool.bindHalfDuplexOut(process_id, RingBufferPool::DEFAULT_ID);
    } else {
        for (const auto &item : participants_)
            rbPool.unBindHalfDuplexOut(process_id, item);

        rbPool.unBindHalfDuplexOut(process_id, RingBufferPool::DEFAULT_ID);
    }

    return startRecording;
}
Example #21
int mpi_process_group::allocate_block(bool out_of_band_receive)
{
  BOOST_ASSERT(!block_num);
  block_iterator i = impl_->blocks.begin();
  while (i != impl_->blocks.end() && *i) ++i;

  if (i == impl_->blocks.end()) {
    impl_->blocks.push_back(new block_type());
    i = impl_->blocks.end() - 1;
  } else {
    *i = new block_type();
  }

  block_num.reset(new int(i - impl_->blocks.begin()),
                  deallocate_block(&impl_->blocks));

#ifdef DEBUG
  fprintf(stderr,
          "Processor %i allocated block #%i\n", process_id(*this), *block_num);
#endif

  return *block_num;
}
void default_actor_addressing::write(serializer* sink, const actor_ptr& ptr) {
    CPPA_REQUIRE(sink != nullptr);
    if (ptr == nullptr) {
        CPPA_LOG_DEBUG("serialize nullptr");
        sink->write_value(static_cast<actor_id>(0));
        process_information::serialize_invalid(sink);
    }
    else {
        // local actor?
        if (!ptr->is_proxy()) {
            get_actor_registry()->put(ptr->id(), ptr);
        }
        auto pinf = m_pinf;
        if (ptr->is_proxy()) {
            auto dptr = ptr.downcast<default_actor_proxy>();
            if (dptr) pinf = dptr->process_info();
            else CPPA_LOG_ERROR("downcast failed");
        }
        sink->write_value(ptr->id());
        sink->write_value(pinf->process_id());
        sink->write_raw(process_information::node_id_size,
                        pinf->node_id().data());
    }
}
Example #23
mpi_process_group::mpi_process_group(const mpi_process_group& other,
                                     attach_distributed_object, bool)
  : impl_(other.impl_)
{ 
  rank = impl_->comm.rank();
  size = impl_->comm.size();
  allocate_block();

  for (std::size_t i = 0; i < impl_->incoming.size(); ++i) {
    if (my_block_number() >= (int)impl_->incoming[i].next_header.size()) {
      impl_->incoming[i].next_header
        .push_back(impl_->incoming[i].headers.begin());
    } else {
      impl_->incoming[i].next_header[my_block_number()] =
        impl_->incoming[i].headers.begin();
    }

#ifdef DEBUG
    if (process_id(*this) == 0) {
      std::cerr << "Allocated tag block " << my_block_number() << std::endl;
    }
#endif
  }
}
Example #24
void
mpi_process_group::send_batch(process_id_type dest, 
                              outgoing_messages& outgoing) const
{
  impl_->free_sent_batches();
  process_id_type id = process_id(*this);

  // clear the batch
#ifdef DEBUG
  std::cerr << "Sending batch: " << id << " -> "  << dest << std::endl;
#endif
  // we increment the number of batches sent
  ++impl_->number_sent_batches[dest];
  // and send the batch
  BOOST_ASSERT(outgoing.headers.size() <= impl_->batch_header_number);
  if (id != dest) {
#ifdef NO_ISEND_BATCHES
    impl::batch_request req;
#else
#ifdef PREALLOCATE_BATCHES
    while (impl_->free_batches.empty()) {
      impl_->free_sent_batches();
      poll();
    }
    impl::batch_request& req = impl_->batch_pool[impl_->free_batches.top()];
    impl_->free_batches.pop();
#else
    impl_->sent_batches.push_back(impl::batch_request());
    impl::batch_request& req = impl_->sent_batches.back();
#endif
#endif
    boost::mpi::packed_oarchive oa(impl_->comm,req.buffer);
    oa << outgoing;

    int tag = msg_batch;
    
#ifdef IRECV_BATCH
    if (oa.size() > impl_->batch_message_size)
      tag = msg_large_batch;
#endif

#ifndef NDEBUG // Prevent an unused-variable warning when NDEBUG is on
    int result =
#endif // !NDEBUG
      MPI_Isend(const_cast<void*>(oa.address()), oa.size(),
                MPI_PACKED, dest, tag, impl_->comm,
                &req.request);
    BOOST_ASSERT(result == MPI_SUCCESS);
    impl_->max_sent = (std::max)(impl_->max_sent,impl_->sent_batches.size());
#ifdef NO_ISEND_BATCHES
    int done=0;
    do {
        poll();
        MPI_Test(&req.request,&done,MPI_STATUS_IGNORE);
    } while (!done);
#else
#ifdef MAX_BATCHES
    while (impl_->sent_batches.size() >= MAX_BATCHES-1) {
      impl_->free_sent_batches();
      poll();
    }
#endif
#endif
  }
  else
    receive_batch(id,outgoing);
}
Example #25
void DataReaderListenerImpl::on_data_available(DDS::DataReader_ptr reader)
throw(CORBA::SystemException)
{
  try {
    Messenger::MessageDataReader_var message_dr =
      Messenger::MessageDataReader::_narrow(reader);

    if (CORBA::is_nil(message_dr.in())) {
      ACE_ERROR((LM_ERROR,
                 ACE_TEXT("%T %N:%l: on_data_available()")
                 ACE_TEXT(" ERROR: _narrow failed!\n")));
      ACE_OS::exit(-1);
    }

    Messenger::MessageSeq messages;
    DDS::SampleInfoSeq info;

    DDS::ReturnCode_t error = message_dr->take(messages,
                                               info,
                                               DDS::LENGTH_UNLIMITED,
                                               DDS::ANY_SAMPLE_STATE,
                                               DDS::ANY_VIEW_STATE,
                                               DDS::ANY_INSTANCE_STATE);

    if (error == DDS::RETCODE_OK) {

      for (unsigned int i = 0; i < messages.length(); ++i) {
        const DDS::SampleInfo& si = info[i];
        if (si.valid_data) {
          const Messenger::Message& message = messages[i];

          // output for console to consume
          std::stringstream ss;
          ss << "Message: from writer " << message.process_id.in()
             << "->" << message.participant_id
             << "->" << message.writer_id
             << " sample_id = " << message.sample_id
             << " for reader=" << id_
             << std::endl;
          std::cerr << ss.str();
          // also track it in the log file
          ACE_DEBUG((LM_DEBUG,
                     ACE_TEXT("%T %N:%l: Message: process_id = %C ")
                     ACE_TEXT("participant_id = %d ")
                     ACE_TEXT("writer_id = %d ")
                     ACE_TEXT("sample_id = %d ")
                     ACE_TEXT("for reader = %C\n"),
                     message.process_id.in(),
                     message.participant_id,
                     message.writer_id,
                     message.sample_id,
                     id_.c_str()));

          for (CORBA::ULong i = 0; i < message.data.length(); ++i) {
            if (message.data[i] != i % 256) {
              std::cout << "ERROR: Bad data at index " << i << " writer_id "
                        << message.writer_id << " sample_id " << message.sample_id
                        << std::endl;
              break;
            }
          }
          if (!options_.no_validation) {
            std::string process_id(message.process_id.in());
            processes_[process_id][message.participant_id][message.writer_id].insert(message.sample_id);
          }

          ++num_samples_;

        } else if (si.instance_state == DDS::NOT_ALIVE_DISPOSED_INSTANCE_STATE) {
          ACE_DEBUG((LM_DEBUG, ACE_TEXT("%T %N:%l: INFO: instance is disposed\n")));

        } else if (si.instance_state == DDS::NOT_ALIVE_NO_WRITERS_INSTANCE_STATE) {
          ACE_DEBUG((LM_DEBUG, ACE_TEXT("%T %N:%l: INFO: instance is unregistered\n")));

        } else {
          ACE_ERROR((LM_ERROR,
                     ACE_TEXT("%T %N:%l: on_data_available()")
                     ACE_TEXT(" ERROR: unknown instance state: %d\n"),
                     si.instance_state));
        }
      }
    } else {
      ACE_ERROR((LM_ERROR,
                 ACE_TEXT("%T %N:%l: on_data_available()")
                 ACE_TEXT(" ERROR: unexpected status: %d\n"),
                 error));
    }

  } catch (const CORBA::Exception& e) {
    e._tao_print_exception("Exception caught in on_data_available():");
    ACE_OS::exit(-1);
  }
}
Example #26
// generates code to implement node's action
static void implement_node(ast_node node){
  if (node != NULL){
    if (node->node_type == ROOT){
      process_root(node); 
    }
    // generate code for assignment operator
    else if (node->node_type == OP_ASSIGN){
      process_assign(node); 
    }
    // generate code for negate operator
    else if (node->node_type == OP_NEG) {
      process_negate(node); 
    }
    // generate code for  +, -, *, /, %, =, !=, <, <=, >, >=
    else if (node->node_type > 0 && node->node_type <= 16 && node->node_type != 14 && node->node_type != 15){
      process_math(node); 
    }
    else if (node->node_type == OP_INC){
      process_inc(node, "1"); 
    }
    else if (node->node_type == OP_DEC){
      process_inc(node, "-1"); 
    }
    else if (node->node_type == IF_STMT){
      process_if(node); 
    }
    else if (node->node_type == IF_ELSE_STMT){
      process_ifelse(node); 
    }
    else if (node->node_type == CMPD){
      process_cmpd(node); 
    }
    else if (node->node_type == WHILE_STMT){
      process_while(node); 
    }
    else if (node->node_type == DO_WHILE_STMT){
      process_dowhile(node); 
    }
    else if (node->node_type == OP_AND){
      process_and(node); 
    }
    else if (node->node_type == OP_OR){
      process_or(node); 
    }
    else if (node->node_type == FOR_STRT || node->node_type == FOR_COND || node->node_type == FOR_UPDT){
      process_for_header(node); 
    }
    else if (node->node_type == FOR_STMT){
      process_for(node); 
    }
    else if (node->node_type == READ_STMT){
      process_read(node); 
    }
    else if (node->node_type == PRINT_STMT){
      process_print(node); 
    }
    else if (node->node_type == RETURN_STMT){
      process_return(node); 
    }
    else if (node->node_type == FUNCDEC){
      process_function(node); 
    }
    else if (node->node_type == PARAMS){
      process_params(node); 
    }
    else if (node->node_type == INT_TYPE || node->node_type == DOUBLE_TYPE){
      process_vardec(node); 
    }
    else if (node->node_type == CALL){
      process_call(node); 
    } 
    else if (node->node_type == IDENT){
      process_id(node); 
    } 
    else if (node->node_type == ARRAY){
      process_array(node); 
    }
  }
}
Example #27
void mpi_process_group::process_batch(int source) const
{
  bool processing_from_queue = !impl_->new_batches.empty();
  impl_->processing_batches++;
  typedef std::vector<impl::message_header>::iterator iterator;

  impl::incoming_messages& incoming = impl_->incoming[source];
  
  // Set up the iterators pointing to the next header in each block
  for (std::size_t i = 0; i < incoming.next_header.size(); ++i)
        incoming.next_header[i] = incoming.headers.begin();

  buffer_type remaining_buffer;
  std::vector<impl::message_header> remaining_headers;

  iterator end = incoming.headers.end();

  for (iterator i = incoming.headers.begin(); i != end; ++i) {
    // If this message has already been received, skip it
    if (i->tag == -1)
      continue;

#ifdef BATCH_DEBUG
    std::cerr << process_id(*this) << ": emit_receive(" << source << ", "
              << decode_tag(i->tag).first << ":" << decode_tag(i->tag).second
              << ")\n";
#endif

    if (!emit_receive(source, i->tag)) {
#ifdef BATCH_DEBUG
      std::cerr << process_id(*this) << ": keeping message # " 
            << remaining_headers.size() << " from " << source << " ("
            << decode_tag(i->tag).first << ":" 
            << decode_tag(i->tag).second << ", " 
            << i->bytes << " bytes)\n";
#endif
      // Hold on to this message until the next stage
      remaining_headers.push_back(*i);
      remaining_headers.back().offset = remaining_buffer.size();
      remaining_buffer.insert(remaining_buffer.end(),
                  &incoming.buffer[i->offset],
                  &incoming.buffer[i->offset] + i->bytes);
    }
  }

  // Swap the remaining messages into the "incoming" set.
  incoming.headers.swap(remaining_headers);
  incoming.buffer.swap(remaining_buffer);

  // Set up the iterators pointing to the next header in each block
  for (std::size_t i = 0; i < incoming.next_header.size(); ++i)
    incoming.next_header[i] = incoming.headers.begin();
  impl_->processing_batches--;
  
  if (!processing_from_queue)
    while (!impl_->new_batches.empty()) {
      receive_batch(impl_->new_batches.front().first,
                    impl_->new_batches.front().second);
      impl_->new_batches.pop();
    }
}
 reference operator[](const key_type& key) 
 { 
   owner_local_pair p = get(global_, key);
   BOOST_ASSERT(p.first == process_id(process_group_));
   return storage[p.second]; 
 }
Example #29
void mpi_process_group::synchronize() const
{
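  // Staged flush protocol: in each stage every process tells every peer how
  // many batches it has sent to it (or -1 if it produced nothing new), polls
  // until all peers have reached the same stage and all announced batches
  // have arrived, and then flushes any remaining outgoing messages. The loop
  // only ends after a stage in which no process had anything left to send;
  // the barrier and the bookkeeping reset below complete the synchronization.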
  // Don't synchronize if we've already finished
  if (boost::mpi::environment::finalized()) 
    return;

#ifdef DEBUG
  std::cerr << "SYNC: " << process_id(*this) << std::endl;
#endif

  emit_on_synchronize();

  process_id_type id = process_id(*this);     // Our rank
  process_size_type p = num_processes(*this); // The number of processes

  // Pack the remaining incoming messages into the beginning of the
  // buffers, so that we can receive new messages in this
  // synchronization step without losing those messages that have not
  // yet been received.
  pack_headers();

  impl_->synchronizing_stage[id] = -1;
  int stage=-1;
  bool no_new_messages = false;
  while (true) {
      ++stage;
#ifdef DEBUG
      std::cerr << "SYNC: " << id << " starting stage " << (stage+1) << ".\n";
#endif

      // Tell everyone that we are synchronizing. Note: we use MPI_Isend since 
      // we absolutely cannot have any of these operations blocking.
      
      // increment the stage for the source
       ++impl_->synchronizing_stage[id];
       if (impl_->synchronizing_stage[id] != stage)
         std::cerr << "Expected stage " << stage << ", got " << impl_->synchronizing_stage[id] << std::endl;
       BOOST_ASSERT(impl_->synchronizing_stage[id]==stage);
      // record how many still have messages to be sent
      if (static_cast<int>(impl_->synchronizing_unfinished.size())<=stage) {
        BOOST_ASSERT(static_cast<int>(impl_->synchronizing_unfinished.size()) == stage);
        impl_->synchronizing_unfinished.push_back(no_new_messages ? 0 : 1);
      }
      else
        impl_->synchronizing_unfinished[stage]+=(no_new_messages ? 0 : 1);

      // record how many are in that stage
      if (static_cast<int>(impl_->processors_synchronizing_stage.size())<=stage) {
        BOOST_ASSERT(static_cast<int>(impl_->processors_synchronizing_stage.size()) == stage);
        impl_->processors_synchronizing_stage.push_back(1);
      }
      else
        ++impl_->processors_synchronizing_stage[stage];

      impl_->synchronizing = true;

      for (int dest = 0; dest < p; ++dest) {
        int sync_message = no_new_messages ? -1 : impl_->number_sent_batches[dest];
        if (dest != id) {
          impl_->number_sent_batches[dest]=0;       
          MPI_Request request;
          MPI_Isend(&sync_message, 1, MPI_INT, dest, msg_synchronizing, impl_->comm,&request);
          int done=0;
          do {
            poll();
            MPI_Test(&request,&done,MPI_STATUS_IGNORE);
          } while (!done);
        }
        else { // need to subtract how many messages I should have received
          impl_->number_received_batches[id] -=impl_->number_sent_batches[id];
          impl_->number_sent_batches[id]=0;
        }
      }

      // Keep handling out-of-band messages until everyone has gotten
      // to this point.
      while (impl_->processors_synchronizing_stage[stage] <p) {
        // with the trigger based solution we cannot easily pass true here 
        poll(/*wait=*/false, -1, true);

      }

      // check that everyone is at least here
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT(impl_->synchronizing_stage[source] >= stage);

      // receive any batches sent in the meantime
      // all have to be available already
      while (true) {
        bool done=true;
        for (int source=0; source<p ; ++source)
          if(impl_->number_received_batches[source] < 0)
            done = false;
        if (done)
          break;
        poll(false,-1,true);
      }
      
#ifndef NO_IMMEDIATE_PROCESSING
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT(impl_->number_received_batches[source] >= 0);
#endif

      impl_->synchronizing = false;
      
      // Flush out remaining messages
      if (impl_->synchronizing_unfinished[stage]==0)
        break;
#ifdef NO_IMMEDIATE_PROCESSING
      for (process_id_type dest = 0; dest < p; ++dest)
        process_batch(dest);
#endif

      no_new_messages = true;
      for (process_id_type dest = 0; dest < p; ++dest) {
        if (impl_->outgoing[dest].headers.size() || 
            impl_->number_sent_batches[dest]>0)
          no_new_messages = false;
        send_batch(dest);
      }
    }

  impl_->comm.barrier/*nomacro*/();
#if 0
  // set up for next synchronize call
  for (int source=0; source<p; ++source) {
    if (impl_->synchronizing_stage[source] != stage) {
      std::cerr << id << ": expecting stage " << stage << " from source "
                << source << ", got " << impl_->synchronizing_stage[source]
                << std::endl;
    }
    BOOST_ASSERT(impl_->synchronizing_stage[source]==stage);
  }
#endif
  std::fill(impl_->synchronizing_stage.begin(),
            impl_->synchronizing_stage.end(), -1);
            
  // get rid of the information regarding recorded numbers of processors
  // for the stages we just finished
  impl_->processors_synchronizing_stage.clear();
  impl_->synchronizing_unfinished.clear();

  for (process_id_type dest = 0; dest < p; ++dest)
    BOOST_ASSERT (impl_->outgoing[dest].headers.empty());
#ifndef NO_IMMEDIATE_PROCESSING
      for (int source=0; source<p ; ++source)
        BOOST_ASSERT (impl_->number_received_batches[source] == 0);
#endif

  impl_->free_sent_batches();
#ifdef DEBUG
  std::cerr << "SYNC: " << process_id(*this) << " completed." << std::endl;
#endif
}
Example #30
abstract_actor_ptr remote_actor_impl(stream_ptr_pair io, string_set expected) {
    CPPA_LOGF_TRACE("io{" << io.first.get() << ", " << io.second.get() << "}");
    auto mm = get_middleman();
    auto pinf = mm->node();
    std::uint32_t process_id = pinf->process_id();
    // throws on error
    io.second->write(&process_id, sizeof(std::uint32_t));
    io.second->write(pinf->host_id().data(), pinf->host_id().size());
    // deserialize: actor id, process id, node id, interface
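    // Wire layout expected from the peer (inferred from the reads below; the
    // publishing side is not shown here):
    //   actor id            sizeof(actor_id) raw bytes
    //   process id          uint32_t
    //   host/node id        node_id::host_id_type raw bytes
    //   interface size N    uint32_t
    //   N clause strings    each a uint32_t length followed by the characters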
    actor_id remote_aid;
    std::uint32_t peer_pid;
    node_id::host_id_type peer_node_id;
    std::uint32_t iface_size;
    std::set<std::string> iface;
    auto& in = io.first;
    // -> actor id
    in->read(&remote_aid, sizeof(actor_id));
    // -> process id
    in->read(&peer_pid, sizeof(std::uint32_t));
    // -> node id
    in->read(peer_node_id.data(), peer_node_id.size());
    // -> interface
    in->read(&iface_size, sizeof(std::uint32_t));
    if (iface_size > max_iface_size) {
        throw std::invalid_argument("Remote actor claims to have more than "
                                    +std::to_string(max_iface_size)+
                                    " message types? Someone is trying"
                                    " something nasty!");
    }
    std::vector<char> strbuf;
    for (std::uint32_t i = 0; i < iface_size; ++i) {
        std::uint32_t str_size;
        in->read(&str_size, sizeof(std::uint32_t));
        if (str_size > max_iface_clause_size) {
            throw std::invalid_argument("Remote actor claims to have a"
                                        " reply_to<...>::with<...> clause with"
                                        " more than"
                                        +std::to_string(max_iface_clause_size)+
                                        " characters? Someone is"
                                        " trying something nasty!");
        }
        strbuf.reserve(str_size + 1);
        strbuf.resize(str_size);
        in->read(strbuf.data(), str_size);
        strbuf.push_back('\0');
        iface.insert(std::string{strbuf.data()});
    }
    // deserialization done, check interface
    if (iface != expected) {
        auto tostr = [](const std::set<std::string>& what) -> std::string {
            if (what.empty()) return "actor";
            std::string tmp;
            tmp = "typed_actor<";
            auto i = what.begin();
            auto e = what.end();
            tmp += *i++;
            while (i != e) { tmp += ","; tmp += *i++; }
            tmp += ">";
            return tmp;
        };
        auto iface_str = tostr(iface);
        auto expected_str = tostr(expected);
        if (expected.empty()) {
            throw std::invalid_argument("expected remote actor to be a "
                                        "dynamically typed actor but found "
                                        "a strongly typed actor of type "
                                        + iface_str);
        }
        if (iface.empty()) {
            throw std::invalid_argument("expected remote actor to be a "
                                        "strongly typed actor of type "
                                        + expected_str +
                                        " but found a dynamically typed actor");
        }
        throw std::invalid_argument("expected remote actor to be a "
                                    "strongly typed actor of type "
                                    + expected_str +
                                    " but found a strongly typed actor of type "
                                    + iface_str);
    }
    auto pinfptr = make_counted<node_id>(peer_pid, peer_node_id);
    if (*pinf == *pinfptr) {
        // this is a local actor, not a remote actor
        CPPA_LOGF_INFO("remote_actor() called to access a local actor");
        auto ptr = get_actor_registry()->get(remote_aid);
        return ptr;
    }
    struct remote_actor_result { remote_actor_result* next; actor value; };
    std::mutex qmtx;
    std::condition_variable qcv;
    intrusive::single_reader_queue<remote_actor_result> q;
    mm->run_later([mm, io, pinfptr, remote_aid, &q, &qmtx, &qcv] {
        CPPA_LOGC_TRACE("cppa",
                        "remote_actor$create_connection", "");
        auto pp = mm->get_peer(*pinfptr);
        CPPA_LOGF_INFO_IF(pp, "connection already exists (re-use old one)");
        if (!pp) mm->new_peer(io.first, io.second, pinfptr);
        auto res = mm->get_namespace().get_or_put(pinfptr, remote_aid);
        q.synchronized_enqueue(qmtx, qcv, new remote_actor_result{0, res});
    });
    std::unique_ptr<remote_actor_result> result(q.synchronized_pop(qmtx, qcv));
    CPPA_LOGF_DEBUG(CPPA_MARG(result, get));
    return raw_access::get(result->value);
}