// Runs `node` against the most recently pushed behavior. On a
// successful invocation, repeatedly tries to match messages from the
// client's cache until no further behavior matches.
// @param policy  invocation policy performing the actual matching
// @param client  actor owning this behavior stack (must not be null)
// @param node    mailbox element to process (must not be null)
// @returns true if `node` was matched by the current behavior
bool invoke(Policy& policy, Client* client, mailbox_element* node) {
    CPPA_REQUIRE(!empty());
    CPPA_REQUIRE(client != nullptr);
    CPPA_REQUIRE(node != nullptr);
    // use a copy of bhvr, because invoked behavior might change m_elements
    auto id = m_elements.back().second;
    auto bhvr = m_elements.back().first;
    if (policy.invoke(client, node, bhvr, id)) {
        bool repeat;
        // try to match cached messages against the (possibly new)
        // top-of-stack behavior until no cached message matches
        do {
            // remove synchronous response handler if needed
            if (id.valid()) {
                erase_if([id](const element_type& value) {
                    return id == value.second;
                });
            }
            if (!empty()) {
                id = m_elements.back().second;
                bhvr = m_elements.back().first;
                repeat = policy.invoke_from_cache(client, bhvr, id);
            }
            else repeat = false;
        }
        while (repeat);
        return true;
    }
    return false;
}
// Runs `node` against the most recently pushed behavior and, on
// success, drains matching messages from the client's cache.
// @param policy  invocation policy performing the actual matching
// @param client  actor owning this behavior stack (must not be null)
// @param node    queue node to process (must not be null)
// @returns true if `node` was matched by the current behavior
bool invoke(Policy& policy, Client* client, recursive_queue_node* node) {
    CPPA_REQUIRE(!m_elements.empty());
    CPPA_REQUIRE(client != nullptr);
    CPPA_REQUIRE(node != nullptr);
    // use a copy, because the invoked behavior might change m_elements
    behavior what = m_elements.back().first;
    auto id = m_elements.back().second;
    if (policy.invoke(client, node, what, id)) {
        // try to match cached messages
        do {
            // remove synchronous response handler if needed
            if (id.valid()) {
                auto last = m_elements.end();
                auto i = std::find_if(m_elements.begin(), last,
                                      [id](element_type& e) {
                                          return id == e.second;
                                      });
                if (i != last) {
                    // moved to m_erased_elements rather than destroyed
                    // in place — presumably to keep a still-executing
                    // behavior alive until unwinding; confirm intent
                    m_erased_elements.emplace_back(std::move(i->first));
                    m_elements.erase(i);
                }
            }
            id = empty() ? message_id_t() : m_elements.back().second;
        }
        while (!empty() && policy.invoke_from_cache(client, back(), id));
        return true;
    }
    return false;
}
// Pushes `node` to the mailbox and, if the mailbox transitioned from
// empty to non-empty, resolves the race against a blocking reader.
// @param node        element to enqueue; must not be marked
// @param next_state  state to move a `blocked` actor to; when `ready`,
//                    the actor is also handed to the scheduler
// @returns true if this call transitioned the actor out of `blocked`
bool enqueue_node(typename super::mailbox_element* node, int next_state = ready) {
    CPPA_REQUIRE(node->marked == false);
    if (this->m_mailbox._push_back(node)) {
        // mailbox was empty: wake up / re-schedule the actor via CAS;
        // loop because compare_exchange_weak may fail spuriously
        for (;;) {
            int state = m_state.load();
            switch (state) {
                case blocked: {
                    if (m_state.compare_exchange_weak(state, next_state)) {
                        CPPA_REQUIRE(this->m_scheduler != nullptr);
                        if (next_state == ready) {
                            this->m_scheduler->enqueue(this);
                        }
                        return true;
                    }
                    break;
                }
                case about_to_block: {
                    // reader is about to block but hasn't yet; flip it
                    // back to ready so it re-checks its mailbox
                    if (m_state.compare_exchange_weak(state, ready)) {
                        return false;
                    }
                    break;
                }
                default: return false;
            }
        }
    }
    return false;
}
// atomically sets m_stack back and enqueues all elements to the cache bool fetch_new_data(pointer end_ptr) { CPPA_REQUIRE(m_head == nullptr); CPPA_REQUIRE(!end_ptr || end_ptr == stack_empty_dummy()); pointer e = m_stack.load(); // must not be called on a closed queue CPPA_REQUIRE(e != nullptr); // it's enough to check this once, since only the owner is allowed // to close the queue and only the owner is allowed to call this // member function while (e != end_ptr) { if (m_stack.compare_exchange_weak(e, end_ptr)) { if (is_dummy(e)) { // only use-case for this is closing a queue CPPA_REQUIRE(end_ptr == nullptr); return false; } while (e) { CPPA_REQUIRE(!is_dummy(e)); auto next = e->next; e->next = m_head; m_head = e; e = next; } return true; } // next iteration } return false; }
// Creates a decorated tuple exposing a subset of `d`'s elements.
// @param d underlying tuple (moved in)
// @param v index mapping: element i of this view is element v[i] of *d
decorated_tuple(cow_pointer_type d, const vector_type& v)
    : super(tuple_impl_info::statically_typed)
    , m_decorated(std::move(d)), m_mapping(v) {
# ifdef CPPA_DEBUG
    // bind a const reference so the checks below do not detach the
    // copy-on-write pointer; `ptr` only exists in debug builds —
    // CPPA_REQUIRE is assumed to expand to a no-op otherwise
    const cow_pointer_type& ptr = m_decorated; // prevent detaching
# endif
    CPPA_REQUIRE(ptr->size() >= sizeof...(ElementTypes));
    CPPA_REQUIRE(v.size() == sizeof...(ElementTypes));
    // every mapped index must be within bounds of the decorated tuple
    CPPA_REQUIRE(*(std::max_element(v.begin(), v.end())) < ptr->size());
}
// Creates a decorated tuple exposing all elements of `d` starting at
// `offset` (mapping is {offset, offset+1, ...}).
// @param d      underlying tuple (moved in)
// @param offset first element of *d that is visible; must be > 0
decorated_tuple(cow_pointer_type d, size_t offset)
    : super(tuple_impl_info::statically_typed), m_decorated(std::move(d)) {
# ifdef CPPA_DEBUG
    // bind a const reference so the checks below do not detach the
    // copy-on-write pointer; `ptr` only exists in debug builds —
    // CPPA_REQUIRE is assumed to expand to a no-op otherwise
    const cow_pointer_type& ptr = m_decorated; // prevent detaching
# endif
    CPPA_REQUIRE((ptr->size() - offset) >= sizeof...(ElementTypes));
    CPPA_REQUIRE(offset > 0);
    // fill m_mapping with consecutive indices starting at offset
    size_t i = offset;
    m_mapping.resize(sizeof...(ElementTypes));
    std::generate(m_mapping.begin(), m_mapping.end(), [&]() {return i++;});
}
// Blocks the calling thread on `cv` until this queue leaves the
// blocked state. Pre: the queue is not closed.
void synchronized_await(Mutex& mtx, CondVar& cv) {
    CPPA_REQUIRE(!closed());
    if (try_block()) {
        std::unique_lock<Mutex> lock{mtx};
        while (blocked()) {
            cv.wait(lock);
        }
    }
}
// Pushes `node` to the mailbox; if the mailbox transitioned from
// empty to non-empty, wakes up or re-schedules the actor.
void abstract_scheduled_actor::enqueue_node(queue_node* node) {
    if (m_mailbox._push_back(node)) {
        // resolve the race against a blocking/about-to-block reader;
        // loop because compare_exchange_weak may fail spuriously
        for (;;) {
            int state = m_state.load();
            switch (state) {
                case blocked: {
                    if (m_state.compare_exchange_weak(state, ready)) {
                        CPPA_REQUIRE(m_scheduler != nullptr);
                        m_scheduler->enqueue(this);
                        return;
                    }
                    break;
                }
                case about_to_block: {
                    // flip the reader back to ready so it re-checks
                    // its mailbox instead of blocking
                    if (m_state.compare_exchange_weak(state, ready)) {
                        return;
                    }
                    break;
                }
                default: return;
            }
        }
    }
}
// Reads as many bytes as currently possible from `istream` into the
// free space of this buffer and grows the buffer accordingly.
void buffer::append_from(network::input_stream* istream) {
    CPPA_REQUIRE(remaining() > 0);
    auto bytes_read = istream->read_some(wr_ptr(), remaining());
    if (bytes_read > 0) {
        inc_size(bytes_read);
    }
}
// Answers a pending synchronous request of `sender` with an EXITED
// message carrying the exit reason `rsn`.
inline void operator()(const actor_ptr& sender, const message_id& mid) const {
    CPPA_REQUIRE(rsn != exit_reason::not_exited);
    if (sender == nullptr || !mid.is_request()) {
        return; // only synchronous requests receive an EXITED response
    }
    sender->enqueue({nullptr, sender, mid.response_id()},
                    make_any_tuple(atom("EXITED"), rsn));
}
// Deserializes an actor reference: "@0" denotes the invalid actor,
// "@actor" a (possibly remote) actor identified by id/pid/node id.
// @throws runtime_error on any other type name
actor_ptr default_actor_addressing::read(deserializer* source) {
    CPPA_REQUIRE(source != nullptr);
    auto type_name = source->seek_object();
    if (type_name == "@0") {
        CPPA_LOGMF(CPPA_DEBUG, self, "deserialized nullptr");
        source->begin_object("@0");
        source->end_object();
        return nullptr;
    }
    if (type_name != "@actor") {
        throw runtime_error("expected type name \"@0\" or \"@actor\"; "
                            "found: " + type_name);
    }
    process_information::node_id_type nid;
    source->begin_object(type_name);
    auto aid = source->read<uint32_t>();
    auto pid = source->read<uint32_t>();
    source->read_raw(process_information::node_id_size, nid.data());
    source->end_object();
    // does the deserialized actor live in this process?
    auto pinf = process_information::get();
    if (pid == pinf->process_id() && nid == pinf->node_id()) {
        return get_actor_registry()->get(aid);
    }
    process_information tmp(pid, nid);
    return get_or_put(tmp, aid);
}
// Serializes `ptr` to `sink`: "@0" for null, otherwise an "@actor"
// record consisting of actor id, process id, and node id.
void default_actor_addressing::write(serializer* sink, const actor_ptr& ptr) {
    CPPA_REQUIRE(sink != nullptr);
    if (ptr == nullptr) {
        CPPA_LOGMF(CPPA_DEBUG, self, "serialized nullptr");
        sink->begin_object("@0");
        sink->end_object();
        return;
    }
    // register local actors so they remain reachable via their id
    if (!ptr->is_proxy()) {
        get_actor_registry()->put(ptr->id(), ptr);
    }
    auto pinf = m_pinf;
    if (ptr->is_proxy()) {
        auto dptr = ptr.downcast<default_actor_proxy>();
        if (dptr) pinf = dptr->process_info();
        else CPPA_LOGMF(CPPA_ERROR, self, "downcast failed");
    }
    sink->begin_object("@actor");
    sink->write_value(ptr->id());
    sink->write_value(pinf->process_id());
    sink->write_raw(process_information::node_id_size,
                    pinf->node_id().data());
    sink->end_object();
}
void assign(InputIterator first, InputIterator last, // dummy SFINAE argument typename std::iterator_traits<InputIterator>::pointer = 0) { auto dist = std::distance(first, last); CPPA_REQUIRE(dist >= 0); resize(static_cast<size_t>(dist)); std::copy(first, last, begin()); }
// Returns whether this actor currently awaits the response with the
// given id. Pre: `response_id` must be a response id.
inline bool local_actor::awaits(message_id response_id) {
    CPPA_REQUIRE(response_id.is_response());
    auto matches = [response_id](message_id pending) {
        return response_id == pending;
    };
    return std::any_of(m_pending_responses.begin(),
                       m_pending_responses.end(),
                       matches);
}
// Applies all pending alterations to m_meta, forwards the resulting
// add/mod/erase transitions to handle_event, and finally disposes
// continuables whose file descriptors are no longer registered.
void middleman_event_handler::update() {
    CPPA_LOG_TRACE("");
    // orders fd_meta_info by file descriptor for binary search
    auto mless = [](const fd_meta_info& lhs, native_socket_type rhs) {
        return lhs.fd < rhs;
    };
    for (auto& elem_pair : m_alterations) {
        auto& elem = elem_pair.first;
        auto old = event::none;
        auto last = m_meta.end();
        auto iter = std::lower_bound(m_meta.begin(), last, elem.fd, mless);
        if (iter != last) old = iter->mask;
        // combine the previous mask with the requested add/erase op
        auto mask = next_bitmask(old, elem.mask, elem_pair.second);
        auto ptr = elem.ptr;
        CPPA_LOG_DEBUG("new bitmask for " << elem.ptr << ": " << eb2str(mask));
        if (iter == last || iter->fd != elem.fd) {
            // fd not registered yet
            CPPA_LOG_ERROR_IF(mask == event::none,
                              "cannot erase " << ptr
                              << " (no such element)");
            if (mask != event::none) {
                m_meta.insert(iter, elem);
                handle_event(fd_meta_event::add, elem.fd,
                             event::none, mask, ptr);
            }
        }
        else if (iter->fd == elem.fd) {
            CPPA_REQUIRE(iter->ptr == elem.ptr);
            if (mask == event::none) {
                // note: we cannot decide whether it's safe to dispose
                // `ptr`, because we didn't parse all alterations yet
                m_dispose_list.emplace_back(ptr);
                m_meta.erase(iter);
                handle_event(fd_meta_event::erase, elem.fd, old, mask, ptr);
            }
            else {
                iter->mask = mask;
                handle_event(fd_meta_event::mod, elem.fd, old, mask, ptr);
            }
        }
    }
    m_alterations.clear();
    // m_meta won't be touched inside loop
    auto first = m_meta.begin();
    auto last = m_meta.end();
    // an fd counts as alive if it is still registered in m_meta
    auto is_alive = [&](native_socket_type fd) -> bool {
        auto iter = std::lower_bound(first, last, fd, mless);
        return iter != last && iter->fd == fd;
    };
    // check whether elements in dispose list can be safely deleted:
    // dispose only if neither the read nor the write handle is alive
    for (auto elem : m_dispose_list) {
        auto rd = elem->read_handle();
        auto wr = elem->write_handle();
        if (   (rd == wr && !is_alive(rd))
            || (rd != wr && !is_alive(rd) && !is_alive(wr))) {
            elem->dispose();
        }
    }
    m_dispose_list.clear();
}
// Constructs a scheduled actor in the `ready` state attached to the
// given scheduler.
// @param sched scheduler executing this actor; must not be null
abstract_scheduled_actor::abstract_scheduled_actor(scheduler* sched)
    : next(nullptr)                        // intrusive next pointer
    , m_state(ready)
    , m_scheduler(sched)
    , m_has_pending_timeout_request(false)
    , m_active_timeout_id(0) {
    CPPA_REQUIRE(sched != nullptr);
}
// Writes the object header for `uti`: a one-byte flag followed by
// either the numeric type id (flag 0) or the full type name (flag 1,
// used when the type is unknown to the outgoing types table).
void binary_serializer::begin_object(const uniform_type_info* uti) {
    CPPA_REQUIRE(uti != nullptr);
    auto table = outgoing_types();
    std::uint32_t type_id = table ? table->id_of(uti) : 0;
    std::uint8_t by_name = (type_id == 0) ? 1 : 0;
    binary_writer::write_int(m_sink, by_name);
    if (by_name == 1) {
        binary_writer::write_string(m_sink, uti->name());
    }
    else {
        binary_writer::write_int(m_sink, type_id);
    }
}
// Schedules a response message for delivery after `rel_time` by
// handing a SEND command to the timer actor.
// Pre: `hdr.id` is a valid response id.
void delayed_reply(message_header hdr, const Duration& rel_time, any_tuple data ) {
    CPPA_REQUIRE(hdr.id.valid() && hdr.id.is_response());
    auto msg = make_any_tuple(atom("SEND"),
                              util::duration{rel_time},
                              std::move(hdr),
                              std::move(data));
    m_timer->enqueue(message_header{}, std::move(msg), nullptr);
}
bool actor::unlink_from_impl(const actor_ptr& other) { guard_type guard{m_mtx}; // remove_backlink returns true if this actor is linked to other if (other && !exited() && other->remove_backlink(this)) { auto i = std::find(m_links.begin(), m_links.end(), other); CPPA_REQUIRE(i != m_links.end()); m_links.erase(i); return true; } return false; }
// Runs `self` on a dedicated detached thread: resumes the actor until
// it reports `resumable::done`, waiting for new mailbox data between
// resumptions, then detaches it from the scheduler.
// @param self actor to execute; must not be null
void launch(Actor* self, execution_unit*) {
    CPPA_REQUIRE(self != nullptr);
    CPPA_PUSH_AID(self->id());
    CPPA_LOG_TRACE(CPPA_ARG(self));
    intrusive_ptr<Actor> mself{self}; // keeps the actor alive in the thread
    self->attach_to_scheduler();
    std::thread([=] {
        CPPA_PUSH_AID(mself->id());
        CPPA_LOG_TRACE("");
        detail::cs_thread fself;
        for (;;) {
            if (mself->resume(&fself, nullptr) == resumable::done) {
                // BUGFIX: leave the loop with `break` instead of
                // `return`; returning made detach_from_scheduler()
                // below unreachable, leaking the scheduler attachment
                break;
            }
            // await new data before resuming actor
            await_data(mself.get());
            CPPA_REQUIRE(self->mailbox().blocked() == false);
        }
        self->detach_from_scheduler();
    }).detach();
}
// Blocks until the mailbox contains at least one element or the
// absolute deadline `abs_time` passes.
// @returns true if data is available, false on timeout
bool timed_wait_for_data(const timeout_type& abs_time) {
    CPPA_REQUIRE(not this->m_mailbox.closed());
    if (!mailbox_empty()) {
        return true;
    }
    lock_type guard(m_mtx);
    while (mailbox_empty()) {
        if (m_cv.wait_until(guard, abs_time) == std::cv_status::timeout) {
            return false;
        }
    }
    return true;
}
bool abstract_actor::unlink_from_impl(const actor_addr& other) { if (!other) return false; guard_type guard{m_mtx}; // remove_backlink returns true if this actor is linked to other auto ptr = detail::raw_access::get(other); if (!exited() && ptr->remove_backlink(address())) { auto i = std::find(m_links.begin(), m_links.end(), ptr); CPPA_REQUIRE(i != m_links.end()); m_links.erase(i); return true; } return false; }
// Blocks on `cv` until the queue is unblocked or `timeout` passes.
// @returns false if the wait timed out with the queue still empty;
//          true if the queue was unblocked or new data arrived
bool synchronized_await(Mutex& mtx, CondVar& cv, const TimePoint& timeout) {
    CPPA_REQUIRE(!closed());
    if (try_block()) {
        std::unique_lock<Mutex> lock{mtx};
        while (blocked()) {
            if (cv.wait_until(lock, timeout) == std::cv_status::timeout) {
                // failing to flip the queue from blocked back to empty
                // means a writer added an element concurrently, i.e.
                // there is data after all
                return !try_unblock();
            }
        }
    }
    return true;
}
void delayed_reply(const actor_ptr& to, const Duration& rel_time, message_id_t id, any_tuple data ) { CPPA_REQUIRE(!id.valid() || id.is_response()); if (id.valid()) { auto tup = make_any_tuple(atom("REPLY"), util::duration{rel_time}, to, id, std::move(data)); delayed_send_helper()->enqueue(self, std::move(tup)); } else { this->delayed_send(to, rel_time, std::move(data)); } }
// Inserts the range [first, last) before `pos`.
// @param pos    insertion position; must point into this container
// @param first  beginning of the range to insert
// @param last   end of the range to insert
// @throws std::length_error if the result would exceed MaxSize
// NOTE(review): `pos` and `old_end` are used after resize(); this is
// only safe because limited_vector storage is presumably in-place
// (capacity MaxSize, no reallocation) — confirm iterator stability.
inline void insert(iterator pos, InputIterator first, InputIterator last) {
    CPPA_REQUIRE(first <= last);
    auto num_elements = static_cast<size_t>(std::distance(first, last));
    if ((size() + num_elements) > MaxSize) {
        // BUGFIX: corrected error-message grammar ("too much" -> "too many")
        throw std::length_error("limited_vector::insert: too many elements");
    }
    if (pos == end()) {
        // appending: grow, then copy directly into the new slots
        resize(size() + num_elements);
        std::copy(first, last, pos);
    }
    else {
        // make room by shifting the tail right, then fill the gap
        auto old_end = end();
        resize(size() + num_elements);
        std::copy_backward(pos, old_end, end());
        // insert new elements
        std::copy(first, last, pos);
    }
}
// Deserializes an actor reference from its raw wire format
// (actor id, process id, node id); {0, 0} encodes the invalid actor.
actor_ptr default_actor_addressing::read(deserializer* source) {
    CPPA_REQUIRE(source != nullptr);
    process_information::node_id_type nid;
    auto aid = source->read<uint32_t>();
    auto pid = source->read<uint32_t>();
    source->read_raw(process_information::node_id_size, nid.data());
    auto pinf = process_information::get();
    if (aid == 0 && pid == 0) {
        return nullptr; // invalid actor
    }
    // does the deserialized actor live in this process?
    if (pid == pinf->process_id() && nid == pinf->node_id()) {
        return get_actor_registry()->get(aid);
    }
    process_information tmp{pid, nid};
    return get_or_put(tmp, aid);
}
// Marks this actor as exited with the given reason (idempotent: a
// second call is a no-op), then notifies all links with an EXIT
// message and runs all attached functors.
// @param reason exit reason; must not be `not_exited`
void abstract_actor::cleanup(std::uint32_t reason) {
    // log as 'actor'
    CPPA_LOGM_TRACE("cppa::actor", CPPA_ARG(m_id) << ", "
                    << CPPA_ARG(reason) << ", " << CPPA_ARG(m_is_proxy));
    CPPA_REQUIRE(reason != exit_reason::not_exited);
    // move everything out of the critical section before processing it
    decltype(m_links) mlinks;
    decltype(m_attachables) mattachables;
    { // lifetime scope of guard
        guard_type guard{m_mtx};
        if (m_exit_reason != exit_reason::not_exited) {
            // already exited
            return;
        }
        m_exit_reason = reason;
        mlinks = std::move(m_links);
        mattachables = std::move(m_attachables);
        // make sure lists are empty (moved-from state is unspecified)
        m_links.clear();
        m_attachables.clear();
    }
    CPPA_LOGC_INFO_IF(not is_proxy(), "cppa::actor", __func__,
                      "actor with ID " << m_id << " had "
                      << mlinks.size() << " links and "
                      << mattachables.size()
                      << " attached functors; exit reason = " << reason
                      << ", class = " << detail::demangle(typeid(*this)));
    // send exit messages to all links (outside the lock)
    auto msg = make_any_tuple(exit_msg{address(), reason});
    CPPA_LOGM_DEBUG("cppa::actor", "send EXIT to "
                    << mlinks.size() << " links");
    for (auto& aptr : mlinks) {
        aptr->enqueue({address(), aptr,
                       message_id{}.with_high_priority()},
                      msg, m_host);
    }
    CPPA_LOGM_DEBUG("cppa::actor", "run " << mattachables.size()
                    << " attachables");
    for (attachable_ptr& ptr : mattachables) {
        ptr->actor_exited(reason);
    }
}
// Applies all pending alterations to m_meta and forwards the
// resulting add/mod/erase transitions to the derived class via
// d()->handle_event.
void update() {
    CPPA_LOG_TRACE("");
    for (auto& elem_pair : m_alterations) {
        auto& elem = elem_pair.first;
        auto old = event::none;
        auto last = end(m_meta);
        auto iter = lower_bound(begin(m_meta), last, elem.fd, m_less);
        if (iter != last) old = iter->mask;
        // combine the previous mask with the requested add/erase op
        auto mask = next_bitmask(old, elem.mask, elem_pair.second);
        auto ptr = elem.ptr.get();
        CPPA_LOG_DEBUG("new bitmask for " << elem.ptr.get()
                       << ": " << eb2str(mask));
        if (iter == last || iter->fd != elem.fd) {
            // fd not registered yet
            CPPA_LOG_INFO_IF(mask == event::none,
                             "cannot erase " << ptr
                             << " (not found in m_meta)");
            if (mask != event::none) {
                m_meta.insert(iter, elem);
                d()->handle_event(fd_meta_event::add, elem.fd,
                                  event::none, mask, ptr);
            }
        }
        else if (iter->fd == elem.fd) {
            CPPA_REQUIRE(iter->ptr == elem.ptr);
            if (mask == event::none) {
                m_meta.erase(iter);
                d()->handle_event(fd_meta_event::erase, elem.fd,
                                  old, mask, ptr);
            }
            else {
                iter->mask = mask;
                d()->handle_event(fd_meta_event::mod, elem.fd,
                                  old, mask, ptr);
            }
        }
    }
    m_alterations.clear();
}
// Serializes `ptr` in raw wire format: actor id, process id, node id.
// A null pointer is encoded as id 0 plus invalid process information.
void default_actor_addressing::write(serializer* sink, const actor_ptr& ptr) {
    CPPA_REQUIRE(sink != nullptr);
    if (ptr == nullptr) {
        CPPA_LOG_DEBUG("serialize nullptr");
        sink->write_value(static_cast<actor_id>(0));
        process_information::serialize_invalid(sink);
        return;
    }
    // register local actors so they remain reachable via their id
    if (!ptr->is_proxy()) {
        get_actor_registry()->put(ptr->id(), ptr);
    }
    auto pinf = m_pinf;
    if (ptr->is_proxy()) {
        auto dptr = ptr.downcast<default_actor_proxy>();
        if (dptr) pinf = dptr->process_info();
        else CPPA_LOG_ERROR("downcast failed");
    }
    sink->write_value(ptr->id());
    sink->write_value(pinf->process_id());
    sink->write_raw(process_information::node_id_size,
                    pinf->node_id().data());
}
// Computes the new event mask: sets the bits of `arg` in `old` for
// an `add` operation, clears them for an `erase` operation.
inline event_bitmask next_bitmask(event_bitmask old, event_bitmask arg, fd_meta_event op) {
    CPPA_REQUIRE(op == fd_meta_event::add || op == fd_meta_event::erase);
    if (op == fd_meta_event::add) {
        return old | arg;
    }
    return old & ~arg;
}