void outbound_path::emit_irregular_shutdown(local_actor* self, error reason) { CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(reason)); /// Note that we always send abort messages anonymous. They can get send /// after `self` already terminated and we must not form strong references /// after that point. Since downstream messages contain the sender address /// anyway, we only omit redundant information. anon_send(actor_cast<actor>(hdl), make<downstream_msg::forced_close>(slots, self->address(), std::move(reason))); }
// Sends `xs...` to the strongly typed actor `to` without a sender; the
// message is statically checked against the actor's signature set `Sigs...`.
void anon_send(const typed_actor<Sigs...>& to, Ts&&... xs) {
  // Build a type list of the decayed argument types (after applying CAF's
  // implicit conversions) so the input can be verified at compile time.
  using token = detail::type_list<
    typename detail::implicit_conversions<
      typename std::decay<Ts>::type
    >::type...>;
  token tk;
  // Static check: fails to compile if `to` cannot handle this message type.
  check_typed_input(to, tk);
  // Delegate to the dynamically typed overload with normal priority.
  anon_send(message_priority::normal, actor_cast<channel>(to),
            std::forward<Ts>(xs)...);
}
void outbound_path::emit_irregular_shutdown(local_actor* self, stream_slots slots, const strong_actor_ptr& hdl, error reason) { CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(hdl) << CAF_ARG(reason)); /// Note that we always send abort messages anonymous. See reasoning in first /// function overload. anon_send(actor_cast<actor>(hdl), make<downstream_msg::forced_close>(slots, self->address(), std::move(reason))); }
void parse_config(std::istream& input, config_format format, optional<std::ostream&> errors) { if (! input) return; auto cs = experimental::whereis(atom("ConfigServ")); auto consume = [&](std::string key, config_value value) { message_visitor mv; anon_send(cs, put_atom::value, std::move(key), apply_visitor(mv, value)); }; switch (format) { case config_format::ini: detail::parse_ini(input, consume, errors); } }
void unpublish_impl(const actor_addr& whom, uint16_t port, bool blocking) { CAF_LOGF_TRACE(CAF_TSARG(whom) << ", " << CAF_ARG(port) << CAF_ARG(blocking)); auto mm = get_middleman_actor(); if (blocking) { scoped_actor self; self->sync_send(mm, unpublish_atom::value, whom, port).await( [](ok_atom) { // ok, basp_broker is done }, [](error_atom, const std::string&) { // ok, basp_broker is done } ); } else { anon_send(mm, unpublish_atom::value, whom, port); } }
forwarding_actor_proxy::~forwarding_actor_proxy() {
  // Skip the notification if messaging the manager is no longer safe.
  if (manager_.unsafe())
    return;
  // Tell the manager to drop its bookkeeping entry for this proxy.
  anon_send(manager_, make_message(delete_atom::value, node(), id()));
}
void actor_ostream::redirect_all(std::string f, int flags) {
  // Ask the scheduler's printer actor to redirect all actor output to `f`.
  auto printer = detail::singletons::get_scheduling_coordinator()->printer();
  anon_send(printer, redirect_atom::value, std::move(f), flags);
}
remote_actor_proxy::~remote_actor_proxy() {
  // Notify the parent middleman so it can erase its entry for this proxy.
  auto del_msg = make_message(atom("_DelProxy"), node(), id());
  anon_send(m_parent, std::move(del_msg));
}
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard, const strong_actor_ptr& sender, message_id mid, message_view& mv, execution_unit* eu) { auto& content = mv.content(); CAF_LOG_TRACE(CAF_ARG(mid) << CAF_ARG(content)); if (content.match_elements<exit_msg>()) { // acquire second mutex as well std::vector<actor> workers; auto em = content.get_as<exit_msg>(0).reason; if (cleanup(std::move(em), eu)) { auto tmp = mv.move_content_to_message(); // send exit messages *always* to all workers and clear vector afterwards // but first swap workers_ out of the critical section upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; workers_.swap(workers); unique_guard.unlock(); for (auto& w : workers) anon_send(w, tmp); unregister_from_system(); } return true; } if (content.match_elements<down_msg>()) { // remove failed worker from pool auto& dm = content.get_as<down_msg>(0); upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; auto last = workers_.end(); auto i = std::find(workers_.begin(), workers_.end(), dm.source); CAF_LOG_DEBUG_IF(i == last, "received down message for an unknown worker"); if (i != last) workers_.erase(i); if (workers_.empty()) { planned_reason_ = exit_reason::out_of_workers; unique_guard.unlock(); quit(eu); } return true; } if (content.match_elements<sys_atom, put_atom, actor>()) { auto& worker = content.get_as<actor>(2); worker->attach(default_attachable::make_monitor(worker.address(), address())); upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; workers_.push_back(worker); return true; } if (content.match_elements<sys_atom, delete_atom, actor>()) { upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; auto& what = content.get_as<actor>(2); auto last = workers_.end(); auto i = std::find(workers_.begin(), last, what); if (i != last) { workers_.erase(i); } return true; } if (content.match_elements<sys_atom, get_atom>()) { auto cpy = workers_; guard.unlock(); 
sender->enqueue(nullptr, mid.response_id(), make_message(std::move(cpy)), eu); return true; } if (workers_.empty()) { guard.unlock(); if (sender && mid.valid()) { // tell client we have ignored this sync message by sending // and empty message back sender->enqueue(nullptr, mid.response_id(), message{}, eu); } return true; } return false; }
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard, const actor_addr& sender, message_id mid, const message& msg, execution_unit* eu) { auto rsn = planned_reason_; if (rsn != caf::exit_reason::not_exited) { guard.unlock(); if (mid.valid()) { detail::sync_request_bouncer srq{rsn}; srq(sender, mid); } return true; } if (msg.match_elements<exit_msg>()) { std::vector<actor> workers; // send exit messages *always* to all workers and clear vector afterwards // but first swap workers_ out of the critical section upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; workers_.swap(workers); planned_reason_ = msg.get_as<exit_msg>(0).reason; unique_guard.unlock(); for (auto& w : workers) { anon_send(w, msg); } quit(); return true; } if (msg.match_elements<down_msg>()) { // remove failed worker from pool auto& dm = msg.get_as<down_msg>(0); upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; auto last = workers_.end(); auto i = std::find(workers_.begin(), workers_.end(), dm.source); if (i != last) { workers_.erase(i); } if (workers_.empty()) { planned_reason_ = exit_reason::out_of_workers; unique_guard.unlock(); quit(); } return true; } if (msg.match_elements<sys_atom, put_atom, actor>()) { auto& worker = msg.get_as<actor>(2); if (worker == invalid_actor) { return true; } worker->attach(default_attachable::make_monitor(address())); upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; workers_.push_back(worker); return true; } if (msg.match_elements<sys_atom, delete_atom, actor>()) { upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard}; auto& what = msg.get_as<actor>(2); auto last = workers_.end(); auto i = std::find(workers_.begin(), last, what); if (i != last) { workers_.erase(i); } return true; } if (msg.match_elements<sys_atom, get_atom>()) { auto cpy = workers_; guard.unlock(); actor_cast<abstract_actor*>(sender)->enqueue(invalid_actor_addr, mid.response_id(), make_message(std::move(cpy)), eu); 
return true; } if (workers_.empty()) { guard.unlock(); if (sender != invalid_actor_addr && mid.valid()) { // tell client we have ignored this sync message by sending // and empty message back auto ptr = actor_cast<abstract_actor_ptr>(sender); ptr->enqueue(invalid_actor_addr, mid.response_id(), message{}, eu); } return true; } return false; }
// Delivers a remote message to a local destination. Resolves the sender
// (local registry vs. remote proxy), intercepts link/unlink control messages
// from forwarding proxies, bounces undeliverable sync requests, and finally
// enqueues the message into `dest`'s mailbox while firing the matching hooks.
void basp_broker_state::deliver(const node_id& src_nid, actor_id src_aid,
                                strong_actor_ptr dest, message_id mid,
                                std::vector<strong_actor_ptr>& stages,
                                message& msg) {
  CAF_LOG_TRACE(CAF_ARG(src_nid) << CAF_ARG(src_aid) << CAF_ARG(dest)
                << CAF_ARG(msg) << CAF_ARG(mid));
  // A local source comes from the registry; a remote one from the proxy map.
  auto src = src_nid == this_node() ? system().registry().get(src_aid)
                                    : proxies().get_or_put(src_nid, src_aid);
  // Intercept link messages. Forwarding actor proxies signalize linking
  // by sending link_atom/unlink_atom message with src = dest.
  if (msg.type_token() == make_type_token<atom_value, strong_actor_ptr>()) {
    // Dispatch on the atom value; anything other than (un)link falls through
    // to normal delivery below.
    switch (static_cast<uint64_t>(msg.get_as<atom_value>(0))) {
      default:
        break;
      case link_atom::value.uint_value(): {
        // Link requests must target a proxy that lives on this node.
        if (src_nid != this_node()) {
          CAF_LOG_WARNING("received link message for an other node");
          return;
        }
        auto ptr = msg.get_as<strong_actor_ptr>(1);
        if (!ptr) {
          CAF_LOG_WARNING("received link message with invalid target");
          return;
        }
        if (!src) {
          // The local actor no longer exists; inform the proxy so the
          // remote side learns the link could not be established.
          CAF_LOG_DEBUG("received link for invalid actor, report error");
          anon_send(actor_cast<actor>(ptr),
                    make_error(sec::remote_linking_failed));
          return;
        }
        static_cast<actor_proxy*>(ptr->get())->local_link_to(src->get());
        return;
      }
      case unlink_atom::value.uint_value(): {
        // Unlink requests must target a proxy that lives on this node.
        if (src_nid != this_node()) {
          CAF_LOG_WARNING("received unlink message for an other node");
          return;
        }
        auto ptr = msg.get_as<strong_actor_ptr>(1);
        if (!ptr) {
          CAF_LOG_DEBUG("received unlink message with invalid target");
          return;
        }
        if (!src) {
          // Nothing to unlink from; silently drop.
          CAF_LOG_DEBUG("received unlink for invalid actor, report error");
          return;
        }
        static_cast<actor_proxy*>(ptr->get())->local_unlink_from(src->get());
        return;
      }
    }
  }
  if (!dest) {
    // Destination is gone: fire the hook and bounce pending sync requests
    // back to the sender so it does not wait forever.
    auto rsn = exit_reason::remote_link_unreachable;
    CAF_LOG_INFO("cannot deliver message, destination not found");
    self->parent().notify<hook::invalid_message_received>(src_nid, src,
                                                          invalid_actor_id,
                                                          mid, msg);
    if (mid.valid() && src) {
      detail::sync_request_bouncer srb{rsn};
      srb(src, mid);
    }
    return;
  }
  self->parent().notify<hook::message_received>(src_nid, src, dest, mid, msg);
  // Hand the message over to the destination's mailbox.
  dest->enqueue(make_mailbox_element(std::move(src), mid, std::move(stages),
                                     std::move(msg)),
                nullptr);
}
forwarding_actor_proxy::~forwarding_actor_proxy() {
  // Notify the manager that this proxy died so it can drop its entry.
  auto del_msg = make_message(atom("_DelProxy"), node(), id());
  anon_send(m_manager, std::move(del_msg));
}