void initialize() override { CAF_LOG_TRACE(""); this->init_broker(); auto bhvr = make_behavior(); CAF_LOG_DEBUG_IF(!bhvr, "make_behavior() did not return a behavior:" << CAF_ARG(this->has_behavior())); if (bhvr) { // make_behavior() did return a behavior instead of using become() CAF_LOG_DEBUG("make_behavior() did return a valid behavior"); this->do_become(std::move(bhvr.unbox()), true); } }
void event_based_actor::initialize() {
  is_initialized(true);
  auto bhvr = make_behavior();
  CAF_LOG_DEBUG_IF(!bhvr, "make_behavior() did not return a behavior, "
                          << "has_behavior() = " << std::boolalpha
                          << this->has_behavior());
  if (bhvr) {
    // make_behavior() did return a behavior instead of using become()
    CAF_LOG_DEBUG("make_behavior() did return a valid behavior");
    become(std::move(bhvr));
  }
}
void initialize() override { CAF_LOG_TRACE(""); super::initialize(); this->setf(abstract_actor::is_initialized_flag); auto bhvr = make_behavior(); CAF_LOG_DEBUG_IF(!bhvr, "make_behavior() did not return a behavior:" << CAF_ARG2("alive", this->alive())); if (bhvr) { // make_behavior() did return a behavior instead of using become() CAF_LOG_DEBUG("make_behavior() did return a valid behavior"); this->do_become(std::move(bhvr.unbox()), true); } }
// go on a raid in quest for a shiny new job
job_ptr raid() {
  auto result = m_steal_policy.raid(this);
  CAF_LOG_DEBUG_IF(result, "got actor with id " << id_of(result));
  return result;
}
/**
 * Attempt to steal an element from the exposed job queue.
 */
job_ptr try_steal() override {
  auto result = m_queue_policy.try_external_dequeue(this);
  CAF_LOG_DEBUG_IF(result, "stole actor with id " << id_of(result));
  return result;
}
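// Illustrative only: how raid() and try_steal() typically compose in a
// work-stealing scheduler loop. Every name below (worker_next_job,
// dequeue_local, backoff) is hypothetical and only sketches the idea.
job_ptr worker_next_job(worker& self) {
  for (;;) {
    if (auto job = self.dequeue_local()) // fast path: own job queue
      return job;
    if (auto job = self.raid())          // slow path: pick a victim via the
      return job;                        // steal policy; calls its try_steal()
    self.backoff();                      // no luck; back off before retrying
  }
}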
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
                        const strong_actor_ptr& sender, message_id mid,
                        message_view& mv, execution_unit* eu) {
  auto& content = mv.content();
  CAF_LOG_TRACE(CAF_ARG(mid) << CAF_ARG(content));
  if (content.match_elements<exit_msg>()) {
    // acquire second mutex as well
    std::vector<actor> workers;
    auto em = content.get_as<exit_msg>(0).reason;
    if (cleanup(std::move(em), eu)) {
      auto tmp = mv.move_content_to_message();
      // send exit messages *always* to all workers and clear the vector
      // afterwards, but first swap workers_ out of the critical section
      upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
      workers_.swap(workers);
      unique_guard.unlock();
      for (auto& w : workers)
        anon_send(w, tmp);
      unregister_from_system();
    }
    return true;
  }
  if (content.match_elements<down_msg>()) {
    // remove failed worker from pool
    auto& dm = content.get_as<down_msg>(0);
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), workers_.end(), dm.source);
    CAF_LOG_DEBUG_IF(i == last, "received down message for an unknown worker");
    if (i != last)
      workers_.erase(i);
    if (workers_.empty()) {
      planned_reason_ = exit_reason::out_of_workers;
      unique_guard.unlock();
      quit(eu);
    }
    return true;
  }
  if (content.match_elements<sys_atom, put_atom, actor>()) {
    auto& worker = content.get_as<actor>(2);
    worker->attach(default_attachable::make_monitor(worker.address(),
                                                    address()));
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.push_back(worker);
    return true;
  }
  if (content.match_elements<sys_atom, delete_atom, actor>()) {
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto& what = content.get_as<actor>(2);
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), last, what);
    if (i != last)
      workers_.erase(i);
    return true;
  }
  if (content.match_elements<sys_atom, get_atom>()) {
    auto cpy = workers_;
    guard.unlock();
    sender->enqueue(nullptr, mid.response_id(),
                    make_message(std::move(cpy)), eu);
    return true;
  }
  if (workers_.empty()) {
    guard.unlock();
    if (sender && mid.valid()) {
      // tell the client we have ignored this sync message by sending
      // an empty message back
      sender->enqueue(nullptr, mid.response_id(), message{}, eu);
    }
    return true;
  }
  return false;
}
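// Client-side sketch of the protocol filter() implements, assuming the
// documented actor_pool interface (actor_pool::make, round_robin policy);
// worker_impl is a hypothetical worker and exact overloads may differ
// between CAF versions:
caf::behavior worker_impl(caf::event_based_actor*) {
  return {
    [](int x, int y) { return x * y; }
  };
}

caf::actor make_worker_pool(caf::actor_system& sys, size_t num_workers) {
  auto factory = [&sys] { return sys.spawn(worker_impl); };
  auto pool = caf::actor_pool::make(sys.dummy_execution_unit(), num_workers,
                                    factory, caf::actor_pool::round_robin());
  // (sys, put, actor) adds a worker and is handled by the
  // match_elements<sys_atom, put_atom, actor>() branch above;
  // (sys, delete, actor) removes it again
  caf::anon_send(pool, caf::sys_atom::value, caf::put_atom::value,
                 sys.spawn(worker_impl));
  return pool;
}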
resumable::resume_result resume(execution_unit* new_host,
                                size_t max_throughput) override {
  CAF_REQUIRE(max_throughput > 0);
  auto d = static_cast<Derived*>(this);
  CAF_LOG_TRACE("id = " << d->id());
  d->host(new_host);
  auto done_cb = [&]() -> bool {
    CAF_LOG_TRACE("");
    d->bhvr_stack().clear();
    d->bhvr_stack().cleanup();
    d->on_exit();
    if (!d->bhvr_stack().empty()) {
      CAF_LOG_DEBUG("on_exit did set a new behavior");
      d->planned_exit_reason(exit_reason::not_exited);
      return false; // on_exit did set a new behavior
    }
    auto rsn = d->planned_exit_reason();
    if (rsn == exit_reason::not_exited) {
      rsn = exit_reason::normal;
      d->planned_exit_reason(rsn);
    }
    d->cleanup(rsn);
    return true;
  };
  auto actor_done = [&]() -> bool {
    if (d->bhvr_stack().empty()
        || d->planned_exit_reason() != exit_reason::not_exited) {
      return done_cb();
    }
    return false;
  };
  // actors without behavior or that have already defined
  // an exit reason must not be resumed
  CAF_REQUIRE(!d->is_initialized()
              || (!d->bhvr_stack().empty()
                  && d->planned_exit_reason() == exit_reason::not_exited));
  std::exception_ptr eptr = nullptr;
  try {
    if (!d->is_initialized()) {
      CAF_LOG_DEBUG("initialize actor");
      d->is_initialized(true);
      auto bhvr = d->make_behavior();
      CAF_LOG_DEBUG_IF(!bhvr, "make_behavior() did not return a behavior, "
                              << "bhvr_stack().empty() = " << std::boolalpha
                              << d->bhvr_stack().empty());
      if (bhvr) {
        // make_behavior() did return a behavior instead of using become()
        CAF_LOG_DEBUG("make_behavior() did return a valid behavior");
        d->become(std::move(bhvr));
      }
      if (actor_done()) {
        CAF_LOG_DEBUG("actor_done() returned true right "
                      << "after make_behavior()");
        return resume_result::done;
      }
    }
    // dispatch at most max_throughput messages before yielding
    for (size_t i = 0; i < max_throughput; ++i) {
      auto ptr = d->next_message();
      if (ptr) {
        if (d->invoke_message(ptr)) {
          if (actor_done()) {
            CAF_LOG_DEBUG("actor exited");
            return resume_result::done;
          }
          // continue from cache if current message was
          // handled, because the actor might have changed
          // its behavior to match 'old' messages now
          while (d->invoke_message_from_cache()) {
            if (actor_done()) {
              CAF_LOG_DEBUG("actor exited");
              return resume_result::done;
            }
          }
        }
        // add ptr to cache if invoke_message
        // did not reset it (i.e. skipped, but not dropped)
        if (ptr) {
          CAF_LOG_DEBUG("add message to cache");
          d->push_to_cache(std::move(ptr));
        }
      } else {
        CAF_LOG_DEBUG("no more elements in mailbox; going to block");
        if (d->mailbox().try_block()) {
          return resumable::awaiting_message;
        }
        CAF_LOG_DEBUG("try_block() interrupted by new message");
      }
    }
    if (!d->has_next_message() && d->mailbox().try_block()) {
      return resumable::awaiting_message;
    }
    // time's up
    return resumable::resume_later;
  } catch (actor_exited& what) {
    CAF_LOG_INFO("actor died because of exception: actor_exited, "
                 "reason = " << what.reason());
    if (d->exit_reason() == exit_reason::not_exited) {
      d->quit(what.reason());
    }
  } catch (std::exception& e) {
    CAF_LOG_INFO("actor died because of an exception: "
                 << detail::demangle(typeid(e))
                 << ", what() = " << e.what());
    if (d->exit_reason() == exit_reason::not_exited) {
      d->quit(exit_reason::unhandled_exception);
    }
    eptr = std::current_exception();
  } catch (...) {
    CAF_LOG_INFO("actor died because of an unknown exception");
    if (d->exit_reason() == exit_reason::not_exited) {
      d->quit(exit_reason::unhandled_exception);
    }
    eptr = std::current_exception();
  }
  if (eptr) {
    auto opt_reason = d->handle(eptr);
    if (opt_reason) {
      // use exit reason defined by custom handler
      d->planned_exit_reason(*opt_reason);
    }
  }
  if (!actor_done()) {
    // actor has been "revived", try running it again later
    return resumable::resume_later;
  }
  return resumable::done;
}
handle_message_result handle_message(Actor* self, mailbox_element* node,
                                     Fun& fun, message_id awaited_response) {
  bool handle_sync_failure_on_mismatch = true;
  if (dptr()->hm_should_skip(node)) {
    return hm_skip_msg;
  }
  switch (this->filter_msg(self, node)) {
    case msg_type::normal_exit:
      CAF_LOG_DEBUG("dropped normal exit signal");
      return hm_drop_msg;
    case msg_type::expired_sync_response:
      CAF_LOG_DEBUG("dropped expired sync response");
      return hm_drop_msg;
    case msg_type::expired_timeout:
      CAF_LOG_DEBUG("dropped expired timeout message");
      return hm_drop_msg;
    case msg_type::inactive_timeout:
      CAF_LOG_DEBUG("skipped inactive timeout message");
      return hm_skip_msg;
    case msg_type::non_normal_exit:
      CAF_LOG_DEBUG("handled non-normal exit signal");
      // this message was handled by calling self->quit(...)
      return hm_msg_handled;
    case msg_type::timeout: {
      CAF_LOG_DEBUG("handle timeout message");
      auto& tm = node->msg.get_as<timeout_msg>(0);
      self->handle_timeout(fun, tm.timeout_id);
      if (awaited_response.valid()) {
        self->mark_arrived(awaited_response);
        self->remove_handler(awaited_response);
      }
      return hm_msg_handled;
    }
    case msg_type::timeout_response:
      handle_sync_failure_on_mismatch = false;
      CAF_ANNOTATE_FALLTHROUGH;
    case msg_type::sync_response:
      CAF_LOG_DEBUG("handle as synchronous response: "
                    << CAF_TARG(node->msg, to_string) << ", "
                    << CAF_MARG(node->mid, integer_value) << ", "
                    << CAF_MARG(awaited_response, integer_value));
      if (awaited_response.valid() && node->mid == awaited_response) {
        auto previous_node = dptr()->hm_begin(self, node);
        auto res = invoke_fun(self, node->msg, node->mid, fun);
        if (!res && handle_sync_failure_on_mismatch) {
          CAF_LOG_WARNING("sync failure occurred in actor "
                          << "with ID " << self->id());
          self->handle_sync_failure();
        }
        self->mark_arrived(awaited_response);
        self->remove_handler(awaited_response);
        dptr()->hm_cleanup(self, previous_node);
        return hm_msg_handled;
      }
      return hm_cache_msg;
    case msg_type::ordinary:
      if (!awaited_response.valid()) {
        auto previous_node = dptr()->hm_begin(self, node);
        auto res = invoke_fun(self, node->msg, node->mid, fun);
        if (res) {
          dptr()->hm_cleanup(self, previous_node);
          return hm_msg_handled;
        }
        // no match (restore self members)
        dptr()->hm_revert(self, previous_node);
      }
      CAF_LOG_DEBUG_IF(awaited_response.valid(),
                       "ignored message; await response: "
                       << awaited_response.integer_value());
      return hm_cache_msg;
  }
  // should be unreachable
  CAF_CRITICAL("invalid message type");
}
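// Hedged usage sketch for the msg_type::timeout branch above: a behavior with
// an after()-clause makes the runtime deliver a timeout_msg, which
// handle_message() dispatches to self->handle_timeout(). The actor and
// handlers below are hypothetical; the after(...) >> syntax is the style of
// API this code base targets.
caf::behavior with_timeout(caf::event_based_actor* self) {
  return {
    [](int value) {
      // ordinary message, taken via the msg_type::ordinary branch
      return value + 1;
    },
    caf::after(std::chrono::seconds(1)) >> [=] {
      // reached via the msg_type::timeout branch
      self->quit();
    }
  };
}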
optional<message> invoke_fun(Actor* self, message& msg, message_id& mid,
                             Fun& fun,
                             MaybeResponseHdl hdl = MaybeResponseHdl{}) {
# ifdef CAF_LOG_LEVEL
  auto msg_str = to_string(msg);
# endif
  CAF_LOG_TRACE(CAF_MARG(mid, integer_value) << ", msg = " << msg_str);
  auto res = fun(msg); // might change mid
  CAF_LOG_DEBUG_IF(res, "actor did consume message: " << msg_str);
  CAF_LOG_DEBUG_IF(!res, "actor did ignore message: " << msg_str);
  if (!res) {
    return none;
  }
  if (res->empty()) {
    // make sure synchronous requests always receive a response
    if (mid.is_request() && !mid.is_answered()) {
      CAF_LOG_WARNING("actor with ID " << self->id()
                      << " did not reply to a synchronous request message");
      auto fhdl = fetch_response_promise(self, hdl);
      if (fhdl) {
        fhdl.deliver(make_message(unit));
      }
    }
  } else {
    CAF_LOGF_DEBUG("res = " << to_string(*res));
    if (res->template has_types<atom_value, uint64_t>()
        && res->template get_as<atom_value>(0) == atom("MESSAGE_ID")) {
      CAF_LOG_DEBUG("message handler returned a message id wrapper");
      auto id = res->template get_as<uint64_t>(1);
      auto msg_id = message_id::from_integer_value(id);
      auto ref_opt = self->sync_handler(msg_id);
      // calls self->response_promise() if hdl is a dummy argument,
      // forwards hdl otherwise to reply to the original request message
      auto fhdl = fetch_response_promise(self, hdl);
      if (ref_opt) {
        behavior cpy = *ref_opt;
        *ref_opt = cpy.add_continuation(
          [=](message& intermediate) -> optional<message> {
            if (!intermediate.empty()) {
              // do not use a lambda expression type here to
              // avoid recursive template instantiation
              behavior::continuation_fun f2 =
                [=](message& m) -> optional<message> {
                  return std::move(m);
                };
              auto mutable_mid = mid;
              // recursively call invoke_fun on the result to correctly
              // handle stuff like sync_send(...).then(...).then(...)...
              return this->invoke_fun(self, intermediate, mutable_mid,
                                      f2, fhdl);
            }
            return none;
          });
      }
      // reset res to prevent "outer" invoke_fun
      // from handling the result again
      res->reset();
    } else {
      // respond by using the result of 'fun'
      CAF_LOG_DEBUG("respond via response_promise");
      auto fhdl = fetch_response_promise(self, hdl);
      if (fhdl) {
        fhdl.deliver(std::move(*res));
        // inform caller about success
        return message{};
      }
    }
  }
  return res;
}
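// A minimal client-side sketch for the MESSAGE_ID-wrapper branch above,
// assuming the sync_send/then API that invoke_fun() itself mentions; "worker"
// and the message contents are hypothetical.
void ask_worker(caf::event_based_actor* self, const caf::actor& worker) {
  self->sync_send(worker, caf::atom("add"), 40, 2).then(
    [=](int result) {
      // this continuation is resolved through invoke_fun()'s recursive call
      // once the handler chain has been built via add_continuation()
      caf::aout(self) << "result = " << result << "\n";
    }
  );
}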