// Applies a new receive policy to the scribe associated with `hdl`.
// Silently ignores handles that no longer map to a scribe.
void abstract_broker::configure_read(connection_handle hdl,
                                     receive_policy::config cfg) {
  CAF_LOG_TRACE(CAF_ARG(hdl) << CAF_ARG(cfg));
  auto scribe = by_id(hdl);
  if (scribe)
    scribe->configure_read(cfg);
}
// Blocks until a message matching `bhvr` (and, for synchronous receives,
// matching `mid`) has been consumed. Skipped messages are stashed in the
// cache and re-tried on the next dequeue.
void blocking_actor::dequeue(behavior& bhvr, message_id mid) {
  CAF_LOG_TRACE(CAF_MARG(mid, integer_value));
  // try to dequeue from cache first
  if (invoke_from_cache(bhvr, mid)) {
    return;
  }
  // requesting an invalid timeout will reset our active timeout
  uint32_t timeout_id = 0;
  if (mid == invalid_message_id) {
    // asynchronous receive: arm a regular behavior timeout
    timeout_id = request_timeout(bhvr.timeout());
  } else {
    // synchronous receive: arm a timeout bound to the request ID
    request_sync_timeout_msg(bhvr.timeout(), mid);
  }
  // read incoming messages
  for (;;) {
    await_data();
    auto msg = next_message();
    switch (invoke_message(msg, bhvr, mid)) {
      case im_success:
        // disarm the timeout we armed above before leaving
        if (mid == invalid_message_id) {
          reset_timeout(timeout_id);
        }
        return;
      case im_skipped:
        // message did not match; keep it for a later dequeue
        if (msg) {
          push_to_cache(std::move(msg));
        }
        break;
      default:
        // delete msg
        break;
    }
  }
}
// Creates an outbound path for `slot` towards `target` and registers it.
// Returns a raw pointer to the new path on success, nullptr if the
// manager rejected the insertion.
downstream_manager::path_ptr
downstream_manager::add_path(stream_slot slot, strong_actor_ptr target) {
  CAF_LOG_TRACE(CAF_ARG(slot) << CAF_ARG(target));
  unique_path_ptr new_path{new outbound_path(slot, std::move(target))};
  auto raw = new_path.get();
  if (insert_path(std::move(new_path)))
    return raw;
  return nullptr;
}
// Performs the one-time cleanup of this actor: stores the fail state,
// detaches all attachables, and notifies them that the actor exited.
// Returns false if another thread already cleaned this actor up.
bool monitorable_actor::cleanup(error&& reason, execution_unit* host) {
  CAF_LOG_TRACE(CAF_ARG(reason));
  attachable_ptr head;
  // the critical section makes sure only one caller ever wins the race
  // to set is_cleaned_up_flag
  bool set_fail_state = exclusive_critical_section([&]() -> bool {
    if (!getf(is_cleaned_up_flag)) {
      // local actors pass fail_state_ as first argument
      if (&fail_state_ != &reason)
        fail_state_ = std::move(reason);
      // steal the attachable list so we can notify outside the lock
      attachables_head_.swap(head);
      flags(flags() | is_terminated_flag | is_cleaned_up_flag);
      on_cleanup();
      return true;
    }
    return false;
  });
  if (!set_fail_state)
    return false;
  CAF_LOG_DEBUG("cleanup" << CAF_ARG(id()) << CAF_ARG(node())
                          << CAF_ARG(reason));
  // send exit messages
  // NOTE(review): when `reason` was moved into fail_state_ above, this reads
  // a moved-from error — presumably harmless for local actors where both
  // refer to the same object; confirm for remote callers.
  for (attachable* i = head.get(); i != nullptr; i = i->next.get())
    i->actor_exited(reason, host);
  // tell printer to purge its state for us if we ever used aout()
  if (getf(abstract_actor::has_used_aout_flag)) {
    auto pr = home_system().scheduler().printer();
    pr->enqueue(make_mailbox_element(nullptr, make_message_id(), {},
                                     delete_atom::value, id()),
                nullptr);
  }
  return true;
}
// Initializes the OpenSSL library for this actor system and validates the
// TLS configuration. Raises via CAF_RAISE_ERROR when authentication is
// enabled but certificate or key are missing.
void manager::init(actor_system_config&) {
  CAF_LOG_TRACE("");
  // global OpenSSL setup (pre-1.1 style API)
  ERR_load_crypto_strings();
  OPENSSL_add_all_algorithms_conf();
  SSL_library_init();
  SSL_load_error_strings();
  if (authentication_enabled()) {
    // authentication requires both a certificate and a private key
    if (system().config().openssl_certificate.empty())
      CAF_RAISE_ERROR("No certificate configured for SSL endpoint");
    if (system().config().openssl_key.empty())
      CAF_RAISE_ERROR("No private key configured for SSL endpoint");
  }
#if OPENSSL_VERSION_NUMBER < 0x10100000L
  // OpenSSL < 1.1 is not thread-safe unless the application installs
  // locking callbacks; install them exactly once, refcounted by init_count.
  std::lock_guard<std::mutex> lock{init_mutex};
  ++init_count;
  if (init_count == 1) {
    mutexes = std::vector<std::mutex>(CRYPTO_num_locks());
    CRYPTO_set_locking_callback(locking_function);
    CRYPTO_set_dynlock_create_callback(dynlock_create);
    CRYPTO_set_dynlock_lock_callback(dynlock_lock);
    CRYPTO_set_dynlock_destroy_callback(dynlock_destroy);
    // OpenSSL's default thread ID callback should work, so don't set our own.
  }
#endif
}
// Detaches this manager from the I/O loop and (optionally) delivers a
// disconnect message to the owning broker. Safe to call multiple times;
// only the first call past detached() performs the broker handoff.
void manager::detach(execution_unit*, bool invoke_disconnect_message) {
  CAF_LOG_TRACE(CAF_ARG(invoke_disconnect_message));
  // This function gets called from the multiplexer when an error occurs or
  // from the broker when closing this manager. In both cases, we need to make
  // sure this manager does not receive further socket events.
  remove_from_loop();
  // Disconnect from the broker if not already detached.
  if (!detached()) {
    CAF_LOG_DEBUG("disconnect servant from broker");
    auto raw_ptr = parent();
    // Keep a strong reference to our parent until we go out of scope.
    strong_actor_ptr ptr;
    ptr.swap(parent_);
    detach_from(raw_ptr);
    if (invoke_disconnect_message) {
      // synthesize and deliver the disconnect message directly to the broker
      auto mptr = make_mailbox_element(nullptr, invalid_message_id, {},
                                       detach_message());
      switch (raw_ptr->consume(*mptr)) {
        case im_success:
          raw_ptr->finalize();
          break;
        case im_skipped:
          // broker is not ready; park the message in its cache
          raw_ptr->push_to_cache(std::move(mptr));
          break;
        case im_dropped:
          CAF_LOG_INFO("broker dropped disconnect message");
          break;
      }
    }
  }
}
// Returns the proxy for (`nid`, `aid`), creating it via the backend on
// first access.
strong_actor_ptr proxy_registry::get_or_put(const key_type& nid,
                                            actor_id aid) {
  CAF_LOG_TRACE(CAF_ARG(nid) << CAF_ARG(aid));
  // operator[] default-constructs an empty slot on first lookup
  auto& entry = proxies_[nid][aid];
  if (!entry)
    entry = backend_.make_proxy(nid, aid);
  return entry;
}
// Thread body: resumes the detached actor until it either finishes or
// shuts down this execution unit, parking the thread between mailbox
// activity via await_resume().
void private_thread::run() {
  auto job = const_cast<scheduled_actor*>(self_);
  CAF_PUSH_AID(job->id());
  CAF_LOG_TRACE("");
  scoped_execution_unit ctx{&job->system()};
  // detached actors get unlimited throughput per resume
  auto max_throughput = std::numeric_limits<size_t>::max();
  bool resume_later;
  for (;;) {
    state_ = await_resume_or_shutdown;
    do {
      resume_later = false;
      switch (job->resume(&ctx, max_throughput)) {
        case resumable::resume_later:
          resume_later = true;
          break;
        case resumable::done:
          // actor finished; drop our reference and terminate the thread
          intrusive_ptr_release(job->ctrl());
          return;
        case resumable::awaiting_message:
          // mailbox drained; release and fall through to await_resume below
          intrusive_ptr_release(job->ctrl());
          break;
        case resumable::shutdown_execution_unit:
          return;
      }
    } while (resume_later);
    // wait until actor becomes ready again or was destroyed
    if (!await_resume())
      return;
  }
}
/// Causes this actor to subscribe to the group `what`. /// The group will be unsubscribed if the actor finishes execution. void join(const group& what) { CAF_LOG_TRACE(CAF_ARG(what)); if (what == invalid_group) return; if (what->subscribe(dptr()->ctrl())) subscriptions_.emplace(what); }
// Shuts down the scheduler: delivers a shutdown job to every worker,
// stops the utility actors, joins all worker threads, and finally
// releases every resumable still queued anywhere.
void stop() override {
  CAF_LOG_TRACE("");
  // shutdown workers
  class shutdown_helper : public resumable, public ref_counted {
  public:
    // Executed inside a worker; records which worker ran it and tells
    // that worker to shut down.
    resumable::resume_result resume(execution_unit* ptr, size_t) override {
      CAF_LOG_DEBUG("shutdown_helper::resume => shutdown worker");
      CAF_ASSERT(ptr != nullptr);
      std::unique_lock<std::mutex> guard(mtx);
      last_worker = ptr;
      cv.notify_all();
      return resumable::shutdown_execution_unit;
    }
    void intrusive_ptr_add_ref_impl() override {
      intrusive_ptr_add_ref(this);
    }
    void intrusive_ptr_release_impl() override {
      intrusive_ptr_release(this);
    }
    shutdown_helper() : last_worker(nullptr) {
      // nop
    }
    std::mutex mtx;
    std::condition_variable cv;
    // worker that most recently executed the helper; reset after each round
    execution_unit* last_worker;
  };
  // use a set to keep track of remaining workers
  shutdown_helper sh;
  std::set<worker_type*> alive_workers;
  auto num = num_workers();
  for (size_t i = 0; i < num; ++i) {
    alive_workers.insert(worker_by_id(i));
    sh.ref(); // make sure reference count is high enough
  }
  CAF_LOG_DEBUG("enqueue shutdown_helper into each worker");
  while (!alive_workers.empty()) {
    (*alive_workers.begin())->external_enqueue(&sh);
    // since jobs can be stolen, we cannot assume that we have
    // actually shut down the worker we've enqueued sh to
    { // lifetime scope of guard
      std::unique_lock<std::mutex> guard(sh.mtx);
      sh.cv.wait(guard, [&] { return sh.last_worker != nullptr; });
    }
    // whichever worker ran the helper is now shutting down; retry the rest
    alive_workers.erase(static_cast<worker_type*>(sh.last_worker));
    sh.last_worker = nullptr;
  }
  // shutdown utility actors
  stop_actors();
  // wait until all workers are done
  for (auto& w : workers_) {
    w->get_thread().join();
  }
  // run cleanup code for each resumable
  auto f = &abstract_coordinator::cleanup_and_release;
  for (auto& w : workers_)
    policy_.foreach_resumable(w.get(), f);
  policy_.foreach_central_resumable(this, f);
}
// Broker-specific cleanup: closes all connections and acceptors before
// delegating to the base-class cleanup.
bool abstract_broker::cleanup(error&& reason, execution_unit* host) {
  CAF_LOG_TRACE(CAF_ARG(reason));
  // close_all() must empty both containers before we assert on them
  close_all();
  CAF_ASSERT(doormen_.empty());
  CAF_ASSERT(scribes_.empty());
  cache_.clear();
  return local_actor::cleanup(std::move(reason), host);
}
// Removes the bidirectional link between this actor and `x`. Both sides
// are updated inside a joint critical section to keep them consistent.
void monitorable_actor::remove_link(abstract_actor* x) {
  CAF_LOG_TRACE(CAF_ARG(x));
  default_attachable::observe_token token{x->address(),
                                          default_attachable::link};
  auto unlink_both = [&] {
    x->remove_backlink(this);
    detach_impl(token, true);
  };
  joined_exclusive_critical_section(this, x, unlink_both);
}
// Spawns the worker thread. Must only be called once per worker.
void start() {
  // a default-constructed id means no thread is running yet
  CAF_ASSERT(this_thread_.get_id() == std::thread::id{});
  auto this_worker = this;
  auto body = [this_worker] {
    CAF_LOG_TRACE(CAF_ARG(this_worker->id()));
    this_worker->run();
  };
  this_thread_ = std::thread{body};
}
// Terminates this actor with the given exit reason. Blocking actors
// unwind their call stack via an actor_exited exception.
void local_actor::quit(uint32_t reason) {
  CAF_LOG_TRACE("reason = " << reason << ", class "
                << detail::demangle(typeid(*this)));
  planned_exit_reason(reason);
  if (!is_blocking())
    return;
  throw actor_exited(reason);
}
// Stops the manager actor by sending it a kill exit and, unless it runs
// attached to the scheduler, waiting for its termination.
void manager::stop() {
  CAF_LOG_TRACE("");
  scoped_actor self{system(), true};
  self->send_exit(manager_, exit_reason::kill);
  auto attached = get_or(config(), "middleman.attach-utility-actors", false);
  if (!attached)
    self->wait_for(manager_);
  manager_ = nullptr;
}
void downstream_manager::abort(error reason) { CAF_LOG_TRACE(CAF_ARG(reason)); for_each_path([&](outbound_path& x) { auto tmp = reason; about_to_erase(&x, false, &tmp); }); clear_paths(); }
void outbound_path::emit_irregular_shutdown(local_actor* self, stream_slots slots, const strong_actor_ptr& hdl, error reason) { CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(hdl) << CAF_ARG(reason)); unsafe_send_as(self, hdl, make<downstream_msg::forced_close>(slots, self->address(), std::move(reason))); }
bool abstract_actor::remove_backlink_impl(const actor_addr& other) { CAF_LOG_TRACE(CAF_TSARG(other)); default_attachable::observe_token tk{other, default_attachable::link}; if (other && other != this) { guard_type guard{mtx_}; return detach_impl(tk, attachables_head_, true) > 0; } return false; }
void actor_registry::await_running_count_equal(size_t expected) const { CAF_ASSERT(expected == 0 || expected == 1); CAF_LOG_TRACE(CAF_ARG(expected)); std::unique_lock<std::mutex> guard{running_mtx_}; while (running_ != expected) { CAF_LOG_DEBUG(CAF_ARG(running_.load())); running_cv_.wait(guard); } }
// Removes all proxies for node `nid`, killing each one with
// remote_link_unreachable first.
void proxy_registry::erase(const key_type& nid) {
  CAF_LOG_TRACE(CAF_ARG(nid));
  auto entry = proxies_.find(nid);
  if (entry == proxies_.end())
    return;
  for (auto& aid_and_proxy : entry->second)
    kill_proxy(aid_and_proxy.second, exit_reason::remote_link_unreachable);
  proxies_.erase(entry);
}
// Delivers `msg` to the actor registered under `dest_name` by resolving
// the name and forwarding to the strong-pointer overload.
void basp_broker_state::deliver(const node_id& src_nid, actor_id src_aid,
                                atom_value dest_name, message_id mid,
                                std::vector<strong_actor_ptr>& stages,
                                message& msg) {
  CAF_LOG_TRACE(CAF_ARG(src_nid) << CAF_ARG(src_aid) << CAF_ARG(dest_name)
                << CAF_ARG(msg) << CAF_ARG(mid));
  auto receiver = system().registry().get(dest_name);
  deliver(src_nid, src_aid, std::move(receiver), mid, stages, msg);
}
// Unsubscribes this actor from the group `what` if it is subscribed.
void local_actor::leave(const group& what) {
  CAF_LOG_TRACE(CAF_TSARG(what));
  if (what == invalid_group) {
    return;
  }
  // detach returns how many subscription tokens were removed; only
  // notify the group if we actually held a subscription
  auto removed = detach(abstract_group::subscription_token{what.ptr()});
  if (removed > 0) {
    what->unsubscribe(address());
  }
}
void abstract_broker::init_broker() { CAF_LOG_TRACE(""); setf(is_initialized_flag); // launch backends now, because user-defined initialization // might call functions like add_connection for (auto& kvp : doormen_) kvp.second->launch(); }
// Creates the backing actor for this scoped handle. When not hidden, the
// logging AID is switched to the new actor before the trace statement and
// the actor registers itself with the system.
scoped_actor::scoped_actor(actor_system& sys, bool hide) : context_(&sys) {
  actor_config cfg{&context_};
  self_ = make_actor<impl, strong_actor_ptr>(sys.next_actor_id(), sys.node(),
                                             &sys, cfg);
  // set the AID before tracing so the log line carries the new actor's ID;
  // prev_ is restored by the destructor
  if (!hide)
    prev_ = CAF_SET_AID(self_->id());
  CAF_LOG_TRACE(CAF_ARG(hide));
  if (!hide)
    ptr()->register_at_system();
}
// Sends a forced_close for this path's slots to its handle on behalf of
// `self`.
void outbound_path::emit_irregular_shutdown(local_actor* self, error reason) {
  CAF_LOG_TRACE(CAF_ARG(slots) << CAF_ARG(reason));
  // Note that we always send abort messages anonymously. They can get sent
  // after `self` already terminated and we must not form strong references
  // after that point. Since downstream messages contain the sender address
  // anyway, we only omit redundant information.
  anon_send(actor_cast<actor>(hdl),
            make<downstream_msg::forced_close>(slots, self->address(),
                                               std::move(reason)));
}
// Notifies the owning broker about write progress on this connection.
void scribe::data_transferred(execution_unit* ctx, size_t written,
                              size_t remaining) {
  CAF_LOG_TRACE(CAF_ARG(written) << CAF_ARG(remaining));
  // a detached scribe no longer has a broker to notify
  if (detached())
    return;
  data_transferred_msg progress{hdl(), written, remaining};
  auto mail = make_mailbox_element(nullptr, invalid_message_id, {}, progress);
  parent()->context(ctx);
  parent()->consume(std::move(mail));
}
// Restores the previous logging AID (if we registered one) and runs the
// actor's cleanup with exit_reason::normal unless it already terminated.
scoped_actor::~scoped_actor() {
  CAF_LOG_TRACE("");
  if (!self_)
    return;
  auto x = ptr();
  // the registered flag tells us whether the constructor swapped the AID
  if (x->getf(abstract_actor::is_registered_flag))
    CAF_SET_AID(prev_);
  if (!x->getf(abstract_actor::is_terminated_flag))
    x->cleanup(exit_reason::normal, &context_);
}
// Removes the proxy entry for actor `aid` on node `inf`; drops the node's
// table entirely once its last proxy is gone.
void actor_namespace::erase(const key_type& inf, actor_id aid) {
  CAF_LOG_TRACE(CAF_TARG(inf, to_string) << ", " << CAF_ARG(aid));
  auto node_entry = proxies_.find(inf);
  if (node_entry == proxies_.end())
    return;
  node_entry->second.erase(aid);
  if (node_entry->second.empty())
    proxies_.erase(node_entry);
}
void downstream_manager::about_to_erase(outbound_path* ptr, bool silent, error* reason) { CAF_LOG_TRACE(CAF_ARG(ptr) << CAF_ARG(silent) << CAF_ARG(reason)); if (!silent) { if (reason == nullptr) ptr->emit_regular_shutdown(self()); else ptr->emit_irregular_shutdown(self(), std::move(*reason)); } }
// Wraps the user message in a "_Dispatch" envelope and enqueues it into
// the parent middleman actor, which performs the actual network send.
void remote_actor_proxy::forward_msg(const actor_addr& sender, message_id mid,
                                     message msg) {
  CAF_LOG_TRACE(CAF_ARG(id()) << ", " << CAF_TSARG(sender) << ", "
                << CAF_MARG(mid, integer_value) << ", " << CAF_TSARG(msg));
  m_parent->enqueue(
    invalid_actor_addr, invalid_message_id,
    make_message(atom("_Dispatch"), sender, address(), mid, std::move(msg)),
    nullptr);
}