// Flushes the internal write buffer to the output channel. Returns
// `write_done` once everything was written, `write_continue_later`
// on a partial write (remaining bytes stay buffered), or
// `write_failure` if the underlying channel throws.
continue_writing_result continue_writing() override {
    CPPA_LOG_TRACE("");
    CPPA_LOG_DEBUG_IF(!m_has_unwritten_data, "nothing to write (done)");
    while (m_has_unwritten_data) {
        size_t written;
        try {
            written = m_out->write_some(m_buf.data(), m_buf.size());
        }
        catch (const std::exception& e) { // fix: catch by const reference
            CPPA_LOG_ERROR(to_verbose_string(e));
            static_cast<void>(e); // keep compiler happy when logging is disabled
            return write_failure;
        }
        if (written != m_buf.size()) {
            // partial write: drop the written prefix and retry later
            // (fix: added missing space before "bytes" in the log output)
            CPPA_LOG_DEBUG("tried to write " << m_buf.size() << " bytes, "
                           << "only " << written << " bytes written");
            m_buf.erase_leading(written);
            return write_continue_later;
        }
        else {
            m_buf.clear();
            m_has_unwritten_data = false;
            CPPA_LOG_DEBUG("write done, " << written << " bytes written");
        }
    }
    return write_done;
}
// Subscribes this actor to group `what`; a null handle or an
// already-joined group is silently ignored.
void local_actor::join(const group& what) {
    CPPA_LOG_TRACE(CPPA_TSARG(what));
    // guard clauses: bail out on invalid handle or duplicate join
    if (!what) return;
    if (m_subscriptions.count(what) != 0) return;
    CPPA_LOG_DEBUG("join group: " << to_string(what));
    m_subscriptions.insert(std::make_pair(what, what->subscribe(this)));
}
void actor_registry::await_running_count_equal(size_t expected) { CPPA_LOG_TRACE(CPPA_ARG(expected)); std::unique_lock<std::mutex> guard(m_running_mtx); while (m_running.load() != expected) { m_running_cv.wait(guard); } }
// Applies all queued alterations to the sorted fd registry `m_meta`,
// forwards each change to the backend via handle_event(), and finally
// disposes continuables whose file descriptors are no longer alive.
void middleman_event_handler::update() {
    CPPA_LOG_TRACE("");
    // comparator for binary search: fd_meta_info entries vs. raw fd
    auto mless = [](const fd_meta_info& lhs, native_socket_type rhs) {
        return lhs.fd < rhs;
    };
    for (auto& elem_pair : m_alterations) {
        auto& elem = elem_pair.first;
        auto old = event::none;
        auto last = m_meta.end();
        // m_meta is kept sorted by fd, so lower_bound finds the entry
        // (or the insertion point) for elem.fd
        auto iter = std::lower_bound(m_meta.begin(), last, elem.fd, mless);
        if (iter != last) old = iter->mask;
        // combine old mask with the requested add/erase operation
        auto mask = next_bitmask(old, elem.mask, elem_pair.second);
        auto ptr = elem.ptr;
        CPPA_LOG_DEBUG("new bitmask for " << elem.ptr << ": " << eb2str(mask));
        if (iter == last || iter->fd != elem.fd) {
            // fd not registered yet: either a bogus erase or a fresh add
            CPPA_LOG_ERROR_IF(mask == event::none,
                              "cannot erase " << ptr
                              << " (no such element)");
            if (mask != event::none) {
                // insert at lower_bound position keeps m_meta sorted
                m_meta.insert(iter, elem);
                handle_event(fd_meta_event::add, elem.fd,
                             event::none, mask, ptr);
            }
        }
        else if (iter->fd == elem.fd) {
            CPPA_REQUIRE(iter->ptr == elem.ptr);
            if (mask == event::none) {
                // note: we cannot decide whether it's safe to dispose `ptr`,
                // because we didn't parse all alterations yet
                m_dispose_list.emplace_back(ptr);
                m_meta.erase(iter);
                handle_event(fd_meta_event::erase, elem.fd, old, mask, ptr);
            }
            else {
                // mask changed but fd stays registered
                iter->mask = mask;
                handle_event(fd_meta_event::mod, elem.fd, old, mask, ptr);
            }
        }
    }
    m_alterations.clear();
    // m_meta won't be touched inside loop
    auto first = m_meta.begin();
    auto last = m_meta.end();
    // a continuable may register separate read/write fds; it is only
    // safe to dispose once none of its fds remain in m_meta
    auto is_alive = [&](native_socket_type fd) -> bool {
        auto iter = std::lower_bound(first, last, fd, mless);
        return iter != last && iter->fd == fd;
    };
    // check whether elements in dispose list can be safely deleted
    for (auto elem : m_dispose_list) {
        auto rd = elem->read_handle();
        auto wr = elem->write_handle();
        if (   (rd == wr && !is_alive(rd))
            || (rd != wr && !is_alive(rd) && !is_alive(wr))) {
            elem->dispose();
        }
    }
    m_dispose_list.clear();
}
// Runs `self` in its own detached thread. The intrusive_ptr copied into
// the lambda keeps the actor alive until the thread finishes.
void launch(Actor* self, execution_unit*) {
    CPPA_REQUIRE(self != nullptr);
    CPPA_PUSH_AID(self->id());
    CPPA_LOG_TRACE(CPPA_ARG(self));
    intrusive_ptr<Actor> mself{self};
    self->attach_to_scheduler();
    std::thread([=] {
        CPPA_PUSH_AID(mself->id());
        CPPA_LOG_TRACE("");
        detail::cs_thread fself;
        for (;;) {
            if (mself->resume(&fself, nullptr) == resumable::done) {
                // fix: detach before leaving the thread; the original
                // placed this call after the loop, where it was
                // unreachable (the loop only exits via this return)
                mself->detach_from_scheduler();
                return;
            }
            // await new data before resuming actor
            await_data(mself.get());
            CPPA_REQUIRE(mself->mailbox().blocked() == false);
        }
    }).detach();
}
// Constructs an OpenCL actor facade from a compiled program and kernel.
// `global_dimensions`/`global_offsets`/`local_dimensions` describe the
// NDRange used when enqueueing the kernel; `map_args`/`map_result`
// convert between actor messages and kernel arguments/results;
// `result_size` is the element count of the output buffer.
actor_facade(const program& prog,
             kernel_ptr kernel,
             const dim_vec& global_dimensions,
             const dim_vec& global_offsets,
             const dim_vec& local_dimensions,
             arg_mapping map_args,
             result_mapping map_result,
             size_t result_size)
: m_kernel(kernel)
, m_program(prog.m_program)
, m_context(prog.m_context)
, m_queue(prog.m_queue)
, m_global_dimensions(global_dimensions)
, m_global_offsets(global_offsets)
, m_local_dimensions(local_dimensions)
, m_map_args(std::move(map_args))
, m_map_result(std::move(map_result))
, m_result_size(result_size) {
    CPPA_LOG_TRACE("id: " << this->id());
}
// Terminates this actor with the given exit reason. The special reason
// `unallowed_function_call` additionally throws actor_exited to unwind
// out of a blocking call made from an event-based actor; all other
// reasons are merely recorded via planned_exit_reason().
void local_actor::quit(std::uint32_t reason) {
    CPPA_LOG_TRACE("reason = " << reason
                   << ", class " << detail::demangle(typeid(*this)));
    if (reason == exit_reason::unallowed_function_call) {
        // this is the only reason that causes an exception
        // (cleanup must run before the throw below)
        cleanup(reason);
        CPPA_LOG_WARNING("actor tried to use a blocking function");
        // when using receive(), the non-blocking nature of event-based
        // actors breaks any assumption the user has about his code,
        // in particular, receive_loop() is a deadlock when not throwing
        // an exception here
        aout(this) << "*** warning: event-based actor killed because it tried "
                      "to use receive()\n";
        throw actor_exited(reason);
    }
    planned_exit_reason(reason);
}
// Shuts the pool down: pushes the dummy job so workers pass it along
// and terminate, joins the supervisor, then drains the job queue so
// the queue's destructor does not delete jobs it does not own.
void thread_pool_scheduler::destroy() {
    CPPA_LOG_TRACE("");
    // the dummy circulates through the workers and makes each one quit
    m_queue.push_back(&m_dummy);
    CPPA_LOG_DEBUG("join supervisor");
    m_supervisor.join();
    // make sure job queue is empty, because destructor of m_queue would
    // otherwise delete elements it shouldn't
    CPPA_LOG_DEBUG("flush queue");
    auto ptr = m_queue.try_pop();
    while (ptr != nullptr) {
        if (ptr != &m_dummy) {
            // NOTE(review): the commented-out block below suggests the
            // deref/fence logic is unfinished; only the running-count
            // is decremented for each drained job
            /*FIXME
            bool hidden = ptr->is_hidden();
            ptr->deref();
            std::atomic_thread_fence(std::memory_order_seq_cst);
            if (!hidden)*/ get_actor_registry()->dec_running();
        }
        ptr = m_queue.try_pop();
    }
    super::destroy();
}
// Worker thread loop: dequeues jobs (escalating from busy-polling to
// blocking via aggressive/moderate/relaxed) and resumes them until the
// shutdown dummy arrives, which is re-enqueued for the next worker.
void operator()() {
    CPPA_LOG_TRACE("");
    detail::cs_thread fself;
    job_ptr job = nullptr;
    for (;;) {
        // short-circuit: each strategy returns true once a job was dequeued
        aggressive(job) || moderate(job) || relaxed(job);
        CPPA_LOG_DEBUG("dequeued new job");
        if (job == m_dummy) {
            CPPA_LOG_DEBUG("received dummy (quit)");
            // dummy of doom received ...
            m_job_queue->push_back(job); // kill the next guy
            return;                      // and say goodbye
        }
        if (job->resume(&fself) == resumable::done) {
            CPPA_LOG_DEBUG("actor is done");
            // NOTE(review): deref/hidden handling is commented out, so a
            // finished job only decrements the running count here
            /*FIXME
            bool hidden = job->is_hidden();
            job->deref();
            if (!hidden)*/ get_actor_registry()->dec_running();
        }
        job = nullptr;
    }
}
void update() { CPPA_LOG_TRACE(""); for (auto& elem_pair : m_alterations) { auto& elem = elem_pair.first; auto old = event::none; auto last = end(m_meta); auto iter = lower_bound(begin(m_meta), last, elem.fd, m_less); if (iter != last) old = iter->mask; auto mask = next_bitmask(old, elem.mask, elem_pair.second); auto ptr = elem.ptr.get(); CPPA_LOG_DEBUG("new bitmask for " << elem.ptr.get() << ": " << eb2str(mask)); if (iter == last || iter->fd != elem.fd) { CPPA_LOG_INFO_IF(mask == event::none, "cannot erase " << ptr << " (not found in m_meta)"); if (mask != event::none) { m_meta.insert(iter, elem); d()->handle_event(fd_meta_event::add, elem.fd, event::none, mask, ptr); } } else if (iter->fd == elem.fd) { CPPA_REQUIRE(iter->ptr == elem.ptr); if (mask == event::none) { m_meta.erase(iter); d()->handle_event(fd_meta_event::erase, elem.fd, old, mask, ptr); } else { iter->mask = mask; d()->handle_event(fd_meta_event::mod, elem.fd, old, mask, ptr); } } } m_alterations.clear(); }
// Queues an erase of event mask `e` for `ptr`; the change takes effect
// on the next update() pass, not immediately.
void middleman_event_handler::erase_later(continuable* ptr, event_bitmask e) {
    CPPA_LOG_TRACE(CPPA_ARG(ptr) << ", e = " << eb2str(e));
    alteration(ptr, e, fd_meta_event::erase);
}
// Queues an addition of event mask `e` for `ptr`; the change takes
// effect on the next update() pass, not immediately.
void middleman_event_handler::add_later(continuable* ptr, event_bitmask e) {
    CPPA_LOG_TRACE(CPPA_ARG(ptr) << ", " << CPPA_TARG(e, eb2str)
                   << ", socket = " << ptr->read_handle());
    alteration(ptr, e, fd_meta_event::add);
}
// Drives the actor's message loop on the calling execution unit.
// Returns `resume_later` when the mailbox was blocked awaiting new
// messages, or `done` once the actor terminated (normally, via planned
// exit reason, or through an exception).
resumable::resume_result resume(detail::cs_thread*,
                                execution_unit* host) override {
    auto d = static_cast<Derived*>(this);
    d->m_host = host;
    CPPA_LOG_TRACE("id = " << d->id());
    // finalizes the actor; returns false if on_exit() installed a new
    // behavior (i.e. the actor keeps running), true when cleanup ran
    auto done_cb = [&]() -> bool {
        CPPA_LOG_TRACE("");
        d->bhvr_stack().clear();
        d->bhvr_stack().cleanup();
        d->on_exit();
        if (!d->bhvr_stack().empty()) {
            CPPA_LOG_DEBUG("on_exit did set a new behavior in on_exit");
            d->planned_exit_reason(exit_reason::not_exited);
            return false; // on_exit did set a new behavior
        }
        auto rsn = d->planned_exit_reason();
        if (rsn == exit_reason::not_exited) {
            // default to a normal exit when no reason was set
            rsn = exit_reason::normal;
            d->planned_exit_reason(rsn);
        }
        d->cleanup(rsn);
        return true;
    };
    // an actor is done when it has no behavior left or an exit
    // reason has been planned
    auto actor_done = [&] {
        return d->bhvr_stack().empty()
               || d->planned_exit_reason() != exit_reason::not_exited;
    };
    // actors without behavior or that have already defined
    // an exit reason must not be resumed
    CPPA_REQUIRE(!d->m_initialized || !actor_done());
    if (!d->m_initialized) {
        // first resume: install the initial behavior
        d->m_initialized = true;
        auto bhvr = d->make_behavior();
        if (bhvr) d->become(std::move(bhvr));
        // else: make_behavior() might have just called become()
        if (actor_done() && done_cb()) return resume_result::done;
        // else: enter resume loop
    }
    try {
        for (;;) {
            auto ptr = d->next_message();
            if (ptr) {
                if (d->invoke_message(ptr)) {
                    if (actor_done() && done_cb()) {
                        CPPA_LOG_DEBUG("actor exited");
                        return resume_result::done;
                    }
                    // continue from cache if current message was
                    // handled, because the actor might have changed
                    // its behavior to match 'old' messages now
                    while (d->invoke_message_from_cache()) {
                        if (actor_done() && done_cb()) {
                            CPPA_LOG_DEBUG("actor exited");
                            return resume_result::done;
                        }
                    }
                }
                // add ptr to cache if invoke_message
                // did not reset it (i.e. skipped, but not dropped)
                if (ptr) {
                    CPPA_LOG_DEBUG("add message to cache");
                    d->push_to_cache(std::move(ptr));
                }
            }
            else {
                CPPA_LOG_DEBUG("no more element in mailbox; "
                               "going to block");
                if (d->mailbox().try_block()) {
                    return resumable::resume_later;
                }
                // else: try again (a message arrived between
                // next_message() and try_block())
            }
        }
    }
    catch (actor_exited& what) {
        // a quit() during message handling unwinds via actor_exited
        CPPA_LOG_INFO("actor died because of exception: actor_exited, "
                      "reason = " << what.reason());
        if (d->exit_reason() == exit_reason::not_exited) {
            d->quit(what.reason());
        }
    }
    catch (std::exception& e) {
        CPPA_LOG_WARNING("actor died because of exception: "
                         << detail::demangle(typeid(e))
                         << ", what() = " << e.what());
        if (d->exit_reason() == exit_reason::not_exited) {
            d->quit(exit_reason::unhandled_exception);
        }
    }
    catch (...) {
        CPPA_LOG_WARNING("actor died because of an unknown exception");
        if (d->exit_reason() == exit_reason::not_exited) {
            d->quit(exit_reason::unhandled_exception);
        }
    }
    // exception path: run finalization before reporting done
    done_cb();
    return resumable::done;
}
void enqueue(const message_header& hdr, any_tuple msg) override { CPPA_LOG_TRACE(""); typename util::il_indices<util::type_list<Args...>>::type indices; enqueue_impl(hdr.sender, std::move(msg), hdr.id, indices); }
// Drops all group subscriptions for this actor, then delegates the
// remaining teardown to the superclass cleanup.
void local_actor::cleanup(std::uint32_t reason) {
    CPPA_LOG_TRACE(CPPA_ARG(reason));
    m_subscriptions.clear();
    super::cleanup(reason);
}
// Queues an erase of event mask `e` for `ptr`; the change takes effect
// on the next update() pass, not immediately.
void erase(const continuable_reader_ptr& ptr, event_bitmask e) {
    CPPA_LOG_TRACE("ptr = " << ptr.get() << ", e = " << eb2str(e));
    alteration(ptr, e, fd_meta_event::erase);
}
void enqueue () { CPPA_LOG_TRACE("command::enqueue()"); this->ref(); // reference held by the OpenCL comand queue cl_int err{0}; cl_event event_k; auto data_or_nullptr = [](const dim_vec& vec) { return vec.empty() ? nullptr : vec.data(); }; err = clEnqueueNDRangeKernel(m_queue.get(), m_actor_facade->m_kernel.get(), m_actor_facade->m_global_dimensions.size(), data_or_nullptr(m_actor_facade->m_global_offsets), data_or_nullptr(m_actor_facade->m_global_dimensions), data_or_nullptr(m_actor_facade->m_local_dimensions), m_events.size(), (m_events.empty() ? nullptr : m_events.data()), &event_k); if (err != CL_SUCCESS) { CPPA_LOGMF(CPPA_ERROR, "clEnqueueNDRangeKernel: " << get_opencl_error(err)); this->deref(); // or can anything actually happen? return; } else { cl_event event_r; err = clEnqueueReadBuffer(m_queue.get(), m_arguments.back().get(), CL_FALSE, 0, sizeof(typename R::value_type) * m_result_size, m_result.data(), 1, &event_k, &event_r); if (err != CL_SUCCESS) { throw std::runtime_error("clEnqueueReadBuffer: " + get_opencl_error(err)); this->deref(); // failed to enqueue command return; } err = clSetEventCallback(event_r, CL_COMPLETE, [](cl_event, cl_int, void* data) { auto cmd = reinterpret_cast<command*>(data); cmd->handle_results(); cmd->deref(); }, this); if (err != CL_SUCCESS) { CPPA_LOGMF(CPPA_ERROR, "clSetEventCallback: " << get_opencl_error(err)); this->deref(); // callback is not set return; } err = clFlush(m_queue.get()); if (err != CL_SUCCESS) { CPPA_LOGMF(CPPA_ERROR, "clFlush: " << get_opencl_error(err)); } m_events.push_back(std::move(event_k)); m_events.push_back(std::move(event_r)); } }
// Forwards the message to enqueue_impl, expanding the Args... pack
// into a compile-time index list. The execution unit is ignored.
void enqueue(msg_hdr_cref hdr, any_tuple msg, execution_unit*) override {
    CPPA_LOG_TRACE("");
    using index_list = typename util::il_indices<util::type_list<Args...>>::type;
    enqueue_impl(hdr.sender, std::move(msg), hdr.id, index_list{});
}