void initialize() override { super::initialize(); // create & start workers m_workers.resize(num_workers()); for (size_t i = 0; i < num_workers(); ++i) { m_workers[i].start(i, this); } }
// Attempts to steal a job for `self` from a randomly chosen sibling
// worker. Returns the stolen resumable, or nullptr when stealing is
// impossible or the victim's queue yields nothing.
resumable* try_steal(Worker* self) {
  auto coordinator = self->parent();
  // With fewer than two workers there is nobody to steal from.
  if (coordinator->num_workers() < 2)
    return nullptr;
  // Draw a random victim id; if the draw lands on ourselves, remap it
  // to the last worker id instead.
  auto victim_id = d(self).uniform(d(self).rengine);
  if (victim_id == self->id())
    victim_id = coordinator->num_workers() - 1;
  // Steal the oldest element from the victim's queue.
  return d(coordinator->worker_by_id(victim_id)).queue.take_tail();
}
// Shuts the coordinator down: drives every worker through its shutdown
// path via a shared shutdown_helper resumable, stops the utility actors,
// joins all worker threads, and finally runs cleanup on any resumables
// still queued in the workers or in the central queue.
void stop() override {
  CAF_LOG_TRACE("");
  // shutdown workers
  class shutdown_helper : public resumable, public ref_counted {
  public:
    // Executed on a worker thread: records which execution unit ran us
    // and signals the waiting stop() call, then instructs that worker
    // to shut down via the returned resume result.
    resumable::resume_result resume(execution_unit* ptr, size_t) override {
      CAF_LOG_DEBUG("shutdown_helper::resume => shutdown worker");
      CAF_ASSERT(ptr != nullptr);
      std::unique_lock<std::mutex> guard(mtx);
      last_worker = ptr;
      cv.notify_all();
      return resumable::shutdown_execution_unit;
    }
    void intrusive_ptr_add_ref_impl() override {
      intrusive_ptr_add_ref(this);
    }
    void intrusive_ptr_release_impl() override {
      intrusive_ptr_release(this);
    }
    shutdown_helper() : last_worker(nullptr) {
      // nop
    }
    std::mutex mtx;
    std::condition_variable cv;
    // The worker that most recently resumed this helper; stop() resets
    // it to nullptr after each handshake round.
    execution_unit* last_worker;
  };
  // use a set to keep track of remaining workers
  shutdown_helper sh;
  std::set<worker_type*> alive_workers;
  auto num = num_workers();
  for (size_t i = 0; i < num; ++i) {
    alive_workers.insert(worker_by_id(i));
    sh.ref(); // make sure reference count is high enough
  }
  CAF_LOG_DEBUG("enqueue shutdown_helper into each worker");
  // Re-enqueue sh until every worker has executed it once. One round
  // per iteration: enqueue, wait for the handshake, then cross off
  // whichever worker actually ran it.
  while (!alive_workers.empty()) {
    (*alive_workers.begin())->external_enqueue(&sh);
    // since jobs can be stolen, we cannot assume that we have
    // actually shut down the worker we've enqueued sh to
    { // lifetime scope of guard
      std::unique_lock<std::mutex> guard(sh.mtx);
      sh.cv.wait(guard, [&] { return sh.last_worker != nullptr; });
    }
    alive_workers.erase(static_cast<worker_type*>(sh.last_worker));
    sh.last_worker = nullptr;
  }
  // shutdown utility actors
  stop_actors();
  // wait until all workers are done
  for (auto& w : workers_) {
    w->get_thread().join();
  }
  // run cleanup code for each resumable
  auto f = &abstract_coordinator::cleanup_and_release;
  for (auto& w : workers_)
    policy_.foreach_resumable(w.get(), f);
  policy_.foreach_central_resumable(this, f);
}
// Registers a lambda on every worker. The lambda is identified by the
// hash of its (unmodified) source text; an optional leading "LUA"
// marker is stripped before the source is executed. Returns the hash,
// which callers use as the handle for this lambda.
size_t lualambda_master::make_lambda(const std::string& lambda_str) {
  const size_t lambda_hash = std::hash<std::string>()(lambda_str);
  // Drop the "LUA" prefix, if present, before shipping the source.
  std::string source = lambda_str;
  if (boost::starts_with(source, "LUA"))
    source = source.substr(3);
  // Execute the source on each worker, then bind the transferred
  // lambda to a name derived from its hash.
  parallel_for (0, num_workers(), [&](size_t worker) {
    clients[worker]->doString(source);
    clients[worker]->doString("lambda" + std::to_string(lambda_hash)
                              + " = __lambda__transfer__");
  });
  return lambda_hash;
}
void start() override { // initialize workers vector auto num = num_workers(); workers_.reserve(num); for (size_t i = 0; i < num; ++i) workers_.emplace_back(new worker_type(i, this, max_throughput_)); // start all workers now that all workers have been initialized for (auto& w : workers_) w->start(); // run remaining startup code super::start(); }
// Releases the lambda identified by `lambda_hash` on every worker by
// rebinding its name to an empty table, replacing whatever was bound
// there before.
void lualambda_master::release_lambda(size_t lambda_hash) {
  // The statement is identical for all workers; build it once.
  const std::string stmt =
      "lambda" + std::to_string(lambda_hash) + " = {}";
  parallel_for (0, num_workers(), [&](size_t worker) {
    clients[worker]->doString(stmt);
  });
}