void private_thread::run() {
  auto job = self_.load();
  CAF_ASSERT(job != nullptr);
  CAF_SET_LOGGER_SYS(&job->system());
  CAF_PUSH_AID(job->id());
  CAF_LOG_TRACE("");
  scoped_execution_unit ctx{&job->system()};
  auto max_throughput = std::numeric_limits<size_t>::max();
  bool resume_later;
  for (;;) {
    state_ = await_resume_or_shutdown;
    do {
      resume_later = false;
      switch (job->resume(&ctx, max_throughput)) {
        case resumable::resume_later:
          resume_later = true;
          break;
        case resumable::done:
          intrusive_ptr_release(job->ctrl());
          return;
        case resumable::awaiting_message:
          intrusive_ptr_release(job->ctrl());
          break;
        case resumable::shutdown_execution_unit:
          return;
      }
    } while (resume_later);
    // wait until actor becomes ready again or was destroyed
    if (!await_resume())
      return;
  }
}
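This loop drives detached actors: every actor spawned with the detached flag gets its own private_thread, and run() keeps resuming it until the actor terminates or the system shuts down. A minimal usage sketch under that assumption (the actor body below is illustrative, not part of the excerpt):

#include <string>

#include "caf/all.hpp"

using namespace caf;

behavior echo(event_based_actor*) {
  return {
    [](const std::string& what) {
      return what; // reply with the same string
    }
  };
}

void example(actor_system& sys) {
  // spawned with the detached flag, this actor is resumed by
  // private_thread::run() on a dedicated thread instead of by a
  // scheduler worker
  auto e = sys.spawn<detached>(echo);
  anon_send(e, std::string("hello"));
}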
actor_system::~actor_system() {
  if (await_actors_before_shutdown_)
    await_all_actors_done();
  // shutdown system-level servers
  anon_send_exit(spawn_serv_, exit_reason::user_shutdown);
  anon_send_exit(config_serv_, exit_reason::user_shutdown);
  // release memory as soon as possible
  spawn_serv_ = nullptr;
  config_serv_ = nullptr;
  registry_.erase(atom("SpawnServ"));
  registry_.erase(atom("ConfigServ"));
  // group module is the first one, relies on MM
  groups_.stop();
  // stop modules in reverse order
  for (auto i = modules_.rbegin(); i != modules_.rend(); ++i)
    if (*i)
      (*i)->stop();
  await_detached_threads();
  registry_.stop();
  logger_.stop();
  CAF_SET_LOGGER_SYS(nullptr);
}
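The destructor returns only after every actor has terminated (unless await_actors_before_shutdown_ was disabled), then shuts down the two system-level servers, the group module, and the remaining modules in reverse load order. A sketch of the typical lifetime that exercises this path, assuming the standard CAF entry point:

#include "caf/all.hpp"

using namespace caf;

int main() {
  actor_system_config cfg;
  actor_system sys{cfg};
  sys.spawn([](event_based_actor*) {
    // function-based actor: returns without setting a behavior and thus
    // terminates right away, so the destructor does not block forever
  });
  // leaving main() runs ~actor_system(): it awaits all actors, sends exit
  // messages to SpawnServ/ConfigServ and stops the modules in reverse order
}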
Example #3
 void run() {
   CAF_SET_LOGGER_SYS(&system());
   CAF_LOG_TRACE(CAF_ARG(id_));
   // scheduling loop
   for (;;) {
     auto job = policy_.dequeue(this);
     CAF_ASSERT(job != nullptr);
     CAF_ASSERT(job->subtype() != resumable::io_actor);
     CAF_LOG_DEBUG("resume actor:" << CAF_ARG(id_of(job)));
     CAF_PUSH_AID_FROM_PTR(dynamic_cast<abstract_actor*>(job));
     policy_.before_resume(this, job);
     auto res = job->resume(this, max_throughput_);
     policy_.after_resume(this, job);
     switch (res) {
       case resumable::resume_later: {
         // keep reference to this actor, as it remains in the "loop"
         policy_.resume_job_later(this, job);
         break;
       }
       case resumable::done: {
         policy_.after_completion(this, job);
         intrusive_ptr_release(job);
         break;
       }
       case resumable::awaiting_message: {
         // resumable may be enqueued again later, release our reference for now
         intrusive_ptr_release(job);
         break;
       }
       case resumable::shutdown_execution_unit: {
         policy_.after_completion(this, job);
         policy_.before_shutdown(this);
         return;
       }
     }
   }
 }
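Each worker dequeues jobs from its policy and resumes them for at most max_throughput_ messages; the four resumable return codes decide whether the job is re-enqueued (resume_later), released (done, awaiting_message), or whether the worker itself shuts down. The throughput quota normally comes from the configuration; the sketch below assumes the scheduler_max_throughput field of actor_system_config (a CAF 0.15-era name, not shown in the excerpt):

#include "caf/all.hpp"

int main() {
  caf::actor_system_config cfg;
  // resume each actor for at most 300 messages per scheduling round; after
  // that, resume() returns resume_later and the worker re-enqueues the job
  cfg.scheduler_max_throughput = 300;
  caf::actor_system sys{cfg};
}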
actor_system::actor_system(actor_system_config& cfg)
    : ids_(0),
      types_(*this),
      logger_(*this),
      registry_(*this),
      groups_(*this),
      middleman_(nullptr),
      dummy_execution_unit_(this),
      await_actors_before_shutdown_(true),
      detached(0),
      cfg_(cfg) {
  CAF_SET_LOGGER_SYS(this);
  for (auto& f : cfg.module_factories) {
    auto mod_ptr = f(*this);
    modules_[mod_ptr->id()].reset(mod_ptr);
  }
  auto& mmptr = modules_[module::middleman];
  if (mmptr)
    middleman_ = reinterpret_cast<io::middleman*>(mmptr->subtype_ptr());
  auto& clptr = modules_[module::opencl_manager];
  if (clptr)
    opencl_manager_ = reinterpret_cast<opencl::manager*>(clptr->subtype_ptr());
  auto& sched = modules_[module::scheduler];
  using share = scheduler::coordinator<policy::work_sharing>;
  using steal = scheduler::coordinator<policy::work_stealing>;
  using profiled_share = scheduler::profiled_coordinator<policy::work_sharing>;
  using profiled_steal = scheduler::profiled_coordinator<policy::work_stealing>;
  // set scheduler only if not explicitly loaded by user
  if (!sched) {
    enum sched_conf {
      stealing          = 0x0001,
      sharing           = 0x0002,
      profiled          = 0x0100,
      profiled_stealing = 0x0101,
      profiled_sharing  = 0x0102
    };
    sched_conf sc = stealing;
    if (cfg.scheduler_policy == atom("sharing"))
      sc = sharing;
    else if (cfg.scheduler_policy != atom("stealing"))
      std::cerr << "[WARNING] " << deep_to_string(cfg.scheduler_policy)
                << " is an unrecognized scheduler pollicy, "
                   "falling back to 'stealing' (i.e. work-stealing)"
                << std::endl;
    if (cfg.scheduler_enable_profiling)
      sc = static_cast<sched_conf>(sc | profiled);
    switch (sc) {
      default: // any invalid configuration falls back to work stealing
        sched.reset(new steal(*this));
        break;
      case sharing:
        sched.reset(new share(*this));
        break;
      case profiled_stealing:
        sched.reset(new profiled_steal(*this));
        break;
      case profiled_sharing:
        sched.reset(new profiled_share(*this));
        break;
    }
  }
  // initialize state for each module and give each module the opportunity
  // to influence the system configuration, e.g., by adding more types
  for (auto& mod : modules_)
    if (mod)
      mod->init(cfg);
  groups_.init(cfg);
  // spawn config and spawn servers (lazily to not access the scheduler yet)
  static constexpr auto Flags = hidden + lazy_init;
  spawn_serv_ = actor_cast<strong_actor_ptr>(spawn<Flags>(spawn_serv_impl));
  config_serv_ = actor_cast<strong_actor_ptr>(spawn<Flags>(config_serv_impl));
  // fire up remaining modules
  logger_.start();
  registry_.start();
  registry_.put(atom("SpawnServ"), spawn_serv_);
  registry_.put(atom("ConfigServ"), config_serv_);
  for (auto& mod : modules_)
    if (mod)
      mod->start();
  groups_.start();
}
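Unless the user already loaded a scheduler module, the constructor selects the coordinator from scheduler_policy and scheduler_enable_profiling. A sketch of steering that selection from user code, using only the config fields that appear above:

#include "caf/all.hpp"

int main() {
  caf::actor_system_config cfg;
  // picked up by the constructor above: "sharing" selects
  // coordinator<policy::work_sharing> instead of the work-stealing default
  cfg.scheduler_policy = caf::atom("sharing");
  // switches to the corresponding profiled_coordinator variant
  cfg.scheduler_enable_profiling = true;
  caf::actor_system sys{cfg};
}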