Example #1
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
                        const strong_actor_ptr& sender, message_id mid,
                        message_view& mv, execution_unit* eu) {
  auto& content = mv.content();
  CAF_LOG_TRACE(CAF_ARG(mid) << CAF_ARG(content));
  if (content.match_elements<exit_msg>()) {
    // acquire second mutex as well
    std::vector<actor> workers;
    auto em = content.get_as<exit_msg>(0).reason;
    if (cleanup(std::move(em), eu)) {
      auto tmp = mv.move_content_to_message();
      // always send the exit message to all workers and clear the vector
      // afterwards, but first swap workers_ out of the critical section
      upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
      workers_.swap(workers);
      unique_guard.unlock();
      for (auto& w : workers)
        anon_send(w, tmp);
      unregister_from_system();
    }
    return true;
  }
  if (content.match_elements<down_msg>()) {
    // remove failed worker from pool
    auto& dm = content.get_as<down_msg>(0);
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), workers_.end(), dm.source);
    CAF_LOG_DEBUG_IF(i == last, "received down message for an unknown worker");
    if (i != last)
      workers_.erase(i);
    if (workers_.empty()) {
      planned_reason_ = exit_reason::out_of_workers;
      unique_guard.unlock();
      quit(eu);
    }
    return true;
  }
  if (content.match_elements<sys_atom, put_atom, actor>()) {
    auto& worker = content.get_as<actor>(2);
    worker->attach(default_attachable::make_monitor(worker.address(),
                                                    address()));
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.push_back(worker);
    return true;
  }
  if (content.match_elements<sys_atom, delete_atom, actor>()) {
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto& what = content.get_as<actor>(2);
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), last, what);
    if (i != last) {
      workers_.erase(i);
    }
    return true;
  }
  if (content.match_elements<sys_atom, get_atom>()) {
    auto cpy = workers_;
    guard.unlock();
    sender->enqueue(nullptr, mid.response_id(),
                    make_message(std::move(cpy)), eu);
    return true;
  }
  if (workers_.empty()) {
    guard.unlock();
    if (sender && mid.valid()) {
      // tell the client we have ignored this sync message by sending
      // an empty message back
      sender->enqueue(nullptr, mid.response_id(), message{}, eu);
    }
    return true;
  }
  return false;
}
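
For context, a minimal driver for the newer interface shown in Example #1 (an actor_system-based CAF release in the 0.15/0.16 range) might look as follows. This is a sketch, not code from the library: the worker behavior, pool size, and timeout are made up, and it assumes the documented actor_pool::make factory plus the sys_atom control messages handled by the filter above.

#include <chrono>
#include <vector>

#include "caf/all.hpp"

using namespace caf;

// illustrative worker: doubles every integer it receives
behavior doubling_worker() {
  return {
    [](int x) { return x * 2; }
  };
}

void drive_pool(actor_system& sys) {
  scoped_execution_unit ctx{&sys};
  // create a round-robin pool with three workers
  auto pool = actor_pool::make(&ctx, 3,
                               [&] { return sys.spawn(doubling_worker); },
                               actor_pool::round_robin());
  // sys_atom + put_atom + actor adds a worker (the put branch of the filter)
  auto extra = sys.spawn(doubling_worker);
  anon_send(pool, sys_atom::value, put_atom::value, extra);
  // sys_atom + delete_atom + actor removes it again (the delete branch)
  anon_send(pool, sys_atom::value, delete_atom::value, extra);
  // sys_atom + get_atom requests a copy of the current worker list (the get branch)
  scoped_actor self{sys};
  self->request(pool, std::chrono::seconds(1),
                sys_atom::value, get_atom::value).receive(
    [](const std::vector<actor>& workers) {
      // inspect the returned workers here
      static_cast<void>(workers);
    },
    [](error&) {
      // request failed or timed out
    });
  // an exit_msg makes the pool forward the exit to all workers and shut down
  anon_send_exit(pool, exit_reason::user_shutdown);
}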
Example #2
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
                        const actor_addr& sender, message_id mid,
                        const message& msg, execution_unit* eu) {
  auto rsn = planned_reason_;
  if (rsn != caf::exit_reason::not_exited) {
    guard.unlock();
    if (mid.valid()) {
      detail::sync_request_bouncer srq{rsn};
      srq(sender, mid);
    }
    return true;
  }
  if (msg.match_elements<exit_msg>()) {
    std::vector<actor> workers;
    // always send the exit message to all workers and clear the vector
    // afterwards, but first swap workers_ out of the critical section
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.swap(workers);
    planned_reason_ = msg.get_as<exit_msg>(0).reason;
    unique_guard.unlock();
    for (auto& w : workers) {
      anon_send(w, msg);
    }
    quit();
    return true;
  }
  if (msg.match_elements<down_msg>()) {
    // remove failed worker from pool
    auto& dm = msg.get_as<down_msg>(0);
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), workers_.end(), dm.source);
    if (i != last) {
      workers_.erase(i);
    }
    if (workers_.empty()) {
      planned_reason_ = exit_reason::out_of_workers;
      unique_guard.unlock();
      quit();
    }
    return true;
  }
  if (msg.match_elements<sys_atom, put_atom, actor>()) {
    auto& worker = msg.get_as<actor>(2);
    if (worker == invalid_actor) {
      return true;
    }
    worker->attach(default_attachable::make_monitor(address()));
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.push_back(worker);
    return true;
  }
  if (msg.match_elements<sys_atom, delete_atom, actor>()) {
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto& what = msg.get_as<actor>(2);
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), last, what);
    if (i != last) {
      workers_.erase(i);
    }
    return true;
  }
  if (msg.match_elements<sys_atom, get_atom>()) {
    auto cpy = workers_;
    guard.unlock();
    actor_cast<abstract_actor*>(sender)->enqueue(invalid_actor_addr,
                                                 mid.response_id(),
                                                 make_message(std::move(cpy)),
                                                 eu);
    return true;
  }
  if (workers_.empty()) {
    guard.unlock();
    if (sender != invalid_actor_addr && mid.valid()) {
      // tell the client we have ignored this sync message by sending
      // an empty message back
      auto ptr = actor_cast<abstract_actor_ptr>(sender);
      ptr->enqueue(invalid_actor_addr, mid.response_id(), message{}, eu);
    }
    return true;
  }
  return false;
}
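
Example #2 reflects the older, pre-actor_system API (free-standing spawn, actor_addr, invalid_actor). A comparable sketch against that interface, again with illustrative names and with the caveat that signatures changed between releases, could look roughly like this:

#include "caf/all.hpp"

using namespace caf;

// illustrative worker: doubles every integer it receives
behavior doubling_worker() {
  return {
    [](int x) { return x * 2; }
  };
}

void drive_pool() {
  // create a round-robin pool with three workers
  auto pool = actor_pool::make(3, [] { return spawn(doubling_worker); },
                               actor_pool::round_robin());
  // add and remove a worker via the sys_atom branches of the filter
  auto extra = spawn(doubling_worker);
  anon_send(pool, sys_atom::value, put_atom::value, extra);
  anon_send(pool, sys_atom::value, delete_atom::value, extra);
  // the exit message is forwarded to every worker before the pool quits
  anon_send_exit(pool, exit_reason::user_shutdown);
  await_all_actors_done();
  shutdown();
}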