Example #1
 inline void operator()(const actor_ptr& sender, const message_id& mid) const {
     CPPA_REQUIRE(rsn != exit_reason::not_exited);
     if (mid.is_request() && sender != nullptr) {
         sender->enqueue({nullptr, sender, mid.response_id()},
                         make_any_tuple(atom("EXITED"), rsn));
     }
 }
Example #2
void sync_request_bouncer::operator()(const strong_actor_ptr& sender,
                                      const message_id& mid) const {
  if (sender && mid.is_request())
    sender->enqueue(nullptr, mid.response_id(),
                    make_message(make_error(sec::request_receiver_down)),
                    // TODO: this breaks out of the execution unit
                    nullptr);
}
Example #3
void sync_request_bouncer::operator()(const actor_addr& sender,
                                      const message_id& mid) const {
    BOOST_ACTOR_REQUIRE(rsn != exit_reason::not_exited);
    if (sender && mid.is_request()) {
        auto ptr = detail::raw_access::get(sender);
        ptr->enqueue({invalid_actor_addr, ptr, mid.response_id()},
                     make_message(sync_exited_msg{sender, rsn}),
                     // TODO: this breaks out of the execution unit
                     nullptr);
    }
}
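All three bouncer generations (cppa, Boost.Actor, CAF) are invoked the same way; a minimal call-site sketch, mirroring how Example #4 below uses it (the exit reason here is an arbitrary placeholder):

// Hedged sketch: bounce a request aimed at an already-terminated actor;
// sender and mid come from the undeliverable message.
detail::sync_request_bouncer bouncer{exit_reason::user_shutdown};
bouncer(sender, mid); // enqueues an error/EXITED response only if mid.is_request()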
Example #4
 static message_id apply(message_id x) {
   CAF_IGNORE_UNUSED(x);
   auto result = make_message_id(message_id::upstream_message_category
                                 << message_id::category_offset);
   CAF_ASSERT(x.is_async() || x == result);
   return result;
 }
Example #5
 void enqueue(Actor* self, const actor_addr& sender, message_id mid,
        message& msg, execution_unit* eu) {
   auto e = self->new_mailbox_element(sender, mid, std::move(msg));
   switch (self->mailbox().enqueue(e)) {
     case detail::enqueue_result::unblocked_reader: {
       // re-schedule actor
       if (eu)
         eu->exec_later(self);
       else
         detail::singletons::get_scheduling_coordinator()->enqueue(
           self);
       break;
     }
     case detail::enqueue_result::queue_closed: {
       if (mid.is_request()) {
         detail::sync_request_bouncer f{self->exit_reason()};
         f(sender, mid);
       }
       break;
     }
     case detail::enqueue_result::success:
       // enqueued to a running actor's mailbox; nothing to do
       break;
   }
 }
Example #6
inline bool local_actor::awaits(message_id response_id) {
    CPPA_REQUIRE(response_id.is_response());
    return std::any_of(m_pending_responses.begin(),
                       m_pending_responses.end(),
                       [=](message_id other) {
                           return response_id == other;
                       });
}
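Because the predicate is plain equality, the std::any_of above reduces to std::find. A self-contained sketch of the same check (assuming m_pending_responses is a vector of IDs, which the snippet does not show; a plain integer stands in for message_id):

#include <algorithm>
#include <vector>

using toy_message_id = unsigned long long;

// Returns true if response_id is among the pending responses.
bool awaits(const std::vector<toy_message_id>& pending,
            toy_message_id response_id) {
    // any_of with an equality predicate is just find != end
    return std::find(pending.begin(), pending.end(), response_id)
           != pending.end();
}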
Example #7
optional<behavior&> behavior_stack::sync_handler(message_id expected_response) {
    if (expected_response.valid()) {
        auto e = m_elements.rend();
        auto i = find_if(m_elements.rbegin(), e, [=](element_type& val) {
            return val.second == expected_response;
        });
        if (i != e) return i->first;
    }
    return none;
}
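The reverse iteration (rbegin/rend) is what makes nested synchronous handlers shadow outer ones: the most recently pushed element that awaits the ID wins. A self-contained model of the pattern (int stands in for both the behavior and the message ID):

#include <algorithm>
#include <utility>
#include <vector>

using element_type = std::pair<int /*behavior*/, int /*awaited response id*/>;

// Returns the innermost (last pushed) handler awaiting `expected`, or nullptr.
int* innermost_handler(std::vector<element_type>& stack, int expected) {
    auto e = stack.rend();
    auto i = std::find_if(stack.rbegin(), e, [=](element_type& val) {
        return val.second == expected;
    });
    return i != e ? &i->first : nullptr;
}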
Example #8
response_promise::response_promise(strong_actor_ptr self,
                                   strong_actor_ptr source,
                                   forwarding_stack stages, message_id mid)
    : self_(std::move(self)),
      source_(std::move(source)),
      stages_(std::move(stages)),
      id_(mid) {
  // form an invalid request promise when initialized from a
  // response ID, since CAF always drops messages in this case
  if (mid.is_response()) {
    source_ = nullptr;
    stages_.clear();
  }
}
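A hedged illustration of what the guard does, using only the constructor above (self, source, stages, and mid are assumed valid values taken from an incoming request):

response_promise ok{self, source, stages, mid}; // left intact: not a response ID
// Same inputs, but a response ID: the guard clears source_ and stages_,
// leaving an inert promise that can never deliver anything.
response_promise dead{self, source, stages, mid.response_id()};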
Example #9
 void enqueue_impl(const actor_addr& sender, any_tuple msg, message_id id,
                   util::int_list<Is...>) {
     auto opt = m_map_args(std::move(msg));
     if (opt) {
         response_promise handle{this->address(), sender, id.response_id()};
         evnt_vec events;
         args_vec arguments;
         add_arguments_to_kernel<Ret>(events, arguments, m_result_size, 
                                      get_ref<Is>(*opt)...);
         auto cmd = make_counted<command<actor_facade, Ret>>(
             handle, this, 
             std::move(events), std::move(arguments),
             m_result_size, *opt
         );
         cmd->enqueue();
     } else {
         CPPA_LOGMF(CPPA_ERROR, "actor_facade::enqueue() tuple_cast failed.");
     }
 }
Example #10
bool instance::dispatch(const actor_addr& sender, const actor_addr& receiver,
                        message_id mid, const message& msg) {
  CAF_LOG_TRACE("");
  CAF_ASSERT(receiver.is_remote());
  auto path = lookup(receiver->node());
  if (! path) {
    notify<hook::message_sending_failed>(sender, receiver, mid, msg);
    return false;
  }
  auto writer = make_callback([&](serializer& sink) {
    msg.serialize(sink);
  });
  header hdr{message_type::dispatch_message, 0, mid.integer_value(),
             sender ? sender->node() : this_node(), receiver->node(),
             sender ? sender->id() : invalid_actor_id, receiver->id()};
  write(path->wr_buf, hdr, &writer);
  flush(*path);
  notify<hook::message_sent>(sender, path->next_hop, receiver, mid, msg);
  return true;
}
Example #11
bool instance::dispatch(execution_unit* ctx, const strong_actor_ptr& sender,
                        const std::vector<strong_actor_ptr>& forwarding_stack,
                        const strong_actor_ptr& receiver, message_id mid,
                        const message& msg) {
  CAF_LOG_TRACE(CAF_ARG(sender) << CAF_ARG(receiver)
                << CAF_ARG(mid) << CAF_ARG(msg));
  CAF_ASSERT(receiver && system().node() != receiver->node());
  auto path = lookup(receiver->node());
  if (! path) {
    notify<hook::message_sending_failed>(sender, receiver, mid, msg);
    return false;
  }
  auto writer = make_callback([&](serializer& sink) {
    sink << forwarding_stack << msg;
  });
  header hdr{message_type::dispatch_message, 0, 0, mid.integer_value(),
             sender ? sender->node() : this_node(), receiver->node(),
             sender ? sender->id() : invalid_actor_id, receiver->id()};
  write(ctx, path->wr_buf, hdr, &writer);
  flush(*path);
  notify<hook::message_sent>(sender, path->next_hop, receiver, mid, msg);
  return true;
}
Example #12
 void eq_impl(message_id mid, strong_actor_ptr sender,
              execution_unit* ctx, Ts&&... xs) {
   CAF_ASSERT(! mid.is_request());
   enqueue(std::move(sender), mid,
           make_message(std::forward<Ts>(xs)...), ctx);
 }
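The assertion rules out request IDs, so callers pass either an asynchronous (default-constructed) ID or a derived response ID. A hedged call-site sketch from inside the actor, reusing make_message_id from Example #4 (sender, ctx, and the atom payload are placeholders):

// A default/async message ID is not a request, so the assertion holds.
eq_impl(make_message_id(), std::move(sender), ctx, ok_atom::value);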
Example #13
 inline bool is_high_priority() const {
   return mid.is_high_priority();
 }
Example #14
  optional<message> invoke_fun(Actor* self, message& msg, message_id& mid,
                               Fun& fun,
                               MaybeResponseHdl hdl = MaybeResponseHdl{}) {
#   ifdef CAF_LOG_LEVEL
      auto msg_str = to_string(msg);
#   endif
    CAF_LOG_TRACE(CAF_MARG(mid, integer_value) << ", msg = " << msg_str);
    auto res = fun(msg); // might change mid
    CAF_LOG_DEBUG_IF(res, "actor did consume message: " << msg_str);
    CAF_LOG_DEBUG_IF(!res, "actor did ignore message: " << msg_str);
    if (!res) {
      return none;
    }
    if (res->empty()) {
      // make sure synchronous requests
      // always receive a response
      if (mid.is_request() && !mid.is_answered()) {
        CAF_LOG_WARNING("actor with ID " << self->id()
                        << " did not reply to a synchronous request message");
        auto fhdl = fetch_response_promise(self, hdl);
        if (fhdl) {
          fhdl.deliver(make_message(unit));
        }
      }
    } else {
      CAF_LOGF_DEBUG("res = " << to_string(*res));
      if (res->template has_types<atom_value, uint64_t>()
          && res->template get_as<atom_value>(0) == atom("MESSAGE_ID")) {
        CAF_LOG_DEBUG("message handler returned a message id wrapper");
        auto id = res->template get_as<uint64_t>(1);
        auto msg_id = message_id::from_integer_value(id);
        auto ref_opt = self->sync_handler(msg_id);
        // calls self->response_promise() if hdl is a dummy
        // argument; otherwise forwards hdl to reply to the
        // original request message
        auto fhdl = fetch_response_promise(self, hdl);
        if (ref_opt) {
          behavior cpy = *ref_opt;
          *ref_opt =
            cpy.add_continuation([=](message & intermediate)
                         ->optional<message> {
              if (!intermediate.empty()) {
                // do not use the lambda expression's type to
                // avoid recursive template instantiation
                behavior::continuation_fun f2 = [=](
                  message & m)->optional<message> {
                  return std::move(m);
                };
                auto mutable_mid = mid;
                // recursively call invoke_fun on the
                // result to correctly handle stuff like
                // sync_send(...).then(...).then(...)...
                return this->invoke_fun(self, intermediate,
                             mutable_mid, f2, fhdl);
              }
              return none;
            });
        }
        // reset res to prevent "outer" invoke_fun
        // from handling the result again
        res->reset();
      } else {
        // respond by using the result of 'fun'
        CAF_LOG_DEBUG("respond via response_promise");
        auto fhdl = fetch_response_promise(self, hdl);
        if (fhdl) {
          fhdl.deliver(std::move(*res));
          // inform caller about success
          return message{};
        }
      }
    }
    return res;
  }
Example #15
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
                        const strong_actor_ptr& sender, message_id mid,
                        message_view& mv, execution_unit* eu) {
  auto& content = mv.content();
  CAF_LOG_TRACE(CAF_ARG(mid) << CAF_ARG(content));
  if (content.match_elements<exit_msg>()) {
    // acquire second mutex as well
    std::vector<actor> workers;
    auto em = content.get_as<exit_msg>(0).reason;
    if (cleanup(std::move(em), eu)) {
      auto tmp = mv.move_content_to_message();
      // send exit messages *always* to all workers and clear vector afterwards
      // but first swap workers_ out of the critical section
      upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
      workers_.swap(workers);
      unique_guard.unlock();
      for (auto& w : workers)
        anon_send(w, tmp);
      unregister_from_system();
    }
    return true;
  }
  if (content.match_elements<down_msg>()) {
    // remove failed worker from pool
    auto& dm = content.get_as<down_msg>(0);
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), workers_.end(), dm.source);
    CAF_LOG_DEBUG_IF(i == last, "received down message for an unknown worker");
    if (i != last)
      workers_.erase(i);
    if (workers_.empty()) {
      planned_reason_ = exit_reason::out_of_workers;
      unique_guard.unlock();
      quit(eu);
    }
    return true;
  }
  if (content.match_elements<sys_atom, put_atom, actor>()) {
    auto& worker = content.get_as<actor>(2);
    worker->attach(default_attachable::make_monitor(worker.address(),
                                                    address()));
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.push_back(worker);
    return true;
  }
  if (content.match_elements<sys_atom, delete_atom, actor>()) {
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto& what = content.get_as<actor>(2);
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), last, what);
    if (i != last) {
      workers_.erase(i);
    }
    return true;
  }
  if (content.match_elements<sys_atom, get_atom>()) {
    auto cpy = workers_;
    guard.unlock();
    sender->enqueue(nullptr, mid.response_id(),
                    make_message(std::move(cpy)), eu);
    return true;
  }
  if (workers_.empty()) {
    guard.unlock();
    if (sender && mid.valid()) {
      // tell the client we have ignored this sync message by sending
      // an empty message back
      sender->enqueue(nullptr, mid.response_id(), message{}, eu);
    }
    return true;
  }
  return false;
}
Example #16
void blocking_actor::receive_impl(receive_cond& rcc,
                                  message_id mid,
                                  detail::blocking_behavior& bhvr) {
  CAF_LOG_TRACE(CAF_ARG(mid));
  // we start by iterating the cache and move on to mailbox elements afterwards
  cached_sequence seq1{mailbox().cache()};
  mailbox_sequence seq2{this, bhvr.timeout()};
  message_sequence_combinator seq{&seq1, &seq2};
  detail::default_invoke_result_visitor visitor{this};
  // read incoming messages until we have a match or a timeout
  for (;;) {
    // check loop pre-condition
    if (!rcc.pre())
      return;
    // the mailbox sequence is infinite, but at_end triggers the
    // transition from seq1 to seq2 once we have iterated our cache
    if (seq.at_end())
      CAF_RAISE_ERROR("reached the end of an infinite sequence");
    // reset the timeout each iteration
    if (!seq.await_value(true)) {
      // short-circuit "loop body"
      bhvr.nested.handle_timeout();
      if (!rcc.post())
        return;
      continue;
    }
    // skip messages in the loop body until we have a match
    bool skipped;
    bool timed_out;
    do {
      skipped = false;
      timed_out = false;
      auto& x = seq.value();
      // skip messages that don't match our message ID
      if ((mid.valid() && mid != x.mid)
          || (!mid.valid() && x.mid.is_response())) {
        skipped = true;
      } else {
        // blocking actors can use nested receives => restore current_element_
        auto prev_element = current_element_;
        current_element_ = &x;
        switch (bhvr.nested(visitor, x.content())) {
          case match_case::skip:
             skipped = true;
             break;
          default:
            break;
          case match_case::no_match: {
            auto sres = bhvr.fallback(*current_element_);
            // when dealing with response messages, there's either a match
            // on the first handler or we produce an error to
            // get a match on the second (error) handler
            if (sres.flag != rt_skip) {
              visitor.visit(sres);
            } else if (mid.valid()) {
             // invoke again with an unexpected_response error
             auto& old = *current_element_;
             auto err = make_error(sec::unexpected_response,
                                   old.move_content_to_message());
             mailbox_element_view<error> tmp{std::move(old.sender), old.mid,
                                             std::move(old.stages), err};
             current_element_ = &tmp;
             bhvr.nested(tmp.content());
            } else {
              skipped = true;
            }
          }
        }
        current_element_ = prev_element;
      }
      if (skipped) {
        seq.advance();
        if (seq.at_end())
          CAF_RAISE_ERROR("reached the end of an infinite sequence");
        if (!seq.await_value(false)) {
          timed_out = true;
        }
      }
    } while (skipped && !timed_out);
    if (timed_out)
      bhvr.nested.handle_timeout();
    else
      seq.erase_and_advance();
    // check loop post condition
    if (!rcc.post())
      return;
  }
}
Example #17
 handle_message_result handle_message(Actor* self, mailbox_element* node,
                                      Fun& fun, message_id awaited_response) {
   bool handle_sync_failure_on_mismatch = true;
   if (dptr()->hm_should_skip(node)) {
     return hm_skip_msg;
   }
   switch (this->filter_msg(self, node)) {
     case msg_type::normal_exit:
       CAF_LOG_DEBUG("dropped normal exit signal");
       return hm_drop_msg;
     case msg_type::expired_sync_response:
       CAF_LOG_DEBUG("dropped expired sync response");
       return hm_drop_msg;
     case msg_type::expired_timeout:
       CAF_LOG_DEBUG("dropped expired timeout message");
       return hm_drop_msg;
     case msg_type::inactive_timeout:
       CAF_LOG_DEBUG("skipped inactive timeout message");
       return hm_skip_msg;
     case msg_type::non_normal_exit:
       CAF_LOG_DEBUG("handled non-normal exit signal");
       // this message was handled
       // by calling self->quit(...)
       return hm_msg_handled;
     case msg_type::timeout: {
       CAF_LOG_DEBUG("handle timeout message");
       auto& tm = node->msg.get_as<timeout_msg>(0);
       self->handle_timeout(fun, tm.timeout_id);
       if (awaited_response.valid()) {
         self->mark_arrived(awaited_response);
         self->remove_handler(awaited_response);
       }
       return hm_msg_handled;
     }
     case msg_type::timeout_response:
       handle_sync_failure_on_mismatch = false;
       CAF_ANNOTATE_FALLTHROUGH;
     case msg_type::sync_response:
       CAF_LOG_DEBUG("handle as synchronous response: "
                << CAF_TARG(node->msg, to_string) << ", "
                << CAF_MARG(node->mid, integer_value) << ", "
                << CAF_MARG(awaited_response, integer_value));
       if (awaited_response.valid() && node->mid == awaited_response) {
         auto previous_node = dptr()->hm_begin(self, node);
         auto res = invoke_fun(self, node->msg, node->mid, fun);
         if (!res && handle_sync_failure_on_mismatch) {
           CAF_LOG_WARNING("sync failure occured in actor "
                    << "with ID " << self->id());
           self->handle_sync_failure();
         }
         self->mark_arrived(awaited_response);
         self->remove_handler(awaited_response);
         dptr()->hm_cleanup(self, previous_node);
         return hm_msg_handled;
       }
       return hm_cache_msg;
     case msg_type::ordinary:
       if (!awaited_response.valid()) {
         auto previous_node = dptr()->hm_begin(self, node);
         auto res = invoke_fun(self, node->msg, node->mid, fun);
         if (res) {
           dptr()->hm_cleanup(self, previous_node);
           return hm_msg_handled;
         }
         // no match (restore self members)
         dptr()->hm_revert(self, previous_node);
       }
       CAF_LOG_DEBUG_IF(awaited_response.valid(),
                 "ignored message; await response: "
                   << awaited_response.integer_value());
       return hm_cache_msg;
   }
   // should be unreachable
   CAF_CRITICAL("invalid message type");
 }
Example #18
 inline bool is_high_priority() const {
   return mid.category() == message_id::urgent_message_category;
 }
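Examples #13 and #18 are two generations of the same check: one asks the ID directly, the other compares the category bits. A self-contained toy model of the layout (the offset, mask, and category values are assumptions mirroring the constants named in Example #4 and may differ between versions):

#include <cstdint>

struct toy_message_id {
  // Assumed layout: two category bits stored below the response/answered flags.
  static constexpr std::uint64_t category_offset = 60;
  static constexpr std::uint64_t category_flag_mask = std::uint64_t{3}
                                                      << category_offset;
  static constexpr std::uint64_t urgent_message_category = 0;
  static constexpr std::uint64_t normal_message_category = 1;

  std::uint64_t bits;

  constexpr std::uint64_t category() const {
    return (bits & category_flag_mask) >> category_offset;
  }
  constexpr bool is_high_priority() const {
    return category() == urgent_message_category;
  }
};

static_assert(!toy_message_id{toy_message_id::normal_message_category
                              << toy_message_id::category_offset}
                 .is_high_priority(),
              "normal-priority IDs must fail the urgent check");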
Example #19
void basp_broker_state::deliver(const node_id& src_nid, actor_id src_aid,
                                strong_actor_ptr dest, message_id mid,
                                std::vector<strong_actor_ptr>& stages,
                                message& msg) {
  CAF_LOG_TRACE(CAF_ARG(src_nid) << CAF_ARG(src_aid) << CAF_ARG(dest)
                << CAF_ARG(msg) << CAF_ARG(mid));
  auto src = src_nid == this_node() ? system().registry().get(src_aid)
                                    : proxies().get_or_put(src_nid, src_aid);
  // Intercept link messages. Forwarding actor proxies signal linking
  // by sending a link_atom/unlink_atom message with src = dest.
  if (msg.type_token() == make_type_token<atom_value, strong_actor_ptr>()) {
    switch (static_cast<uint64_t>(msg.get_as<atom_value>(0))) {
      default:
        break;
      case link_atom::value.uint_value(): {
        if (src_nid != this_node()) {
          CAF_LOG_WARNING("received link message for an other node");
          return;
        }
        auto ptr = msg.get_as<strong_actor_ptr>(1);
        if (!ptr) {
          CAF_LOG_WARNING("received link message with invalid target");
          return;
        }
        if (!src) {
          CAF_LOG_DEBUG("received link for invalid actor, report error");
          anon_send(actor_cast<actor>(ptr),
                    make_error(sec::remote_linking_failed));
          return;
        }
        static_cast<actor_proxy*>(ptr->get())->local_link_to(src->get());
        return;
      }
      case unlink_atom::value.uint_value(): {
        if (src_nid != this_node()) {
          CAF_LOG_WARNING("received unlink message for an other node");
          return;
        }
        auto ptr = msg.get_as<strong_actor_ptr>(1);
        if (!ptr) {
          CAF_LOG_DEBUG("received unlink message with invalid target");
          return;
        }
        if (!src) {
          CAF_LOG_DEBUG("received unlink for invalid actor, report error");
          return;
        }
        static_cast<actor_proxy*>(ptr->get())->local_unlink_from(src->get());
        return;
      }
    }
  }
  if (!dest) {
    auto rsn = exit_reason::remote_link_unreachable;
    CAF_LOG_INFO("cannot deliver message, destination not found");
    self->parent().notify<hook::invalid_message_received>(src_nid, src,
                                                          invalid_actor_id,
                                                          mid, msg);
    if (mid.valid() && src) {
      detail::sync_request_bouncer srb{rsn};
      srb(src, mid);
    }
    return;
  }
  self->parent().notify<hook::message_received>(src_nid, src, dest, mid, msg);
  dest->enqueue(make_mailbox_element(std::move(src), mid, std::move(stages),
                                      std::move(msg)),
                nullptr);
}
Example #20
bool actor_pool::filter(upgrade_lock<detail::shared_spinlock>& guard,
                        const actor_addr& sender, message_id mid,
                        const message& msg, execution_unit* eu) {
  auto rsn = planned_reason_;
  if (rsn != caf::exit_reason::not_exited) {
    guard.unlock();
    if (mid.valid()) {
      detail::sync_request_bouncer srq{rsn};
      srq(sender, mid);
    }
    return true;
  }
  if (msg.match_elements<exit_msg>()) {
    std::vector<actor> workers;
    // send exit messages *always* to all workers and clear vector afterwards
    // but first swap workers_ out of the critical section
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.swap(workers);
    planned_reason_ = msg.get_as<exit_msg>(0).reason;
    unique_guard.unlock();
    for (auto& w : workers) {
      anon_send(w, msg);
    }
    quit();
    return true;
  }
  if (msg.match_elements<down_msg>()) {
    // remove failed worker from pool
    auto& dm = msg.get_as<down_msg>(0);
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), workers_.end(), dm.source);
    if (i != last) {
      workers_.erase(i);
    }
    if (workers_.empty()) {
      planned_reason_ = exit_reason::out_of_workers;
      unique_guard.unlock();
      quit();
    }
    return true;
  }
  if (msg.match_elements<sys_atom, put_atom, actor>()) {
    auto& worker = msg.get_as<actor>(2);
    if (worker == invalid_actor) {
      return true;
    }
    worker->attach(default_attachable::make_monitor(address()));
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    workers_.push_back(worker);
    return true;
  }
  if (msg.match_elements<sys_atom, delete_atom, actor>()) {
    upgrade_to_unique_lock<detail::shared_spinlock> unique_guard{guard};
    auto& what = msg.get_as<actor>(2);
    auto last = workers_.end();
    auto i = std::find(workers_.begin(), last, what);
    if (i != last) {
      workers_.erase(i);
    }
    return true;
  }
  if (msg.match_elements<sys_atom, get_atom>()) {
    auto cpy = workers_;
    guard.unlock();
    actor_cast<abstract_actor*>(sender)->enqueue(invalid_actor_addr,
                                                 mid.response_id(),
                                                 make_message(std::move(cpy)),
                                                 eu);
    return true;
  }
  if (workers_.empty()) {
    guard.unlock();
    if (sender != invalid_actor_addr && mid.valid()) {
      // tell the client we have ignored this sync message by sending
      // an empty message back
      auto ptr = actor_cast<abstract_actor_ptr>(sender);
      ptr->enqueue(invalid_actor_addr, mid.response_id(), message{}, eu);
    }
    return true;
  }
  return false;
}
Example #21
response_promise::response_promise(const actor_addr& from,
                                   const actor_addr& to,
                                   const message_id& id)
: m_from(from), m_to(to), m_id(id) {
    CPPA_REQUIRE(id.is_response() || !id.valid());
}
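The precondition matches the newer constructor in Example #8: a promise only makes sense for answering a request, so its ID must be a derived response ID (or invalid for the default case). A hedged construction mirroring Example #9 (the names are placeholders for the responder's address, the requester, and the incoming request ID):

// id.is_response() holds for any ID derived via response_id(),
// satisfying the CPPA_REQUIRE above.
response_promise handle{self_address, sender_address, request_id.response_id()};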