// Dispatch a request to the servant identified by `key`.  Sets up the POA
// upcall state, runs optional pre-invoke processing, performs the actual
// servant dispatch, and returns a TAO_Adapter::DS_* status.  `forward_to`
// may be set when the request must be forwarded to another object.
int TAO_Object_Adapter::dispatch_servant (const TAO::ObjectKey &key,
                                          TAO_ServerRequest &req,
                                          CORBA::Object_out forward_to)
{
  ACE_FUNCTION_TIMEPROBE (TAO_OBJECT_ADAPTER_DISPATCH_SERVANT_START);

  // This object is magical, i.e., it has a non-trivial constructor
  // and destructor.
  TAO::Portable_Server::Servant_Upcall servant_upcall (&this->orb_core_);

  // Set up state in the POA et al (including the POA Current), so
  // that we know that this servant is currently in an upcall.
  const char *operation = req.operation ();
  int result = servant_upcall.prepare_for_upcall (key, operation, forward_to);

  // Anything other than DS_OK (e.g. a forward or failure detected during
  // preparation) is returned to the caller unchanged.
  if (result != TAO_Adapter::DS_OK)
    return result;

  // Preprocess request.
  if (req.collocated ())
    {
      servant_upcall.pre_invoke_collocated_request ();
    }
  else
    {
      servant_upcall.pre_invoke_remote_request (req);
    }

  // Servant dispatch.
  {
    ACE_FUNCTION_TIMEPROBE (TAO_SERVANT_DISPATCH_START);
    do_dispatch (req, servant_upcall);
  }

#if TAO_HAS_INTERCEPTORS == 1
  // ServerInterceptor might have raised ForwardRequest. In case of
  // remote calls invocations the LocationForwardReply would have been
  // sent in earlier stage, but in colocal scenario no message is sent
  // and the LocationForward object must be passed over here to
  // calling operation's mem-space.
  if (req.collocated() && req.pi_reply_status () == PortableInterceptor::LOCATION_FORWARD)
    {
      forward_to = req.forward_location ();
      result = TAO_Adapter::DS_FORWARD;
    }
#endif

  return result;
}
// Invoke the named method on the wrapped Python object via do_dispatch.
// When the caller passes ppResult == nsnull, the Python result object is
// received into a local slot and released here; when the caller supplies a
// slot, ownership of the returned reference passes to the caller.
nsresult PyG_Base::InvokeNativeViaPolicyInternal( const char *szMethodName, PyObject **ppResult, const char *szFormat, va_list va )
{
	// Guard: need both a target Python object and a method name.
	if ( m_pPyObject == NULL )
		return NS_ERROR_NULL_POINTER;
	if ( szMethodName == NULL )
		return NS_ERROR_NULL_POINTER;

	// Redirect the result into a local slot when the caller doesn't want it.
	PyObject *localResult = nsnull;
	PyObject **resultSlot = ppResult;
	if (resultSlot == nsnull)
		resultSlot = &localResult;

	nsresult rv = do_dispatch(m_pPyObject, resultSlot, szMethodName, szFormat, va);

	// If localResult is still NULL the caller provided the buffer and we
	// must not touch it.  If non-NULL, *resultSlot == localResult and the
	// reference is ours to drop.
	Py_XDECREF(localResult);
	return rv;
}
// Kernel scheduler main loop — never returns.  Each iteration checks, in
// priority order: pending interrupt callbacks, then queued messages,
// otherwise the system is idle.  The ready flags are sampled with global
// interrupts disabled and re-enabled before doing any work.
void sched(void)
{
	ENABLE_GLOBAL_INTERRUPTS();
	ker_log_start();
	for(;;){
		SOS_MEASUREMENT_IDLE_END();
		// Disable interrupts while inspecting the ready state so the
		// checks are consistent.
		DISABLE_GLOBAL_INTERRUPTS();
		if (int_ready != 0) {
			// Interrupt work pending: service it first.
			ENABLE_GLOBAL_INTERRUPTS();
			handle_callback();
			continue;
		}
		if( schedpq.msg_cnt != 0 ) {
			// Messages queued: dispatch the next one.
			ENABLE_GLOBAL_INTERRUPTS();
			do_dispatch();
			continue;
		}
		// Nothing to do: mark the start of an idle period.
		SOS_MEASUREMENT_IDLE_START();
		ENABLE_GLOBAL_INTERRUPTS();
	}
}
// Request invocation of the given handler.  If the calling thread is already
// running this io_service, the handler is invoked inline; otherwise it is
// wrapped in a heap-allocated operation and handed to do_dispatch for later
// execution.
void task_io_service::dispatch(Handler& handler)
{
  if (thread_call_stack::contains(this))
  {
    // Already inside this io_service: run the handler now, under a full
    // memory fence.
    fenced_block b(fenced_block::full);
    asio_handler_invoke_helpers::invoke(handler, handler);
  }
  else
  {
    // Allocate and construct an operation to wrap the handler.
    typedef completion_handler<Handler> op;
    typename op::ptr p = { asio::detail::addressof(handler),
      asio_handler_alloc_helpers::allocate(
        sizeof(op), handler), 0 };
    p.p = new (p.v) op(handler);

    ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));

    do_dispatch(p.p);
    // Ownership of the operation has passed to do_dispatch; zero the ptr
    // members so p's destructor does not reclaim the memory.
    p.v = p.p = 0;
  }
}
// Request invocation of the given handler within the strand.  Runs it inline
// when already executing inside the strand; otherwise wraps it in an
// operation and either executes it immediately (if do_dispatch grants this
// thread the strand) or leaves it queued for the current strand owner.
void strand_service::dispatch(strand_service::implementation_type& impl, Handler& handler)
{
  // If we are already in the strand then the handler can run immediately.
  if (call_stack<strand_impl>::contains(impl))
  {
    fenced_block b(fenced_block::full);
    boost_asio_handler_invoke_helpers::invoke(handler, handler);
    return;
  }

  // Allocate and construct an operation to wrap the handler.
  typedef completion_handler<Handler> op;
  typename op::ptr p = { boost::asio::detail::addressof(handler),
    op::ptr::allocate(handler), 0 };
  p.p = new (p.v) op(handler);

  BOOST_ASIO_HANDLER_CREATION((this->context(), *p.p, "strand", impl, 0, "dispatch"));

  // do_dispatch reports whether this thread now owns the strand and must
  // run the wrapped handler itself.
  bool dispatch_immediately = do_dispatch(impl, p.p);
  operation* o = p.p;
  // Ownership of the operation has been transferred; prevent p's destructor
  // from reclaiming it.
  p.v = p.p = 0;

  if (dispatch_immediately)
  {
    // Indicate that this strand is executing on the current thread.
    call_stack<strand_impl>::context ctx(impl);

    // Ensure the next handler, if any, is scheduled on block exit.
    on_dispatch_exit on_exit = { &io_context_, impl };
    (void)on_exit;

    completion_handler<Handler>::do_complete(
      &io_context_, o, boost::system::error_code(), 0);
  }
}
// Forward a StampMessage, together with the network operation result that
// produced it, to the stamp-message handler collection.
void message_dispatcher::dispatch(const sopmq::shared::net::network_operation_result& result, StampMessage_ptr message)
{
    do_dispatch(_stampMessageHandlers, result, message);
}
// Forward a PublishResponseMessage, together with the network operation
// result that produced it, to the publish-response handler collection.
void message_dispatcher::dispatch(const sopmq::shared::net::network_operation_result& result, PublishResponseMessage_ptr message)
{
    do_dispatch(_publishResponseMessageHandlers, result, message);
}
// Forward a GossipMessage, together with the network operation result that
// produced it, to the gossip-message handler collection.
void message_dispatcher::dispatch(const sopmq::shared::net::network_operation_result& result, GossipMessage_ptr message)
{
    do_dispatch(_gossipMessageHandlers, result, message);
}
// Forward a GetChallengeMessage, together with the network operation result
// that produced it, to the get-challenge handler collection.
void message_dispatcher::dispatch(const sopmq::shared::net::network_operation_result& result, GetChallengeMessage_ptr message)
{
    do_dispatch(_getChallengeMessageHandlers, result, message);
}
// Forward a ConsumeResponseMessage, together with the network operation
// result that produced it, to the consume-response handler collection.
void message_dispatcher::dispatch(const sopmq::shared::net::network_operation_result& result, ConsumeResponseMessage_ptr message)
{
    do_dispatch(_consumeResponseMessageHandlers, result, message);
}
// Forward an AuthAckMessage, together with the network operation result that
// produced it, to the auth-ack handler collection.
void message_dispatcher::dispatch(const sopmq::shared::net::network_operation_result& result, AuthAckMessage_ptr message)
{
    do_dispatch(_authAckMessageHandlers, result, message);
}