/// Bind the dispatcher with the request id. int TAO_Muxed_TMS::bind_dispatcher (CORBA::ULong request_id, ACE_Intrusive_Auto_Ptr<TAO_Reply_Dispatcher> rd) { ACE_GUARD_RETURN (ACE_Lock, ace_mon, *this->lock_, -1); if (rd == 0) { if (TAO_debug_level > 0) { TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::bind_dispatcher, ") ACE_TEXT ("null reply dispatcher\n"))); } return 0; } int const result = this->dispatcher_table_.bind (request_id, rd); if (result != 0) { if (TAO_debug_level > 0) TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::bind_dispatcher, ") ACE_TEXT ("bind dispatcher failed: result = %d, request id = %d\n"), result, request_id)); return -1; } return 0; }
/// Post-initialization hook: resolve the RTCORBA::Current and hand it
/// to the scheduler's current object.
void
TAO_RTScheduler_ORB_Initializer::post_init (PortableInterceptor::ORBInitInfo_ptr info)
{
  // @@ This is busted. TAO_ORBInitInfo should do proper reference
  // counting.
  // Narrow to a TAO_ORBInitInfo object to get access to the
  // orb_core() TAO extension.
  //TAO_ORBInitInfo_var tao_info = TAO_ORBInitInfo::_narrow (info
  //                                                         );

  if (TAO_debug_level > 0)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "In post_init\n"));

  // Resolve the RTCORBA current object via the standard
  // initial-reference mechanism.
  CORBA::Object_var rt_current_obj =
    info->resolve_initial_references (TAO_OBJID_RTCURRENT);

  RTCORBA::Current_var rt_current =
    RTCORBA::Current::_narrow (rt_current_obj.in ());

  // Failing to narrow is fatal for the RT scheduler setup.
  if (CORBA::is_nil (rt_current.in ()))
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     "(%P|%t) ::post_init\n"
                     "(%P|%t) Unable to narrow to RTCORBA::Current\n"));

      throw ::CORBA::INTERNAL ();
    }

  this->current_->rt_current (rt_current.in ());
}
/// Linearly map a CORBA priority onto the configured DSCP table and
/// return the resulting network priority.  Always reports success.
CORBA::Boolean
TAO_Linear_Network_Priority_Mapping::to_network (
  RTCORBA::Priority corba_priority,
  RTCORBA::NetworkPriority &network_priority)
{
  if (TAO_debug_level)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "TAO_Linear_Network_Priority_Mapping::to_network corba_priority %d\n",
                   corba_priority));

  // Fraction of the CORBA priority range covered by this priority,
  // scaled up to the number of slots in the DSCP table.
  int const slot_count = sizeof (dscp) / sizeof (int);
  double const fraction =
    (corba_priority - RTCORBA::minPriority)
    / double (RTCORBA::maxPriority - RTCORBA::minPriority);

  int index = static_cast<int> (fraction * slot_count);

  // The maximum CORBA priority would index one past the end; clamp.
  if (index == slot_count)
    --index;

  network_priority = dscp[index];

  if (TAO_debug_level)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "TAO_Linear_Network_Priority_Mapping::to_network = %x\n",
                   network_priority));

  return 1;
}
/// Marshal a valuetype repository id, emitting an indirection to a
/// previously written occurrence when possible (saves space and keeps
/// sharing semantics).
CORBA::Boolean
CORBA::ValueBase::_tao_write_repository_id (TAO_OutputCDR &strm,
                                            ACE_CString& id)
{
#ifdef TAO_HAS_VALUETYPE_OUT_INDIRECTION
  // Ensure the stream carries a repository-id map for recording and
  // looking up already-marshaled ids.
  VERIFY_MAP (TAO_OutputCDR, repo_id_map, Repo_Id_Map);
  char* pos = 0;

  if (strm.get_repo_id_map ()->get()->find (id, pos) == 0)
    {
      // Already written: emit the indirection tag followed by a
      // negative offset back to the earlier occurrence.
      if (!strm.write_long (TAO_OBV_GIOP_Flags::Indirection_tag))
        {
          return false;
        }

      CORBA::Long offset= -strm.offset (pos);

      if (TAO_debug_level)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - %N:%l ValueBase::_tao_write_repository_id, id %C indirection %d\n"),
                         id.c_str(), offset));
        }

      if (!strm.write_long (offset))
        {
          return false;
        }
    }
  else
    {
      // First occurrence: align before recording the write position
      // (keeps the recorded position consistent with what a reader
      // computes), then remember it and write the id inline.
      if (strm.align_write_ptr (ACE_CDR::LONG_SIZE) != 0)
        {
          throw CORBA::INTERNAL ();
        }

      if (strm.get_repo_id_map ()->get ()->bind (id, strm.current()->wr_ptr ()) != 0)
        {
          throw CORBA::INTERNAL ();
        }

      if (TAO_debug_level)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - %N:%l ValueBase::_tao_write_repository_id, bound %C - %x\n"),
                         id.c_str (), strm.current()->wr_ptr ()));
        }

      if (! strm.write_string (id.c_str ()))
        {
          return false;
        }
    }
#else
  // Indirection support compiled out: always write the id inline.
  if (! strm.write_string (id.c_str ()))
    {
      return 0;
    }
#endif

  return 1;
}
/// Core input-event processing shared by the reactor upcalls: runs the
/// pre/post I/O hooks around Transport::handle_input and manages the
/// handle-resumption policy.
int
TAO_Connection_Handler::handle_input_internal (
  ACE_HANDLE h, ACE_Event_Handler * eh)
{
  // Let the transport know that it is used
  (void) this->transport ()->update_transport ();

  // Grab the transport id now and use the cached value for printing
  // since the transport could disappear by the time the thread
  // returns.
  size_t const t_id = this->transport ()->id ();

  if (TAO_debug_level > 6)
    {
      ACE_HANDLE const handle = eh->get_handle();
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - Connection_Handler[%d]::handle_input_internal, "
                     "handle = %d/%d\n",
                     t_id, handle, h));
    }

  // Scoped helper controlling when/whether the handle is resumed in
  // the reactor once this upcall completes.
  TAO_Resume_Handle resume_handle (this->orb_core (), eh->get_handle ());

  int return_value = 0;
  // The pre-I/O hook may short-circuit the upcall entirely.
  this->pre_io_hook (return_value);
  if (return_value != 0)
    return return_value;

  return_value = this->transport ()->handle_input (resume_handle);

  this->pos_io_hook (return_value);

  // Bug 1647; might need to change resume_handle's flag or
  // change handle_input return value.
  resume_handle.handle_input_return_value_hook(return_value);

  if (TAO_debug_level > 6)
    {
      ACE_HANDLE const handle = eh->get_handle ();
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - Connection_Handler[%d]::handle_input_internal, "
                     "handle = %d/%d, retval = %d\n",
                     t_id, handle, h, return_value));
    }

  // On error keep the handle suspended -- it must not be resumed in
  // the reactor while the connection is being torn down.
  if (return_value == -1)
    {
      resume_handle.set_flag (TAO_Resume_Handle::TAO_HANDLE_LEAVE_SUSPENDED);
    }

  return return_value;
}
/// Enqueue a request for the dynamic thread pool.  Returns false when
/// the task is not accepting requests (e.g. queue depth exceeded).
/// Uses two separate locks to keep the queue and the worker-wakeup
/// critical sections small.
bool
TAO_DTP_Task::add_request (TAO::CSD::TP_Request* request)
{
  {
    // --- Critical section 1: the request queue ---
    ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, guard, this->queue_lock_, false);

    // Count the request tentatively; undone below if it is rejected.
    ++this->num_queue_requests_;

    // A depth limit of 0 means "unlimited"; otherwise exceeding the
    // limit turns off request acceptance.
    if ((this->num_queue_requests_ > this->max_request_queue_depth_) &&
        (this->max_request_queue_depth_ != 0))
      {
        this->accepting_requests_ = false;
      }

    if (!this->accepting_requests_)
      {
        if (TAO_debug_level > 4)
          {
            TAOLIB_DEBUG ((LM_DEBUG,
                           ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() ")
                           ACE_TEXT ("not accepting requests.\n")
                           ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() ")
                           ACE_TEXT ("num_queue_requests_ : [%d]\n")
                           ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() ")
                           ACE_TEXT ("max_request_queue_depth_ : [%d]\n"),
                           this->num_queue_requests_,
                           this->max_request_queue_depth_));
          }

        // Roll back the tentative count for the rejected request.
        --this->num_queue_requests_;
        return false;
      }

    // We have made the decision that the request is going to be placed upon
    // the queue_. Inform the request that it is about to be placed into
    // a request queue. Some requests may not need to do anything in
    // preparation of being placed into a queue. Others, however, may need
    // to perfom a "clone" operation on some underlying request data before
    // the request can be properly placed into a queue.
    request->prepare_for_queue();

    this->queue_.put(request);
  }
  {
    // --- Critical section 2: wake up a worker thread ---
    ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, guard, this->work_lock_, false);
    this->check_queue_ = true;
    this->work_available_.signal ();
    if (TAO_debug_level > 4 )
      {
        TAOLIB_DEBUG((LM_DEBUG,
                      ACE_TEXT ("TAO (%P|%t) - DTP_Task::add_request() - ")
                      ACE_TEXT ("work available\n")));
      }
  }
  return true;
}
/// Handle a reply timeout for @a request_id: unbind its reply
/// dispatcher and notify it.  Returns 0 in all "reply is gone" cases.
int
TAO_Muxed_TMS::reply_timed_out (CORBA::ULong request_id)
{
  int result = 0;
  ACE_Intrusive_Auto_Ptr<TAO_Reply_Dispatcher> rd(0);

  // Grab the reply dispatcher for this id.
  {
    ACE_GUARD_RETURN (ACE_Lock, ace_mon, *this->lock_, -1);
    result = this->dispatcher_table_.unbind (request_id, rd);
  }

  if (result == 0 && rd)
    {
      if (TAO_debug_level > 8)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::reply_timed_out, ")
                         ACE_TEXT ("id = %d\n"),
                         request_id));
        }

      // Do not move it outside the scope of the lock. A follower thread
      // could have timedout unwinding the stack and the reply
      // dispatcher, and that would mean the present thread could be left
      // with a dangling pointer and may crash. To safeguard against such
      // cases we dispatch with the lock held.
      // Dispatch the reply.
      rd->reply_timed_out ();
    }
  else
    {
      if (TAO_debug_level > 0)
        TAOLIB_DEBUG ((LM_DEBUG,
                       ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::reply_timed_out, ")
                       ACE_TEXT ("unbind dispatcher failed, id %d: result = %d\n"),
                       request_id,
                       result));

      // Result = 0 means that the mux strategy was not able
      // to find a registered reply handler, either because the reply
      // was not our reply - just forget about it - or it was ours, but
      // the reply timed out - just forget about the reply.
      result = 0;
    }

  return result;
}
/// Dump the state of the singleton (no-op unless ACE_HAS_DUMP builds).
template <class TYPE, class ACE_LOCK> void
TAO_Singleton<TYPE, ACE_LOCK>::dump (void)
{
#if defined (ACE_HAS_DUMP)
  ACE_TRACE ("TAO_Singleton<TYPE, ACE_LOCK>::dump");

#if !defined (ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES)
  // Print the address of the underlying instance pointer.
  TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("instance_ = %@"),
                 TAO_Singleton<TYPE, ACE_LOCK>::instance_i ()));
  TAOLIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_LACKS_STATIC_DATA_MEMBER_TEMPLATES */
#endif /* ACE_HAS_DUMP */
}
/// POA activation hook: lazily load the thread-pool configuration from
/// the DTP_Config_Registry service and start the worker task.
/// Returns false if the config registry cannot be located or the task
/// fails to open.
bool
TAO_DTP_POA_Strategy::poa_activated_event_i (TAO_ORB_Core& orb_core)
{
  // Run the pool's threads under the ORB's thread manager.
  this->dtp_task_.thr_mgr (orb_core.thr_mgr ());

  // Activates the worker threads, and waits until all have been started.
  if (!this->config_initialized_)
    {
      TAO_DTP_Config_Registry * config_repo =
        ACE_Dynamic_Service<TAO_DTP_Config_Registry>::instance
        ("DTP_Config_Registry");

      if (config_repo == 0)
        {
          if (TAO_debug_level > 0)
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT ("TAO (%P|%t) - DTP_POA_Strategy - ")
                             ACE_TEXT ("cannot retrieve configuration repo\n")));
            }
          return false;
        }
      else
        {
          // A missing named entry is not fatal: config_entry keeps its
          // default-constructed values and only a warning is logged.
          TAO_DTP_Definition config_entry;
          if (!config_repo->find (this->dynamic_tp_config_name_, config_entry))
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT ("TAO (%P|%t) - DTP_POA_Strategy - ")
                             ACE_TEXT ("warning: config not found...using ")
                             ACE_TEXT ("defaults!\n")));
            }
          this->set_dtp_config (config_entry);
          //this->dtp_task_.set_init_pool_threads(config_entry.init_threads_);
          //this->dtp_task_.set_min_pool_threads(config_entry.min_threads_);
          //this->dtp_task_.set_max_pool_threads(config_entry.max_threads_);
          //this->dtp_task_.set_thread_idle_time(config_entry.timeout_);
          //this->dtp_task_.set_thread_stack_size(config_entry.stack_size_);
          //this->dtp_task_.set_max_request_queue_depth(config_entry.queue_depth_);
        }
    }

  // open() starts the task; zero means the pool came up successfully.
  return (this->dtp_task_.open () == 0);
}
/// Receive up to @a len bytes from the DIOP (UDP) peer.  Returns the
/// byte count, 0 for a would-block condition, and -1 on error.  The
/// sender's address is remembered for addressing the reply.
ssize_t
TAO_DIOP_Transport::recv (char *buf,
                          size_t len,
                          const ACE_Time_Value * /* max_wait_time */)
{
  ACE_INET_Addr from_addr;

  ssize_t const n = this->connection_handler_->peer ().recv (buf, len, from_addr);

  if (TAO_debug_level > 0)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - DIOP_Transport::recv, received %d bytes from %C:%d %d\n",
                     n,
                     from_addr.get_host_name (),
                     from_addr.get_port_number (),
                     ACE_ERRNO_GET));
    }

  // Most of the errors handling is common for
  // Now the message has been read
  if (n == -1 && TAO_debug_level > 4)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) - DIOP_Transport::recv, %p\n"),
                     ACE_TEXT ("TAO - read message failure ")
                     ACE_TEXT ("recv ()\n")));
    }

  // Error handling
  if (n == -1)
    {
      // Would-block is not an error here; report "no data yet".
      if (errno == EWOULDBLOCK)
        return 0;

      return -1;
    }

  // @@ What are the other error handling here??
  else if (n == 0)
    {
      // Zero-byte read is mapped to an error.
      return -1;
    }

  // Remember the from addr to eventually use it as remote
  // addr for the reply.
  this->connection_handler_->addr (from_addr);

  return n;
}
int TAO_IIOP_Connection_Handler::set_tos (int tos) { if (tos != this->dscp_codepoint_) { int result = 0; #if defined (ACE_HAS_IPV6) ACE_INET_Addr local_addr; if (this->peer ().get_local_addr (local_addr) == -1) return -1; else if (local_addr.get_type () == AF_INET6) # if !defined (IPV6_TCLASS) // IPv6 defines option IPV6_TCLASS for specifying traffic class/priority // but not many implementations yet (very new;-). { if (TAO_debug_level) { TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT("TAO (%P|%t) - IIOP_Connection_Handler::") ACE_TEXT("set_dscp_codepoint -> IPV6_TCLASS not supported yet\n"))); } return 0; } # else /* !IPV6_TCLASS */ result = this->peer ().set_option (IPPROTO_IPV6, IPV6_TCLASS, (int *) &tos , (int) sizeof (tos)); else
/// Reactor timeout upcall.  The Connector uses this to signal that the
/// connection timed out, so the handler is closed and its leader/
/// follower state reset.
int
TAO_IIOP_Connection_Handler::handle_timeout (const ACE_Time_Value &,
                                             const void *)
{
  // Using this to ensure this instance will be deleted (if necessary)
  // only after reset_state(). Without this, when this refcount==1 -
  // the call to close() will cause a call to remove_reference() which
  // will delete this. At that point this->reset_state() is in no
  // man's territory and that causes SEGV on some platforms (Windows!)
  TAO_Auto_Reference<TAO_IIOP_Connection_Handler> safeguard (*this);

  // NOTE: Perhaps not the best solution, as it feels like the upper
  // layers should be responsible for this?

  // We don't use this upcall for I/O. This is only used by the
  // Connector to indicate that the connection timedout. Therefore,
  // we should call close()
  int const ret = this->close ();
  this->reset_state (TAO_LF_Event::LFS_TIMEOUT);

  if (TAO_debug_level > 9)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - TAO_IIOP_Connection_Handler[%d]::"
                     "handle_timeout reset state to LFS_TIMEOUT\n",
                     this->transport ()-> id()));
    }

  return ret;
}
int TAO_IIOP_Connection_Handler::close_connection (void) { // To maintain maximum compatibility, we only set this socket option // if the user has provided a linger timeout. int const linger = this->orb_core()->orb_params()->linger (); if (linger != -1) { struct linger lval; lval.l_onoff = 1; lval.l_linger = (u_short)linger; if (this->peer ().set_option(SOL_SOCKET, SO_LINGER, (void*) &lval, sizeof (lval)) == -1) { if (TAO_debug_level) { TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) Unable to set ") ACE_TEXT ("SO_LINGER on %d\n"), this->peer ().get_handle ())); } } } return this->close_connection_eh (this); }
int TAO_SCIOP_Endpoint::set (const ACE_INET_Addr &addr, int use_dotted_decimal_addresses) { char tmp_host[MAXHOSTNAMELEN + 1]; if (use_dotted_decimal_addresses || addr.get_host_name (tmp_host, sizeof (tmp_host)) != 0) { const char *tmp = addr.get_host_addr (); if (tmp == 0) { if (TAO_debug_level > 0) TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("\n\nTAO (%P|%t) ") ACE_TEXT ("SCIOP_Endpoint::set ") ACE_TEXT ("- %p\n\n"), ACE_TEXT ("cannot determine hostname"))); return -1; } else this->host_ = tmp; } else this->host_ = CORBA::string_dup (tmp_host); this->port_ = addr.get_port_number(); return 0; }
/// Enqueue @a request for a worker thread; returns false when the task
/// has stopped accepting work.
bool
TAO::CSD::TP_Task::add_request(TP_Request* request)
{
  ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, guard, this->lock_, false);

  // Reject outright when the task is no longer taking requests.
  if (!this->accepting_requests_)
    {
      TAOLIB_DEBUG((LM_DEBUG,"(%P|%t) TP_Task::add_request() - "
                    "not accepting requests\n"));
      return false;
    }

  // The request will definitely be queued.  Give it a chance to get
  // ready: some request types must "clone" underlying data before
  // they can safely live in a queue, others need do nothing.
  request->prepare_for_queue();

  this->queue_.put(request);

  // Wake a worker thread to service the new entry.
  this->work_available_.signal();

  return true;
}
/// Resolve a codebase-URL indirection: read the (negative) offset and
/// look up the previously unmarshaled URL at that stream position.
CORBA::Boolean
CORBA::ValueBase::_tao_unmarshal_codebase_url_indirection (TAO_InputCDR &strm,
                                                           ACE_CString& codebase_url)
{
  // An indirection must carry a negative offset; anything else means
  // this is not a valid indirection.
  CORBA::Long offset = 0;
  if (!strm.read_long (offset) || offset >= 0)
    {
      return false;
    }

  // Absolute position referenced by the offset; the offset is taken
  // relative to the start of the long we just consumed.
  void* pos = strm.rd_ptr () + offset - sizeof (CORBA::Long);

  if (strm.get_codebase_url_map()->get()->find (pos, codebase_url) != 0)
    {
      // No URL recorded at that position: the stream is corrupt.
      throw CORBA::INTERNAL ();
    }
  else if (TAO_debug_level)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) - %N:%l ValueBase::_tao_unmarshal_codebase_url_indirection, found %x=%C\n"),
                     pos, codebase_url.c_str ()));
    }

  return 1;
}
// Generate and return an unique request id for the current // invocation. We can actually return a predecided ULong, since we // allow only one invocation over this connection at a time. CORBA::ULong TAO_Exclusive_TMS::request_id (void) { ++this->request_id_generator_; // if TAO_Transport::bidirectional_flag_ // == 1 --> originating side // == 0 --> other side // == -1 --> no bi-directional connection was negotiated // The originating side must have an even request ID, and the other // side must have an odd request ID. Make sure that is the case. int const bidir_flag = this->transport_->bidirectional_flag (); if ((bidir_flag == 1 && ACE_ODD (this->request_id_generator_)) || (bidir_flag == 0 && ACE_EVEN (this->request_id_generator_))) ++this->request_id_generator_; if (TAO_debug_level > 4) TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) - Exclusive_TMS::request_id - <%d>\n"), this->request_id_generator_)); return this->request_id_generator_; }
/// Append @a validator to the end of this validator chain, refusing
/// self-insertion and anything that would create a cycle.
void
TAO_Policy_Validator::add_validator (TAO_Policy_Validator *validator)
{
  // The validator we're adding can't be part of another list.
  ACE_ASSERT (validator->next_ == 0);

  // Adding ourself to our own list makes no sense.
  if (this == validator)
    return;

  // Walk to the tail, bailing out if the validator is already linked
  // in (appending it again would make the list circular).
  TAO_Policy_Validator* tail = this;

  while (tail->next_ != 0)
    {
      if (tail->next_ == validator)
        {
          if (TAO_debug_level > 3)
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT ("(%P|%t) Skipping validator [%@] ")
                             ACE_TEXT ("since it would create a circular list\n"),
                             validator));
            }

          return;
        }

      tail = tail->next_;
    }

  tail->next_ = validator;
}
/// Destructor: releases the owned transport and any remaining OS
/// resources.  Failures are logged, never propagated.
TAO_IIOP_Connection_Handler::~TAO_IIOP_Connection_Handler (void)
{
  if (TAO_debug_level > 9)
    {
      TAO_Transport *tport = this->transport ();
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT("TAO (%P|%t) - IIOP_Connection_Handler[%d]::")
                     ACE_TEXT("~IIOP_Connection_Handler, ")
                     ACE_TEXT("this=%@, transport=%@\n"),
                     tport != 0 ? tport->id () : 0,
                     this,
                     tport));
    }

  // The handler owns its transport.
  delete this->transport ();

  int const result = this->release_os_resources ();

  if (result == -1 && TAO_debug_level)
    {
      TAOLIB_ERROR ((LM_ERROR,
                     ACE_TEXT("TAO (%P|%t) - IIOP_Connection_Handler::")
                     ACE_TEXT("~IIOP_Connection_Handler, ")
                     ACE_TEXT("release_os_resources() failed %m\n")));
    }
}
/// Kick off an asynchronous (AMI) invocation.  When a reply handler is
/// supplied, an asynch reply dispatcher is created (from the lane's
/// AMI allocator when available, else the heap) and handed to the ORB.
void
Asynch_Invocation_Adapter::invoke (
  Messaging::ReplyHandler_ptr reply_handler_ptr,
  const TAO_Reply_Handler_Stub &reply_handler_stub)
{
  TAO_Stub * stub = this->get_stub ();

  if (TAO_debug_level >= 4)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO_Messaging (%P|%t) - Asynch_Invocation_Adapter::"
                     "invoke\n"));
    }

  // If the reply handler is nil, we do not create a reply dispatcher.
  // The ORB will drop replies to which it cannot associate a reply
  // dispatcher.
  if (!CORBA::is_nil (reply_handler_ptr))
    {
      // New reply dispatcher on the heap or allocator, because
      // we will go out of scope and hand over the reply dispatcher
      // to the ORB.
      TAO_Asynch_Reply_Dispatcher *rd = 0;

      // Get the allocator we could use.
      ACE_Allocator* ami_allocator =
        stub->orb_core ()->lane_resources().ami_response_handler_allocator();

      // If we have an allocator, use it, else use the heap.
      if (ami_allocator)
        {
          ACE_NEW_MALLOC (
            rd,
            static_cast<TAO_Asynch_Reply_Dispatcher *> (
              ami_allocator->malloc (sizeof (TAO_Asynch_Reply_Dispatcher))),
            TAO_Asynch_Reply_Dispatcher (reply_handler_stub,
                                         reply_handler_ptr,
                                         stub->orb_core (),
                                         ami_allocator));
        }
      else
        {
          ACE_NEW (rd,
                   TAO_Asynch_Reply_Dispatcher (reply_handler_stub,
                                                reply_handler_ptr,
                                                stub->orb_core (),
                                                0));
        }

      if (rd == 0)
        {
          throw ::CORBA::NO_MEMORY ();
        }

      // Transfer ownership of the dispatcher to the adapter's smart
      // pointer before delegating to the base-class invoke.
      this->safe_rd_.reset (rd);
    }

  Invocation_Adapter::invoke (0, 0);
}
/// Validate that @a endpoint is a usable UIOP endpoint whose UNIX
/// domain address was properly initialized.  Returns 0 on success,
/// -1 on any validation failure.
int
TAO_UIOP_Connector::set_validate_endpoint (TAO_Endpoint *endpoint)
{
  TAO_UIOP_Endpoint *uiop_endpoint = this->remote_endpoint (endpoint);

  if (uiop_endpoint == 0)
    return -1;

  const ACE_UNIX_Addr &remote_address = uiop_endpoint->object_addr ();

  // @@ Note, POSIX.1g renames AF_UNIX to AF_LOCAL.

  // Verify that the remote ACE_UNIX_Addr was initialized properly.
  // Failure can occur if hostname lookup failed when initializing the
  // remote ACE_INET_Addr.
  if (remote_address.get_type () == AF_UNIX)
    return 0;

  if (TAO_debug_level > 0)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) - UIOP failure.\n")
                     ACE_TEXT ("TAO (%P|%t) - This is most likely ")
                     ACE_TEXT ("due to a hostname lookup ")
                     ACE_TEXT ("failure.\n")));
    }

  return -1;
}
/// Release a TAO_Queued_Data instance: always release the underlying
/// message block, then return the node either to its allocator or to
/// the global heap.
/*static*/ void
TAO_Queued_Data::release (TAO_Queued_Data *qd)
{
  //// TODO
  ACE_Message_Block::release (qd->msg_block_);

  // If the node came from an allocator, destroy and free it there.
  if (qd->allocator_)
    {
      ACE_DES_FREE (qd,
                    qd->allocator_->free,
                    TAO_Queued_Data);
      return;
    }

  // @todo: Need to be removed at some point of time!
  // NOTE(review): the comparison is == 4 (exactly), not >= 4 as TAO
  // debug checks usually are -- confirm this is intentional.
  if (TAO_debug_level == 4)
    {
      // This debug is for testing purposes!
      // Fix: the original format string contained a %d conversion but
      // was passed a string argument (format/argument mismatch with
      // undefined output); the message is now a single literal.
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - Queued_Data::release\n"
                     "Using global pool for releasing\n"));
    }

  delete qd;
}
// Generate and return an unique request id for the current // invocation. CORBA::ULong TAO_Muxed_TMS::request_id (void) { // @@ What is a good error return value? ACE_GUARD_RETURN (ACE_Lock, ace_mon, *this->lock_, 0); ++this->request_id_generator_; // if TAO_Transport::bidirectional_flag_ // == 1 --> originating side // == 0 --> other side // == -1 --> no bi-directional connection was negotiated // The originating side must have an even request ID, and the other // side must have an odd request ID. Make sure that is the case. int const bidir_flag = this->transport_->bidirectional_flag (); if ((bidir_flag == 1 && ACE_ODD (this->request_id_generator_)) || (bidir_flag == 0 && ACE_EVEN (this->request_id_generator_))) ++this->request_id_generator_; if (TAO_debug_level > 4) TAOLIB_DEBUG ((LM_DEBUG, "TAO (%P|%t) - Muxed_TMS[%d]::request_id, <%d>\n", this->transport_->id (), this->request_id_generator_)); return this->request_id_generator_; }
/// Find and invoke the reply dispatcher registered for the incoming
/// reply's request id.  Returns the dispatcher's result, or 0 when no
/// dispatcher is registered (the reply is silently dropped).
int
TAO_Muxed_TMS::dispatch_reply (TAO_Pluggable_Reply_Params &params)
{
  int result = 0;
  ACE_Intrusive_Auto_Ptr<TAO_Reply_Dispatcher> rd(0);

  // Grab the reply dispatcher for this id.
  {
    ACE_GUARD_RETURN (ACE_Lock, ace_mon, *this->lock_, -1);

    result = this->dispatcher_table_.unbind (params.request_id_, rd);
  }

  if (result == 0 && rd)
    {
      if (TAO_debug_level > 8)
        TAOLIB_DEBUG ((LM_DEBUG,
                       ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::dispatch_reply, ")
                       ACE_TEXT ("id = %d\n"),
                       params.request_id_));

      // Dispatch the reply.
      // They return 1 on success, and -1 on failure.
      result = rd->dispatch_reply (params);
    }
  else
    {
      if (TAO_debug_level > 0)
        TAOLIB_DEBUG ((LM_DEBUG,
                       ACE_TEXT ("TAO (%P|%t) - TAO_Muxed_TMS::dispatch_reply, ")
                       ACE_TEXT ("unbind dispatcher failed, id %d: result = %d\n"),
                       params.request_id_,
                       result));

      // Result = 0 means that the mux strategy was not able
      // to find a registered reply handler, either because the reply
      // was not our reply - just forget about it - or it was ours, but
      // the reply timed out - just forget about the reply.
      result = 0;
    }

  return result;
}
/// Post-process the return value of Transport::handle_input so that it
/// never contradicts the resume/suspend state of the handle: an
/// already-resumed handle cannot ask for an immediate callback, and a
/// -1 (close) must leave the handle suspended.
void
TAO_Resume_Handle::handle_input_return_value_hook (int& return_value)
{
  // RT8248: The return value is only changed from 1 to 0 if:
  //   1) the handle_input return value wants an immediate callback
  //      on the handle (i.e. will return "1")
  //   2) this->resume_handle was already called
  //   3) reactor->resume_handler was called by this->resume_handle
  // The value is changed because you can't ask for an immediate callback
  // on a handle that you have already given up ownership of. (RT8248)
  if ( return_value == 1 &&
       this->flag_ == TAO_HANDLE_ALREADY_RESUMED &&
       this->orb_core_ &&
       this->orb_core_->reactor ()->resumable_handler () &&
       this->handle_ != ACE_INVALID_HANDLE)
    {
      // a return value of "1" means "call me back immediately;
      // but we can't "call me back immediately" on an
      // already-resumed handle
      return_value = 0;
      if (TAO_debug_level > 6)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         "TAO (%P|%t) - Resume_Handle::handle_input_return_value_hook, "
                         "overriding return value of 1 with retval = %d\n",
                         return_value));
        }
    }
  else if ( return_value == -1 )
    {
      // this covers the "connection close" case, where you want
      // to leave the handle suspended if you're return -1 to
      // remove the handle from the Reactor. (See ChangeLog entry
      // Fri Dec 16 14:40:54 2005)
      this->flag_ = TAO_HANDLE_LEAVE_SUSPENDED;
      if (TAO_debug_level > 6)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         "TAO (%P|%t) - Resume_Handle::handle_input_return_value_hook, "
                         "handle_input returning -1, so handle is not resumed.\n"));
        }
    }
}
/// Resolve a valuetype indirection: read the (negative) offset and
/// look up the previously unmarshaled value recorded at that stream
/// position.  Throws CORBA::INTERNAL when the map is missing or the
/// position is unknown.
CORBA::Boolean
CORBA::ValueBase::_tao_unmarshal_value_indirection (TAO_InputCDR &strm,
                                                    CORBA::ValueBase *&value)
{
  // Without a value map there is nothing an indirection could refer to.
  if (strm.get_value_map().is_nil ())
    throw CORBA::INTERNAL ();

  // An indirection must carry a negative offset.
  CORBA::Long offset = 0;
  if (!strm.read_long (offset) || offset >= 0)
    {
      return 0;
    }

  // Absolute position referenced by the offset; the offset is taken
  // relative to the start of the long we just consumed.
  void* pos = strm.rd_ptr () + offset - sizeof (CORBA::Long);

  if (9 < TAO_debug_level)
    {
      // Dump the entire value map to help diagnose indirection lookups.
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) ValueBase::_tao_unmarshal_value_indirection, pos %x\n"),
                     pos));
      TAO_InputCDR::Value_Map* map = strm.get_value_map()->get ();
      for (TAO_InputCDR::Value_Map::ITERATOR it = map->begin ();
           it != map->end ();
           ++ it)
        {
          TAOLIB_DEBUG ((LM_DEBUG,
                         ACE_TEXT ("TAO (%P|%t) ValueBase::_tao_unmarshal_value_indirection, %x=%x\n"),
                         it->ext_id_, it->int_id_));
        }
    }

  void * v = 0;
  if (strm.get_value_map()->get()->find (pos, v) != 0)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) - %N:%l ")
                     ACE_TEXT ("ValueBase::_tao_unmarshal_value_indirection, ")
                     ACE_TEXT ("did not find %x in map %x\n"),
                     pos,
                     (void *) strm.get_value_map()->get()));
      throw CORBA::INTERNAL ();
    }
  else if (TAO_debug_level)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     ACE_TEXT ("TAO (%P|%t) - %N:%l ValueBase::_tao_unmarshal_value_indirection, found %x=%x\n"),
                     pos,v));
    }

  value = reinterpret_cast<CORBA::ValueBase *>(v);

  return true;
}
/// Service initialization entry point.  Idempotent; delegates to an
/// already-registered loader instance when present, otherwise
/// registers the RTScheduler ORB initializer.  Returns 0 on success,
/// 1 if registration raised a CORBA exception.
int
TAO_RTScheduler_Loader::init (int, ACE_TCHAR* [])
{
  ACE_TRACE ("TAO_RTScheduler_Loader::init");

  if (TAO_debug_level > 0)
    TAOLIB_DEBUG ((LM_DEBUG,
                   "In RTScheduler_Loader::init\n"));

  // Only allow initialization once.
  if (this->initialized_)
    return 0;

  this->initialized_ = true;

  // If a different RTScheduler_Loader is registered with the current
  // service configuration, delegate initialization to it.
  ACE_Service_Gestalt *gestalt = ACE_Service_Config::current ();

  ACE_Service_Object * const rts_loader =
    ACE_Dynamic_Service<ACE_Service_Object>::instance (
      gestalt, "RTScheduler_Loader", true);

  if (rts_loader != 0 && rts_loader != this)
    {
      return rts_loader->init (0, 0);
    }

  // Register the ORB initializer.
  try
    {
      PortableInterceptor::ORBInitializer_ptr temp_orb_initializer =
        PortableInterceptor::ORBInitializer::_nil ();

      /// Register the RTCORBA ORBInitializer.
      ACE_NEW_THROW_EX (temp_orb_initializer,
                        TAO_RTScheduler_ORB_Initializer,
                        CORBA::NO_MEMORY (
                          CORBA::SystemException::_tao_minor_code (
                            TAO::VMCID,
                            ENOMEM),
                          CORBA::COMPLETED_NO));

      // The _var assumes ownership of the raw initializer pointer.
      PortableInterceptor::ORBInitializer_var orb_initializer =
        temp_orb_initializer;

      PortableInterceptor::register_orb_initializer (orb_initializer.in ());
    }
  catch (const ::CORBA::Exception& ex)
    {
      ex._tao_print_exception (
        "Unexpected exception caught while initializing the RTScheduler:");
      return 1;
    }

  return 0;
}
/// Create a new active-object-map entry for @a servant with a freshly
/// generated system id, binding it into the user-id map and the
/// id-hint strategy.  Returns 0 on success, non-zero on failure.
/// NOTE(review): on failure the out-parameter @a entry is left dangling
/// after delete; callers must check the return value before using it.
int
TAO_System_Id_With_Multiple_Id_Strategy::bind_using_system_id (
  PortableServer::Servant servant,
  CORBA::Short priority,
  TAO_Active_Object_Map_Entry *&entry)
{
  ACE_NEW_RETURN (entry,
                  TAO_Active_Object_Map_Entry,
                  -1);

  // bind_create_key generates a new key for the entry and stores it
  // in entry->user_id_.
  int result =
    this->active_object_map_->user_id_map_->bind_create_key (entry,
                                                             entry->user_id_);
  if (result == 0)
    {
      entry->servant_ = servant;
      entry->priority_ = priority;

      result = this->active_object_map_->id_hint_strategy_->bind (*entry);

      if (result != 0)
        {
          // Hint binding failed: roll back the user-id binding and
          // release the entry.
          this->active_object_map_->user_id_map_->unbind (entry->user_id_);
          delete entry;
        }

#if defined (TAO_HAS_MONITOR_POINTS) && (TAO_HAS_MONITOR_POINTS == 1)
      // Report the current map size to the monitor.
      this->active_object_map_->monitor_->receive (
        this->active_object_map_->user_id_map_->current_size ());
#endif /* TAO_HAS_MONITOR_POINTS==1 */
    }
  else
    {
      delete entry;
    }

#if (TAO_HAS_MINIMUM_CORBA == 0)
  if (result == 0 && TAO_debug_level > 7)
    {
      CORBA::String_var idstr (
        PortableServer::ObjectId_to_string (entry->user_id_));
      CORBA::String_var repository_id (
        servant ? servant->_repository_id () : 0);
      ACE_CString hex_idstr;
      hexstring (hex_idstr, idstr.in (), entry->user_id_.length ());

      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO (%P|%t) - TAO_System_Id_With_Multiple_Id_Strategy::"
                     "bind_using_system_id: type=%C, id=%C\n",
                     repository_id.in (),
                     hex_idstr.c_str() ));
    }
#endif

  return result;
}
/// Log a diagnostic for an unrecognized value supplied to a known
/// command-line option.
void
TAO_Default_Client_Strategy_Factory::report_option_value_error (
  const ACE_TCHAR* option_name,
  const ACE_TCHAR* option_value)
{
  TAOLIB_DEBUG ((LM_DEBUG,
                 ACE_TEXT ("Client_Strategy_Factory - unknown argument")
                 ACE_TEXT (" <%s> for <%s>\n"),
                 option_value,
                 option_name));
}
int TAO::ORB::open_services (ACE_Intrusive_Auto_Ptr<ACE_Service_Gestalt> pcfg, int &argc, ACE_TCHAR **argv) { { ACE_MT (ACE_GUARD_RETURN (TAO_SYNCH_RECURSIVE_MUTEX, guard, TAO_Ubergestalt_Ready_Condition::instance ()->mutex (), -1)); // Wait in line, while the default ORB (which isn't us) completes // initialization of the globaly required service objects if (service_open_count == 1) { if (TAO_debug_level > 4) TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) - Waiting for the default ") ACE_TEXT ("ORB to complete the global ") ACE_TEXT ("initialization\n"))); ACE_MT (while (!is_ubergestalt_ready) TAO_Ubergestalt_Ready_Condition::instance ()->wait ()); if (TAO_debug_level > 4) TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) - The default ") ACE_TEXT ("ORB must have completed the global ") ACE_TEXT ("initialization...\n"))); } else { if (TAO_debug_level > 4) TAOLIB_DEBUG ((LM_DEBUG, ACE_TEXT ("TAO (%P|%t) - We are%Cthe default ") ACE_TEXT ("ORB ...\n"), (service_open_count == 0) ? " " : " not ")); } ++service_open_count; }