CORBA::Object_ptr
TAO_DLL_Parser::parse_string (const char *ior, CORBA::ORB_ptr orb)
{
  // The DLL prefix is guaranteed to be present: this method is only
  // invoked after <match_prefix> has returned 1.  Step past it to
  // obtain the service name to look up.
  const char * const name = ior + sizeof (::dll_prefix) - 1;

  TAO_ORB_Core * const core = orb->orb_core ();

  // Locate the object loader registered under <name> in the ORB's
  // service configuration.
  TAO_Object_Loader * const loader =
    ACE_Dynamic_Service<TAO_Object_Loader>::instance (
      core->configuration (), name);

  if (loader == 0)
    {
      // No such loader: the object reference cannot be resolved.
      throw CORBA::INV_OBJREF (
        CORBA::SystemException::_tao_minor_code (0, EINVAL),
        CORBA::COMPLETED_NO);
    }

  return loader->create_object (orb, 0, 0);
}
TAO_BEGIN_VERSIONED_NAMESPACE_DECL

// Constructor.  Nearly every resource held by this object is created
// lazily (hence the zero initializers below): acceptor/connector
// registries, the leader-follower object, and all CDR allocators stay
// null until first use.  Only the transport cache is allocated up
// front, configured from the ORB core's resource factory (purge
// percentage, purging strategy, maximum size, locking).
TAO_Thread_Lane_Resources::TAO_Thread_Lane_Resources (
  TAO_ORB_Core &orb_core,
  TAO_New_Leader_Generator *new_leader_generator)
  : orb_core_ (orb_core),
    acceptor_registry_ (0),
    connector_registry_ (0),
    transport_cache_ (0),
    leader_follower_ (0),
    new_leader_generator_ (new_leader_generator),
    input_cdr_dblock_allocator_ (0),
    input_cdr_buffer_allocator_ (0),
    input_cdr_msgblock_allocator_ (0),
    transport_message_buffer_allocator_ (0),
    output_cdr_dblock_allocator_ (0),
    output_cdr_buffer_allocator_ (0),
    output_cdr_msgblock_allocator_ (0),
    amh_response_handler_allocator_ (0),
    ami_response_handler_allocator_ (0)
{
  // Create the transport cache.  ACE_NEW leaves transport_cache_ null
  // on allocation failure rather than throwing.
  ACE_NEW (this->transport_cache_,
           TAO::Transport_Cache_Manager (
             orb_core.resource_factory ()->purge_percentage (),
             orb_core.resource_factory ()->create_purging_strategy (),
             orb_core.resource_factory ()->cache_maximum (),
             orb_core.resource_factory ()->locked_transport_cache (),
             orb_core.orbid ()));
}
// Set up multicast discovery for the ImplRepo service IOR.
//
// Initializes the ior_multicast_ event handler -- either from an
// explicit -ORBMulticastDiscoveryEndpoint setting or from a port
// resolved in order: ORB parameter, ImplRepoServicePort environment
// variable, compiled-in default -- and registers it with <reactor>
// for READ events.  Returns 0 on success, -1 on failure.  When ACE
// is built without IP-multicast support this is a no-op returning 0.
int
Locator_Repository::setup_multicast (ACE_Reactor* reactor, const char* ior)
{
  ACE_ASSERT (reactor != 0);
#if defined (ACE_HAS_IP_MULTICAST)
  TAO_ORB_Core* core = TAO_ORB_Core_instance ();

  // See if the -ORBMulticastDiscoveryEndpoint option was specified.
  ACE_CString mde (core->orb_params ()->mcast_discovery_endpoint ());

  if (mde.length () != 0)
    {
      // Explicit endpoint ("host:port") supplied on the command line.
      if (this->ior_multicast_.init (ior,
                                     mde.c_str (),
                                     TAO_SERVICEID_IMPLREPOSERVICE) == -1)
        {
          return -1;
        }
    }
  else
    {
      // Port can be specified as param, env var, or default
      CORBA::UShort port =
        core->orb_params ()->service_port (TAO::MCAST_IMPLREPOSERVICE);
      if (port == 0)
        {
          // Check environment var. for multicast port.
          const char* port_number = ACE_OS::getenv ("ImplRepoServicePort");
          if (port_number != 0)
            port = static_cast<CORBA::UShort> (ACE_OS::atoi (port_number));
        }
      if (port == 0)
        port = TAO_DEFAULT_IMPLREPO_SERVER_REQUEST_PORT;

      // Fall back to the default multicast address with the resolved port.
      if (this->ior_multicast_.init (ior,
                                     port,
                                     ACE_DEFAULT_MULTICAST_ADDR,
                                     TAO_SERVICEID_IMPLREPOSERVICE) == -1)
        {
          return -1;
        }
    }

  // Register event handler for the ior multicast.
  if (reactor->register_handler (&this->ior_multicast_,
                                 ACE_Event_Handler::READ_MASK) == -1)
    {
      if (this->opts_.debug() > 0)
        ORBSVCS_DEBUG ((LM_DEBUG, "ImR: cannot register Event handler\n"));
      return -1;
    }
#else /* ACE_HAS_IP_MULTICAST*/
  ACE_UNUSED_ARG (reactor);
  ACE_UNUSED_ARG (ior);
#endif /* ACE_HAS_IP_MULTICAST*/
  return 0;
}
bool Profile_Transport_Resolver::use_parallel_connect (void) const { TAO_ORB_Core *oc = this->stub_->orb_core(); return (oc->orb_params()->use_parallel_connects() #if 0 // it was decided that even with blocked connects // parallel connects could be useful, at least for cache // processing. oc->client_factory()->connect_strategy() != TAO_Client_Strategy_Factory::TAO_BLOCKED_CONNECT #endif /* 0 */ ); }
int Simple_Server_i::handle_timeout (const ACE_Time_Value &, const void *) { ACE_GUARD_RETURN (TAO_SYNCH_MUTEX, ace_mon, this->lock_, 0); // We are the (client) leader. Signal the leader-follower pattern to // elect a new leader TAO_ORB_Core *oc = orb_->orb_core(); oc->lf_strategy ().set_upcall_thread (oc->leader_follower ()); // Block until another thread is elected leader and handles a *new* event. ACE_DEBUG ((LM_DEBUG, "(%P|%t) handle_timeout () called - waiting...\n")); this->cond_.wait(); return 0; }
bool
TAO::CSD::TP_Strategy::poa_activated_event_i (TAO_ORB_Core& orb_core)
{
  // The worker threads belong to the ORB core's thread manager.
  this->task_.thr_mgr (orb_core.thr_mgr ());

  // Spawn the configured number of worker threads; open() does not
  // return until all of them have started.
  const int status = this->task_.open (&(this->num_threads_));
  return status == 0;
}
void
TAO_POA_Policy_Set::validate_policies (TAO_Policy_Validator &validator,
                                       TAO_ORB_Core &orb_core)
{
  // Give any validators living in not-yet-loaded libraries a last
  // chance to register themselves.
  orb_core.load_policy_validators (validator);

  // Validate the policy combination as a whole.
  validator.validate (this->impl_);

  // Now verify each individual policy is legal for the POA
  // extensions currently loaded.
  const CORBA::ULong count = this->impl_.num_policies ();
  for (CORBA::ULong idx = 0; idx != count; ++idx)
    {
      CORBA::Policy_var policy = this->impl_.get_policy_by_index (idx);
      const CORBA::PolicyType type = policy->policy_type ();

      if (validator.legal_policy (type))
        continue;

#if !defined (CORBA_E_MICRO)
      // An invalid policy was specified.  Let the user know about it.
      throw PortableServer::POA::InvalidPolicy ();
#else
      TAOLIB_ERROR ((LM_ERROR, "Invalid policy\n"));
#endif
    }
}
void TAO_PortableGroup_Acceptor_Registry::open (const TAO_Profile* profile, TAO_ORB_Core &orb_core) { Entry *entry; if (this->find (profile, entry) == 1) { // Found it. Increment the reference count. ++entry->cnt; } else { // Not found. Open a new acceptor. // Now get the list of available protocol factories. TAO_ProtocolFactorySetItor end = orb_core.protocol_factories ()->end (); // int found = 0; // If usable protocol (factory) is found then this will be // set equal to 1. for (TAO_ProtocolFactorySetItor factory = orb_core.protocol_factories ()->begin (); factory != end; ++factory) { if ((*factory)->factory ()->tag () == profile->tag ()) { this->open_i (profile, orb_core, factory); // found = 1; // A usable protocol was found. } else continue; } } }
/* static */
RTCORBA::ServerProtocolPolicy_ptr
TAO_POA_RT_Policy_Validator::server_protocol_policy_from_thread_pool (
  TAO_Thread_Pool *thread_pool,
  TAO_ORB_Core &orb_core)
{
  RTCORBA::ProtocolList protocols;

  if (thread_pool == 0)
    {
      // No pool: gather protocols from the ORB's default lane
      // resources.
      TAO_Thread_Lane_Resources_Manager &manager =
        orb_core.thread_lane_resources_manager ();
      TAO_Acceptor_Registry &registry =
        manager.default_lane_resources ().acceptor_registry ();

      TAO_POA_RT_Policy_Validator::
        server_protocol_policy_from_acceptor_registry (protocols,
                                                       registry,
                                                       orb_core);
    }
  else
    {
      // Gather protocols from every lane in the pool.
      TAO_Thread_Lane **lanes = thread_pool->lanes ();
      const CORBA::ULong lane_count = thread_pool->number_of_lanes ();

      for (CORBA::ULong lane = 0; lane != lane_count; ++lane)
        {
          TAO_Acceptor_Registry &registry =
            lanes[lane]->resources ().acceptor_registry ();

          TAO_POA_RT_Policy_Validator::
            server_protocol_policy_from_acceptor_registry (protocols,
                                                           registry,
                                                           orb_core);
        }
    }

  // Wrap the collected protocol list in a ServerProtocolPolicy;
  // ACE_NEW_RETURN yields 0 on allocation failure.
  TAO_ServerProtocolPolicy *server_protocol_policy = 0;
  ACE_NEW_RETURN (server_protocol_policy,
                  TAO_ServerProtocolPolicy (protocols),
                  0);
  return server_protocol_policy;
}
// POA-activation hook: bind the DTP task to the ORB's thread manager,
// lazily load this strategy's thread-pool configuration, then start
// the worker threads.
//
// Returns true when the task opened successfully, false when the DTP
// config registry service cannot be retrieved or open() fails.
//
// Fixes vs. the previous revision: the "config not found" warning is
// now guarded by TAO_debug_level for consistency with the other
// diagnostic in this function, and the block of commented-out
// dtp_task_ setter calls (superseded by set_dtp_config) is removed.
bool
TAO_DTP_POA_Strategy::poa_activated_event_i (TAO_ORB_Core& orb_core)
{
  // The worker threads belong to the ORB core's thread manager.
  this->dtp_task_.thr_mgr (orb_core.thr_mgr ());

  if (!this->config_initialized_)
    {
      // Pull the named configuration from the dynamically loaded
      // DTP config registry service.
      TAO_DTP_Config_Registry *config_repo =
        ACE_Dynamic_Service<TAO_DTP_Config_Registry>::instance (
          "DTP_Config_Registry");

      if (config_repo == 0)
        {
          if (TAO_debug_level > 0)
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT ("TAO (%P|%t) - DTP_POA_Strategy - ")
                             ACE_TEXT ("cannot retrieve configuration repo\n")));
            }
          return false;
        }

      // If the named entry is missing, config_entry retains its
      // default-constructed values and we fall through with those.
      TAO_DTP_Definition config_entry;
      if (!config_repo->find (this->dynamic_tp_config_name_, config_entry))
        {
          if (TAO_debug_level > 0)
            {
              TAOLIB_DEBUG ((LM_DEBUG,
                             ACE_TEXT ("TAO (%P|%t) - DTP_POA_Strategy - ")
                             ACE_TEXT ("warning: config not found...using ")
                             ACE_TEXT ("defaults!\n")));
            }
        }

      this->set_dtp_config (config_entry);
    }

  // Activates the worker threads, and waits until all have been started.
  return (this->dtp_task_.open () == 0);
}
void TAO_PortableGroup_Acceptor_Registry::open_i (const TAO_Profile* profile, TAO_ORB_Core &orb_core, TAO_ProtocolFactorySetItor &factory) { TAO_Acceptor *acceptor = (*factory)->factory ()->make_acceptor (); if (acceptor != 0) { // Extract the desired endpoint/protocol version if one // exists. const TAO_GIOP_Message_Version &version = profile->version (); char buffer [MAX_ADDR_LENGTH]; // Removed the constness of profile. We're not changing // anything, but need to call a nonconst function. TAO_Profile* nc_profile = const_cast<TAO_Profile *> (profile); nc_profile->endpoint ()->addr_to_string (buffer, MAX_ADDR_LENGTH); if (acceptor->open (&orb_core, orb_core.lane_resources ().leader_follower ().reactor(), version.major, version.minor, buffer, 0) == -1) { delete acceptor; if (TAO_debug_level > 0) ORBSVCS_ERROR ((LM_ERROR, ACE_TEXT ("TAO (%P|%t) ") ACE_TEXT ("unable to open acceptor ") ACE_TEXT ("for <%s>%p\n"), buffer, "")); throw CORBA::BAD_PARAM ( CORBA::SystemException::_tao_minor_code ( TAO_ACCEPTOR_REGISTRY_OPEN_LOCATION_CODE, EINVAL), CORBA::COMPLETED_NO); } // Add acceptor to list. Entry tmp_entry; tmp_entry.acceptor = acceptor; tmp_entry.endpoint = nc_profile->endpoint ()->duplicate (); tmp_entry.cnt = 1; if (this->registry_.enqueue_tail (tmp_entry) == -1) { delete acceptor; if (TAO_debug_level > 0) ORBSVCS_ERROR ((LM_ERROR, ACE_TEXT ("TAO (%P|%t) ") ACE_TEXT ("unable to add acceptor to registry") ACE_TEXT ("for <%s>%p\n"), buffer, "")); throw CORBA::BAD_PARAM ( CORBA::SystemException::_tao_minor_code ( TAO_ACCEPTOR_REGISTRY_OPEN_LOCATION_CODE, EINVAL), CORBA::COMPLETED_NO); } } else { if (TAO_debug_level > 0) ORBSVCS_ERROR ((LM_ERROR, ACE_TEXT ("TAO (%P|%t) ") ACE_TEXT ("unable to create acceptor ") )); throw CORBA::BAD_PARAM ( CORBA::SystemException::_tao_minor_code ( TAO_ACCEPTOR_REGISTRY_OPEN_LOCATION_CODE, EINVAL), CORBA::COMPLETED_NO); } }