static void create_reactor (void) { ACE_Reactor_Impl *impl = 0; if (opt_wfmo_reactor) { #if defined (ACE_WIN32) && !defined (ACE_HAS_WINCE) ACE_NEW (impl, ACE_WFMO_Reactor); #endif /* ACE_WIN32 */ } else if (opt_select_reactor) { ACE_NEW (impl, ACE_Select_Reactor); } ACE_Reactor *reactor = 0; ACE_NEW (reactor, ACE_Reactor (impl)); ACE_Reactor::instance (reactor); }
// Default constructor: all members start null; the name options
// object that holds this context's configuration is allocated here.
ACE_Naming_Context::ACE_Naming_Context (void)
  : name_options_ (0),
    name_space_ (0),
    netnameserver_host_ (0)
{
  ACE_TRACE ("ACE_Naming_Context::ACE_Naming_Context");

  // Allocate the options holder; ACE_NEW returns early on failure.
  ACE_NEW (this->name_options_, ACE_Name_Options);
}
void test_caching_strategy_type (void) { HASH_MAP_CACHING_STRATEGY *hash_map_caching_strategy = 0; MAP_CACHING_STRATEGY *map_caching_strategy = 0; switch (caching_strategy_type) { case ACE_NULL: ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\nNull_Caching_Strategy\n\n"))); ACE_NEW (map_caching_strategy, MAP_NULL_ADAPTER); ACE_NEW (hash_map_caching_strategy, HASH_MAP_NULL_ADAPTER); break; case ACE_LRU: ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\nLRU_Caching_Strategy\n\n"))); ACE_NEW (map_caching_strategy, MAP_LRU_ADAPTER); ACE_NEW (hash_map_caching_strategy, HASH_MAP_LRU_ADAPTER); break; case ACE_LFU: ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\nLFU_Caching_Strategy\n\n"))); ACE_NEW (map_caching_strategy, MAP_LFU_ADAPTER); ACE_NEW (hash_map_caching_strategy, HASH_MAP_LFU_ADAPTER); break; case ACE_FIFO: ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\nFIFO_Caching_Strategy\n\n"))); ACE_NEW (map_caching_strategy, MAP_FIFO_ADAPTER); ACE_NEW (hash_map_caching_strategy, HASH_MAP_FIFO_ADAPTER); break; case ACE_ALL: // Just to remove warnings! break; } ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("map cache\n"))); functionality_test_cache (*map_caching_strategy); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\nhash map cache\n"))); functionality_test_hash_cache (*hash_map_caching_strategy); delete map_caching_strategy; delete hash_map_caching_strategy; }
// Track another connection handler: push a new node onto the front
// of this multi-event's singly linked list of events.
void
TAO_LF_Multi_Event::add_event (TAO_Connection_Handler *ev)
{
  Event_Node *fresh = 0;
  ACE_NEW (fresh, Event_Node);

  fresh->ptr_ = ev;
  fresh->next_ = this->events_;
  this->events_ = fresh;
}
//---------------------------------------------------------------------------- ACE::HTBP::Session::Session (void) : proxy_addr_ (0), destroy_proxy_addr_ (0), inbound_ (0), outbound_ (0), closed_ (false), handler_ (0), reactor_(0), stream_ (0), sock_flags_(0) { ACE::HTBP::ID_Requestor req; ACE_TCHAR * htid = req.get_HTID(); ACE_Auto_Array_Ptr<ACE_TCHAR> guard (htid); session_id_.local_ = ACE_TEXT_ALWAYS_CHAR(htid); session_id_.id_ = ACE::HTBP::Session::next_session_id(); ACE_NEW (inbound_, ACE::HTBP::Channel (this)); ACE_NEW (outbound_, ACE::HTBP::Channel (this)); }
// Construct an ACE_IOStream layered over an existing STREAM: a new
// ACE_Streambuf_T is allocated around this object and installed as
// the iostream's buffer via init().
//
// NOTE(review): the iostream base is first constructed with a null
// streambuf and then re-seated by init() below; this relies on the
// platform iostreams implementation tolerating that sequence —
// confirm when porting.
ACE_IOStream<STREAM>::ACE_IOStream (STREAM &stream,
                                    u_int streambuf_size)
  : iostream (0),
    STREAM (stream)
{
  ACE_NEW (streambuf_,
           ACE_Streambuf_T<STREAM> ((STREAM *) this, streambuf_size));
  iostream::init (this->streambuf_);
}
// Acquire |lock| through a heap-allocated Guard_Type and stash that
// guard in thread-specific storage, so each thread releases only the
// guard it created.
//
// @param lock   Lock to acquire (via the guard's constructor).
// @param block  Forwarded to the guard: whether acquisition blocks.
ACE_TSS_Guard<ACE_LOCK>::ACE_TSS_Guard (ACE_LOCK &lock, bool block)
{
  this->init_key ();
  Guard_Type *guard = 0;
  ACE_NEW (guard, Guard_Type (lock, block));
#if defined (ACE_HAS_THR_C_DEST)
  // Platforms with C-style TSS destructors need an adapter so the
  // cleanup callback can be invoked with the expected signature.
  ACE_TSS_Adapter *tss_adapter = 0;
  ACE_NEW (tss_adapter,
           ACE_TSS_Adapter ((void *) guard,
                            ACE_TSS_Guard<ACE_LOCK>::cleanup));
  ACE_Thread::setspecific (this->key_, (void *) tss_adapter);
#else
  ACE_Thread::setspecific (this->key_, (void *) guard);
#endif /* ACE_HAS_THR_C_DEST */
}
// Bind this condition to an externally owned mutex.  delete_lock_
// stays false, so the mutex is never deleted by this object.
TAO_Condition<MUTEX>::TAO_Condition (MUTEX &m)
  : mutex_ (&m),
    delete_lock_ (false),
    cond_ (0)
{
  // @todo: Need to add the allocator here.
  ACE_NEW (this->cond_, TAO_SYNCH_CONDITION (*this->mutex_));
}
// // Standard_EINode // Standard_EINode::Standard_EINode (void) : is_active_ (false), reconnect_timeout_ (1), connect_task_ (this) { Local_Data_Channel_i * channel = 0; ACE_NEW (channel, Local_Data_Channel_i (this)); this->local_channel_.reset (channel); }
// Construct the TSS wrapper.  When a non-null initial value is
// supplied, the TSS key is created now and |ts_obj| becomes the
// calling thread's specific value immediately.
ACE_TSS<TYPE>::ACE_TSS (TYPE *ts_obj)
  : once_ (false),
    key_ (ACE_OS::NULL_key)
{
  // If caller has passed us a non-NULL TYPE *, then we'll just use
  // this to initialize the thread-specific value.  Thus, subsequent
  // calls to operator->() will return this value.  This is useful
  // since it enables us to assign objects to thread-specific data
  // that have arbitrarily complex constructors!
  if (ts_obj != 0)
    {
      if (this->ts_init () == -1)
        {
          // Save/restore errno.
          ACE_Errno_Guard error (errno);
          // What should we do if this call fails?!
#if defined (ACE_HAS_WINCE)
          ::MessageBox (0,
                        ACE_TEXT ("ACE_Thread::keycreate() failed!"),
                        ACE_TEXT ("ACE_TSS::ACE_TSS"),
                        MB_OK);
#else
          ACE_OS::fprintf (stderr,
                           "ACE_Thread::keycreate() failed!");
#endif /* ACE_HAS_WINCE */
          return;
        }

#if defined (ACE_HAS_THR_C_DEST)
      // Encapsulate a ts_obj and it's destructor in an
      // ACE_TSS_Adapter.
      ACE_TSS_Adapter *tss_adapter = 0;
      ACE_NEW (tss_adapter,
               ACE_TSS_Adapter ((void *) ts_obj,
                                ACE_TSS<TYPE>::cleanup));

      // Put the adapter in thread specific storage.  On failure the
      // adapter is deleted here, since TSS never took ownership.
      if (ACE_Thread::setspecific (this->key_,
                                   (void *) tss_adapter) != 0)
        {
          delete tss_adapter;
          ACE_ERROR ((LM_ERROR,
                      ACE_TEXT ("%p\n"),
                      ACE_TEXT ("ACE_Thread::setspecific() failed!")));
        }
#else
      if (ACE_Thread::setspecific (this->key_,
                                   (void *) ts_obj) != 0)
        ACE_ERROR ((LM_ERROR,
                    ACE_TEXT ("%p\n"),
                    ACE_TEXT ("ACE_Thread::setspecific() failed!")));
#endif /* ACE_HAS_THR_C_DEST */
    }
}
static void create_reactor (void) { ACE_Reactor_Impl *impl = 0; #if defined (TEST_CAN_USE_WFMO_REACTOR) if (opt_wfmo_reactor) ACE_NEW (impl, ACE_WFMO_Reactor); #endif /* TEST_CAN_USE_WFMO_REACTOR */ if (impl == 0 && opt_select_reactor) ACE_NEW (impl, ACE_Select_Reactor); ACE_Reactor *reactor = 0; ACE_NEW (reactor, ACE_Reactor (impl)); ACE_Reactor::instance (reactor); }
// Construct the protocol object and preallocate its frame message
// block, sized to four transport MTUs.
TimeStamp_Protocol_Object::TimeStamp_Protocol_Object (
    TAO_AV_Callback *callback,
    TAO_AV_Transport *transport)
  : TAO_AV_Protocol_Object (callback, transport)
{
  ACE_DEBUG ((LM_DEBUG,
              "TimeStamp_Protocol_Object::TimeStamp_Protocol_Object\n"));

  ACE_NEW (this->frame_, ACE_Message_Block);
  this->frame_->size (4 * this->transport_->mtu ());
}
void Connection_Manager::connect_to_receivers (void) { // Connect to all receivers that we know about. for (Receivers::iterator iterator = this->receivers_.begin (); iterator != this->receivers_.end (); ++iterator) { // Initialize the QoS AVStreams::streamQoS_var the_qos (new AVStreams::streamQoS); ACE_CString flowname = (*iterator).ext_id_; // Create the forward flow specification to describe the flow. TAO_Forward_FlowSpec_Entry sender_entry (flowname.c_str (), "IN", "USER_DEFINED", "", "UDP", 0); // Set the flow specification for the stream between receiver // and distributer AVStreams::flowSpec flow_spec (1); flow_spec.length (1); flow_spec [0] = CORBA::string_dup (sender_entry.entry_to_string ()); // Create the stream control for this stream. TAO_StreamCtrl *streamctrl; ACE_NEW (streamctrl, TAO_StreamCtrl); // Servant Reference Counting to manage lifetime PortableServer::ServantBase_var safe_streamctrl = streamctrl; // Register streamctrl. AVStreams::StreamCtrl_var streamctrl_object = streamctrl->_this (); // Bind the flowname and the corresponding stream controller to // the stream controller map this->streamctrls_.bind (flowname, streamctrl_object); // Bind the sender and receiver MMDevices. (void) streamctrl->bind_devs (this->sender_.in (), (*iterator).int_id_.in (), the_qos.inout (), flow_spec); } }
void AsyncListManager::init_list (void) { CORBA::ULong len = static_cast<CORBA::ULong> (this->repo_->servers ().current_size ()); Locator_Repository::SIMap::ENTRY* entry = 0; Locator_Repository::SIMap::CONST_ITERATOR it (this->repo_->servers ()); this->server_list_.length (len); this->waiters_ = 0; for (CORBA::ULong i = 0; i < len; i++) { it.next (entry); it.advance (); Server_Info_Ptr info = entry->int_id_; info->setImRInfo (&this->server_list_[i]); if (this->pinger_ != 0) { ListLiveListener *l = 0; ACE_NEW (l, ListLiveListener (info->ping_id (), info->pid, i, this, *this->pinger_)); LiveListener_ptr llp (l); if (!l->start ()) { this->server_list_[i].activeStatus = ImplementationRepository::ACTIVE_NO; l->cancel (); } else { if (!evaluate_status (i, l->status(), info->pid)) { this->waiters_++; } else { l->cancel (); } } } } if (ImR_Locator_i::debug() > 4) { ORBSVCS_DEBUG ((LM_DEBUG, ACE_TEXT ("(%P|%t) AsyncListManager(%@)::init_list, %d waiters") ACE_TEXT (" out of %d regsitered servers\n"), this, this->waiters_, len)); } }
// Open, configure, and register a multicast dgram socket for every
// address in |multicast_addresses|, appending each new subscription
// to subscriptions_.  Failures of individual steps are logged and
// subsequent steps are still attempted (best-effort setup).
void
TAO_ECG_Mcast_EH::add_new_subscriptions (Address_Set& multicast_addresses)
{
  typedef ACE_Unbounded_Set_Iterator<ACE_INET_Addr> Address_Iterator;
  for (Address_Iterator k = multicast_addresses.begin ();
       k != multicast_addresses.end ();
       ++k)
    {
      Subscription new_subscription;
      new_subscription.mcast_addr = *k;
      ACE_NEW (new_subscription.dgram, ACE_SOCK_Dgram_Mcast);

      // Grow the subscriptions array by one and store the copy; the
      // copy shares the dgram pointer allocated above.
      size_t const subscriptions_size = this->subscriptions_.size ();
      this->subscriptions_.size (subscriptions_size + 1);
      this->subscriptions_[subscriptions_size] = new_subscription;

      ACE_SOCK_Dgram_Mcast *socket = new_subscription.dgram;
      if (socket->open (new_subscription.mcast_addr,
                        this->net_if_,
                        1) == -1)
        {
          ORBSVCS_ERROR ((LM_ERROR,
                          "Error: %d - Unable to open multicast socket\n",
                          ACE_ERRNO_GET));
        }

      if ( socket->enable (ACE_NONBLOCK) != 0 )
        {
          ORBSVCS_ERROR ((LM_ERROR,
                          "Error: %d - Unable to enable nonblocking on mcast_eh\n",
                          ACE_ERRNO_GET));
        }

      if (socket->join (new_subscription.mcast_addr,
                        1,
                        this->net_if_) == -1)
        {
          ORBSVCS_ERROR ((LM_ERROR,
                          "Error: %d - Unable to join multicast group\n",
                          ACE_ERRNO_GET));
        }

      // Optionally enlarge the receive buffer; ENOTSUP is tolerated
      // on platforms that do not support SO_RCVBUF here.
      if (this->recvbuf_size_ != 0
          && (((ACE_SOCK_Dgram *)socket)->set_option (SOL_SOCKET,
                                                      SO_RCVBUF,
                                                      (void *) &this->recvbuf_size_,
                                                      sizeof (this->recvbuf_size_)) == -1)
          && errno != ENOTSUP )
        {
          ORBSVCS_ERROR ((LM_ERROR,
                          "Error: %d - Unable to set mcast_eh recvbuf_size:%d\n",
                          ACE_ERRNO_GET,
                          this->recvbuf_size_));
        }

      // Register for reads; this handler fans in all subscriptions.
      (void) this->reactor ()->register_handler (
          socket->get_handle (),
          this,
          ACE_Event_Handler::READ_MASK);
    }
}
// Construct the high-priority task: compute the next FIFO priority
// above the scheduler minimum and allocate the per-iteration timing
// array used by the benchmark.
High_Priority_Task::High_Priority_Task (void)
  : ACE_Task<ACE_SYNCH> (ACE_Thread_Manager::instance ()),
    priority_ (ACE_Sched_Params::next_priority (
                 ACE_SCHED_FIFO,
                 ACE_Sched_Params::priority_min (ACE_SCHED_FIFO,
                                                 ACE_SCOPE_THREAD),
                 ACE_SCOPE_THREAD)),
    done_ (0)
{
  ACE_NEW (time_, u_long[high_iterations]);
}
// Replace this Oid's value with a copy of |raw_oid|.
//
// @param raw_oid  Source array of sub-identifiers to copy from.
// @param oid_len  Number of sub-identifiers to copy.
//
// Fixes: removed the redundant null check before delete[] (deleting
// a null pointer is a no-op), and null the pointer before ACE_NEW so
// a failed allocation cannot leave a dangling pointer to freed
// memory behind.
void
Oid::init_value (const unsigned long *raw_oid, size_t oid_len)
{
  delete [] smival.value.oid.ptr;
  smival.value.oid.ptr = 0;

  ACE_NEW (smival.value.oid.ptr, SmiUINT32[oid_len]);

  ACE_OS::memcpy ((SmiLPBYTE) smival.value.oid.ptr,
                  (SmiLPBYTE) raw_oid,
                  (size_t) (oid_len * sizeof (SmiUINT32)));
  smival.value.oid.len = oid_len;
}
// Construct the UIOP connection handler and install a freshly
// allocated UIOP transport on it.
TAO_UIOP_Connection_Handler::TAO_UIOP_Connection_Handler (TAO_ORB_Core *orb_core)
  : TAO_UIOP_SVC_HANDLER (orb_core->thr_mgr (), 0, 0),
    TAO_Connection_Handler (orb_core)
{
  TAO_UIOP_Transport *uiop_transport = 0;
  ACE_NEW (uiop_transport, TAO_UIOP_Transport (this, orb_core));

  // store this pointer (indirectly increment ref count)
  this->transport (uiop_transport);
}
// Dispatch an incoming request through the DSI forwarder.  A request
// that already carries a forward location is answered immediately
// (location-forward reply) without invoking; otherwise the request
// is wrapped in a CORBA::ServerRequest and handed to invoke() along
// with an AMH response handler.  CORBA exceptions from the upcall
// are relayed to a still-waiting client.
void
ImR_DSI_Forwarder::_dispatch (TAO_ServerRequest &request,
                              TAO::Portable_Server::Servant_Upcall * /*context */ )
{
  // No need to do any of this if the client isn't waiting.
  if (request.response_expected ())
    {
      if (!CORBA::is_nil (request.forward_location ()))
        {
          request.init_reply ();
          request.tao_send_reply ();

          // No need to invoke in this case.
          return;
        }
    }

  // Create DSI request object.
  CORBA::ServerRequest *dsi_request = 0;
  ACE_NEW (dsi_request, CORBA::ServerRequest (request));

  try
    {
      // The _var takes ownership of the raw handler pointer.
      TAO_AMH_DSI_Response_Handler_ptr rhp;
      ACE_NEW (rhp, TAO_AMH_DSI_Response_Handler (request));
      TAO_AMH_DSI_Response_Handler_var rh (rhp);

      rh->init (request, 0);
      // Delegate to user.
      this->invoke (dsi_request, rh.in ());
    }
  catch (const CORBA::Exception &ex)
    {
      // Only if the client is waiting.
      if (request.response_expected () && !request.sync_with_server ())
        {
          request.tao_send_reply_exception (ex);
        }
    }

  // Drop our reference to the DSI request object.
  CORBA::release (dsi_request);
}
// Buffer a copy of |value| under |sequence| for potential
// retransmission.  When the buffer is at capacity the oldest sample
// (lowest sequence, map-ordered) is released first.  The sample's
// queue elements and message blocks are cloned into the retained
// allocators so they outlive the caller's originals.
void
TransportSendBuffer::insert (SequenceNumber sequence,
                             const buffer_type& value)
{
  // Age off oldest sample if we are at capacity:
  if (this->buffers_.size () == this->capacity_)
    {
      BufferMap::iterator it (this->buffers_.begin ());
      if (it == this->buffers_.end ())
        return;

      if ( OpenDDS::DCPS::Transport_debug_level >= 10)
        {
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) TransportSendBuffer::insert() - ")
                      ACE_TEXT ("aging off PDU: 0x%x as buffer(0x%x,0x%x)\n"),
                      it->first.getValue (),
                      it->second.first,
                      it->second.second));
        }

      release (it->second);
      this->buffers_.erase (it);
    }

  // Insert an empty slot; the real contents are filled in below.
  std::pair<BufferMap::iterator, bool> pair =
    this->buffers_.insert (BufferMap::value_type (sequence, buffer_type ()));
  if (pair.first == this->buffers_.end ())
    return;

  buffer_type& buffer (pair.first->second);

  // Copy sample's TransportQueueElements:
  TransportSendStrategy::QueueType*& elems = buffer.first;
  ACE_NEW (elems,
           TransportSendStrategy::QueueType (value.first->size (), 1));

  CopyChainVisitor visitor (*elems,
                            &this->retained_allocator_,
                            &this->retained_mb_allocator_,
                            &this->retained_db_allocator_);
  value.first->accept_visitor (visitor);

  // Copy sample's message/data block descriptors:
  ACE_Message_Block*& data = buffer.second;
  data = TransportQueueElement::clone (value.second,
                                       &this->retained_mb_allocator_,
                                       &this->retained_db_allocator_);

  if ( OpenDDS::DCPS::Transport_debug_level >= 10)
    {
      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) TransportSendBuffer::insert() - ")
                  ACE_TEXT ("saved PDU: 0x%x as buffer(0x%x,0x%x)\n"),
                  sequence.getValue (),
                  buffer.first,
                  buffer.second));
    }
}
// Update the scheduling segment of the calling distributable thread
// (DT).  The desired importance comes from |sched_policy|.  If a DT
// at the head of the ready queue has priority >= the desired value
// (or the desired value is 100), the caller yields: it is queued and
// suspended while the higher-priority DT resumes.  Otherwise the
// dequeued DT is put back and the temporary DT is discarded.
//
// NOTE(review): the dynamic_cast of the dequeued message block to
// DT* is dereferenced without a null check — confirm only DT
// instances are ever enqueued on ready_que_.
void
MIF_Scheduler::update_scheduling_segment (const RTScheduling::Current::IdType &/*guid*/,
                                          const char* /*name*/,
                                          CORBA::Policy_ptr sched_policy,
                                          CORBA::Policy_ptr /*implicit_sched_param*/)
{
  // Recover the caller's count from its guid buffer.
  size_t count = 0;
  RTScheduling::Current::IdType_var guid = this->current_->id ();
  ACE_OS::memcpy (&count,
                  guid->get_buffer (),
                  guid->length ());

  MIF_Scheduling::SegmentSchedulingParameterPolicy_var sched_param =
    MIF_Scheduling::SegmentSchedulingParameterPolicy::_narrow (sched_policy);
  CORBA::Short desired_priority = sched_param->importance ();

  if (TAO_debug_level > 0)
    ACE_DEBUG ((LM_DEBUG,
                "%t MIF_Scheduler::update_scheduling_segment - Importance %d\n",
                desired_priority));

  // Temporary DT representing the caller at its new priority.
  DT* new_dt = 0;
  ACE_NEW (new_dt, DT (this->lock_, count));
  new_dt->msg_priority (desired_priority);

  if (ready_que_.message_count () > 0)
    {
      DT* run_dt;
      ACE_Message_Block* msg = 0;
      ready_que_.dequeue_head (msg);
      run_dt = dynamic_cast<DT*> (msg);
      if ((desired_priority == 100)
          || run_dt->msg_priority () >= (unsigned int)desired_priority)
        {
          // Caller yields: queue it, wake the higher-priority DT,
          // and suspend until this DT is rescheduled.
          ready_que_.enqueue_prio (new_dt);
          lock_.acquire ();
          run_dt->resume ();
          new_dt->suspend ();
          lock_.release ();
          free_que_.enqueue_prio (run_dt);
        }
      else
        {
          // Caller keeps running; restore the dequeued DT.
          ready_que_.enqueue_prio (run_dt);
          delete new_dt;
        }
    }
  else
    delete new_dt;
}
TAO_BEGIN_VERSIONED_NAMESPACE_DECL

// Construct the loader.  When no naming server is supplied, fall
// back to a default-constructed TAO_Naming_Server.
TAO_Naming_Loader::TAO_Naming_Loader (TAO_Naming_Server *server)
  : naming_server_ (server)
{
  if (this->naming_server_ == 0)
    ACE_NEW (this->naming_server_, TAO_Naming_Server);
}
// Observer callback: a watched future has been written.  Enqueue a
// zero-length message block whose data pointer is the future's
// representation, so readers of the notification queue can identify
// which future became ready.
//
// Fix: initialize |mb| to 0 before ACE_NEW, matching the pointer
// initialization convention used throughout this file.
template <class T> void
ACE_Future_Set<T>::update (const ACE_Future<T> &future)
{
  ACE_Message_Block *mb = 0;
  FUTURE &local_future = const_cast<ACE_Future<T> &> (future);

  ACE_NEW (mb,
           ACE_Message_Block ((char *) local_future.get_rep (), 0));

  // Enqueue in priority order.
  this->future_notification_queue_->enqueue (mb, 0);
}
// Construct the future set.  Either adopt the caller's notification
// queue (left alone at destruction) or create one of our own, in
// which case delete_queue_ is set so the destructor reclaims it.
ACE_Future_Set<T>::ACE_Future_Set (ACE_Message_Queue<ACE_SYNCH> *new_queue)
  : delete_queue_ (0)
{
  if (new_queue != 0)
    {
      this->future_notification_queue_ = new_queue;
    }
  else
    {
      ACE_NEW (this->future_notification_queue_,
               ACE_Message_Queue<ACE_SYNCH>);
      this->delete_queue_ = 1;
    }
}
// Hand back a freshly constructed 4-node graph through the out
// parameter, then exercise it: adding a node must grow the reported
// size from 4 to 5.
void
test_impl::pass_obj_graph_out (Supports_Test::graph_out graph_param)
{
  vt_graph_impl *impl = 0;
  ACE_NEW (impl, vt_graph_impl (4));

  graph_param = impl->_this ();
  ACE_ASSERT (graph_param->size () == 4);

  graph_param->add_node ("NEW1");
  ACE_ASSERT (graph_param->size () == 5);
}
// Construct the timer list.  The sentinel head node is linked
// circularly to itself so an empty list needs no special casing; a
// single iterator object is allocated up front and reused.
ACE_Timer_List_T<TYPE, FUNCTOR, ACE_LOCK>::ACE_Timer_List_T (FUNCTOR* uf,
                                                             FreeList* fl)
  : Base (uf, fl),
    head_ (new ACE_Timer_Node_T<TYPE>),
    id_counter_ (0)
{
  ACE_TRACE ("ACE_Timer_List_T::ACE_Timer_List_T");

  this->head_->set_prev (this->head_);
  this->head_->set_next (this->head_);

  ACE_NEW (iterator_, Iterator (*this));
}
// Construct the utility.  When no cleanup strategy is supplied, a
// default CLEANUP_STRATEGY is allocated and owned by this object
// (delete_cleanup_strategy_ is forced on so it is reclaimed later).
ACE_Null_Caching_Utility<KEY, VALUE, CONTAINER, ITERATOR, ATTRIBUTES>::ACE_Null_Caching_Utility (
    ACE_Cleanup_Strategy<KEY, VALUE, CONTAINER> *cleanup_strategy,
    int delete_cleanup_strategy)
  : cleanup_strategy_ (cleanup_strategy),
    delete_cleanup_strategy_ (delete_cleanup_strategy)
{
  if (cleanup_strategy == 0)
    {
      ACE_NEW (this->cleanup_strategy_, CLEANUP_STRATEGY);
      this->delete_cleanup_strategy_ = 1;
    }
}
// Grow the free list by |n| freshly allocated nodes, pushing each
// onto the head of the list and bumping the size count.
template <class T, class ACE_LOCK> void
ACE_Locked_Free_List<T, ACE_LOCK>::alloc (size_t n)
{
  while (n > 0)
    {
      T *node = 0;
      ACE_NEW (node, T);
      node->set_next (this->free_list_);
      this->free_list_ = node;
      ++this->size_;
      --n;
    }
}
// Constructor Prime_Scheduler::Prime_Scheduler (const ACE_TCHAR *newname, Prime_Scheduler *new_scheduler) : scheduler_ (new_scheduler) { ACE_NEW (this->name_, ACE_TCHAR[ACE_OS::strlen (newname) + 1]); ACE_OS::strcpy ((ACE_TCHAR *) this->name_, newname); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("(%t) Prime_Scheduler %s created\n"), this->name_)); }
// Fill the out parameter with a new ArrayOfString copied from the
// module-level |data| array (maxd entries), then shut the ORB down
// (test-harness behavior: delivering the data ends the run).
//
// NOTE(review): no length/size is established on the new
// ArrayOfString before indexed assignment — if ArrayOfString is a
// CORBA sequence type (rather than a fixed-size array), the loop
// writes past its current length; confirm against the IDL.
void
TestImpl::get_strings (ArrayOfString_out strings)
{
  ACE_NEW (strings, ArrayOfString);
  for (CORBA::ULong i = 0; i < maxd; i++)
    {
      strings[i] = data[i];
    }
  orb_->shutdown ();
}