// Dispatch an asynchronous (AMI) request.  When the caller supplied a
// non-nil reply handler, a reply dispatcher is created and registered so
// the ORB can deliver the reply to it; with a nil handler no dispatcher
// is created and the ORB drops any reply it cannot associate with one.
void Asynch_Invocation_Adapter::invoke (
    Messaging::ReplyHandler_ptr reply_handler_ptr,
    const TAO_Reply_Handler_Stub &reply_handler_stub)
{
  TAO_Stub * stub = this->get_stub ();

  if (TAO_debug_level >= 4)
    {
      TAOLIB_DEBUG ((LM_DEBUG,
                     "TAO_Messaging (%P|%t) - Asynch_Invocation_Adapter::"
                     "invoke\n"));
    }

  // If the reply handler is nil, we do not create a reply dispatcher.
  // The ORB will drop replies to which it cannot associate a reply
  // dispatcher.
  if (!CORBA::is_nil (reply_handler_ptr))
    {
      // New reply dispatcher on the heap or allocator, because
      // we will go out of scope and hand over the reply dispatcher
      // to the ORB.
      TAO_Asynch_Reply_Dispatcher *rd = 0;

      // Get the allocator we could use.  It comes from the lane
      // resources of this stub's ORB core.
      ACE_Allocator* ami_allocator =
        stub->orb_core ()->lane_resources().ami_response_handler_allocator();

      // If we have an allocator, use it, else use the heap.
      if (ami_allocator)
        {
          // Placement-construct the dispatcher in allocator-owned
          // memory; the allocator is passed to the dispatcher so it can
          // release itself back to the same allocator later (presumably
          // in its destruction path — confirm in the dispatcher class).
          ACE_NEW_MALLOC (
            rd,
            static_cast<TAO_Asynch_Reply_Dispatcher *> (
              ami_allocator->malloc (sizeof (TAO_Asynch_Reply_Dispatcher))),
            TAO_Asynch_Reply_Dispatcher (reply_handler_stub,
                                         reply_handler_ptr,
                                         stub->orb_core (),
                                         ami_allocator));
        }
      else
        {
          // No lane allocator configured: construct on the global heap
          // and pass a null allocator to signal heap ownership.
          ACE_NEW (rd,
                   TAO_Asynch_Reply_Dispatcher (reply_handler_stub,
                                                reply_handler_ptr,
                                                stub->orb_core (),
                                                0));
        }

      // Defensive check: the ACE_NEW* macros leave the pointer null on
      // allocation failure in non-exception builds.
      if (rd == 0)
        {
          throw ::CORBA::NO_MEMORY ();
        }

      // Hand the dispatcher to the guard member; presumably safe_rd_ is
      // a smart-pointer-style holder that releases it to the ORB during
      // the base-class invoke — confirm against the adapter class.
      this->safe_rd_.reset (rd);
    }

  // Delegate the actual send to the base class; the (0, 0) arguments
  // appear to mean "no user-exception data" — verify against
  // Invocation_Adapter::invoke's signature.
  Invocation_Adapter::invoke (0, 0);
}
// Default construction: build an empty set.  The storage strategy is a
// circular singly-linked list anchored by a sentinel head node, so an
// empty set is the sentinel pointing at itself.
ACE_Unbounded_Set_Ex<T, C>::ACE_Unbounded_Set_Ex (ACE_Allocator *alloc)
  : head_ (0),
    cur_size_ (0),
    allocator_ (alloc)
{
  // ACE_TRACE ("ACE_Unbounded_Set_Ex<T, C>::ACE_Unbounded_Set_Ex");

  // Fall back to the process-wide singleton allocator when the caller
  // did not supply one.
  if (!this->allocator_)
    this->allocator_ = ACE_Allocator::instance ();

  // Carve the sentinel node out of the allocator and construct it in
  // place.
  ACE_NEW_MALLOC (this->head_,
                  (NODE*) this->allocator_->malloc (sizeof (NODE)),
                  NODE);

  // Close the loop: the sentinel's successor is the sentinel itself.
  this->head_->next_ = this->head_;
}
// Copy construction: clone @a us into a fresh circular list.  The
// allocator and comparator are taken from the source set; node contents
// are duplicated via copy_nodes().
ACE_Unbounded_Set_Ex<T, C>::ACE_Unbounded_Set_Ex (const ACE_Unbounded_Set_Ex<T, C> &us)
  : head_ (0),
    cur_size_ (0),
    allocator_ (us.allocator_),
    comp_ (us.comp_)
{
  ACE_TRACE ("ACE_Unbounded_Set_Ex<T, C>::ACE_Unbounded_Set_Ex");

  // A source constructed with the singleton allocator carries that same
  // pointer, but guard against a null one anyway.
  if (!this->allocator_)
    this->allocator_ = ACE_Allocator::instance ();

  // Build our own sentinel node from the allocator.
  ACE_NEW_MALLOC (this->head_,
                  (NODE*) this->allocator_->malloc (sizeof (NODE)),
                  NODE);

  // Start out as an empty circular list, then replicate the source's
  // elements behind the sentinel.
  this->head_->next_ = this->head_;
  this->copy_nodes (us);
}
// Construct a cache manager.  Clamps inconsistent size parameters into a
// sane ordering, falls back to singleton allocator/factory instances,
// then builds the two internal structures: a hash table (key lookup) and
// a heap (eviction ordering).  On partial failure the already-built
// structure is torn down and the corresponding size member is zeroed,
// leaving the manager in a disabled-but-destructible state.
JAWS_Cache_Manager<KEY,FACTORY,HASH_FUNC,EQ_FUNC>
::JAWS_Cache_Manager (ACE_Allocator *alloc,
                      JAWS_Cache_Object_Factory *cof,
                      size_t hashsize,
                      size_t maxsize,
                      size_t maxobjsize,
                      size_t minobjsize,
                      size_t highwater,
                      size_t lowwater,
                      int timetolive,
                      int counted)
  : allocator_ (alloc),
    factory_ (cof),
    hashsize_ (hashsize),
    maxsize_ (maxsize),
    maxobjsize_ (maxobjsize),
    minobjsize_ (minobjsize),
    highwater_ (highwater),
    lowwater_ (lowwater),
    waterlevel_ (0),
    timetolive_ (timetolive),
    counted_ (counted),
    hash_ (0),
    heap_ (0)
{
  // Some sanity checking needed here --

  // Low-water mark must not exceed the high-water mark; repair it first
  // so the unsigned subtraction below cannot underflow.
  if (this->lowwater_ > this->highwater_)
    this->lowwater_ = this->highwater_ / 2;

  // Cap the maximum object size by the high/low-water gap; the * 1024
  // factor suggests the water marks are in KB while object sizes are in
  // bytes — TODO confirm units against the class documentation.
  if (this->maxobjsize_ > (this->highwater_ - this->lowwater_) * 1024)
    this->maxobjsize_ = (this->highwater_ - this->lowwater_) * (1024/2);

  // Minimum object size must not exceed the (possibly clamped) maximum.
  if (this->minobjsize_ > this->maxobjsize_)
    this->minobjsize_ = this->maxobjsize_ / 2;

  // Default to the singleton allocator/factory when none was supplied.
  if (this->allocator_ == 0)
    this->allocator_ = ACE_Allocator::instance ();

  if (this->factory_ == 0)
    this->factory_ = Object_Factory::instance ();

  // Allocate and placement-construct the hash table from the allocator.
  ACE_NEW_MALLOC (this->hash_,
                  (Cache_Hash *) this->allocator_->malloc (sizeof (Cache_Hash)),
                  Cache_Hash (alloc, hashsize));

  // Defensive: ACE_NEW_MALLOC leaves the pointer null on allocation
  // failure in some build configurations.  Mark the table unusable.
  if (this->hash_ == 0)
    {
      this->hashsize_ = 0;
      return;
    }

  // Allocate and placement-construct the eviction heap.
  ACE_NEW_MALLOC (this->heap_,
                  (Cache_Heap *) this->allocator_->malloc (sizeof (Cache_Heap)),
                  Cache_Heap (alloc, maxsize));

  if (this->heap_ == 0)
    {
      // Heap construction failed: roll back the hash table (destroy it
      // and return its memory to the allocator) and zero the sizes so
      // the manager reports itself as empty/disabled.
      this->maxsize_ = 0;
      ACE_DES_FREE_TEMPLATE3(this->hash_, this->allocator_->free,
                             JAWS_Cache_Hash, KEY, HASH_FUNC, EQ_FUNC);
      this->hash_ = 0;
      this->hashsize_ = 0;
    }
}