int ReactorInterceptor::handle_exception_i(ACE_Guard<ACE_Thread_Mutex>& guard)
{
  process_command_queue();
  condition_.signal();

  if (registration_counter_ == 0 && destroy_) {
    // Release the caller's guard before self-deletion: the destructor
    // also destroys the mutex the guard would otherwise try to unlock.
    guard.release();
    delete this;
  }

  return 0;
}
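// The ordering above is the whole point of passing the guard in: unlock
// first, then "delete this".  Below is a minimal, self-contained sketch of
// the same idiom in isolation; SelfDeleting, do_work() and done_ are
// hypothetical names for illustration, not part of the code above.
#include "ace/Guard_T.h"
#include "ace/Thread_Mutex.h"

class SelfDeleting
{
public:
  SelfDeleting() : done_(false) {}

  int do_work()
  {
    ACE_Guard<ACE_Thread_Mutex> guard(mutex_);
    // ... mutate state protected by mutex_, possibly setting done_ ...
    if (done_) {
      guard.release();  // Unlock first; mutex_ dies with *this.
      delete this;      // Nothing below may touch members.
    }
    return 0;
  }

private:
  ~SelfDeleting() {}  // Heap-only: deletion happens via do_work().

  ACE_Thread_Mutex mutex_;
  bool done_;
};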
template <class ACE_LOCK> int
ACE_TSS_Guard<ACE_LOCK>::release (void)
{
// ACE_TRACE ("ACE_TSS_Guard<ACE_LOCK>::release");

  ACE_Guard<ACE_LOCK> *guard = 0;

#if defined (ACE_HAS_THR_C_DEST)
  ACE_TSS_Adapter *tss_adapter = 0;
  ACE_Thread::getspecific (this->key_, (void **) &tss_adapter);
  guard = (ACE_Guard<ACE_LOCK> *) tss_adapter->ts_obj_;
#else
  ACE_Thread::getspecific (this->key_, (void **) &guard);
#endif /* ACE_HAS_THR_C_DEST */

  return guard->release ();
}
template <class ACE_LOCK> int
ACE_TSS_Guard<ACE_LOCK>::release (void)
{
// ACE_TRACE ("ACE_TSS_Guard<ACE_LOCK>::release");

  ACE_Guard<ACE_LOCK> *guard = 0;

#if defined (ACE_HAS_THR_C_DEST)
  ACE_TSS_Adapter *tss_adapter = 0;
  void *temp = tss_adapter; // Need this temp to keep G++ from complaining.
  ACE_Thread::getspecific (this->key_, &temp);
  tss_adapter = static_cast<ACE_TSS_Adapter *> (temp);
  guard = static_cast<ACE_Guard<ACE_LOCK> *> (tss_adapter->ts_obj_);
#else
  void *temp = guard; // Need this temp to keep G++ from complaining.
  ACE_Thread::getspecific (this->key_, &temp);
  guard = static_cast<ACE_Guard<ACE_LOCK> *> (temp);
#endif /* ACE_HAS_THR_C_DEST */

  return guard->release ();
}
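// Both versions above perform the same lookup: the per-thread ACE_Guard
// lives in thread-specific storage under key_ and is fetched with
// ACE_Thread::getspecific before being released.  A minimal sketch of that
// TSS round-trip in isolation (tss_round_trip is a hypothetical example
// using only the portable ACE_Thread key API):
#include "ace/Thread.h"

int tss_round_trip ()
{
  ACE_thread_key_t key;
  if (ACE_Thread::keycreate (&key, 0) != 0)  // 0 = no destructor hook
    return -1;

  int value = 42;
  if (ACE_Thread::setspecific (key, &value) != 0)  // per-thread slot
    return -1;

  void *temp = 0;                          // same void** dance as above
  if (ACE_Thread::getspecific (key, &temp) != 0)
    return -1;

  return static_cast<int *> (temp) == &value ? 0 : -1;
}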
// FUZZ: disable check_for_ACE_Guard
bool
TAO_Notify_SequencePushConsumer::dispatch_from_queue (
  Request_Queue& requests,
  ACE_Guard<TAO_SYNCH_MUTEX>& ace_mon)
// FUZZ: enable check_for_ACE_Guard
{
  bool result = true;
  if (DEBUG_LEVEL > 0)
    {
      ORBSVCS_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) SequencePushConsumer dispatch queued requests. queue size:%u\n"),
                      requests.size ()));
    }

  // Clamp the batch to the configured maximum batch size, if any.
  CORBA::ULong queue_size =
    ACE_Utils::truncate_cast<CORBA::ULong> (requests.size ());
  CORBA::Long max_batch_size = queue_size;
  if (this->max_batch_size_.is_valid ())
    {
      max_batch_size = this->max_batch_size_.value ();
    }
  CORBA::Long batch_size = queue_size;
  if (batch_size > max_batch_size)
    {
      batch_size = max_batch_size;
    }

  if (batch_size > 0)
    {
      CosNotification::EventBatch batch (batch_size);
      batch.length (batch_size);

      Request_Queue completed;

      CORBA::Long pos = 0;
      TAO_Notify_Method_Request_Event_Queueable * request = 0;
      while (pos < batch_size && requests.dequeue_head (request) == 0)
        {
          if (DEBUG_LEVEL > 0)
            {
              ORBSVCS_DEBUG ((LM_DEBUG,
                              ACE_TEXT ("(%P|%t) Sequence Dispatch Method_Request_Dispatch @%@\n"),
                              request));
            }
          const TAO_Notify_Event * ev = request->event ();
          ev->convert (batch[pos]);
          ++pos;
          // Note: enqueue at head, use queue as stack.
          completed.enqueue_head (request);
        }
      batch.length (pos);
      ACE_ASSERT (pos > 0);

      // Drop the caller-held lock across the blocking batch dispatch and
      // reacquire it before touching the queues again.
      ace_mon.release ();
      bool from_timeout = false;
      TAO_Notify_Consumer::DispatchStatus status =
        this->dispatch_batch (batch);
      ace_mon.acquire ();

      switch (status)
        {
        case DISPATCH_SUCCESS:
          {
            TAO_Notify_Method_Request_Event_Queueable * request = 0;
            while (completed.dequeue_head (request) == 0)
              {
                request->complete ();
                request->release ();
              }
            result = true;
            break;
          }
        case DISPATCH_FAIL_TIMEOUT:
          from_timeout = true;
          // Fall through.
        case DISPATCH_FAIL:
          {
            TAO_Notify_Method_Request_Event_Queueable * request = 0;
            while (completed.dequeue_head (request) == 0)
              {
                if (request->should_retry ())
                  {
                    if (DEBUG_LEVEL > 0)
                      ORBSVCS_DEBUG ((LM_DEBUG,
                                      ACE_TEXT ("(%P|%t) Consumer %d: Will retry %d\n"),
                                      static_cast<int> (this->proxy ()->id ()),
                                      request->sequence ()));
                    requests.enqueue_head (request);
                    result = false;
                  }
                else
                  {
                    if (DEBUG_LEVEL > 0)
                      ORBSVCS_DEBUG ((LM_DEBUG,
                                      ACE_TEXT ("(%P|%t) Consumer %d: Discarding %d\n"),
                                      static_cast<int> (this->proxy ()->id ()),
                                      request->sequence ()));
                    request->complete ();
                    request->release ();
                  }
              }
            while (requests.dequeue_head (request) == 0)
              {
                if (request->should_retry ())
                  {
                    if (DEBUG_LEVEL > 0)
                      ORBSVCS_DEBUG ((LM_DEBUG,
                                      ACE_TEXT ("(%P|%t) Consumer %d: Will retry %d\n"),
                                      static_cast<int> (this->proxy ()->id ()),
                                      request->sequence ()));
                    requests.enqueue_head (request);
                    result = false;
                  }
                else
                  {
                    if (DEBUG_LEVEL > 0)
                      ORBSVCS_DEBUG ((LM_DEBUG,
                                      ACE_TEXT ("(%P|%t) Consumer %d: Discarding %d\n"),
                                      static_cast<int> (this->proxy ()->id ()),
                                      request->sequence ()));
                    request->complete ();
                    request->release ();
                  }
              }
            ace_mon.release ();
            try
              {
                this->proxy_supplier ()->destroy (from_timeout);
              }
            catch (const CORBA::Exception&)
              {
                // TODO: is there something meaningful we can do here?
              }
            ace_mon.acquire ();
            break;
          }
        case DISPATCH_RETRY:
        case DISPATCH_DISCARD:
          {
            TAO_Notify_Method_Request_Event_Queueable * request = 0;
            while (completed.dequeue_head (request) == 0)
              {
                if (request->should_retry ())
                  {
                    if (DEBUG_LEVEL > 0)
                      ORBSVCS_DEBUG ((LM_DEBUG,
                                      ACE_TEXT ("(%P|%t) Consumer %d: Will retry %d\n"),
                                      static_cast<int> (this->proxy ()->id ()),
                                      request->sequence ()));
                    requests.enqueue_head (request);
                    result = false;
                  }
                else
                  {
                    if (DEBUG_LEVEL > 0)
                      ORBSVCS_DEBUG ((LM_DEBUG,
                                      ACE_TEXT ("(%P|%t) Consumer %d: Discarding %d\n"),
                                      static_cast<int> (this->proxy ()->id ()),
                                      request->sequence ()));
                    request->complete ();
                    request->release ();
                  }
              }
            break;
          }
        default:
          {
            result = false;
            break;
          }
        }
    }
  return result;
}
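// The idiom shared by both dispatch_from_queue variants is worth isolating:
// the caller passes in a held guard, and the method drops it around the
// blocking remote call so producers are not stalled, reacquiring it before
// the queues are touched again.  A stripped-down, self-contained sketch
// (Worker and blocking_call are hypothetical names, not TAO API):
#include "ace/Guard_T.h"
#include "ace/Thread_Mutex.h"

class Worker
{
public:
  bool drain (ACE_Guard<ACE_Thread_Mutex>& mon)
  {
    // Precondition: mon is locked; guarded state may be read here.
    mon.release ();                       // never hold the lock across I/O
    bool const ok = this->blocking_call ();
    mon.acquire ();                       // reacquire before guarded state
    return ok;                            // postcondition: mon locked again
  }

private:
  bool blocking_call () { return true; }  // stand-in for a remote dispatch
};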
static void
test_timed_wait (int nesting_level,
                 ACE_TEST_MUTEX *rm)
{
  // Make sure that we're inside of a recursive level.
  if (nesting_level == 0)
    test_timed_wait (nesting_level + 1, rm);
  else
    {
      ACE_OS::srand ((u_int) ACE_OS::time (0));

      for (size_t i = 0; i < ACE_MAX_ITERATIONS / 2; i++)
        {
          int result = 0;

          // First attempt to acquire the mutex with a timeout to verify
          // that mutex timeouts are working.
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) = trying timed acquire on ")
                      ACE_TEXT ("iteration %d\n"),
                      i));

          ACE_Time_Value delta (1, 0);  // One second timeout
          ACE_Time_Value timeout = ACE_OS::gettimeofday ();
          timeout += delta;  // Must pass absolute time to acquire().

          if (rm->acquire (timeout) != 0)
            {
              if (errno == ETIME)
                ACE_DEBUG ((LM_DEBUG,
                            ACE_TEXT ("(%P|%t) = mutex acquisition ")
                            ACE_TEXT ("timed out\n")));
              else if (errno == ENOTSUP)
                {
#if !defined (ACE_HAS_MUTEX_TIMEOUTS)
                  if (!reported_notsup)
                    {
                      ACE_DEBUG ((LM_INFO,
                                  ACE_TEXT ("(%P|%t) %p, but ACE_HAS_MUTEX_TIMEOUTS is not defined - Ok\n"),
                                  ACE_TEXT ("mutex timed acquire")));
                      reported_notsup = 1;
                    }
#else
                  ACE_DEBUG ((LM_ERROR,
                              ACE_TEXT ("(%P|%t) %p - maybe ACE_HAS_MUTEX_TIMEOUTS should not be defined?\n"),
                              ACE_TEXT ("mutex timed acquire")));
#endif /* ACE_HAS_MUTEX_TIMEOUTS */
                }
              else
                {
                  ACE_ERROR ((LM_ERROR,
                              ACE_TEXT ("(%P|%t) %p\n%a"),
                              ACE_TEXT ("mutex timeout failed\n")));
                  return;
                }
            }
          else
            {
              result = rm->release ();
              ACE_TEST_ASSERT (result == 0);
            }

          // Now try the standard mutex.
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) = trying to acquire on iteration %d\n"),
                      i));
          result = rm->acquire ();
          ACE_TEST_ASSERT (result == 0);
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) = acquired on iteration %d\n"),
                      i));

          // Sleep for a random amount of time between 0 and 2 seconds.
          // Note that it's ok to use rand() here because we are running
          // within the critical section defined by the Thread_Mutex.
          ACE_OS::sleep (ACE_OS::rand () % 2);

          result = rm->release ();
          ACE_TEST_ASSERT (result == 0);
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("(%P|%t) = released on iteration %d\n"),
                      i));

          // FUZZ: disable check_for_ACE_Guard

          // Basic ACE_Guard usage - automatically acquire the mutex on
          // guard construction and automatically release it on
          // destruction.
          {
            // Construct an ACE_Guard to implicitly acquire the mutex.
            ACE_Guard<ACE_TEST_MUTEX> guard (*rm);
            ACE_TEST_ASSERT (guard.locked () != 0);

            // Perform some operation which might exit the current scope
            // prematurely, e.g. by returning or throwing an exception.
            // ...

            // ACE_Guard object is destroyed when exiting scope and guard
            // destructor automatically releases mutex.
          }

          // Use an ACE_Guard to automatically acquire a mutex, but
          // release the mutex early.
          {
            // Construct an ACE_Guard to implicitly acquire the mutex.
            ACE_Guard<ACE_TEST_MUTEX> guard (*rm);
            ACE_TEST_ASSERT (guard.locked () != 0);

            // Perform some operation which might exit the current scope
            // prematurely, e.g. by returning or throwing an exception.
            // ...

            // Release the mutex since we no longer need it.
            guard.release ();
            ACE_TEST_ASSERT (guard.locked () == 0);

            // Do something else which does not require the mutex to be
            // locked.
            // ...

            // ACE_Guard object's destructor will not release the mutex.
          }

          // Use an ACE_Guard to automatically acquire a mutex, but
          // relinquish ownership of the lock so that the mutex is not
          // automatically released on guard destruction.  This is useful
          // when an operation might not release the mutex in some
          // conditions, in which case responsibility for releasing it is
          // passed to someone else.
          {
            // Construct an ACE_Guard to implicitly acquire the mutex.
            ACE_Guard<ACE_TEST_MUTEX> guard (*rm);
            ACE_TEST_ASSERT (guard.locked () != 0);

            // Perform some operation which might exit the current scope
            // prematurely, e.g. by returning or throwing an exception.
            // ...

            // Relinquish ownership of the mutex lock.  Someone else must
            // now release it.
            guard.disown ();
            ACE_TEST_ASSERT (guard.locked () == 0);

            // ACE_Guard object's destructor will not release the mutex.
          }

          // We are now responsible for releasing the mutex.
          result = rm->release ();
          ACE_TEST_ASSERT (result == 0);

          // Construct an ACE_Guard without automatically acquiring the
          // lock.
          {
            // Construct an ACE_Guard object without automatically
            // acquiring the mutex or taking ownership of an existing
            // lock.  The third parameter tells the guard that the mutex
            // has not been locked.
            ACE_Guard<ACE_TEST_MUTEX> guard (*rm, 0, 0);
            ACE_TEST_ASSERT (guard.locked () == 0);

            // Conditionally acquire the mutex.
            if (i % 2 == 0)
              {
                guard.acquire ();
                ACE_TEST_ASSERT (guard.locked () != 0);
              }

            // Perform some operation that might exit the current scope
            // prematurely, e.g. by returning or throwing an exception.
            // ...

            // ACE_Guard object is destroyed when exiting scope and guard
            // destructor automatically releases if it was acquired above.
          }

          // Use an ACE_Guard to take ownership of a previously acquired
          // mutex.
          timeout = ACE_OS::gettimeofday ();
          timeout += delta;  // Must pass absolute time to acquire().
          if (rm->acquire (timeout) == 0)
            {
              // Construct an ACE_Guard object without automatically
              // acquiring the mutex, but instead take ownership of the
              // existing lock.  The third parameter tells the guard that
              // the mutex has already been locked.
              ACE_Guard<ACE_TEST_MUTEX> guard (*rm, 0, 1);
              ACE_TEST_ASSERT (guard.locked () != 0);

              // Perform some operation which might exit the current scope
              // prematurely, e.g. by returning or throwing an exception.
              // ...

              // ACE_Guard object is destroyed when exiting scope and guard
              // destructor automatically releases mutex.
            }
          // FUZZ: enable check_for_ACE_Guard
        }
      return;
    }
}
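// One constructor form the walkthrough above does not exercise is the
// non-blocking guard.  A small sketch in the same spirit (try_guarded_work
// is a hypothetical example): passing 0 for the "block" parameter makes
// the constructor use tryacquire() instead of acquire(), so locked() must
// be checked before touching protected state.
#include "ace/Guard_T.h"
#include "ace/Thread_Mutex.h"

static bool
try_guarded_work (ACE_Thread_Mutex &m)
{
  ACE_Guard<ACE_Thread_Mutex> guard (m, 0);
  if (guard.locked () == 0)
    return false;  // Mutex busy; caller can back off and retry later.

  // Protected work goes here.
  // ...

  return true;  // Guard destructor releases only what it acquired.
}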
// virtual: this is the default, overridden for SequencePushConsumer
// FUZZ: disable check_for_ACE_Guard
bool
TAO_Notify_Consumer::dispatch_from_queue (
  Request_Queue & requests,
  ACE_Guard<TAO_SYNCH_MUTEX> & ace_mon)
{
  // FUZZ: enable check_for_ACE_Guard
  bool result = true;
  TAO_Notify_Method_Request_Event_Queueable * request = 0;
  if (requests.dequeue_head (request) == 0)
    {
      // Drop the caller-held lock across the blocking dispatch; every
      // branch below must reacquire it before returning.
      ace_mon.release ();
      DispatchStatus status = this->dispatch_request (request);
      switch (status)
        {
        case DISPATCH_SUCCESS:
          {
            request->complete ();
            request->release ();
            result = true;
            ace_mon.acquire ();
            break;
          }
        case DISPATCH_RETRY:
          {
            if (DEBUG_LEVEL > 0)
              ORBSVCS_DEBUG ((LM_DEBUG,
                              ACE_TEXT ("(%P|%t) Consumer %d: Will retry %d\n"),
                              static_cast<int> (this->proxy ()->id ()),
                              request->sequence ()));
            ace_mon.acquire ();
            // Put the failed event back where it was.
            requests.enqueue_head (request);
            result = false;
            break;
          }
        case DISPATCH_DISCARD:
          {
            if (DEBUG_LEVEL > 0)
              ORBSVCS_DEBUG ((LM_DEBUG,
                              ACE_TEXT ("(%P|%t) Consumer %d: Error during ")
                              ACE_TEXT ("dispatch. Discarding event:%d.\n"),
                              static_cast<int> (this->proxy ()->id ()),
                              request->sequence ()));
            request->complete ();
            ace_mon.acquire ();
            result = true;
            break;
          }
        case DISPATCH_FAIL:
          {
            if (DEBUG_LEVEL > 0)
              ORBSVCS_DEBUG ((LM_DEBUG,
                              ACE_TEXT ("(%P|%t) Consumer %d: Failed. ")
                              ACE_TEXT ("Discarding event %d.\n"),
                              static_cast<int> (this->proxy ()->id ()),
                              request->sequence ()));
            request->complete ();
            ace_mon.acquire ();
            // Drain and complete any remaining queued requests.
            while (requests.dequeue_head (request) == 0)
              {
                ace_mon.release ();
                request->complete ();
                ace_mon.acquire ();
              }
            ace_mon.release ();
            try
              {
                this->proxy_supplier ()->destroy ();
              }
            catch (const CORBA::Exception&)
              {
                // TODO: is there something reasonable to do here?
              }
            ace_mon.acquire ();
            result = true;
            break;
          }
        default:
          {
            ace_mon.acquire ();
            result = false;
            break;
          }
        }
    }
  return result;
}
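// A note on the "FUZZ: disable check_for_ACE_Guard" markers seen above:
// ACE's fuzz checker flags bare ACE_Guard declarations and steers code
// toward the ACE_GUARD family of macros; it is presumably disabled here
// because the guard is owned by the caller and merely passed in.  For
// reference, a sketch of the macro form (guarded_op and lock_ are
// hypothetical names for illustration):
#include "ace/Guard_T.h"
#include "ace/Thread_Mutex.h"

static ACE_Thread_Mutex lock_;

int guarded_op ()
{
  // Declares an ACE_Guard named "mon" on lock_ and returns -1 if the
  // lock cannot be acquired, instead of silently running unlocked.
  ACE_GUARD_RETURN (ACE_Thread_Mutex, mon, lock_, -1);

  // Protected work; mon releases the mutex on every exit path.
  return 0;
}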