// Notifies all observers between 'last' (exclusive) and the tail of the list
// that a thread is entering the scheduler, advancing 'last' to the new tail.
// The list lock is held only while advancing between proxies, never across
// the user callback. Proxy reference counts pin the current node so it
// cannot be deleted while unlocked.
void observer_list::do_notify_entry_observers( observer_proxy*& last, bool worker ) {
    // Pointer p marches though the list from last (exclusively) to the end.
    observer_proxy *p = last, *prev = p;
    for(;;) {
        task_scheduler_observer_v3* tso=NULL;
        // Hold lock on list only long enough to advance to the next proxy in the list.
        {
            scoped_lock lock(mutex(), /*is_writer=*/false);
            do {
                if( p ) {
                    // We were already processing the list.
                    if( observer_proxy* q = p->my_next ) {
                        if( p == prev )
                            remove_ref_fast(prev); // sets prev to NULL if successful
                        p = q;
                    } else {
                        // Reached the end of the list.
                        if( p == prev ) {
                            // Keep the reference as we store the 'last' pointer in scheduler
                        } else {
                            // The last few proxies were empty
                            ++p->my_ref_count;
                            if( prev ) {
                                // Drop the lock before the (possibly destructive) release of prev.
                                lock.release();
                                remove_ref(prev);
                            }
                        }
                        last = p;
                        return;
                    }
                } else {
                    // Starting pass through the list
                    p = my_head;
                    if( !p )
                        return;
                }
                tso = p->my_observer;
            } while( !tso ); // skip proxies whose observer has been cleared
            // Pin the proxy and mark the observer busy before unlocking.
            ++p->my_ref_count;
            ++tso->my_busy_count;
        }
        __TBB_ASSERT( !prev || p!=prev, NULL );
        // Release the proxy pinned before p
        if( prev )
            remove_ref(prev);
        // Do not hold any locks on the list while calling user's code.
        // Do not intercept any exceptions that may escape the callback so that
        // they are either handled by the TBB scheduler or passed to the debugger.
        tso->on_scheduler_entry(worker);
        intptr_t bc = --tso->my_busy_count;
        __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" );
        prev = p;
    }
}
// Releases the references this block holds on its backing block and target.
// The guards mirror the rest of the file's usage, which only passes non-NULL
// pointers to remove_ref — presumably remove_ref does not accept NULL; confirm.
IoBlock::~IoBlock() {
    if (io_block)
        remove_ref(io_block);
    if (io_target)
        remove_ref(io_target);
    //cout << "IoBlock " << this << " destructed, target was " << this->io_target << endl;
    // Poison the pointer so use-after-destruction is easy to spot in a debugger.
    // Named cast instead of a C-style cast; the poison value is unchanged.
    this->io_target = reinterpret_cast<IoObject*>(0xA5A5A5A5);
}
// TODO: merge with do_notify_.. methods bool observer_list::ask_permission_to_leave() { __TBB_ASSERT( this != &the_global_observer_list, "This method cannot be used on the list of global observers" ); if( !my_head ) return true; // Pointer p marches though the list observer_proxy *p = NULL, *prev = NULL; bool result = true; while( result ) { task_scheduler_observer* tso = NULL; // Hold lock on list only long enough to advance to the next proxy in the list. { scoped_lock lock(mutex(), /*is_writer=*/false); do { if( p ) { // We were already processing the list. observer_proxy* q = p->my_next; // read next, remove the previous reference if( p == prev ) remove_ref_fast(prev); // sets prev to NULL if successful if( q ) p = q; else { // Reached the end of the list. if( prev ) { lock.release(); remove_ref(p); } return result; } } else { // Starting pass through the list p = my_head; if( !p ) return result; } tso = p->get_v6_observer(); // all local observers are v6 } while( !tso ); ++p->my_ref_count; ++tso->my_busy_count; } __TBB_ASSERT( !prev || p!=prev, NULL ); // Release the proxy pinned before p if( prev ) remove_ref(prev); // Do not hold any locks on the list while calling user's code. // Do not intercept any exceptions that may escape the callback so that // they are either handled by the TBB scheduler or passed to the debugger. result = tso->on_scheduler_leaving(); intptr_t bc = --tso->my_busy_count; __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" ); prev = p; } if( prev ) remove_ref(prev); return result; }
// Notifies observers from the head of the list up to and including 'last'
// that a thread is exiting the scheduler. 'last' already holds a reference
// taken by the matching entry notification; that reference is consumed here.
// As in do_notify_entry_observers, the list lock is never held across the
// user callback.
void observer_list::do_notify_exit_observers( observer_proxy* last, bool worker ) {
    // Pointer p marches though the list from the beginning to last (inclusively).
    observer_proxy *p = NULL, *prev = NULL;
    for(;;) {
        task_scheduler_observer_v3* tso=NULL;
        // Hold lock on list only long enough to advance to the next proxy in the list.
        {
            scoped_lock lock(mutex(), /*is_writer=*/false);
            do {
                if( p ) {
                    // We were already processing the list.
                    if( p != last ) {
                        __TBB_ASSERT( p->my_next, "List items before 'prev' must have valid my_next pointer" );
                        if( p == prev )
                            remove_ref_fast(prev); // sets prev to NULL if successful
                        p = p->my_next;
                    } else {
                        // remove the reference from the last item
                        remove_ref_fast(p); // sets p to NULL if successful
                        if( p ) {
                            // Fast removal failed: drop the lock, then release p.
                            lock.release();
                            remove_ref(p);
                        }
                        return;
                    }
                } else {
                    // Starting pass through the list
                    p = my_head;
                    __TBB_ASSERT( p, "Nonzero 'last' must guarantee that the global list is non-empty" );
                }
                tso = p->my_observer;
            } while( !tso ); // skip proxies whose observer has been cleared
            // The item is already refcounted
            if ( p != last ) // the last is already referenced since entry notification
                ++p->my_ref_count;
            ++tso->my_busy_count;
        }
        __TBB_ASSERT( !prev || p!=prev, NULL );
        // Release the proxy pinned before p
        if( prev )
            remove_ref(prev);
        // Do not hold any locks on the list while calling user's code.
        // Do not intercept any exceptions that may escape the callback so that
        // they are either handled by the TBB scheduler or passed to the debugger.
        tso->on_scheduler_exit(worker);
        intptr_t bc = --tso->my_busy_count;
        __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" );
        prev = p;
    }
}
void irept::detatch() { #ifdef IREP_DEBUG std::cout << "DETATCH1: " << data << std::endl; #endif if(data==NULL) { data=new dt; #ifdef IREP_DEBUG std::cout << "ALLOCATED " << data << std::endl; #endif } else if(data->ref_count>1) { dt *old_data(data); data=new dt(*old_data); #ifdef IREP_DEBUG std::cout << "ALLOCATED " << data << std::endl; #endif data->ref_count=1; remove_ref(old_data); } assert(data->ref_count==1); #ifdef IREP_DEBUG std::cout << "DETATCH2: " << data << std::endl; #endif }
// Resets this irept to the empty state.
// With SHARING, the data node is reference counted: drop our reference
// (remove_ref presumably frees the node when the count hits zero — confirm
// against its definition) and forget the pointer; a new node is allocated
// lazily by detatch(). Without SHARING, 'data' is held by value and is
// cleared in place.
void irept::clear()
{
#ifdef SHARING
  remove_ref(data);
  data=NULL;
#else
  data.clear();
#endif
}