void mutex::scoped_lock::internal_release() { __TBB_ASSERT( my_mutex, "mutex::scoped_lock: not holding a mutex" ); #if _WIN32||_WIN64 switch( my_mutex->state ) { case INITIALIZED: __TBB_ASSERT(false,"mutex::scoped_lock: try to release the lock without acquisition"); break; case HELD: my_mutex->state = INITIALIZED; LeaveCriticalSection(&my_mutex->impl); break; case DESTROYED: __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); break; default: __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); break; } #elif USE_LITHE int error_code = lithe_mutex_unlock(&my_mutex->impl); __TBB_ASSERT_EX(!error_code, "mutex::scoped_lock: lithe_mutex_unlock failed"); #else int error_code = pthread_mutex_unlock(&my_mutex->impl); __TBB_ASSERT_EX(!error_code, "mutex::scoped_lock: pthread_mutex_unlock failed"); #endif /* _WIN32||_WIN64 */ my_mutex = NULL; }
//! Acquire the given mutex and remember it for later release.
/** The state field is sampled exactly once (via switch) because another
    thread may legitimately change it between INITIALIZED and HELD. */
void mutex::scoped_lock::internal_acquire( mutex& m ) {
#if _WIN32||_WIN64
    switch( m.state ) {
    case DESTROYED:
        __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed");
        break;
    case INITIALIZED:
    case HELD:
        EnterCriticalSection( &m.impl );
        // A thread arriving while another owns the lock blocks inside
        // EnterCriticalSection; by the time it returns, the previous owner
        // has restored m.state to INITIALIZED. If the same thread tries to
        // re-acquire a lock it already holds, the state is still HELD, so
        // the assertion below fires and diagnoses the self-deadlock.
        __TBB_ASSERT(m.state!=HELD, "mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex");
        m.state = HELD;
        break;
    default:
        __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state");
        break;
    }
#elif USE_LITHE
    int rc = lithe_mutex_lock(&m.impl);
    __TBB_ASSERT_EX( !rc, "mutex::scoped_lock: lithe_mutex_lock failed" );
#else
    int rc = pthread_mutex_lock(&m.impl);
    __TBB_ASSERT_EX( !rc, "mutex::scoped_lock: pthread_mutex_lock failed" );
#endif /* _WIN32||_WIN64 */
    my_mutex = &m;
}
//! Fire on_scheduler_entry callbacks for observers not yet seen by this thread.
/** Walks the proxy list from 'last' (exclusive) to the current end, invoking
    each proxy's observer, and updates 'last' to the new end so subsequent
    calls resume where this one stopped. 'worker' is forwarded to the
    callbacks. Proxies are pinned via my_ref_count so they survive while the
    list lock is dropped around user code. */
void observer_list::do_notify_entry_observers( observer_proxy*& last, bool worker ) {
    // Pointer p marches though the list from last (exclusively) to the end.
    // prev trails p by one pinned proxy; its reference is dropped once p moves on.
    observer_proxy *p = last, *prev = p;
    for(;;) {
        task_scheduler_observer_v3* tso=NULL;
        // Hold lock on list only long enough to advance to the next proxy in the list.
        {
            scoped_lock lock(mutex(), /*is_writer=*/false);
            do {
                if( p ) {
                    // We were already processing the list.
                    if( observer_proxy* q = p->my_next ) {
                        if( p == prev )
                            remove_ref_fast(prev); // sets prev to NULL if successful
                        p = q;
                    }
                    else {
                        // Reached the end of the list.
                        if( p == prev ) {
                            // Keep the reference as we store the 'last' pointer in scheduler
                        }
                        else {
                            // The last few proxies were empty
                            ++p->my_ref_count;
                            if( prev ) {
                                // remove_ref may delete the proxy and take the list lock
                                // as a writer, so the read lock must be released first.
                                lock.release();
                                remove_ref(prev);
                            }
                        }
                        last = p;
                        return;
                    }
                }
                else {
                    // Starting pass through the list
                    p = my_head;
                    if( !p )
                        return; // list is empty; nothing to notify
                }
                tso = p->my_observer; // NULL for an emptied proxy; keep skipping
            } while( !tso );
            // Pin the proxy and mark its observer busy before dropping the lock.
            ++p->my_ref_count;
            ++tso->my_busy_count;
        }
        __TBB_ASSERT( !prev || p!=prev, NULL );
        // Release the proxy pinned before p
        if( prev )
            remove_ref(prev);
        // Do not hold any locks on the list while calling user's code.
        // Do not intercept any exceptions that may escape the callback so that
        // they are either handled by the TBB scheduler or passed to the debugger.
        tso->on_scheduler_entry(worker);
        intptr_t bc = --tso->my_busy_count;
        __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" );
        prev = p;
    }
}
// TODO: merge with do_notify_.. methods bool observer_list::ask_permission_to_leave() { __TBB_ASSERT( this != &the_global_observer_list, "This method cannot be used on the list of global observers" ); if( !my_head ) return true; // Pointer p marches though the list observer_proxy *p = NULL, *prev = NULL; bool result = true; while( result ) { task_scheduler_observer* tso = NULL; // Hold lock on list only long enough to advance to the next proxy in the list. { scoped_lock lock(mutex(), /*is_writer=*/false); do { if( p ) { // We were already processing the list. observer_proxy* q = p->my_next; // read next, remove the previous reference if( p == prev ) remove_ref_fast(prev); // sets prev to NULL if successful if( q ) p = q; else { // Reached the end of the list. if( prev ) { lock.release(); remove_ref(p); } return result; } } else { // Starting pass through the list p = my_head; if( !p ) return result; } tso = p->get_v6_observer(); // all local observers are v6 } while( !tso ); ++p->my_ref_count; ++tso->my_busy_count; } __TBB_ASSERT( !prev || p!=prev, NULL ); // Release the proxy pinned before p if( prev ) remove_ref(prev); // Do not hold any locks on the list while calling user's code. // Do not intercept any exceptions that may escape the callback so that // they are either handled by the TBB scheduler or passed to the debugger. result = tso->on_scheduler_leaving(); intptr_t bc = --tso->my_busy_count; __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" ); prev = p; } if( prev ) remove_ref(prev); return result; }
//! Fire on_scheduler_exit callbacks for all observers up to 'last'.
/** Walks the proxy list from the head to 'last' (inclusive) — the proxy at
    which entry notifications stopped — invoking on_scheduler_exit(worker) on
    each observer, and releases the reference that the entry pass left on
    'last'. */
void observer_list::do_notify_exit_observers( observer_proxy* last, bool worker ) {
    // Pointer p marches though the list from the beginning to last (inclusively).
    // prev trails p by one pinned proxy; its reference is dropped once p moves on.
    observer_proxy *p = NULL, *prev = NULL;
    for(;;) {
        task_scheduler_observer_v3* tso=NULL;
        // Hold lock on list only long enough to advance to the next proxy in the list.
        {
            scoped_lock lock(mutex(), /*is_writer=*/false);
            do {
                if( p ) {
                    // We were already processing the list.
                    if( p != last ) {
                        __TBB_ASSERT( p->my_next, "List items before 'prev' must have valid my_next pointer" );
                        if( p == prev )
                            remove_ref_fast(prev); // sets prev to NULL if successful
                        p = p->my_next;
                    } else {
                        // remove the reference from the last item
                        remove_ref_fast(p); // sets p to NULL if successful
                        if( p ) {
                            // Fast removal failed; release the read lock before
                            // the slow path, which may delete the proxy.
                            lock.release();
                            remove_ref(p);
                        }
                        return;
                    }
                } else {
                    // Starting pass through the list
                    p = my_head;
                    __TBB_ASSERT( p, "Nonzero 'last' must guarantee that the global list is non-empty" );
                }
                tso = p->my_observer; // NULL for an emptied proxy; keep skipping
            } while( !tso );
            // The item is already refcounted
            if ( p != last ) // the last is already referenced since entry notification
                ++p->my_ref_count;
            ++tso->my_busy_count;
        }
        __TBB_ASSERT( !prev || p!=prev, NULL );
        // Release the proxy pinned before p
        if( prev )
            remove_ref(prev);
        // Do not hold any locks on the list while calling user's code.
        // Do not intercept any exceptions that may escape the callback so that
        // they are either handled by the TBB scheduler or passed to the debugger.
        tso->on_scheduler_exit(worker);
        intptr_t bc = --tso->my_busy_count;
        __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" );
        prev = p;
    }
}
void recursive_mutex::internal_destroy() { #if _WIN32||_WIN64 switch( state ) { case INITIALIZED: DeleteCriticalSection(&impl); break; case DESTROYED: __TBB_ASSERT(false,"recursive_mutex: already destroyed"); break; default: __TBB_ASSERT(false,"recursive_mutex: illegal state for destruction"); break; } state = DESTROYED; #else int error_code = pthread_mutex_destroy(&impl); __TBB_ASSERT_EX(!error_code,"recursive_mutex: pthread_mutex_destroy failed"); #endif /* _WIN32||_WIN64 */ }
void recursive_mutex::scoped_lock::internal_release() { __TBB_ASSERT( my_mutex, "recursive_mutex::scoped_lock: not holding a mutex" ); #if _WIN32||_WIN64 switch( my_mutex->state ) { case INITIALIZED: LeaveCriticalSection( &my_mutex->impl ); break; case DESTROYED: __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); break; default: __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); break; } #else int error_code = pthread_mutex_unlock(&my_mutex->impl); __TBB_ASSERT_EX(!error_code, "recursive_mutex::scoped_lock: pthread_mutex_unlock failed"); #endif /* _WIN32||_WIN64 */ my_mutex = NULL; }
//! Acquire the given recursive mutex and remember it for later release.
void recursive_mutex::scoped_lock::internal_acquire( recursive_mutex& m ) {
#if _WIN32||_WIN64
    switch( m.state ) {
    case DESTROYED:
        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed");
        break;
    case INITIALIZED:
        // The CRITICAL_SECTION's internals (its recursion count) are opaque,
        // so there is no way to know when the last nested release happens and
        // the state could safely go back to INITIALIZED. Hence, unlike
        // mutex.cpp, the state is never switched to HELD for recursive_mutex.
        EnterCriticalSection( &m.impl );
        break;
    default:
        __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state");
        break;
    }
#else
    int rc = pthread_mutex_lock(&m.impl);
    __TBB_ASSERT_EX( !rc, "recursive_mutex::scoped_lock: pthread_mutex_lock failed" );
#endif /* _WIN32||_WIN64 */
    my_mutex = &m;
}
//! Resolve the entry points in 'descriptors' against 'library'.
/** First tries a handle to an already-loaded copy of the library; if that
    copy provides the symbols, it is re-opened ("pinned") so it cannot be
    unloaded underneath us. Otherwise (when dynamic loading is enabled) the
    library is loaded by absolute path. 'n' descriptors are probed and at
    least 'required' must resolve. On success the pinned handle is stored in
    '*handle' when provided; returns true iff linking succeeded.
    NOTE(review): the overload dynamic_link(handle, descriptors, n, required)
    is presumed to fill the descriptors' handlers and to clear them on
    failure — defined elsewhere in this file. */
bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], size_t n, size_t required, dynamic_link_handle *handle ) {
    // Get library handle in case it is already loaded into the current process
#if ! __TBB_DYNAMIC_LOAD_ENABLED
    dynamic_link_handle library_handle = NULL;
    // Fallback when built without the TBB assert machinery.
#ifndef __TBB_ASSERT_EX
#define __TBB_ASSERT_EX LIBRARY_ASSERT
#endif
    __TBB_ASSERT_EX( library, "library name must be provided");
#elif _WIN32||_WIN64
    dynamic_link_handle library_handle = GetModuleHandle( library );
#else
    // Handle to the main program; symbols of already-loaded libraries are
    // visible through it.
    dynamic_link_handle library_handle = dlopen( NULL, RTLD_LAZY );
#endif /* _WIN32||_WIN64 */
    // Get descriptors from the library
    if ( library_handle && dynamic_link( library_handle, descriptors, n, required ) ) {
#if !__TBB_DYNAMIC_LOAD_ENABLED
        return true;
#else
        // The library has been loaded by another module and contains requested symbols.
        // But after we obtained the library's handle it can be unloaded by another thread
        // invalidating our handle copy. Therefore we need to pin the library in memory.
#if _WIN32||_WIN64
        char library_full_name[ MAX_PATH+1 ];
        // Get library's name from earlier found handle
        if ( GetModuleFileName( library_handle, library_full_name, MAX_PATH+1 ) ) {
            // Pin the library
            library_handle = LoadLibrary( library_full_name );
            if ( library_handle == NULL ) {
                int err = GetLastError();
                DYNAMIC_LINK_WARNING( dl_lib_not_found, library_full_name, err );
            } // if
        } // if
#else /* !WIN */
        Dl_info info;
        // Get library's name from earlier found symbol
        if ( dladdr( (void*)*descriptors[0].handler, &info ) ) {
            // Pin the library
            library_handle = dlopen( info.dli_fname, RTLD_LAZY );
            if ( library_handle == NULL ) {
                char const * err = dlerror();
                DYNAMIC_LINK_WARNING( dl_lib_not_found, info.dli_fname, err );
            } // if
        } // if
#endif /* !WIN */
        // NOTE: this 'else' pairs with whichever platform 'if' was compiled
        // in above (name lookup failed on the earlier handle/symbol).
        else {
            // The library have been unloaded by another thread
            library_handle = 0;
        }
        if ( library_handle ) {
            // If original library was unloaded before we pinned it
            // and then another module loaded in its place, the earlier
            // found symbols would become invalid. So revalidate them.
            if ( !dynamic_link( library_handle, descriptors, n, required ) ) {
                // Wrong library.
                dynamic_unlink(library_handle);
                library_handle = 0;
            }
        }
        if ( !library_handle ) {
            // Failed to pin the library, so clear the descriptors too.
            for( size_t i=0; i<n; ++i )
                *descriptors[i].handler = 0;
        }
#endif /* __TBB_DYNAMIC_LOAD_ENABLED */
    } else {
        library_handle = 0;
    }
#if __TBB_DYNAMIC_LOAD_ENABLED
    // Not found among already-loaded modules: load the library explicitly.
    if ( !library_handle ) {
#if _WIN32||_WIN64
#if _XBOX
        library_handle = LoadLibrary (library);
#else
        library_handle = NULL;
        // Construct absolute path to the library to avoid security issue.
        size_t const len = MAX_PATH + 1;
        char path[ len ];
        size_t rc = abs_path( library, path, len );
        if ( 0 < rc && rc < len ) {
            // Prevent Windows from displaying silly message boxes if it fails to load library
            // (e.g. because of MS runtime problems - one of those crazy manifest related ones)
            UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS);
            library_handle = LoadLibrary (path);
            SetErrorMode (prev_mode);
            if ( library_handle == NULL ) {
                int err = GetLastError();
                DYNAMIC_LINK_WARNING( dl_lib_not_found, path, err );
            } // if
        } // if
#endif /* !_XBOX */
#else /* !WIN */
        library_handle = NULL;
        // Construct absolute path to the library.
        size_t const len = PATH_MAX + 1;
        char path[ len ];
        size_t rc = abs_path( library, path, len );
        if ( 0 < rc && rc < len ) {
            library_handle = dlopen( path, RTLD_LAZY );
            if ( library_handle == NULL ) {
                char const * err = dlerror();
                DYNAMIC_LINK_WARNING( dl_lib_not_found, library, err );
            } // if
        } // if
#endif /* !WIN */
        if( library_handle ) {
            if( !dynamic_link( library_handle, descriptors, n, required ) ) {
                // The loaded library does not contain all the expected entry points
                dynamic_unlink( library_handle );
                library_handle = NULL;
            }
        }
    }
#endif /* __TBB_DYNAMIC_LOAD_ENABLED */
    if ( library_handle ) {
        // Hand the pinned handle to the caller, or (inside the TBB build)
        // register it for unloading at shutdown.
        if ( handle )
            *handle = library_handle;
#if __TBB_BUILD
        else
            handles.add_handle( library_handle );
#endif /* __TBB_BUILD */
        return true;
    }
    return false;
}