/** Constructs an arena bound to market `m` with room for up to `max_num_workers`
    workers plus one master. Assumes the enclosing storage was produced by the
    arena allocation routine (slots/mailboxes laid out around `this`) — the
    leading asserts check that alignment contract. **/
arena::arena ( market& m, unsigned max_num_workers ) {
    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
    __TBB_ASSERT( sizeof(slot[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" );
    __TBB_ASSERT( (uintptr_t)this % NFS_GetLineSize()==0, "arena misaligned" );
    my_market = &m;
    my_limit = 1;
    // Two slots are mandatory: for the master, and for 1 worker (required to support starvation resistant tasks).
    my_num_slots = max(2u, max_num_workers + 1);
    my_max_num_workers = max_num_workers;
    my_num_threads_active = 1; // accounts for the master
    __TBB_ASSERT ( my_max_num_workers < my_num_slots, NULL );
    // Construct mailboxes. Mark internal synchronization elements for the tools.
    for( unsigned i = 0; i < my_num_slots; ++i ) {
        // Slots must arrive zeroed by the allocator — construction relies on it.
        __TBB_ASSERT( !slot[i].my_scheduler && !slot[i].task_pool, NULL );
        ITT_SYNC_CREATE(slot + i, SyncType_Scheduler, SyncObj_WorkerTaskPool);
        // NOTE(review): mailboxes appear to use 1-based indexing (i+1) while
        // slots are 0-based — presumably mailbox(j) maps slot j-1; confirm
        // against the mailbox() accessor, which is not visible here.
        mailbox(i+1).construct();
        ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox);
#if __TBB_STATISTICS
        // Per-slot statistics counters live in cache-aligned NFS storage.
        slot[i].my_counters = new ( NFS_Allocate(sizeof(statistics_counters), 1, NULL) ) statistics_counters;
#endif /* __TBB_STATISTICS */
    }
    my_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream);
    my_mandatory_concurrency = false;
#if __TBB_TASK_GROUP_CONTEXT
    my_master_default_ctx = NULL;
#endif
}
/** Thread-unsafe lazy one-time initialization of tools interop. Used by both dummy handlers and general TBB one-time initialization routine. **/ void ITT_DoUnsafeOneTimeInitialization () { if ( !ITT_InitializationDone ) { ITT_Present = (__TBB_load_ittnotify()!=0); ITT_InitializationDone = true; #if __TBB_ARENA_PER_MASTER ITT_SYNC_CREATE(&market::theMarketMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization); #else /* !__TBB_ARENA_PER_MASTER */ ITT_SYNC_CREATE(&governor::theArenaMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization); #endif /* !__TBB_ARENA_PER_MASTER */ } }
struct os_mutex *__cilkrts_os_mutex_create(void) { int status; struct os_mutex *mutex = (struct os_mutex *)malloc(sizeof(struct os_mutex)); pthread_mutexattr_t attr; ITT_SYNC_CREATE(mutex, "OS Mutex"); if (!mutex) { if (static_mutex_used) { __cilkrts_bug("Cilk RTS library initialization failed"); } else { static_mutex_used = 1; mutex = &static_mutex; } } status = pthread_mutexattr_init(&attr); CILK_ASSERT (status == 0); #if defined DEBUG || CILK_LIB_DEBUG #ifdef PTHREAD_MUTEX_ERRORCHECK status = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); #else status = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP); #endif CILK_ASSERT (status == 0); #endif status = pthread_mutex_init (&mutex->mutex, &attr); CILK_ASSERT (status == 0); pthread_mutexattr_destroy(&attr); return mutex; }
/** Thread-unsafe lazy one-time initialization of tools interop.
    Used by both dummy handlers and general TBB one-time initialization routine. **/
void ITT_DoUnsafeOneTimeInitialization () {
    // Already done: skip (any required locking is the caller's responsibility).
    if ( ITT_InitializationDone )
        return;
    ITT_Present = __TBB_load_ittnotify() != 0;
    ITT_InitializationDone = true;
    // Mark the global market mutex for the tools.
    ITT_SYNC_CREATE(&market::theMarketMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization);
}
//! Initialize the lock to its empty state.
/** No queued readers or writers, zeroed reader-count/flags word, and no
    current writer recorded. **/
void reader_writer_lock::internal_construct() {
    my_current_writer = tbb_thread::id();
    rdr_count_and_flags = 0;
    writer_tail = NULL;
    writer_head = NULL;
    reader_head = NULL;
#if TBB_USE_THREADING_TOOLS
    // Register this lock with the threading tools under its class name.
    ITT_SYNC_CREATE(this, _T("tbb::reader_writer_lock"), _T(""));
#endif /* TBB_USE_THREADING_TOOLS */
}
void mutex::internal_construct() { #if _WIN32||_WIN64 InitializeCriticalSectionEx(&impl, 4000, 0); state = INITIALIZED; #else int error_code = pthread_mutex_init(&impl,NULL); if( error_code ) tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); #endif /* _WIN32||_WIN64*/ ITT_SYNC_CREATE(&impl, _T("tbb::mutex"), _T("")); }
/** Allocates and lays out a new arena for the given number of slots/workers.
    Memory layout (single NFS allocation): [ArenaPrefix][mailboxes x slots][arena + slots].
    The returned arena* points past the prefix and mailboxes. **/
arena* arena::allocate_arena( unsigned number_of_slots, unsigned number_of_workers, stack_size_type stack_size ) {
    __TBB_ASSERT( sizeof(ArenaPrefix) % NFS_GetLineSize()==0, "ArenaPrefix not multiple of cache line size" );
    __TBB_ASSERT( sizeof(mail_outbox)==NFS_MaxLineSize, NULL );
    __TBB_ASSERT( stack_size>0, NULL );
    // One contiguous cache-aligned allocation holds prefix, mailboxes, and slots.
    size_t n = sizeof(ArenaPrefix) + number_of_slots*(sizeof(mail_outbox)+sizeof(arena_slot));
    unsigned char* storage = (unsigned char*)NFS_Allocate( n, 1, NULL );
    // Zero all slots to indicate that they are empty
    memset( storage, 0, n );
    // The arena object itself starts after the prefix and the mailbox array.
    arena* a = (arena*)(storage + sizeof(ArenaPrefix)+ number_of_slots*(sizeof(mail_outbox)));
    __TBB_ASSERT( sizeof(a->slot[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" );
    __TBB_ASSERT( (uintptr_t)a % NFS_GetLineSize()==0, NULL );
    new( &a->prefix() ) ArenaPrefix( number_of_slots, number_of_workers );
    // Allocate the worker_list
    WorkerDescriptor * w = new WorkerDescriptor[number_of_workers];
    memset( w, 0, sizeof(WorkerDescriptor)*(number_of_workers));
    a->prefix().worker_list = w;
    // Construct mailboxes.
    // NOTE(review): mailboxes appear to be 1-indexed (j runs 1..number_of_slots)
    // while slots are 0-indexed; confirm against the mailbox() accessor.
    for( unsigned j=1; j<=number_of_slots; ++j )
        a->mailbox(j).construct();
    a->prefix().stack_size = stack_size;
    size_t k;
    // Mark each internal sync element for the tools
    // Worker slots first, then the remaining (master) slots.
    for( k=0; k<number_of_workers; ++k ) {
        ITT_SYNC_CREATE(a->slot + k, SyncType_Scheduler, SyncObj_WorkerTaskPool);
        ITT_SYNC_CREATE(&w[k].scheduler, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt);
        ITT_SYNC_CREATE(&a->mailbox(k+1), SyncType_Scheduler, SyncObj_Mailbox);
    }
    for( ; k<number_of_slots; ++k ) {
        ITT_SYNC_CREATE(a->slot + k, SyncType_Scheduler, SyncObj_MasterTaskPool);
        ITT_SYNC_CREATE(&a->mailbox(k+1), SyncType_Scheduler, SyncObj_Mailbox);
    }
    return a;
}
void recursive_mutex::internal_construct() { #if _WIN32||_WIN64 InitializeCriticalSectionEx(&impl, 4000, 0); state = INITIALIZED; #else pthread_mutexattr_t mtx_attr; int error_code = pthread_mutexattr_init( &mtx_attr ); if( error_code ) tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); error_code = pthread_mutex_init( &impl, &mtx_attr ); if( error_code ) tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); pthread_mutexattr_destroy( &mtx_attr ); #endif /* _WIN32||_WIN64*/ ITT_SYNC_CREATE(&impl, _T("tbb::recursive_mutex"), _T("")); }
//! Register this spin reader-writer mutex with the threading tools.
// No state initialization is needed here — only the tools annotation.
void spin_rw_mutex_v3::internal_construct() { ITT_SYNC_CREATE(this, _T("tbb::spin_rw_mutex"), _T("")); }
//! Register the critical section's implementation object with the threading tools.
// Annotated under the PPL-compatible name "ppl::critical_section".
void critical_section_v4::internal_construct() { ITT_SYNC_CREATE(&my_impl, _T("ppl::critical_section"), _T("")); }
//! Register this RTM (transactional memory) reader-writer mutex with the threading tools.
// No state initialization is needed here — only the tools annotation.
void x86_rtm_rw_mutex::internal_construct() { ITT_SYNC_CREATE(this, _T("tbb::x86_rtm_rw_mutex"), _T("")); }
/** Dummy handler for sync-object creation: lazily performs the one-time tools
    initialization, then forwards the call through the real handler installed
    by that initialization.
    Fix: parameter names were declared (objname, objtype) but forwarded as
    (objtype, objname), swapping the type and name reported to the tools.
    Renamed to match the __itt_sync_create convention (addr, type, name);
    both are const tchar*, so the signature stays call-compatible. **/
void dummy_sync_create( void* obj, const tchar* objtype, const tchar* objname, int /*attribute*/ ) {
    ITT_DoOneTimeInitialization();
    // Initialization must have replaced this dummy with the real handler,
    // otherwise the forwarding below would recurse.
    __TBB_ASSERT( ITT_Handler_sync_create!=&dummy_sync_create, NULL );
    ITT_SYNC_CREATE( obj, objtype, objname );
}