void arena::close_arena () { #if !__TBB_STATISTICS_EARLY_DUMP GATHER_STATISTIC( dump_arena_statistics() ); #endif my_market->detach_arena( *this ); free_arena(); }
// Detaches this arena from its market, then reclaims the arena's storage
// via free_arena().
void arena::close_arena () {
#if !__TBB_STATISTICS_EARLY_DUMP
    GATHER_STATISTIC( dump_arena_statistics() );
#endif
    my_market->detach_arena( *this );
    // The arena object must still be intact after detaching; free_arena()
    // below is what poisons the guard and releases the memory.
    __TBB_ASSERT( is_alive(my_guard), NULL );
    free_arena();
}
// RML worker-thread entry point: repeatedly picks an arena that needs
// workers and processes tasks there until arena_in_need() finds none.
void market::process( job& j ) {
    // The RML job handed to us is the generic_scheduler bound to this thread.
    generic_scheduler& s = static_cast<generic_scheduler&>(j);
    // Sanity: the thread-local scheduler must be the one RML passed in.
    __TBB_ASSERT( governor::is_set(&s), NULL );
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
    // The previously serviced arena is fed back into arena_in_need() —
    // presumably so per-level presence/saturation bookkeeping can be
    // maintained across iterations; TODO confirm against arena_in_need().
    arena *a = NULL;
    while ( (a = arena_in_need(a)) )
#else
    while ( arena *a = arena_in_need() )
#endif
        a->process(s);
    GATHER_STATISTIC( ++s.my_counters.market_roundtrips );
}
// Adjusts the number of workers requested on behalf of arena `ab` and
// propagates the change to the market's total demand, the allotment, and
// the RML server's job-count estimate.
//
// Fix: the previous revision cached the first arena it ever saw in a
// file-scope pointer (`aa`) and redirected every later call to that stale
// arena, silently ignoring the caller's argument; it also touched market
// state before asserting that the market still exists. Both are corrected:
// the passed arena is always the one adjusted, and the liveness assertion
// runs first.
void market::adjust_demand ( arena& ab, int delta ) {
    __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" );
    arena &a = ab;
    // Magic sentinel kept for compatibility with existing callers:
    // a delta of -12345 means "retract this arena's entire request".
    if (delta == -12345)
        delta = 0 - a.my_num_workers_requested;
    a.my_num_workers_requested += delta;
    my_total_demand += delta;
    update_allotment( my_max_num_workers );
    // Must be called outside of any locks
    my_server->adjust_job_count_estimate( delta );
    GATHER_STATISTIC( governor::local_scheduler_if_initialized() ? ++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 );
}
// Moves arena `a` from its current top priority level to `new_priority`,
// relinking it in the per-level arena lists and migrating its worker
// bookkeeping between the two levels.
void market::update_arena_top_priority ( arena& a, intptr_t new_priority ) {
    GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_switches );
    // Callers must not request a no-op move.
    __TBB_ASSERT( a.my_top_priority != new_priority, NULL );
    // Bind both levels before my_top_priority is overwritten below.
    priority_level_info &src = my_priority_levels[a.my_top_priority];
    priority_level_info &dst = my_priority_levels[new_priority];
    // Relink the arena under its new priority level and invalidate any
    // cached view of the old placement.
    remove_arena_from_list(a);
    a.my_top_priority = new_priority;
    insert_arena_into_list(a);
    ++a.my_reload_epoch;
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
    // Arena's my_num_workers_present may remain positive for some time after its
    // my_num_workers_requested becomes zero. Thus the following two lines are
    // executed unconditionally.
    src.workers_present -= a.my_num_workers_present;
    dst.workers_present += a.my_num_workers_present;
#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */
    // Move the arena's outstanding request from the old level to the new one.
    src.workers_requested -= a.my_num_workers_requested;
    dst.workers_requested += a.my_num_workers_requested;
    __TBB_ASSERT( src.workers_requested >= 0 && dst.workers_requested >= 0, NULL );
}
// Final teardown of an arena: verifies the arena is quiescent, drains all
// mailboxes and task streams, releases the market reference, destroys the
// arena object in place, and frees the single allocation that backs it.
// NOTE: the statement order below is load-bearing (poison -> drain ->
// release -> placement-dtor -> free); do not reorder.
void arena::free_arena () {
    __TBB_ASSERT( is_alive(my_guard), NULL );
    __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" );
    __TBB_ASSERT( !my_num_workers_requested && !my_num_workers_allotted, "Dying arena requests workers" );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, "Inconsistent state of a dying arena" );
#if !__TBB_STATISTICS_EARLY_DUMP
    GATHER_STATISTIC( dump_arena_statistics() );
#endif
    // Mark the guard dead first so any late accessor trips the liveness check.
    poison_value( my_guard );
    // Drain every slot's mailbox; `drained` accumulates the count reported
    // by drain() (used below for task-node accounting when enabled).
    intptr_t drained = 0;
    for ( unsigned i = 1; i <= my_num_slots; ++i )
        drained += mailbox(i).drain();
#if __TBB_TASK_PRIORITY && TBB_USE_ASSERT
    // With priorities there is one task stream per priority level.
    for ( intptr_t i = 0; i < num_priority_levels; ++i )
        __TBB_ASSERT(my_task_stream[i].empty() && my_task_stream[i].drain()==0, "Not all enqueued tasks were executed");
#elif !__TBB_TASK_PRIORITY
    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#endif /* !__TBB_TASK_PRIORITY */
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif /* __TBB_COUNT_TASK_NODES */
    // Drop this arena's reference on the market; the market may be
    // destroyed by this call if it was the last reference.
    my_market->release();
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" );
    // Explicit dtor + NFS_Free because the context was placement-allocated.
    my_master_default_ctx->~task_group_context();
    NFS_Free(my_master_default_ctx);
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_STATISTICS
    for( unsigned i = 0; i < my_num_slots; ++i )
        NFS_Free( my_slots[i].my_counters );
#endif /* __TBB_STATISTICS */
    // The allocation's base address is recovered from the mailbox layout:
    // the block starts at the end of the last mailbox. Save it BEFORE the
    // destructor runs, since `this` is inside that same block.
    void* storage = &mailbox(my_num_slots);
    __TBB_ASSERT( my_num_threads_active == 0, NULL );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
    this->~arena();
#if TBB_USE_ASSERT > 1
    // Scrub the freed region in heavy-assert builds to surface use-after-free.
    memset( storage, 0, allocation_size(my_max_num_workers) );
#endif /* TBB_USE_ASSERT */
    NFS_Free( storage );
}
// RML worker-thread entry point. Processes tasks in arenas that need
// workers; with __TBB_SLEEP_PERMISSION, instead of returning as soon as no
// arena needs this thread, it lingers in a pause/yield backoff loop and
// only leaves when the observer list grants permission to sleep (or no
// arenas remain).
void market::process( job& j ) {
    generic_scheduler& s = static_cast<generic_scheduler&>(j);
    arena *a = NULL;
    // Sanity: the thread-local scheduler must be the one RML passed in.
    __TBB_ASSERT( governor::is_set(&s), NULL );
#if !__TBB_SLEEP_PERMISSION
    while ( (a = arena_in_need(a)) )
        a->process(s);
#else//__TBB_SLEEP_PERMISSION
    enum {
        query_interval = 1000,
        first_interval = 1,
        pause_time = 100 // similar to PauseTime used for the stealing loop
    };
    // `i` counts down toward a "may I leave?" query: it is reset to
    // first_interval after any useful work, decremented once per idle pass,
    // and set to query_interval after a denied leave request.
    for(int i = first_interval; ; i--) {
        while ( (a = arena_in_need(a)) ) {
            a->process(s);
            i = first_interval; // did work — restart the idle countdown
        }
        if( i == 0 ) {
#if __TBB_TASK_PRIORITY
            arena_list_type &al = my_priority_levels[my_global_top_priority].arenas;
#else /* __TBB_TASK_PRIORITY */
            arena_list_type &al = my_arenas;
#endif /* __TBB_TASK_PRIORITY */
            if( al.empty() ) // races if any are innocent TODO: replace by an RML query interface
                break; // no arenas left, perhaps going to shut down
            if( the_global_observer_list.ask_permission_to_leave() )
                break; // go sleep
            // Permission denied: yield and wait a full query_interval of
            // idle passes before asking again.
            __TBB_Yield();
            i = query_interval;
        } else __TBB_Pause(pause_time);
    }
#endif//__TBB_SLEEP_PERMISSION
    GATHER_STATISTIC( ++s.my_counters.market_roundtrips );
}
// RML worker-thread entry point: repeatedly picks an arena that needs
// workers and processes tasks there until no arena needs this thread.
void market::process( job& j ) {
    generic_scheduler& s = static_cast<generic_scheduler&>(j);
    // Consistency fix: both sibling variants of this routine verify that
    // the scheduler RML handed us is the one registered in this thread's
    // TLS; this variant omitted the check.
    __TBB_ASSERT( governor::is_set(&s), NULL );
    while ( arena *a = arena_in_need() )
        a->process(s);
    GATHER_STATISTIC( ++s.my_counters.market_roundtrips );
}
// Adjusts the number of workers requested on behalf of arena `a` by `delta`
// and rebalances the market's per-priority-level demand bookkeeping and the
// worker allotment accordingly. Finally forwards the (possibly clipped)
// delta to the RML server's job-count estimate.
void market::adjust_demand ( arena& a, int delta ) {
    __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" );
    if ( !delta )
        return;
    // All demand/allotment state below is protected by the arenas list mutex.
    my_arenas_list_mutex.lock();
    int prev_req = a.my_num_workers_requested;
    a.my_num_workers_requested += delta;
    if ( a.my_num_workers_requested <= 0 ) {
        // The arena no longer wants workers at all.
        a.my_num_workers_allotted = 0;
        if ( prev_req <= 0 ) {
            // Was already non-positive: nothing to report to RML.
            my_arenas_list_mutex.unlock();
            return;
        }
        // Clip the RML-visible delta to what was actually outstanding.
        delta = -prev_req;
    }
#if __TBB_TASK_ARENA
    else if ( prev_req < 0 ) {
        // Crossing from negative to positive: only the positive part is new
        // demand from RML's point of view.
        delta = a.my_num_workers_requested;
    }
#else /* __TBB_TASK_ARENA */
    __TBB_ASSERT( prev_req >= 0, "Part-size request to RML?" );
#endif /* __TBB_TASK_ARENA */
#if __TBB_TASK_PRIORITY
    // Fold the delta into the arena's current top priority level, then
    // update the global top/bottom priority bounds and re-run allotment for
    // whichever range of levels the change can affect.
    intptr_t p = a.my_top_priority;
    priority_level_info &pl = my_priority_levels[p];
    pl.workers_requested += delta;
    __TBB_ASSERT( pl.workers_requested >= 0, NULL );
#if !__TBB_TASK_ARENA
    __TBB_ASSERT( a.my_num_workers_requested >= 0, NULL );
#else
    //TODO: understand the assertion and modify
#endif
    if ( a.my_num_workers_requested <= 0 ) {
        // An idle arena reverts to normal priority so it cannot pin the
        // global priority range.
        if ( a.my_top_priority != normalized_normal_priority ) {
            GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_resets );
            update_arena_top_priority( a, normalized_normal_priority );
        }
        a.my_bottom_priority = normalized_normal_priority;
    }
    if ( p == my_global_top_priority ) {
        if ( !pl.workers_requested ) {
            // The top level emptied: scan downward for the next populated
            // level, or reset the whole range if none remains.
            while ( --p >= my_global_bottom_priority && !my_priority_levels[p].workers_requested )
                continue;
            if ( p < my_global_bottom_priority )
                reset_global_priority();
            else
                update_global_top_priority(p);
        }
        update_allotment( my_global_top_priority );
    }
    else if ( p > my_global_top_priority ) {
#if !__TBB_TASK_ARENA
        __TBB_ASSERT( pl.workers_requested > 0, NULL );
#else
        //TODO: understand the assertion and modify
#endif
        // A new, higher top level appeared: it gets first pick of workers;
        // the levels below share whatever remains.
        update_global_top_priority(p);
        a.my_num_workers_allotted = min( (int)my_max_num_workers, a.my_num_workers_requested );
        my_priority_levels[p - 1].workers_available = my_max_num_workers - a.my_num_workers_allotted;
        update_allotment( p - 1 );
    }
    else if ( p == my_global_bottom_priority ) {
        if ( !pl.workers_requested ) {
            // The bottom level emptied: scan upward for the next populated
            // level, or reset the range if the whole market is idle.
            while ( ++p <= my_global_top_priority && !my_priority_levels[p].workers_requested )
                continue;
            if ( p > my_global_top_priority )
                reset_global_priority();
            else {
                my_global_bottom_priority = p;
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
                my_lowest_populated_level = max( my_lowest_populated_level, p );
#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */
            }
        }
        else
            update_allotment( p );
    }
    else if ( p < my_global_bottom_priority ) {
        // Demand appeared below the current range: extend the bottom bound
        // and rebalance from the old bottom downward.
        __TBB_ASSERT( a.my_num_workers_requested > 0, NULL );
        int prev_bottom = my_global_bottom_priority;
        my_global_bottom_priority = p;
        update_allotment( prev_bottom );
    }
    else {
        // Strictly inside the current range: local rebalance only.
        __TBB_ASSERT( my_global_bottom_priority < p && p < my_global_top_priority, NULL );
        update_allotment( p );
    }
    assert_market_valid();
#else /* !__TBB_TASK_PRIORITY */
    my_total_demand += delta;
    update_allotment();
#endif /* !__TBB_TASK_PRIORITY */
    my_arenas_list_mutex.unlock();
    // Must be called outside of any locks
    my_server->adjust_job_count_estimate( delta );
    GATHER_STATISTIC( governor::local_scheduler_if_initialized() ? ++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 );
}
// Installs `newPriority` as the market's global top priority level and
// refreshes the state derived from it.
inline void market::update_global_top_priority ( intptr_t newPriority ) {
    GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.market_prio_switches );
    // The new top level starts with the full worker pool available to it.
    my_priority_levels[newPriority].workers_available = my_max_num_workers;
    my_global_top_priority = newPriority;
    // Bump the reload epoch so schedulers notice the priority change.
    advance_global_reload_epoch();
}