Example #1
template<typename Props>
size_t LargeObjectCacheImpl<Props>::CacheBin::reportStat(int num, FILE *f)
{
#if __TBB_MALLOC_LOCACHE_STAT
    if (first)
        printf("%d(%lu): total %lu KB thr %ld lastCln %lu oldest %lu\n",
               num, num*Props::CacheStep+Props::MinSize,
               cachedSize/1024, ageThreshold, lastCleanedAge, oldest);
#else
    suppress_unused_warning(num);
    suppress_unused_warning(f);
#endif
    return cachedSize;
}
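
All seven examples on this page funnel through the same tiny helper. As a point of reference, here is a minimal sketch of how such a helper can be written (modeled on TBB's internal suppress_unused_warning; the exact overload set may differ between versions):

// An empty inline function template: passing a value as a function
// argument counts as a "use", so the compiler stops warning about it,
// while the optimizer removes the call entirely.
template<typename T1>
inline void suppress_unused_warning( const T1& ) {}

// Extra overloads allow several values to be silenced in one call.
template<typename T1, typename T2>
inline void suppress_unused_warning( const T1&, const T2& ) {}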
Example #2
// TODO: This function seems to deserve refactoring
inline bool arena::may_have_tasks ( generic_scheduler* s, arena_slot& slot, bool& tasks_present, bool& dequeuing_possible ) {
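    // 'slot' is referenced only by the assertions below, so release builds need this suppression.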
    suppress_unused_warning(slot);
    if ( !s ) {
        // This slot is vacant
        __TBB_ASSERT( slot.task_pool == EmptyTaskPool, NULL );
        __TBB_ASSERT( slot.tail == slot.head, "Someone is tinkering with a vacant arena slot" );
        return false;
    }
    dequeuing_possible |= s->worker_outermost_level();
    if ( s->my_pool_reshuffling_pending ) {
        // This primary task pool is nonempty and may contain tasks at the current
        // priority level. Its owner is winnowing lower priority tasks at the moment.
        tasks_present = true;
        return true;
    }
    if ( s->my_offloaded_tasks ) {
        tasks_present = true;
        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
            // This scheduler's offload area is nonempty and may contain tasks at the
            // current priority level.
            return true;
        }
    }
    return false;
}
Example #3
//! Performs thread-safe lazy one-time general TBB initialization.
void DoOneTimeInitializations() {
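    // '_pad' is otherwise unreferenced (padding, judging by its name); touch it to avoid an unused-variable warning.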
    suppress_unused_warning(_pad);
    __TBB_InitOnce::lock();
    // No fence required for load of InitializationDone, because we are inside a critical section.
    if( !__TBB_InitOnce::InitializationDone ) {
        __TBB_InitOnce::add_ref();
        if( GetBoolEnvironmentVariable("TBB_VERSION") )
            PrintVersion();
        bool itt_present = false;
#if DO_ITT_NOTIFY
        ITT_DoUnsafeOneTimeInitialization();
        itt_present = ITT_Present;
#endif /* DO_ITT_NOTIFY */
        initialize_cache_aligned_allocator();
        governor::initialize_rml_factory();
        Scheduler_OneTimeInitialization( itt_present );
        // Force processor groups support detection
        governor::default_num_threads();
        // Dump version data
        governor::print_version_info();
        PrintExtraVersionInfo( "Tools support", itt_present ? "enabled" : "disabled" );
        __TBB_InitOnce::InitializationDone = true;
    }
    __TBB_InitOnce::unlock();
}
Example #4
arena* market::arena_in_need ( arena* prev_arena )
{
    if( !has_any_demand() )
        return NULL;
    arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex, /*is_writer=*/false);
    assert_market_valid();
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
    if ( prev_arena ) {
        priority_level_info &pl = my_priority_levels[prev_arena->my_top_priority];
        --prev_arena->my_num_workers_present;
        --pl.workers_present;
        if ( !--prev_arena->my_references && !prev_arena->my_num_workers_requested ) {
            detach_arena( *prev_arena );
            lock.release();
            prev_arena->free_arena();
            lock.acquire();
        }
    }
#else
    suppress_unused_warning(prev_arena);
#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */
    int p = my_global_top_priority;
    arena *a = NULL;
    do {
        priority_level_info &pl = my_priority_levels[p];
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
        __TBB_ASSERT( p >= my_lowest_populated_level, NULL );
        if ( pl.workers_present >= pl.workers_requested )
            continue;
#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */
        a = arena_in_need( pl.arenas, pl.next_arena );
    } while ( !a && --p >= my_global_bottom_priority );
    return a;
}
Example #5
void task_group_context::init () {
    __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );
    __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" );
    __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, "Context can be created only as isolated or bound" );
    my_parent = NULL;
    my_cancellation_requested = 0;
    my_exception = NULL;
    my_owner = NULL;
    my_state = 0;
    itt_caller = ITT_CALLER_NULL;
#if __TBB_TASK_PRIORITY
    my_priority = normalized_normal_priority;
#endif /* __TBB_TASK_PRIORITY */
#if __TBB_FP_CONTEXT
    __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings is not equal to sizeof(uint64_t)" );
    __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit into uint64_t" );
    suppress_unused_warning( my_cpu_ctl_env.space );

    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    new ( &ctl ) cpu_ctl_env;
    if ( my_version_and_traits & fp_settings )
        ctl.get_env();
#endif
}
Example #6
void governor::sign_off(generic_scheduler* s) {
    suppress_unused_warning(s);
    __TBB_ASSERT( theTLS.get()==s, "attempt to unregister a wrong scheduler instance" );
    theTLS.set(NULL);
#if __TBB_SURVIVE_THREAD_SWITCH
    __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
    if ( ut.routine )
        (*ut.routine)(ut.data);
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
}
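
Example #6 shows the most common motivation for the helper: a parameter that is touched only by assertions and conditionally compiled code. A self-contained sketch of that pattern (sign_off_sketch and EXAMPLE_FEATURE are made-up names, not TBB's):

#include <cassert>

template<typename T> inline void suppress_unused_warning( const T& ) {}

void sign_off_sketch(int* s) {
    suppress_unused_warning(s);   // keeps -Wunused-parameter quiet
    assert(s != nullptr);         // compiled away when NDEBUG is defined
#ifdef EXAMPLE_FEATURE            // hypothetical feature toggle
    *s = 0;                       // the only "real" use in some builds
#endif
}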
Example #7
void itt_set_sync_name_v3( void* obj, const tchar* name) {
    ITT_SYNC_RENAME(obj, name);
    suppress_unused_warning(obj && name);
}
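
The last example silences two parameters with a single call by folding them into one expression: when ITT support is compiled out, ITT_SYNC_RENAME typically expands to nothing, leaving both obj and name unreferenced. A sketch of the same trick with a made-up macro (MY_TRACE is hypothetical):

template<typename T> inline void suppress_unused_warning( const T& ) {}

#define MY_TRACE(obj, name) /* expands to nothing in this configuration */

void set_name_sketch(void* obj, const char* name) {
    MY_TRACE(obj, name);
    suppress_unused_warning(obj && name);  // one call references both
}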