Example #1
void itt_set_sync_name_v3( void *obj, const tchar* name) {
    __TBB_ASSERT( ITT_Handler_sync_rename!=&dummy_sync_rename, NULL );
    ITT_SYNC_RENAME(obj, name);
}
Example #2
void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
    ASSERT( max_thread>=0, NULL );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    client.server = &server;
    MyTeam team(server,size_t(max_thread));
    MyServer::size_type n_thread = 0;
    for( int iteration=0; iteration<4; ++iteration ) {
        for( size_t i=0; i<team.max_thread; ++i )
            team.info[i].ran = false;
        switch( iteration ) {
            default:
                n_thread = int(max_thread);
                break;
            case 1:
                // No change in number of threads
                break;
            case 2:
                // Decrease number of threads.
                n_thread = int(max_thread)/2;
                break;
            // Case 3 uses the same code as the default case, but has the effect of increasing the number of threads.
        }
        team.barrier = 0;
        REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
        server.independent_thread_number_changed( n_extra );
        if( checker ) {
            // Give RML time to respond to change in number of threads.
            Harness::Sleep(1);
        }
        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
        if( n_delivered<0 ) {
            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
            server.independent_thread_number_changed( -n_extra );
            n_delivered = 0;
        } else {
            team.n_thread = n_delivered;
            ::rml::job* job_array[JobArraySize];
            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
            server.get_threads( n_delivered, &team, job_array );
            __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL );
            for( int i=0; i<n_delivered; ++i ) {
                MyJob* j = static_cast<MyJob*>(job_array[i]);
                int s = j->state;
                ASSERT( s==MyJob::idle||s==MyJob::busy, NULL );
            }
            server.independent_thread_number_changed( -n_extra );
            REMARK("client %d: team size is %d\n", client.client_id(), n_delivered);
            if( checker ) {
                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
            }
            // The protocol requires that the master wait until the workers have called "done_processing"
            while( team.barrier!=n_delivered ) {
                ASSERT( team.barrier>=0, NULL );
                ASSERT( team.barrier<=n_delivered, NULL );
                __TBB_Yield();
            }
            REMARK("client %d: team completed\n", client.client_id() );
            for( int i=0; i<n_delivered; ++i ) {
                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
            }
        }
        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
        }
    }
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
}
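
The wait loop near the end of FireUpJobs (team.barrier != n_delivered) is a plain spin barrier: each delivered worker increments a shared counter when it reaches "done_processing", and the master spins, yielding, until the counter equals the delivered team size. Below is a minimal, self-contained sketch of that handshake using std::atomic and std::thread in place of the RML test harness; names such as run_worker are illustrative and not taken from the source.

#include <atomic>
#include <cassert>
#include <cstdio>
#include <thread>
#include <vector>

// Shared barrier counter: one increment per worker that finished processing.
static std::atomic<int> barrier{0};

// Hypothetical worker body standing in for the job's process/done_processing pair.
static void run_worker(int id) {
    std::printf("worker %d: running\n", id);
    barrier.fetch_add(1, std::memory_order_release);  // the "done_processing" step
}

int main() {
    const int n_delivered = 4;
    std::vector<std::thread> team;
    for (int i = 0; i < n_delivered; ++i)
        team.emplace_back(run_worker, i);

    // Master side of the protocol: spin until every delivered worker has checked in.
    while (barrier.load(std::memory_order_acquire) != n_delivered) {
        assert(barrier.load() >= 0 && barrier.load() <= n_delivered);
        std::this_thread::yield();  // plays the role of __TBB_Yield()
    }
    std::printf("team completed\n");

    for (auto& t : team) t.join();
    return 0;
}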
Example #3
//! Downgrade writer to a reader
void spin_rw_mutex_v3::internal_downgrade() {
    ITT_NOTIFY(sync_releasing, this);
    __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER));
    __TBB_ASSERT( state & READERS, "invalid state after downgrade: no readers" );
}
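
internal_downgrade relies purely on state arithmetic: a single atomic add of ONE_READER - WRITER clears the writer bit and accounts for one reader in one step, so there is no instant at which the lock appears unowned. The sketch below illustrates that encoding with std::atomic; the bit layout is an assumption chosen for clarity, not the exact set of constants used by spin_rw_mutex_v3.

#include <atomic>
#include <cassert>
#include <cstdint>

// Illustrative state layout: low bit = writer flag, remaining bits = reader count.
constexpr std::intptr_t WRITER     = 1;
constexpr std::intptr_t ONE_READER = 2;
constexpr std::intptr_t READERS    = ~WRITER;

std::atomic<std::intptr_t> state{0};

void acquire_writer() { state.store(WRITER); }                   // simplified: uncontended case
void downgrade()      { state.fetch_add(ONE_READER - WRITER); }  // writer -> one reader, atomically
void release_reader() { state.fetch_sub(ONE_READER); }

int main() {
    acquire_writer();
    downgrade();
    assert(state.load() & READERS);   // at least one reader after the downgrade
    release_reader();
    assert(state.load() == 0);
    return 0;
}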
Example #4
 //! Add one and return new value.
 int add_ref() {
     int k = ++my_ref_count;
     __TBB_ASSERT(k>=1,"reference count underflowed before add_ref");
     return k;
 }
Example #5
 server_thread& thread() {
     __TBB_ASSERT( my_thread, "thread_map::value_type::thread() called when !my_thread" );
     return *my_thread;
 }
Example #6
 /*override*/ void independent_thread_number_changed( int ) {__TBB_ASSERT(false,NULL);}
Example #7
void governor::setBlockingTerminate(const task_scheduler_init *tsi) {
    __TBB_ASSERT(!IsBlockingTerminationInProgress, "It's impossible to create task_scheduler_init while blocking termination is in progress.");
    if (BlockingTSI)
        throw_exception(eid_blocking_sch_init);
    BlockingTSI = tsi;
}
Example #8
void __TBB_InitOnce::remove_ref() {
    int k = --count;
    __TBB_ASSERT(k>=0,"removed __TBB_InitOnce ref that was not added?"); 
    if( k==0 ) 
        governor::release_resources();
}
void market::adjust_demand ( arena& a, int delta ) {
    __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" );
    if ( !delta )
        return;
    my_arenas_list_mutex.lock();
    int prev_req = a.my_num_workers_requested;
    a.my_num_workers_requested += delta;
    if ( a.my_num_workers_requested <= 0 ) {
        a.my_num_workers_allotted = 0;
        if ( prev_req <= 0 ) {
            my_arenas_list_mutex.unlock();
            return;
        }
        delta = -prev_req;
    }
#if __TBB_TASK_ARENA
    else if ( prev_req < 0 ) {
        delta = a.my_num_workers_requested;
    }
#else  /* __TBB_TASK_ARENA */
    __TBB_ASSERT( prev_req >= 0, "Part-size request to RML?" );
#endif /* __TBB_TASK_ARENA */
#if __TBB_TASK_PRIORITY
    intptr_t p = a.my_top_priority;
    priority_level_info &pl = my_priority_levels[p];
    pl.workers_requested += delta;
    __TBB_ASSERT( pl.workers_requested >= 0, NULL );
#if !__TBB_TASK_ARENA
    __TBB_ASSERT( a.my_num_workers_requested >= 0, NULL );
#else
    //TODO: understand the assertion and modify
#endif
    if ( a.my_num_workers_requested <= 0 ) {
        if ( a.my_top_priority != normalized_normal_priority ) {
            GATHER_STATISTIC( ++governor::local_scheduler_if_initialized()->my_counters.arena_prio_resets );
            update_arena_top_priority( a, normalized_normal_priority );
        }
        a.my_bottom_priority = normalized_normal_priority;
    }
    if ( p == my_global_top_priority ) {
        if ( !pl.workers_requested ) {
            while ( --p >= my_global_bottom_priority && !my_priority_levels[p].workers_requested )
                continue;
            if ( p < my_global_bottom_priority )
                reset_global_priority();
            else
                update_global_top_priority(p);
        }
        update_allotment( my_global_top_priority );
    }
    else if ( p > my_global_top_priority ) {
#if !__TBB_TASK_ARENA
        __TBB_ASSERT( pl.workers_requested > 0, NULL );
#else
        //TODO: understand the assertion and modify
#endif
        update_global_top_priority(p);
        a.my_num_workers_allotted = min( (int)my_max_num_workers, a.my_num_workers_requested );
        my_priority_levels[p - 1].workers_available = my_max_num_workers - a.my_num_workers_allotted;
        update_allotment( p - 1 );
    }
    else if ( p == my_global_bottom_priority ) {
        if ( !pl.workers_requested ) {
            while ( ++p <= my_global_top_priority && !my_priority_levels[p].workers_requested )
                continue;
            if ( p > my_global_top_priority )
                reset_global_priority();
            else {
                my_global_bottom_priority = p;
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
                my_lowest_populated_level = max( my_lowest_populated_level, p );
#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */
            }
        }
        else
            update_allotment( p );
    }
    else if ( p < my_global_bottom_priority ) {
        __TBB_ASSERT( a.my_num_workers_requested > 0, NULL );
        int prev_bottom = my_global_bottom_priority;
        my_global_bottom_priority = p;
        update_allotment( prev_bottom );
    }
    else {
        __TBB_ASSERT( my_global_bottom_priority < p && p < my_global_top_priority, NULL );
        update_allotment( p );
    }
    assert_market_valid();
#else /* !__TBB_TASK_PRIORITY */
    my_total_demand += delta;
    update_allotment();
#endif /* !__TBB_TASK_PRIORITY */
    my_arenas_list_mutex.unlock();
    // Must be called outside of any locks
    my_server->adjust_job_count_estimate( delta );
    GATHER_STATISTIC( governor::local_scheduler_if_initialized() ? ++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 );
}
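
With the priority machinery compiled out (the #else /* !__TBB_TASK_PRIORITY */ branch), adjust_demand reduces to: take the arena list lock, fold the delta into the total demand, recompute the allotment, release the lock, and only then notify the thread supplier of the new job count estimate. A bare-bones sketch of that flow is given below; SimpleMarket and its members are hypothetical names, not part of the TBB market class.

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <mutex>

// Hypothetical, simplified market: one demand counter, no priority levels.
struct SimpleMarket {
    std::mutex arenas_list_mutex;
    int total_demand = 0;
    int max_workers  = 8;
    int allotted     = 0;

    void update_allotment() {
        // Never allot more workers than exist or than are requested.
        allotted = std::min(total_demand, max_workers);
    }

    void adjust_demand(int delta) {
        if (!delta) return;
        {
            std::lock_guard<std::mutex> lock(arenas_list_mutex);
            total_demand += delta;
            assert(total_demand >= 0);
            update_allotment();
        }
        // As in the source, the thread supplier would be told the new job count
        // estimate here, outside of any locks.
        std::printf("demand=%d allotted=%d\n", total_demand, allotted);
    }
};

int main() {
    SimpleMarket m;
    m.adjust_demand(+12);   // more requested than workers exist: capped at max_workers
    m.adjust_demand(-12);   // arena drained: allotment drops to 0
    return 0;
}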
Example #10
void arena::process( generic_scheduler& s ) {
    __TBB_ASSERT( is_alive(my_guard), NULL );
    __TBB_ASSERT( governor::is_set(&s), NULL );
    __TBB_ASSERT( !s.innermost_running_task, NULL );

    __TBB_ASSERT( my_num_slots != 1, NULL );
    // Start search for an empty slot from the one we occupied the last time
    unsigned index = s.arena_index < my_num_slots ? s.arena_index : s.random.get() % (my_num_slots - 1) + 1,
             end = index;
    __TBB_ASSERT( index != 0, "A worker cannot occupy slot 0" );
    __TBB_ASSERT( index < my_num_slots, NULL );

    // Find a vacant slot
    for ( ;; ) {
        if ( !slot[index].my_scheduler && __TBB_CompareAndSwapW( &slot[index].my_scheduler, (intptr_t)&s, 0 ) == 0 )
            break;
        if ( ++index == my_num_slots )
            index = 1;
        if ( index == end ) {
            // Likely this arena is already saturated
            if ( --my_num_threads_active == 0 )
                close_arena();
            return;
        }
    }
    ITT_NOTIFY(sync_acquired, &slot[index]);
    s.my_arena = this;
    s.arena_index = index;
    s.attach_mailbox( affinity_id(index+1) );

    slot[index].hint_for_push = index ^ unsigned(&s-(generic_scheduler*)NULL)>>16; // randomizer seed
    slot[index].hint_for_pop  = index; // initial value for round-robin

    unsigned new_limit = index + 1;
    unsigned old_limit = my_limit;
    while ( new_limit > old_limit ) {
        if ( my_limit.compare_and_swap(new_limit, old_limit) == old_limit )
            break;
        old_limit = my_limit;
    }

    for ( ;; ) {
        // Try to steal a task.
        // Passing reference count is technically unnecessary in this context,
        // but omitting it here would add checks inside the function.
        __TBB_ASSERT( is_alive(my_guard), NULL );
        task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true );
        if (t) {
            // A side effect of receive_or_steal_task is that innermost_running_task can be set.
            // But for the outermost dispatch loop of a worker it has to be NULL.
            s.innermost_running_task = NULL;
            s.local_wait_for_all(*s.dummy_task,t);
        }
        ++my_num_threads_leaving;
        __TBB_ASSERT ( slot[index].head == slot[index].tail, "Worker cannot leave arena while its task pool is not empty" );
        __TBB_ASSERT( slot[index].task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" );
        // Revalidate quitting condition
        // This check prevents relinquishing more workers than necessary, because
        // the decision-making procedure is not atomic.
        if ( num_workers_active() >= my_num_workers_allotted || !my_num_workers_requested )
            break;
        --my_num_threads_leaving;
        __TBB_ASSERT( !slot[0].my_scheduler || my_num_threads_active > 0, "Who requested more workers after the last one left the dispatch loop and the master's gone?" );
    }
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *slot[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif /* __TBB_STATISTICS */
    __TBB_store_with_release( slot[index].my_scheduler, (generic_scheduler*)NULL );
    s.inbox.detach();
    __TBB_ASSERT( s.inbox.is_idle_state(true), NULL );
    __TBB_ASSERT( !s.innermost_running_task, NULL );
    __TBB_ASSERT( is_alive(my_guard), NULL );
    // Decrementing my_num_threads_active first prevents extra workers from leaving
    // this arena prematurely, but can result in some workers returning just
    // to repeat the escape attempt. If instead my_num_threads_leaving is decremented
    // first, the result is the opposite: premature leaving is allowed and gratuitous
    // return is prevented. Since such a race is likely only when multiple
    // workers are in the stealing loop, and consequently there is a lack of parallel
    // work in this arena, we'd rather let them go out and try to get employment in
    // other arenas (before returning into this one again).
    --my_num_threads_leaving;
    if ( !--my_num_threads_active )
        close_arena();
}
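
The vacant-slot search at the top of arena::process is a compare-and-swap claim: start from a preferred index, and the first thread whose CAS flips a slot's owner from NULL to itself wins it, while everyone else keeps probing round-robin until the search wraps around. The stand-alone sketch below shows the same claiming loop with std::atomic; slot_owner and claim_slot are illustrative names, not TBB APIs.

#include <atomic>
#include <cstdio>

constexpr unsigned kSlots = 8;

// Each slot holds the id of the thread occupying it, 0 meaning "vacant".
std::atomic<unsigned> slot_owner[kSlots];

// Try to claim any slot in [1, kSlots), starting from a preferred index.
// Returns the claimed index, or 0 if every worker slot is taken.
unsigned claim_slot(unsigned preferred, unsigned me) {
    unsigned index = (preferred >= 1 && preferred < kSlots) ? preferred : 1;
    unsigned end = index;
    for (;;) {
        unsigned expected = 0;
        // Cheap test first, then the CAS that actually claims the slot.
        if (slot_owner[index].load(std::memory_order_relaxed) == 0 &&
            slot_owner[index].compare_exchange_strong(expected, me))
            return index;                   // we won the slot
        if (++index == kSlots) index = 1;   // wrap around; slot 0 belongs to the master
        if (index == end) return 0;         // full circle: likely saturated
    }
}

int main() {
    unsigned got = claim_slot(/*preferred=*/3, /*me=*/42);
    std::printf("claimed slot %u\n", got);
    if (got) slot_owner[got].store(0);      // release the slot on the way out
    return 0;
}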
Example #11
static void initialize_hardware_concurrency_info () {
    int err;
    int availableProcs = 0;
    int numMasks = 1;
#if __linux__
#if __TBB_MAIN_THREAD_AFFINITY_BROKEN
    int maxProcs = INT_MAX; // To check the entire mask.
    int pid = 0; // Get the mask of the calling thread.
#else
    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);
    int pid = getpid();
#endif
#else /* FreeBSD >= 7.1 */
    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    basic_mask_t* processMask;
    const size_t BasicMaskSize =  sizeof(basic_mask_t);
    for (;;) {
        const int curMaskSize = BasicMaskSize * numMasks;
        processMask = new basic_mask_t[numMasks];
        memset( processMask, 0, curMaskSize );
#if __linux__
        err = sched_getaffinity( pid, curMaskSize, processMask );
        if ( !err || errno != EINVAL || curMaskSize * CHAR_BIT >= 256 * 1024 )
            break;
#else /* FreeBSD >= 7.1 */
        // CPU_LEVEL_WHICH - anonymous (current) mask, CPU_LEVEL_CPUSET - assigned mask
#if __TBB_MAIN_THREAD_AFFINITY_BROKEN
        err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, curMaskSize, processMask );
#else
        err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, curMaskSize, processMask );
#endif
        if ( !err || errno != ERANGE || curMaskSize * CHAR_BIT >= 16 * 1024 )
            break;
#endif /* FreeBSD >= 7.1 */
        delete[] processMask;
        numMasks <<= 1;
    }
    if ( !err ) {
        // We have found the mask size and captured the process affinity mask into processMask.
        num_masks = numMasks; // do here because it's needed for affinity_helper to work
#if __linux__
        // For better coexistence with libiomp, which might have changed the mask already,
        // check for its presence and ask it to restore the mask.
        dynamic_link_handle libhandle;
        if ( dynamic_link( "libiomp5.so", iompLinkTable, 1, &libhandle, DYNAMIC_LINK_GLOBAL ) ) {
            // We have found the symbol provided by libiomp5 for restoring original thread affinity.
            affinity_helper affhelp;
            affhelp.protect_affinity_mask( /*restore_process_mask=*/false );
            if ( libiomp_try_restoring_original_mask()==0 ) {
                // Now we have the right mask to capture, restored by libiomp.
                const int curMaskSize = BasicMaskSize * numMasks;
                memset( processMask, 0, curMaskSize );
                get_thread_affinity_mask( curMaskSize, processMask );
            } else
                affhelp.dismiss();  // thread mask has not changed
            dynamic_unlink( libhandle );
            // Destructor of affinity_helper restores the thread mask (unless dismissed).
        }
#endif
        for ( int m = 0; availableProcs < maxProcs && m < numMasks; ++m ) {
            for ( size_t i = 0; (availableProcs < maxProcs) && (i < BasicMaskSize * CHAR_BIT); ++i ) {
                if ( CPU_ISSET( i, processMask + m ) )
                    ++availableProcs;
            }
        }
        process_mask = processMask;
    }
    else {
        // Failed to get the process affinity mask; assume the whole machine can be used.
        availableProcs = (maxProcs == INT_MAX) ? sysconf(_SC_NPROCESSORS_ONLN) : maxProcs;
        delete[] processMask;
    }
    theNumProcs = availableProcs > 0 ? availableProcs : 1; // Fail safety strap
    __TBB_ASSERT( theNumProcs <= sysconf(_SC_NPROCESSORS_ONLN), NULL );
}
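
The for (;;) loop in initialize_hardware_concurrency_info exists because the kernel rejects an affinity buffer smaller than its internal mask: the code keeps doubling the number of basic masks until sched_getaffinity (or cpuset_getaffinity on FreeBSD) stops failing with EINVAL/ERANGE, and then counts the set bits. The Linux-only sketch below shows the same probe-and-grow idea using glibc's dynamic CPU_ALLOC/CPU_COUNT_S helpers; this is an alternative formulation for illustration, not the code TBB actually uses.

// Linux-only sketch: count the CPUs in the affinity mask of the calling thread,
// growing the mask buffer until the kernel accepts its size.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <sched.h>
#include <stdio.h>

int count_affinity_cpus(void) {
    for (int ncpus = 1024; ncpus <= 256 * 1024; ncpus *= 2) {
        cpu_set_t* mask = CPU_ALLOC(ncpus);
        if (!mask) return -1;
        size_t size = CPU_ALLOC_SIZE(ncpus);
        CPU_ZERO_S(size, mask);
        if (sched_getaffinity(0 /* 0 = calling thread */, size, mask) == 0) {
            int n = CPU_COUNT_S(size, mask);  // number of bits set in the mask
            CPU_FREE(mask);
            return n;
        }
        CPU_FREE(mask);
        if (errno != EINVAL)                  // EINVAL here means "buffer too small"
            return -1;                        // any other error: give up
    }
    return -1;
}

int main(void) {
    printf("usable CPUs: %d\n", count_affinity_cpus());
    return 0;
}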
Example #12
bool market::update_arena_priority ( arena& a, intptr_t new_priority ) {
    arenas_list_mutex_type::scoped_lock lock(my_arenas_list_mutex);

    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority || a.my_num_workers_requested <= 0, NULL );
    assert_market_valid();
    if ( a.my_top_priority == new_priority ) {
        return false;
    }
    else if ( a.my_top_priority > new_priority ) {
        if ( a.my_bottom_priority > new_priority )
            a.my_bottom_priority = new_priority;
        return false;
    }
    else if ( a.my_num_workers_requested <= 0 ) {
        return false;
    }

    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );

    intptr_t p = a.my_top_priority;
    intptr_t highest_affected_level = max(p, new_priority);
    update_arena_top_priority( a, new_priority );

    if ( my_global_top_priority < new_priority ) {
        update_global_top_priority(new_priority);
    }
    else if ( my_global_top_priority == new_priority ) {
        advance_global_reload_epoch();
    }
    else {
        __TBB_ASSERT( new_priority < my_global_top_priority, NULL );
        __TBB_ASSERT( new_priority > my_global_bottom_priority, NULL );
        if ( p == my_global_top_priority && !my_priority_levels[p].workers_requested ) {
            // Global top level became empty
            __TBB_ASSERT( my_global_bottom_priority < p, NULL );
            for ( --p; !my_priority_levels[p].workers_requested; --p ) continue;
            __TBB_ASSERT( p >= new_priority, NULL );
            update_global_top_priority(p);
            highest_affected_level = p;
        }
    }
    if ( p == my_global_bottom_priority ) {
        // Arena priority was increased from the global bottom level.
        __TBB_ASSERT( p < new_priority, NULL );
        __TBB_ASSERT( new_priority <= my_global_top_priority, NULL );
        while ( !my_priority_levels[my_global_bottom_priority].workers_requested )
            ++my_global_bottom_priority;
        __TBB_ASSERT( my_global_bottom_priority <= new_priority, NULL );
        __TBB_ASSERT( my_priority_levels[my_global_bottom_priority].workers_requested > 0, NULL );
    }
    update_allotment( highest_affected_level );

    __TBB_ASSERT( my_global_top_priority >= a.my_top_priority, NULL );
    assert_market_valid();
    return true;
}
Example #13
interface6::task_scheduler_observer* observer_proxy::get_v6_observer() {
    __TBB_ASSERT(my_version == 6, NULL);
    return static_cast<interface6::task_scheduler_observer*>(my_observer);
}
Example #14
void task_scheduler_observer_v3::observe( bool enable ) {
    if( enable ) {
        if( !my_proxy ) {
            my_proxy = new observer_proxy( *this );
            if ( !my_proxy->is_global() ) {
                // Local observer activation
                generic_scheduler* s = governor::local_scheduler_if_initialized();
#if __TBB_TASK_ARENA
                intptr_t tag = my_proxy->get_v6_observer()->my_context_tag;
                if( tag != interface6::task_scheduler_observer::implicit_tag ) { // explicit arena
                    task_arena *a = reinterpret_cast<task_arena*>(tag);
                    a->check_init();
                    my_proxy->my_list = &a->my_arena->my_observers;
                } else
#endif
                {
                    if( !s ) s = governor::init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true );
                    __TBB_ASSERT( __TBB_InitOnce::initialization_done(), NULL );
                    __TBB_ASSERT( s && s->my_arena, NULL );
                    my_proxy->my_list = &s->my_arena->my_observers;
                }
                my_proxy->my_list->insert(my_proxy);
                my_busy_count = 0;
                // Notify newly activated observer and other pending ones if it belongs to current arena
                if(s && &s->my_arena->my_observers == my_proxy->my_list )
                    my_proxy->my_list->notify_entry_observers( s->my_last_local_observer, s->is_worker() );
            } else {
                // Obsolete. Global observer activation
                if( !__TBB_InitOnce::initialization_done() )
                    DoOneTimeInitializations();
                my_busy_count = 0;
                my_proxy->my_list = &the_global_observer_list;
                my_proxy->my_list->insert(my_proxy);
                if( generic_scheduler* s = governor::local_scheduler_if_initialized() ) {
                    // Notify newly created observer of its own thread.
                    // Any other pending observers are notified too.
                    the_global_observer_list.notify_entry_observers( s->my_last_global_observer, s->is_worker() );
                }
            }
        }
    } else {
        // Make sure that possible concurrent proxy list cleanup does not conflict
        // with the observer destruction here.
        if ( observer_proxy* proxy = (observer_proxy*)__TBB_FetchAndStoreW(&my_proxy, 0) ) {
            // List destruction should not touch this proxy after we've won the above interlocked exchange.
            __TBB_ASSERT( proxy->my_observer == this, NULL );
            __TBB_ASSERT( is_alive(proxy->my_ref_count), "Observer's proxy died prematurely" );
            __TBB_ASSERT( proxy->my_ref_count >= 1, "reference for observer missing" );
            observer_list &list = *proxy->my_list;
            {
                // Ensure that none of the list walkers relies on observer pointer validity
                observer_list::scoped_lock lock(list.mutex(), /*is_writer=*/true);
                proxy->my_observer = NULL;
            }
            intptr_t trait = proxy->my_version == 6 ? interface6::task_scheduler_observer::v6_trait : 0;
            // Proxy may still be held by other threads (to track the last notified observer)
            list.remove_ref(proxy);
            while( my_busy_count )
                __TBB_Yield();
            store<relaxed>( my_busy_count, trait );
        }
    }
}
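
Deactivation in observe(false) hinges on a single atomic fetch-and-store of my_proxy: whichever thread zeroes the pointer first owns the teardown, so a concurrent proxy-list cleanup can never tear the proxy down twice. A minimal sketch of that claim-by-exchange idiom with std::atomic follows; the Proxy type and the function names are illustrative only.

#include <atomic>
#include <cstdio>

struct Proxy { int ref_count = 1; };   // stand-in for observer_proxy

std::atomic<Proxy*> my_proxy{nullptr};

void activate() { my_proxy.store(new Proxy); }

// Returns true if this call actually performed the teardown.
bool deactivate() {
    // Atomically detach the proxy; only one caller can observe a non-null value.
    if (Proxy* proxy = my_proxy.exchange(nullptr)) {
        // We won the exchange: nobody else will reach 'proxy' through my_proxy.
        delete proxy;
        return true;
    }
    return false;   // someone else already detached it
}

int main() {
    activate();
    std::printf("first deactivate: %d\n", deactivate());   // prints 1
    std::printf("second deactivate: %d\n", deactivate());  // prints 0
    return 0;
}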
Example #15
static void initialize_hardware_concurrency_info () {
    int err;
    int availableProcs = 0;
    int numMasks = 1;
#if __linux__
#if __TBB_MAIN_THREAD_AFFINITY_BROKEN
    int maxProcs = INT_MAX; // To check the entire mask.
    int pid = 0; // Get the mask of the calling thread.
#else
    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);
    int pid = getpid();
#endif
    cpu_set_t *processMask;
    const size_t BasicMaskSize =  sizeof(cpu_set_t);
    for (;;) {
        int curMaskSize = BasicMaskSize * numMasks;
        processMask = new cpu_set_t[numMasks];
        memset( processMask, 0, curMaskSize );
        err = sched_getaffinity( pid, curMaskSize, processMask );
        if ( !err || errno != EINVAL || curMaskSize * CHAR_BIT >= 256 * 1024 )
            break;
        delete[] processMask;
        numMasks <<= 1;
    }
#else /* FreeBSD >= 7.1 */
    int maxProcs = sysconf(_SC_NPROCESSORS_ONLN);
    cpuset_t *processMask;
    const size_t BasicMaskSize = sizeof(cpuset_t);
    for (;;) {
        int curMaskSize = BasicMaskSize * numMasks;
        processMask = new cpuset_t[numMasks];
        memset( processMask, 0, curMaskSize );
        // CPU_LEVEL_WHICH - anonymous (current) mask, CPU_LEVEL_CPUSET - assigned mask
#if __TBB_MAIN_THREAD_AFFINITY_BROKEN
        err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, curMaskSize, processMask );
#else
        err = cpuset_getaffinity( CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, curMaskSize, processMask );
#endif
        if ( !err || errno != ERANGE || curMaskSize * CHAR_BIT >= 16 * 1024 )
            break;
        delete[] processMask;
        numMasks <<= 1;
    }
#endif /* FreeBSD >= 7.1 */
    if ( !err ) {
        for ( int m = 0; availableProcs < maxProcs && m < numMasks; ++m ) {
            for ( size_t i = 0; (availableProcs < maxProcs) && (i < BasicMaskSize * CHAR_BIT); ++i ) {
                if ( CPU_ISSET( i, processMask + m ) )
                    ++availableProcs;
            }
        }
        num_masks = numMasks;
        process_mask = processMask;
    }
    else {
        availableProcs = (maxProcs == INT_MAX) ? sysconf(_SC_NPROCESSORS_ONLN) : maxProcs;
        delete[] processMask;
    }
    theNumProcs = availableProcs > 0 ? availableProcs : 1; // Fail safety strap
    __TBB_ASSERT( theNumProcs <= sysconf(_SC_NPROCESSORS_ONLN), NULL );
}
void governor::terminate_scheduler( generic_scheduler* s ) {
    __TBB_ASSERT( s == theTLS.get(), "Attempt to terminate non-local scheduler instance" );
    if( !--(s->my_ref_count) )
        s->cleanup_master();
}
Example #17
int NumberOfProcessorGroups() {
    __TBB_ASSERT( hardware_concurrency_info == initialization_complete, "NumberOfProcessorGroups is used before AvailableHwConcurrency" );
    return ProcessorGroupInfo::NumGroups;
}
Example #18
void init_condvar_module()
{
    __TBB_ASSERT( (uintptr_t)__TBB_init_condvar==(uintptr_t)&init_condvar_using_event, NULL );
    if( dynamic_link( "Kernel32.dll", CondVarLinkTable, 4 ) )
        __TBB_destroy_condvar = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_noop;
}
void allocate_root_with_context_proxy::free( task& task ) const {
    internal::generic_scheduler* v = governor::local_scheduler();
    __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
    // No need to do anything here as long as unbinding is performed by context destructor only.
    v->free_task<local_task>( task );
}
 virtual void apply_active() const {
     __TBB_ASSERT( my_active_value>=2, NULL );
     // -1 to take master into account
     market::set_active_num_workers( my_active_value-1 );
 }
Example #21
 ~ref_count() {__TBB_ASSERT( !my_ref_count, "premature destruction of refcounted object" );}
    virtual void apply_active() const {
#if __TBB_WIN8UI_SUPPORT
        __TBB_ASSERT( false, "For Windows Store* apps we must not set stack size" );
#endif
    }
Example #23
 //! Subtract one and return new value.
 int remove_ref() {
     int k = --my_ref_count; 
     __TBB_ASSERT(k>=0,"reference count underflow");
     return k;
 }
bool arena::is_out_of_work() {
    // TODO: rework it to return at least a hint about where a task was found; better if the task itself.
    for(;;) {
        pool_state_t snapshot = prefix().my_pool_state;
        switch( snapshot ) {
            case SNAPSHOT_EMPTY:
                return true;
            case SNAPSHOT_FULL: {
                // Use unique id for "busy" in order to avoid ABA problems.
                const pool_state_t busy = pool_state_t(this);
                // Request permission to take snapshot
                if( prefix().my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {
                    // Got permission. Take the snapshot.
                    // NOTE: This is not a lock, as the state can be set to FULL at 
                    //       any moment by a thread that spawns/enqueues new task.
                    size_t n = my_limit;
                    // Make local copies of volatile parameters. A change to them during
                    // the snapshot-taking procedure invalidates the attempt and returns
                    // this thread to the dispatch loop.
#if __TBB_TASK_PRIORITY
                    intptr_t top_priority = my_top_priority;
                    uintptr_t reload_epoch = my_reload_epoch;
                    // Inspect primary task pools first
#endif /* __TBB_TASK_PRIORITY */
                    size_t k; 
                    for( k=0; k<n; ++k ) {
                        if( my_slots[k].task_pool != EmptyTaskPool && my_slots[k].head < my_slots[k].tail )
                            // k-th primary task pool is nonempty and does contain tasks.
                            break;
                    }
                    __TBB_ASSERT( k <= n, NULL );
                    bool work_absent = k == n;
#if __TBB_TASK_PRIORITY
                    // Variable tasks_present indicates presence of tasks at any priority
                    // level, while work_absent refers only to the current priority.
                    bool tasks_present = !work_absent || my_orphaned_tasks;
                    bool dequeuing_possible = false;
                    if ( work_absent ) {
                        // Check for the possibility that recent priority changes
                        // brought some tasks to the current priority level

                        uintptr_t abandonment_epoch = my_abandonment_epoch;
                        // Master thread's scheduler needs special handling as it 
                        // may be destroyed at any moment (workers' schedulers are 
                        // guaranteed to be alive while at least one thread is in arena).
                        // Have to exclude concurrency with task group state change propagation too.
                        my_market->my_arenas_list_mutex.lock();
                        generic_scheduler *s = my_slots[0].my_scheduler;
                        if ( s && __TBB_CompareAndSwapW(&my_slots[0].my_scheduler, (intptr_t)LockedMaster, (intptr_t)s) == (intptr_t)s ) {
                            __TBB_ASSERT( my_slots[0].my_scheduler == LockedMaster && s != LockedMaster, NULL );
                            work_absent = !may_have_tasks( s, my_slots[0], tasks_present, dequeuing_possible );
                            __TBB_store_with_release( my_slots[0].my_scheduler, s );
                        }
                        my_market->my_arenas_list_mutex.unlock();
                        // The following loop is subject to data races. While the k-th slot's
                        // scheduler is being examined, the corresponding worker can either
                        // leave for RML or migrate to another arena.
                        // The races are not prevented because all of them are benign.
                        // First, the code relies on the fact that a worker thread's scheduler
                        // object persists until the whole library is deinitialized.
                        // Second, in the worst case the races can only cause another
                        // round of stealing attempts to be undertaken. Introducing complex
                        // synchronization into this coldest part of the scheduler's control
                        // flow does not seem worthwhile, because it is both unlikely to
                        // ever have any observable performance effect and would require
                        // additional synchronization code on the hotter paths.
                        for( k = 1; work_absent && k < n; ++k )
                            work_absent = !may_have_tasks( my_slots[k].my_scheduler, my_slots[k], tasks_present, dequeuing_possible );
                        // Preclude premature switching arena off because of a race in the previous loop.
                        work_absent = work_absent
                                      && !__TBB_load_with_acquire(my_orphaned_tasks)
                                      && abandonment_epoch == my_abandonment_epoch;
                    }
#endif /* __TBB_TASK_PRIORITY */
                    // Test and test-and-set.
                    if( prefix().my_pool_state==busy ) {
#if __TBB_TASK_PRIORITY
                        bool no_fifo_tasks = my_task_stream[top_priority].empty();
                        work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
                                      && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
#else
                        bool no_fifo_tasks = my_task_stream.empty();
                        work_absent = work_absent && no_fifo_tasks;
#endif /* __TBB_TASK_PRIORITY */
                        if( work_absent ) {
#if __TBB_TASK_PRIORITY
                            if ( top_priority > my_bottom_priority ) {
                                if ( my_market->lower_arena_priority(*this, top_priority - 1, top_priority)
                                     && !my_task_stream[top_priority].empty() )
                                {
                                    atomic_update( my_skipped_fifo_priority, top_priority, std::less<uintptr_t>());
                                }
                            }
                            else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
#endif /* __TBB_TASK_PRIORITY */
                                // save current demand value before setting SNAPSHOT_EMPTY,
                                // to avoid race with advertise_new_work.
                                int current_demand = (int)my_max_num_workers;
                                if( prefix().my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {
                                    // This thread transitioned pool to empty state, and thus is 
                                    // responsible for telling RML that there is no other work to do.
                                    my_market->adjust_demand( *this, -current_demand );
#if __TBB_TASK_PRIORITY
                                    // Check for the presence of enqueued tasks "lost" on some of
                                    // priority levels because updating arena priority and switching
                                    // arena into "populated" (FULL) state happen non-atomically.
                                    // Imposing atomicity would require task::enqueue() to use a lock,
                                    // which is unacceptable. 
                                    bool switch_back = false;
                                    for ( int p = 0; p < num_priority_levels; ++p ) {
                                        if ( !my_task_stream[p].empty() ) {
                                            switch_back = true;
                                            if ( p < my_bottom_priority || p > my_top_priority )
                                                my_market->update_arena_priority(*this, p);
                                        }
                                    }
                                    if ( switch_back )
                                        advertise_new_work<true>();
#endif /* __TBB_TASK_PRIORITY */
                                    return true;
                                }
                                return false;
#if __TBB_TASK_PRIORITY
                            }
#endif /* __TBB_TASK_PRIORITY */
                        }
                        // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it.
                        prefix().my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
                    }
                } 
                return false;
            }
            default:
                // Another thread is taking a snapshot.
                return false;
        }
    }
}
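
arena::is_out_of_work is a test and test-and-set over a three-valued pool state: SNAPSHOT_FULL means work may exist, SNAPSHOT_EMPTY means provably none, and a per-arena "busy" token (the arena's own address, which can never collide with FULL or EMPTY and so resists ABA) marks a snapshot in progress. The toy version below captures that state machine with compare_exchange; work_present() merely stands in for scanning the task pools and FIFO streams, and the class is a sketch, not the real arena.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct Arena {
    static constexpr std::uintptr_t SNAPSHOT_EMPTY = 0;
    static constexpr std::uintptr_t SNAPSHOT_FULL  = static_cast<std::uintptr_t>(-1);

    std::atomic<std::uintptr_t> pool_state{SNAPSHOT_FULL};

    // Stand-in for scanning the task pools and FIFO streams.
    bool work_present() const { return false; }

    bool is_out_of_work() {
        for (;;) {
            std::uintptr_t snapshot = pool_state.load();
            if (snapshot == SNAPSHOT_EMPTY) return true;
            if (snapshot == SNAPSHOT_FULL) {
                // Unique "busy" token: this arena's address, to avoid ABA with FULL/EMPTY.
                const std::uintptr_t busy = reinterpret_cast<std::uintptr_t>(this);
                std::uintptr_t expected = SNAPSHOT_FULL;
                if (!pool_state.compare_exchange_strong(expected, busy))
                    continue;                 // state changed under us; re-read it
                bool absent = !work_present();
                if (absent) {
                    // Test and test-and-set: commit EMPTY only if the state is still our token.
                    expected = busy;
                    if (pool_state.compare_exchange_strong(expected, SNAPSHOT_EMPTY))
                        return true;          // we are responsible for telling the server
                    return false;
                }
                // Work appeared meanwhile: undo FULL -> busy unless someone else already did.
                expected = busy;
                pool_state.compare_exchange_strong(expected, SNAPSHOT_FULL);
                return false;
            }
            return false;                      // another thread is taking a snapshot
        }
    }
};

int main() {
    Arena a;
    std::printf("out of work: %d\n", a.is_out_of_work());
    return 0;
}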
Example #25
 rml::job& job() {
     __TBB_ASSERT( my_job, "thread_map::value_type::job() called when !my_job" );
     return *my_job;
 }
concurrent_monitor::~concurrent_monitor() {
    abort_all();
    __TBB_ASSERT( waitset_ec.empty(), "waitset not empty?" );
}
Example #27
ptrdiff_t concurrent_queue_base::internal_size() const {
    __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );
    return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter);
}
Example #28
static __itt_string_handle *ITT_get_string_handle(int idx) {
    __TBB_ASSERT(idx >= 0, NULL);
    return idx < NUM_STRINGS ? strings_for_itt[idx].itt_str_handle : NULL;
}
Example #29
//! Release read lock on the given mutex
void spin_rw_mutex_v3::internal_release_reader()
{
    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
    ITT_NOTIFY(sync_releasing, this); // release reader
    __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER);
}
Example #30
void dummy_sync_rename( void* obj, const tchar* new_name ) {
    ITT_DoOneTimeInitialization();
    __TBB_ASSERT( ITT_Handler_sync_rename!=&dummy_sync_rename, NULL );
    ITT_SYNC_RENAME(obj, new_name);
}