Example #1
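 // Stores the shared test parameters and sizes both barriers (startB, mallocDone)
 // for threadNum participating threads.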
 static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) {
     threadNum = num;
     pool = pl;
     crossThread = crThread;
     afterTerm = aTerm;
     startB.initialize(threadNum);
     mallocDone.initialize(threadNum);
 }
Example #2
    size_t operator()(){
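        // Local struct with a static member function: the pre-C++11 way to hand a
        // "lambda" to NativeParallelFor via std::ptr_fun/std::bind1st (see below).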
        struct _{ static void retrieve_from_cache(self_type* _this, size_t thread_index){
            parameter_pack& p = _this->m_parameter_pack;
            access_sequence_type::iterator const begin_it = _this->m_access_sequence.begin() + thread_index * _this->per_thread_sample_size;
            access_sequence_type::iterator const end_it = begin_it + _this->per_thread_sample_size;

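            // Rendezvous: every worker reaches this point before the timed window begins.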
            _this->m_barrier.wait();
            tbb::tick_count start = tbb::tick_count::now();

            size_t local_loops_count = 0;
            do {
                size_t part_of_the_sample_so_far = (local_loops_count * p.time_check_granularity_ops) % _this->per_thread_sample_size;
                access_sequence_type::iterator const iteration_begin_it = begin_it + part_of_the_sample_so_far;
                access_sequence_type::iterator const iteration_end_it = iteration_begin_it +
                        (std::min)(p.time_check_granularity_ops, _this->per_thread_sample_size - part_of_the_sample_so_far);

                for (access_sequence_type::iterator it = iteration_begin_it; it < iteration_end_it; ++it){
                    typename cache_type::handle h = _this->m_cache(*it);
                    micro_benchmarking::utils::busy_wait(p.time_of_item_use_usec);
                    micro_benchmarking::utils::disable_elimination(h.value());
                }
                ++local_loops_count;
            } while ((tbb::tick_count::now() - start).seconds() < p.time_window_sec);
            _this->loops_count += local_loops_count;
        }};
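        // Size the barrier for every worker thread, then run retrieve_from_cache once per thread.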
        m_barrier.initialize(m_parameter_pack.threads_number);

        NativeParallelFor(m_parameter_pack.threads_number, std::bind1st(std::ptr_fun(&_::retrieve_from_cache), this));

        return loops_count * m_parameter_pack.time_check_granularity_ops;
    }
Example #3
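 // Functor constructor: captures the shared counters by reference and sizes the
 // start barrier for nThread_ participating threads.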
 BusyBodyScoped( int nThread_, int workRatiox100_, tbb::enumerable_thread_specific<double> &locals_, int &unprotected_count_, bool test_throw_) :
     locals(locals_),
     nThread(nThread_),
     WorkRatiox100(workRatiox100_),
     unprotected_count(unprotected_count_),
     test_throw(test_throw_) {
     sBarrier.initialize(nThread_);
 }
Example #4
int TestMain () {
    if ( P < 2 )
        return Harness::Skipped;
    theNumObservers = 0;
    theWorkersBarrier.initialize(P);
    // Fully- and under-utilized mode
    for ( int M = 1; M < P; M <<= 1 ) {
        if ( M > P/2 ) {
            ASSERT( P & (P-1), "Can get here only in case of non power of two cores" );
            M = P/2;
            if ( M & (M-1) )
                break; // Already tested this configuration
        }
        int T = P / M;
        ASSERT( T > 1, NULL );
        REMARK( "Masters: %d; Arena size: %d\n", M, T );
        theMasterBarrier.initialize(M);
        theGlobalBarrier.initialize(M * T);
        TestObserver(M, T, 0);
        TestObserver(M, T, tmLocalObservation | ( T==P? tmAutoinitialization : 0) );
        CleanLocalState();
        TestObserver(M, T, tmSynchronized);
        TestObserver(M, T, tmSynchronized | tmLocalObservation
#if __TBB_TASK_ARENA
                     | ( T==P? tmLeavingControl : 0)
#endif
                     );
    }
    // Oversubscribed mode
    for ( int i = 0; i < 5; ++i ) {
        REMARK( "Masters: %d; Arena size: %d\n", P-1, P );
        TestObserver(P-1, P, 0);
        TestObserver(P-1, P, tmLocalObservation);
    }
    Harness::Sleep(20);
    return Harness::Done;
}
Example #5
// -- test of producer/consumer with atomic buffer count and semaphore
// totTokens is the total number of tokens passed through the pipe
// nTokens is the number of tokens in flight at a time (buffer slots)
// pWait is the wait time for the producer
// cWait is the wait time for the consumer
void testProducerConsumer( unsigned totTokens, unsigned nTokens, unsigned pWait, unsigned cWait) {
    semaphore pSem;
    semaphore cSem;
    tbb::atomic<unsigned> pTokens;
    tbb::atomic<unsigned> cTokens;
    cTokens = 0;
    unsigned cBuffer[MAX_TOKENS];
    FilterBase* myFilters[2];  // one producer, one consumer
    REMARK("Testing producer/consumer with %u total tokens, %u tokens at a time, producer wait(%u), consumer wait(%u)\n", totTokens, nTokens, pWait, cWait);
    ASSERT(nTokens <= MAX_TOKENS, "Not enough slots for tokens");
    myFilters[0] = new FilterBase(imaProducer, totTokens, pTokens, cTokens, pWait, cSem, pSem, (unsigned *)NULL, &(cBuffer[0]));
    myFilters[1] = new FilterBase(imaConsumer, totTokens, cTokens, pTokens, cWait, pSem, cSem, cBuffer, (unsigned *)NULL);
    pTokens = nTokens;
    ProduceConsumeBody myBody(myFilters);
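    // Two native threads participate: one producer and one consumer.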
    sBarrier.initialize(2);
    NativeParallelFor(2, myBody);
    delete myFilters[0];
    delete myFilters[1];
}
Example #6
 Body( int nthread_, int niters_ ) : nthread(nthread_), nIters(niters_) { sBarrier.initialize(nthread_); }
Example #7
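 // Constructor: wires up the shared state, sizes the start barrier for nThread_
 // threads, and resets the shared counter pCount.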
 Body(int nThread_, int nIter_, semaphore &mySem_,
         vector<int>& ourCounts_,
         vector<double>& tottime_
         ) : nThreads(nThread_), nIters(nIter_), mySem(mySem_), ourCounts(ourCounts_), tottime(tottime_) {
     sBarrier.initialize(nThread_);
     pCount = 0;
 }
Example #8
 static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
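Every example above follows the same pattern: a barrier is sized with initialize(n) before n native threads are spawned, and each thread later calls wait() on it so that timing or counting starts only once all participants are ready. Below is a minimal, self-contained sketch of that pattern. SimpleBarrier is a hypothetical stand-in built on the C++ standard library, not the actual harness barrier class or NativeParallelFor used by the TBB tests; it only mirrors the initialize()/wait() shape seen in the snippets.

// Sketch only: SimpleBarrier is a hypothetical stand-in for the test-harness
// barrier used above, implemented with the standard library for portability.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class SimpleBarrier {
    std::mutex m;
    std::condition_variable cv;
    unsigned expected = 0, arrived = 0, generation = 0;
public:
    void initialize(unsigned nThreads) {       // size the barrier before spawning threads
        std::lock_guard<std::mutex> lock(m);
        expected = nThreads;
        arrived = 0;
    }
    void wait() {                              // block until all expected threads have arrived
        std::unique_lock<std::mutex> lock(m);
        unsigned gen = generation;
        if (++arrived == expected) {
            ++generation;                      // release everyone and reset for reuse
            arrived = 0;
            cv.notify_all();
        } else {
            cv.wait(lock, [&]{ return gen != generation; });
        }
    }
};

static SimpleBarrier startBarrier;

int main() {
    const unsigned nThreads = 4;
    startBarrier.initialize(nThreads);         // same initialize-before-spawn pattern as the examples
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < nThreads; ++i)
        workers.emplace_back([i]{
            startBarrier.wait();               // no thread proceeds until all have arrived
            std::printf("thread %u released\n", i);
        });
    for (std::thread& t : workers)
        t.join();
    return 0;
}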