Example #1
    void operator()( int id ) const {
        const int ITERS = 1000;
        void *local[ITERS];

        startB.wait();
        for (int i=id*OBJ_CNT; i<(id+1)*OBJ_CNT; i++) {
            afterTerm[i] = pool_malloc(pool, i%2? 8*1024 : 9*1024);
            memset(afterTerm[i], i, i%2? 8*1024 : 9*1024);
            crossThread[i] = pool_malloc(pool, i%2? 9*1024 : 8*1024);
            memset(crossThread[i], i, i%2? 9*1024 : 8*1024);
        }

        for (int i=1; i<ITERS; i+=2) {
            local[i-1] = pool_malloc(pool, 6*1024);
            memset(local[i-1], i, 6*1024);
            local[i] = pool_malloc(pool, 16*1024);
            memset(local[i], i, 16*1024);
        }
        mallocDone.wait();
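        // After the allocation phase completes, each thread frees the crossThread
        // objects allocated by its "victim" (the thread with the mirrored id),
        // exercising cross-thread deallocation in the pool.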
        int myVictim = threadNum-id-1;
        for (int i=myVictim*OBJ_CNT; i<(myVictim+1)*OBJ_CNT; i++)
            pool_free(pool, crossThread[i]);
        for (int i=0; i<ITERS; i++)
            pool_free(pool, local[i]);
    }
Example #2
    void operator() ( int id ) const {
        ASSERT( id < 2, "Only two test driver threads are expected" );
        // a barrier is required to ensure that both threads have started; otherwise the test may deadlock:
        // the first thread would execute FireAndForgetTask at shutdown and wait for FafCanFinish,
        // while the second thread wouldn't even start waiting for the loader lock held by the first one.
        if ( id == 0 ) {
            driver_barrier.wait();
            // Prepare global data
            g_Root1 = new( tbb::task::allocate_root() ) tbb::empty_task;
            g_Root2 = new( tbb::task::allocate_root() ) tbb::empty_task;
            g_Root3 = new( tbb::task::allocate_root() ) tbb::empty_task;
            g_Task = new( g_Root3->allocate_child() ) tbb::empty_task;
            g_Root3->set_ref_count(2);
            // Run tests
            NativeParallelFor( NumTestFuncs, TestThreadBody() );
            ASSERT( g_NumTestsExecuted == NumTestFuncs, "Test driver: Wrong number of tests executed" );

            // This test checks the validity of temporarily restoring the value of 
            // the last TLS slot for a given key during the termination of an 
            // auto-initialized master thread (in governor::auto_terminate). 
            // If anything goes wrong, generic_scheduler::cleanup_master() will assert.
            // The context for this task must remain valid until the task completes.
            tbb::task &r = *new( tbb::task::allocate_root(*g_Ctx) ) FireAndForgetTask;
            r.spawn(r);
        }
        else {
            tbb::task_group_context ctx;
            g_Ctx = &ctx;
            driver_barrier.wait();
            spin_wait_until_eq( FafStarted, true );
            UseAFewNewTlsKeys();
            FafCanFinish = true;
            spin_wait_until_eq( FafCompleted, true );
        }
    }
Example #3
 void operator()(int id) const {
     barrier->wait();
     Harness::Sleep(2*id);
     void *o = pool_malloc(pool, id%2? 64 : 128*1024);
     barrier->wait();
     pool_free(pool, o);
 }
Example #4
    void Run ( uint_t idx ) {
#if TBBTEST_USE_TBB
        tbb::task_scheduler_init init;
#endif
        AssertLive();
        if ( idx == 0 ) {
            ASSERT ( !m_taskGroup && !m_tasksSpawned, "SharedGroupBody must be reset before reuse");
            m_taskGroup = new Concurrency::task_group;
            Spawn( c_numTasks0 );
            Wait();
            if ( m_sharingMode & VagabondGroup )
                m_barrier.wait();
            else
                DeleteTaskGroup();
        }
        else {
            while ( m_tasksSpawned == 0 )
                __TBB_Yield();
            ASSERT ( m_taskGroup, "Task group is not initialized");
            Spawn (c_numTasks1);
            if ( m_sharingMode & ParallelWait )
                Wait();
            if ( m_sharingMode & VagabondGroup ) {
                ASSERT ( idx == 1, "In vagabond mode SharedGroupBody must be used with 2 threads only" );
                m_barrier.wait();
                DeleteTaskGroup();
            }
        }
        AssertLive();
    }
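The core API SharedGroupBody drives is just task_group::run()/wait(); a minimal sketch, assuming Spawn() issues run() calls and Wait() forwards to wait() (the functor name is a placeholder):

    struct MyTask { void operator()() const { /* work */ } }; // hypothetical task body
    Concurrency::task_group tg;
    tg.run( MyTask() );
    tg.wait();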
Example #5
 static void init(int num, rml::MemoryPool *pl, void **crThread, void **aTerm) {
     threadNum = num;
     pool = pl;
     crossThread = crThread;
     afterTerm = aTerm;
     startB.initialize(threadNum);
     mallocDone.initialize(threadNum);
 }
Example #6
 void operator()(int id) const {
     if (!id) {
         Harness::LIBRARY_HANDLE lib =
             Harness::OpenLibrary(TEST_LIBRARY_NAME("test_malloc_used_by_lib_dll"));
         ASSERT(lib, "Can't load " TEST_LIBRARY_NAME("test_malloc_used_by_lib_dll"));
         runPtr = Harness::GetAddress(lib, "callDll");
         unloadCallback.lib = lib;
     }
     startBarr.wait();
     (*runPtr)();
     endBarr.wait(unloadCallback);
 }
Example #7
 void operator()(int id) const {
     void *p = pool_malloc(pool, id%2? 8 : 9000);
     ASSERT(p && liveRegions, NULL);
     barrier->wait();
     if (!id) {
         bool ok = pool_destroy(pool);
         ASSERT(ok, NULL);
         ASSERT(!liveRegions, "Expected all regions were released.");
     }
     // the other threads must wait until the pool is destroyed, so that
     // their thread-exit cleanup does not run before the destruction
     barrier->wait();
 }
Example #8
    size_t operator()(){
        struct _{ static void retrieve_from_cache(self_type* _this, size_t thread_index){
            parameter_pack& p = _this->m_parameter_pack;
            access_sequence_type::iterator const begin_it = _this->m_access_sequence.begin() + thread_index * _this->per_thread_sample_size;
            access_sequence_type::iterator const end_it = begin_it + _this->per_thread_sample_size;

            _this->m_barrier.wait();
            tbb::tick_count start = tbb::tick_count::now();

            size_t local_loops_count = 0;
            do {
                size_t part_of_the_sample_so_far = (local_loops_count * p.time_check_granularity_ops) % _this->per_thread_sample_size;
                access_sequence_type::iterator const iteration_begin_it = begin_it + part_of_the_sample_so_far;
                access_sequence_type::iterator const iteration_end_it = iteration_begin_it +
                        (std::min)(p.time_check_granularity_ops, _this->per_thread_sample_size - part_of_the_sample_so_far);

                for (access_sequence_type::iterator it = iteration_begin_it; it < iteration_end_it; ++it){
                    typename cache_type::handle h = _this->m_cache(*it);
                    micro_benchmarking::utils::busy_wait(p.time_of_item_use_usec);
                    micro_benchmarking::utils::disable_elimination(h.value());
                }
                ++local_loops_count;
            } while ((tbb::tick_count::now() - start).seconds() < p.time_window_sec);
            _this->loops_count += local_loops_count;
        }};
        m_barrier.initialize(m_parameter_pack.threads_number);

        NativeParallelFor(m_parameter_pack.threads_number, std::bind1st(std::ptr_fun(&_::retrieve_from_cache), this));

        return loops_count * m_parameter_pack.time_check_granularity_ops;
    }
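Note: std::bind1st and std::ptr_fun, used above, were deprecated in C++11 and removed in C++17. On a newer toolchain the same dispatch could be written with a lambda; a sketch of the equivalent call inside operator():

    NativeParallelFor(m_parameter_pack.threads_number,
                      [this](size_t thread_index){ _::retrieve_from_cache(this, thread_index); });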
Example #9
    void operator()( int id ) const {
        rml::MemPoolPolicy pol(CrossThreadGetMem, CrossThreadPutMem);
        const int objLen = 10*id;

        pool_create_v1(id, &pol, &pool[id]);
        obj[id] = (char*)pool_malloc(pool[id], objLen);
        ASSERT(obj[id], NULL);
        memset(obj[id], id, objLen);

        {
            const size_t lrgSz = 2*16*1024;
            void *ptrLarge = pool_malloc(pool[id], lrgSz);
            ASSERT(ptrLarge, NULL);
            memset(ptrLarge, 1, lrgSz);

            // consume all small objects
            while (pool_malloc(pool[id], 5*1024))
                ;
            // releasing the large object may give a chance to allocate more small ones
            pool_free(pool[id], ptrLarge);

            ASSERT(pool_malloc(pool[id], 5*1024), NULL);
        }

        barrier.wait();
        int myPool = number_of_threads-id-1;
        for (int i=0; i<10*myPool; i++)
            ASSERT(myPool==obj[myPool][i], NULL);
        pool_free(pool[myPool], obj[myPool]);
        pool_destroy(pool[myPool]);
    }
Example #10
 void operator() (int) const {
     tbb::internal::spin_wait_while_eq(gPtr, (void*)NULL);
     scalable_free(gPtr);
     my_barr->wait();
     my_ward.wait_to_finish();
     ++FinishedTasks;
 }
Example #11
 void operator()( int /*id*/ ) const {
     startB->wait();
     for (int i=0; i<iters; i++) {
         void *o = pool_malloc(pool, reqSize);
         ASSERT(o, NULL);
         pool_free(pool, o);
     }
 }
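Several of these bodies share the same tbbmalloc pool lifecycle. A minimal sketch, where getMemCallback/putMemCallback stand in for the user-supplied backing-memory routines (the tests use e.g. CrossThreadGetMem/CrossThreadPutMem):

    rml::MemPoolPolicy pol(getMemCallback, putMemCallback); // placeholder callbacks
    rml::MemoryPool *pool;
    pool_create_v1(/*pool_id=*/0, &pol, &pool);
    void *p = pool_malloc(pool, 1024);
    pool_free(pool, p);
    bool ok = pool_destroy(pool); // returns a success flag (see Example #7)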
Example #12
 BusyBodyScoped( int nThread_, int workRatiox100_, tbb::enumerable_thread_specific<double> &locals_, int &unprotected_count_, bool test_throw_) :
     locals(locals_),
     nThread(nThread_),
     WorkRatiox100(workRatiox100_),
     unprotected_count(unprotected_count_),
     test_throw(test_throw_) {
     sBarrier.initialize(nThread_);
 }
Example #13
 /*override*/ tbb::task* execute() {
     ASSERT( !(~theLocalState->m_flags & m_flag), NULL );
     if( N < 2 )
         return NULL;
     bool globalBarrierActive = false;
     if ( theLocalState->m_isMaster ) {
         if ( theGlobalBarrierActive ) {
             // This is the root task. Its N is equal to the number of threads.
             // Spawn a task for each worker.
             set_ref_count(N);
             for ( int i = 1; i < N; ++i )
                 spawn( *new( allocate_child() ) FibTask(20, m_flag, m_observer) );
             if ( theTestMode & tmSynchronized ) {
                 theGlobalBarrier.wait();
                 ASSERT( m_observer.m_entries >= N, "Wrong number of on_entry calls after the first barrier" );
                 // All the spawned tasks have been stolen by workers.
                 // Now wait for workers to spawn some more tasks for this thread to steal back.
                 theGlobalBarrier.wait();
                 ASSERT( !theGlobalBarrierActive, "Workers are expected to have reset this flag" );
             }
             else
                 theGlobalBarrierActive = false;
             wait_for_all();
             return NULL;
         }
     }
     else {
         if ( theGlobalBarrierActive ) {
             if ( theTestMode & tmSynchronized ) {
                 theGlobalBarrier.wait();
                 globalBarrierActive = true;
             }
             theGlobalBarrierActive = false;
         }
     }
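      // Ref count is the two children spawned below plus one extra for the
      // blocking wait_for_all() call, as the old tbb::task API requires.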
     set_ref_count(3);
     spawn( *new( allocate_child() ) FibTask(N-1, m_flag, m_observer) );
     spawn( *new( allocate_child() ) FibTask(N-2, m_flag, m_observer) );
     if ( globalBarrierActive ) {
         // It's the first task executed by a worker. Release the master thread.
         theGlobalBarrier.wait();
     }
     wait_for_all();
     return NULL;
 }
Example #14
 void operator()( int /*id*/ ) const {
     const int ITERS = 10000;
     startB->wait();
     for (int i=0; i<ITERS; i++) {
         void *o = pool_malloc(pool, reqSize);
         ASSERT(o, NULL);
         pool_free(pool, o);
     }
 }
Example #15
 void Wait () {
     while ( m_threadsReady != m_numThreads )
         __TBB_Yield();
     const uint_t numSpawned = c_numTasks0 + c_numTasks1 * (m_numThreads - 1);
     ASSERT ( m_tasksSpawned == numSpawned, "Wrong number of spawned tasks. The test is broken" );
     REMARK("Max spawning parallelism is %u out of %u\n", Harness::ConcurrencyTracker::PeakParallelism(), g_MaxConcurrency);
     if ( m_sharingMode & ParallelWait ) {
         m_barrier.wait( &Harness::ConcurrencyTracker::Reset );
         {
             Harness::ConcurrencyTracker ct;
             m_taskGroup->wait();
         }
         if ( Harness::ConcurrencyTracker::PeakParallelism() == 1 )
             REPORT ( "Warning: No parallel waiting detected in TestParallelWait\n" );
         m_barrier.wait();
     }
     else
         m_taskGroup->wait();
     ASSERT ( m_tasksSpawned == numSpawned, "No tasks should be spawned after wait starts. The test is broken" );
     ASSERT ( s_tasksExecuted == numSpawned, "Not all spawned tasks were executed" );
 }
Example #16
 void operator()(int thread_id ) const {
     bool existed;
     sBarrier.wait();
     for(int i = 0; i < nIters; ++i ) {
         existed = thread_id & 1;
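         // 'existed' is seeded with an arbitrary value so the test can verify
         // that local(bool&) overwrites it with the real lookup result.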
         int oldval = locals->local(existed);
         ASSERT(existed == (i > 0), "Error on first reference");
         ASSERT(!existed || (oldval == thread_id), "Error on fetched value");
         existed = thread_id & 1;
         locals->local(existed) = thread_id;
         ASSERT(existed, "Error on assignment");
     }
 }
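For reference, the contract of enumerable_thread_specific::local(bool& exists) that this body checks, as a minimal standalone sketch:

    tbb::enumerable_thread_specific<int> ets;
    bool exists;
    int &v = ets.local(exists); // first call on a thread creates the slot, exists == false
    v = 42;
    ets.local(exists);          // subsequent calls on that thread: exists == true, same slot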
Example #17
void State::exercise( bool is_owner ) {
    barrier.wait();
    if( is_owner ) {
        Cover(0);
        if( ja.try_acquire() ) {
            Cover(1);
            ++job_created; 
            ja.set_and_release(job);
            Cover(2);
            if( ja.try_acquire() ) {
                Cover(3);
                ja.release();
                Cover(4);
                if( ja.try_acquire() ) {
                    Cover(5);
                    ja.release();
                }
            }
            Cover(6);
        } else {
            Cover(7);
        }
        if( DelayMask & (1<<N) ) {
            while( !job_received ) 
                __TBB_Yield();
        }
    } else {
        // Use an extra bit of DelayMask to choose whether to call wait_for_job.
        if( DelayMask & (1<<N) ) {
            rml::job* j = &ja.wait_for_job();
            if( j!=&job ) REPORT("%p\n",j);
            ASSERT( j==&job, NULL );
            job_received = true;
        }
        Cover(8);
    }   
    rml::job* j;
    if( ja.try_plug(j) ) {
        ASSERT( j==&job || !j, NULL );
        if( j ) {
            Cover(9+is_owner);
            ++job_destroyed;
        } else {
            __TBB_ASSERT( !is_owner, "owner failed to create job but plugged self" );
            Cover(11);
        } 
    } else {
        Cover(12+is_owner);
    }
}
Example #18
int TestMain () {
    if ( P < 2 )
        return Harness::Skipped;
    theNumObservers = 0;
    theWorkersBarrier.initialize(P);
    // Fully- and under-utilized mode
    for ( int M = 1; M < P; M <<= 1 ) {
        if ( M > P/2 ) {
            ASSERT( P & (P-1), "Can get here only if P is not a power of two" );
            M = P/2;
            if ( M & (M-1) )
                break; // Already tested this configuration
        }
        int T = P / M;
        ASSERT( T > 1, NULL );
        REMARK( "Masters: %d; Arena size: %d\n", M, T );
        theMasterBarrier.initialize(M);
        theGlobalBarrier.initialize(M * T);
        TestObserver(M, T, 0);
        TestObserver(M, T, tmLocalObservation | ( T==P? tmAutoinitialization : 0) );
        CleanLocalState();
        TestObserver(M, T, tmSynchronized);
        TestObserver(M, T, tmSynchronized | tmLocalObservation
#if __TBB_TASK_ARENA
                     | ( T==P? tmLeavingControl : 0)
#endif
                     );
    }
    // Oversubscribed mode
    for ( int i = 0; i < 5; ++i ) {
        REMARK( "Masters: %d; Arena size: %d\n", P-1, P );
        TestObserver(P-1, P, 0);
        TestObserver(P-1, P, tmLocalObservation);
    }
    Harness::Sleep(20);
    return Harness::Done;
}
Example #19
void operator()(const int tid) const {
    sBarrier.wait();
    for(int i=0; i < nIters; ++i) {
        Harness::Sleep( tid * tickCounts );
        tbb::tick_count t0 = tbb::tick_count::now();
        mySem.P();
        tbb::tick_count t1 = tbb::tick_count::now();
        tottime[tid] += (t1-t0).seconds();
        int curval = ++pCount;
        if(curval > ourCounts[tid]) ourCounts[tid] = curval;
        Harness::Sleep( innerWait );
        --pCount;
        ASSERT((int)pCount >= 0, NULL);
        mySem.V();
    }
}
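Here tottime[tid] accumulates the time the thread spends blocked in P(), and ourCounts[tid] records the highest value of the shared counter pCount the thread ever observed, i.e. the peak number of threads simultaneously inside the semaphore-protected region.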
Example #20
    void operator()(const int /* threadID */ ) const {
        int nIters = MAX_WORK/nThread;
        sBarrier.wait();
        tbb::tick_count t0 = tbb::tick_count::now();
        for(int j = 0; j < nIters; j++) {

            for(int i = 0; i < MAX_WORK * (100 - WorkRatiox100); i++) {
                locals.local() += 1.0;
            }
            {
                tbb::critical_section::scoped_lock my_lock(cs);
                for(int i = 0; i < MAX_WORK * WorkRatiox100; i++) {
                    locals.local() += 1.0;
                }
                unprotected_count++;
            }
        }
        locals.local() = (tbb::tick_count::now() - t0).seconds();
    }
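The scoped_lock above is the RAII form of tbb::critical_section (which is non-reentrant, as Example #25 asserts via try_lock); a minimal sketch:

    tbb::critical_section cs;
    {
        tbb::critical_section::scoped_lock lock(cs); // acquires the lock
        // ... protected work ...
    } // released when 'lock' leaves scope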
Example #21
// -- test of producer/consumer with an atomic buffer count and a semaphore
// totTokens is the total number of tokens to push through the pipe
// nTokens is the number of buffer slots (tokens in flight at a time)
// pWait is the wait time for the producer
// cWait is the wait time for the consumer
void testProducerConsumer( unsigned totTokens, unsigned nTokens, unsigned pWait, unsigned cWait) {
    semaphore pSem;
    semaphore cSem;
    tbb::atomic<unsigned> pTokens;
    tbb::atomic<unsigned> cTokens;
    cTokens = 0;
    unsigned cBuffer[MAX_TOKENS];
    FilterBase* myFilters[2];  // one producer, one consumer
    REMARK("Testing producer/consumer with %lu total tokens, %lu tokens at a time, producer wait(%lu), consumer wait (%lu)\n", totTokens, nTokens, pWait, cWait);
    ASSERT(nTokens <= MAX_TOKENS, "Not enough slots for tokens");
    myFilters[0] = new FilterBase(imaProducer, totTokens, pTokens, cTokens, pWait, cSem, pSem, (unsigned *)NULL, &(cBuffer[0]));
    myFilters[1] = new FilterBase(imaConsumer, totTokens, cTokens, pTokens, cWait, pSem, cSem, cBuffer, (unsigned *)NULL);
    pTokens = nTokens;
    ProduceConsumeBody myBody(myFilters);
    sBarrier.initialize(2);
    NativeParallelFor(2, myBody);
    delete myFilters[0];
    delete myFilters[1];
}
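In the handshake set up here, pTokens/cTokens count the free slots on each side, and a side calls V() on its peer's semaphore only when its counter transitions from zero (the if(temp == 1) nextSem.V() in Examples #22 and #23), so a wakeup is posted exactly when the peer may be blocked in P().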
Example #22
void FilterBase::Consume(const int /*tid*/) {
    unsigned myToken;
    sBarrier.wait();
    do {
        while(!myTokens)
            mySem.P();
        // we have a slot available.
        --myTokens;  // moving this down reduces spurious wakeups
        myToken = myBuffer[curToken&(MAX_TOKENS-1)];
        if(myToken) {
            ASSERT(myToken == curToken*3+1, "Error in received token");
            ++curToken;
            Harness::Sleep(myWait);
            unsigned temp = ++otherTokens;
            if(temp == 1)
                nextSem.V();
        }
    } while(myToken);
    // end of processing
    ASSERT(curToken + 1 == totTokens, "Didn't receive enough tokens");
}
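Note that myBuffer[curToken & (MAX_TOKENS-1)] treats the buffer as a ring; the mask is equivalent to curToken % MAX_TOKENS only when MAX_TOKENS is a power of two, which the test relies on.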
Example #23
// send a bunch of non-zero "tokens" to the consumer, then a terminating zero.
void FilterBase::Produce(const int /*tid*/) {
    nextBuffer[0] = 0;  // just in case we provide no tokens
    sBarrier.wait();
    while(totTokens) {
        while(!myTokens)
            mySem.P();
        // we have a slot available.
        --myTokens;  // moving this down reduces spurious wakeups
        --totTokens;
        if(totTokens)
            nextBuffer[curToken&(MAX_TOKENS-1)] = curToken*3+1;
        else
            nextBuffer[curToken&(MAX_TOKENS-1)] = 0;  // a zero token tells the consumer to stop
        ++curToken;
        Harness::Sleep(myWait);
        unsigned temp = ++otherTokens;
        if(temp == 1)
            nextSem.V();
    }
    nextSem.V();  // final wakeup
}
Example #24
 void operator()( int i ) const {
     theLocalState->m_isMaster = true;
     uintptr_t f = i <= MaxFlagIndex ? 1<<i : 0;
     MyObserver o(f);
     if ( theTestMode & tmSynchronized )
         theMasterBarrier.wait();
     // when mode is local observation but not synchronized and when num threads == default
     if ( theTestMode & tmAutoinitialization )
         o.observe(true); // test autoinitialization can be done by observer
     // when mode is local synchronized observation and when num threads == default
     if ( theTestMode & tmLeavingControl )
         o.test_leaving();
     // Observer in enabled state must outlive the scheduler to ensure that
     // all exit notifications are called.
     tbb::task_scheduler_init init(m_numThreads);
     // when local & non-autoinitialized observation mode
     if ( theTestMode & tmLocalObservation )
         o.observe(true);
     for ( int j = 0; j < 2; ++j ) {
         tbb::task &t = *new( tbb::task::allocate_root() ) FibTask(m_numThreads, f, o);
         tbb::task::spawn_root_and_wait(t);
         thePrevMode = theTestMode;
     }
     if( o.is_leaving_test() ) {
         REMARK( "Testing on_scheduler_leaving()\n");
         ASSERT(o.m_workerEntries > 0, "Unbelievable");
         // TODO: start from 0?
         for ( int j = o.m_workerExits; j < o.m_workerEntries; j++ ) {
             REMARK( "Round %d: entries %d, exits %d\n", j, (int)o.m_workerEntries, (int)o.m_workerExits );
             ASSERT_WARNING(o.m_workerExits == j, "Workers unexpectedly left the arena");
             o.dismiss_one();
             double n_seconds = 5;
             (Harness::TimedWaitWhileEq(n_seconds))(o.m_workerExits, j);
             ASSERT( n_seconds >= 0, "Time out while waiting for a worker to leave arena");
             __TBB_Yield();
         }
     }
 }
Example #25
    void operator()(const int /* threadID */ ) const {
        int nIters = MAX_WORK/nThread;
        sBarrier.wait();
        tbb::tick_count t0 = tbb::tick_count::now();
        for(int j = 0; j < nIters; j++) {

            for(int i = 0; i < MAX_WORK * (100 - WorkRatiox100); i++) {
                locals.local() += 1.0;
            }
            cs.lock();
            ASSERT( !cs.try_lock(), "recursive try_lock must fail" );
#if TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
            if(test_throw && j == (nIters / 2)) {
                bool was_caught = false,
                     unknown_exception = false;
                try {
                    cs.lock();
                }
                catch(tbb::improper_lock& e) {
                    ASSERT( e.what(), "Error message is absent" );
                    was_caught = true;
                }
                catch(...) {
                    was_caught = unknown_exception = true;
                }
                ASSERT(was_caught, "Recursive lock attempt did not throw");
                ASSERT(!unknown_exception, "tbb::improper_lock exception is expected");
            }
#endif /* TBB_USE_EXCEPTIONS && !__TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN  */
            for(int i = 0; i < MAX_WORK * WorkRatiox100; i++) {
                locals.local() += 1.0;
            }
            unprotected_count++;
            cs.unlock();
        }
        locals.local() = (tbb::tick_count::now() - t0).seconds();
    }
Example #26
 static void initBarrier(unsigned thrds) { barrier.initialize(thrds); }
Example #27
void CMemTest::NULLReturn(UINT MinSize, UINT MaxSize, int total_threads)
{
    const int MB_PER_THREAD = TOTAL_MB_ALLOC / total_threads;
    // the number of 1024-byte allocations guaranteed to exhaust the per-thread limit and yield NULL
    const int MAXNUM_1024 = (MB_PER_THREAD + (MB_PER_THREAD>>2)) * 1024;

    std::vector<MemStruct> PointerList;
    void *tmp;
    CountErrors=0;
    int CountNULL, num_1024;
    if (FullLog) REPORT("\nNULL return & check errno:\n");
    UINT Size;
    Limit limit_total(TOTAL_MB_ALLOC), no_limit(0);
    void **buf_1024 = (void**)Tmalloc(MAXNUM_1024*sizeof(void*));

    ASSERT(buf_1024, NULL);
    /* We must have space for the pointers before the memory limit is hit.
       Reserve enough for the worst case, taking into account the race
       between threads for the limited space.
    */
    PointerList.reserve(TOTAL_MB_ALLOC*MByte/MinSize);

    /* There is a bug in the specific version of GLIBC (2.5-12) shipped
       with RHEL5 that causes the test to misbehave on Intel64 and IPF
       systems when the setrlimit-related part is enabled.
       Switching to GLIBC 2.5-18 from RHEL5.1 resolves the issue.
     */
    if (perProcessLimits)
        limitBarrier->wait(limit_total);
    else
        limitMem(MB_PER_THREAD);

    /* regression test for a bug where the allocator dereferenced NULL
       when memory was exhausted
    */
    for (num_1024=0; num_1024<MAXNUM_1024; num_1024++) {
        buf_1024[num_1024] = Tcalloc(1024, 1);
        if (! buf_1024[num_1024]) {
            ASSERT_ERRNO(errno == ENOMEM, NULL);
            break;
        }
    }
    for (int i=0; i<num_1024; i++)
        Tfree(buf_1024[i]);
    Tfree(buf_1024);

    do {
        Size=rand()%(MaxSize-MinSize)+MinSize;
        tmp=Tmalloc(Size);
        if (tmp != NULL)
        {
            myMemset(tmp, 0, Size);
            PointerList.push_back(MemStruct(tmp, Size));
        }
    } while(tmp != NULL);
    ASSERT_ERRNO(errno == ENOMEM, NULL);
    if (FullLog) REPORT("\n");

    // preparation complete, now running tests
    // malloc
    if (FullLog) REPORT("malloc....");
    CountNULL = 0;
    while (CountNULL==0)
        for (int j=0; j<COUNT_TESTS; j++)
        {
            Size=rand()%(MaxSize-MinSize)+MinSize;
            errno = ENOMEM+j+1;
            tmp=Tmalloc(Size);
            if (tmp == NULL)
            {
                CountNULL++;
                if ( CHECK_ERRNO(errno != ENOMEM) ) {
                    CountErrors++;
                    if (ShouldReportError()) REPORT("NULL returned, error: errno (%d) != ENOMEM\n", errno);
                }
            }
            else
            {
                // Technically, if malloc returns a non-NULL pointer, it is allowed to set errno anyway.
                // However, on most systems it does not set errno.
                bool known_issue = false;
#if __linux__
                if( CHECK_ERRNO(errno==ENOMEM) ) known_issue = true;
#endif /* __linux__ */
                if ( CHECK_ERRNO(errno != ENOMEM+j+1) && !known_issue) {
                    CountErrors++;
                    if (ShouldReportError()) REPORT("error: errno changed to %d though valid pointer was returned\n", errno);
                }
                myMemset(tmp, 0, Size);
                PointerList.push_back(MemStruct(tmp, Size));
            }
        }
    if (FullLog) REPORT("end malloc\n");
    if (CountErrors) REPORT("%s\n",strError);
    else if (FullLog) REPORT("%s\n",strOk);
    error_occurred |= ( CountErrors>0 ) ;

    CountErrors=0;
    //calloc
    if (FullLog) REPORT("calloc....");
    CountNULL = 0;
    while (CountNULL==0)
        for (int j=0; j<COUNT_TESTS; j++)
        {
            Size=rand()%(MaxSize-MinSize)+MinSize;
            errno = ENOMEM+j+1;
            tmp=Tcalloc(COUNT_ELEM_CALLOC,Size);
            if (tmp == NULL)
            {
                CountNULL++;
                if ( CHECK_ERRNO(errno != ENOMEM) ){
                    CountErrors++;
                    if (ShouldReportError()) REPORT("NULL returned, error: errno(%d) != ENOMEM\n", errno);
                }
            }
            else
            {
                // Technically, if calloc returns a non-NULL pointer, it is allowed to set errno anyway.
                // However, on most systems it does not set errno.
                bool known_issue = false;
#if __linux__
                if( CHECK_ERRNO(errno==ENOMEM) ) known_issue = true;
#endif /* __linux__ */
                if ( CHECK_ERRNO(errno != ENOMEM+j+1) && !known_issue ) {
                    CountErrors++;
                    if (ShouldReportError()) REPORT("error: errno changed to %d though valid pointer was returned\n", errno);
                }
                PointerList.push_back(MemStruct(tmp, Size));
            }
        }
    if (FullLog) REPORT("end calloc\n");
    if (CountErrors) REPORT("%s\n",strError);
    else if (FullLog) REPORT("%s\n",strOk);
    error_occurred |= ( CountErrors>0 ) ;
    CountErrors=0;
    if (FullLog) REPORT("realloc....");
    CountNULL = 0;
    if (PointerList.size() > 0)
        while (CountNULL==0)
            for (size_t i=0; i<(size_t)COUNT_TESTS && i<PointerList.size(); i++)
            {
                errno = 0;
                tmp=Trealloc(PointerList[i].Pointer,PointerList[i].Size*2);
                if (PointerList[i].Pointer == tmp) // the same place
                {
                    bool known_issue = false;
#if __linux__
                    if( errno==ENOMEM ) known_issue = true;
#endif /* __linux__ */
                    if (errno != 0 && !known_issue) {
                        CountErrors++;
                        if (ShouldReportError()) REPORT("valid pointer returned, error: errno not kept\n");
                    }
                    PointerList[i].Size *= 2;
                }
                else if (tmp != PointerList[i].Pointer && tmp != NULL) // another place
                {
                    bool known_issue = false;
#if __linux__
                    if( errno==ENOMEM ) known_issue = true;
#endif /* __linux__ */
                    if (errno != 0 && !known_issue) {
                        CountErrors++;
                        if (ShouldReportError()) REPORT("valid pointer returned, error: errno not kept\n");
                    }
                    // the newly allocated area has to be zeroed
                    myMemset((char*)tmp + PointerList[i].Size, 0, PointerList[i].Size);
                    PointerList[i].Pointer = tmp;
                    PointerList[i].Size *= 2;
                }
                else if (tmp == NULL)
                {
                    CountNULL++;
                    if ( CHECK_ERRNO(errno != ENOMEM) )
                    {
                        CountErrors++;
                        if (ShouldReportError()) REPORT("NULL returned, error: errno(%d) != ENOMEM\n", errno);
                    }
                    // check data integrity
                    if (NonZero(PointerList[i].Pointer, PointerList[i].Size)) {
                        CountErrors++;
                        if (ShouldReportError()) REPORT("NULL returned, error: data changed\n");
                    }
                }
            }
    if (FullLog) REPORT("realloc end\n");
    if (CountErrors) REPORT("%s\n",strError);
    else if (FullLog) REPORT("%s\n",strOk);
    error_occurred |= ( CountErrors>0 ) ;
    for (UINT i=0; i<PointerList.size(); i++)
    {
        Tfree(PointerList[i].Pointer);
    }

    if (perProcessLimits)
        limitBarrier->wait(no_limit);
    else
        limitMem(0);
}
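Note that the C standard does not require malloc to set errno on failure; the ENOMEM checks above rely on the POSIX guarantee, and the Linux-only known_issue branches tolerate glibc touching errno even when a valid pointer is returned.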
Example #28
 Body( int nthread_, int niters_ ) : nthread(nthread_), nIters(niters_) { sBarrier.initialize(nthread_); }
Example #29
 Body(int nThread_, int nIter_, semaphore &mySem_,
         vector<int>& ourCounts_,
         vector<double>& tottime_
         ) : nThreads(nThread_), nIters(nIter_), mySem(mySem_), ourCounts(ourCounts_), tottime(tottime_) { sBarrier.initialize(nThread_); pCount = 0; }
Example #30
 tbb::task* execute () {
     theLocalState->m_flags = 0;
     theWorkersBarrier.wait();
     return NULL;
 }