/** The 'tolerance' value inside the test specifies the limit. */
void TestSimpleDelay( int ntrial, double duration, double tolerance ) {
    double total_worktime = 0;
    // Iteration -1 warms up the code cache.
    for( int trial=-1; trial<ntrial; ++trial ) {
        tbb::tick_count t0 = tbb::tick_count::now();
        if( duration ) WaitForDuration(duration);
        tbb::tick_count t1 = tbb::tick_count::now();
        if( trial>=0 ) {
            total_worktime += (t1-t0).seconds(); 
        }
    }
    // Compute the average worktime and its delta from the requested duration
    double worktime = total_worktime/ntrial;
    double delta = worktime-duration;
    REMARK("worktime=%g delta=%g tolerance=%g\n", worktime, delta, tolerance);

    // Check that delta is acceptable
    if( delta<0 ) 
        REPORT("ERROR: delta=%g < 0\n",delta); 
    if( delta>tolerance )
        REPORT("%s: delta=%g > %g=tolerance where duration=%g\n",delta>3*tolerance?"ERROR":"Warning",delta,tolerance,duration);
}
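// WaitForDuration is not shown in this excerpt. A minimal sketch, assuming it
// simply burns CPU by spinning on tbb::tick_count until the requested number of
// seconds has elapsed (which is what makes the measured "worktime" meaningful):
void WaitForDuration( double duration ) {
    tbb::tick_count start = tbb::tick_count::now();
    while( (tbb::tick_count::now()-start).seconds() < duration )
        continue;
}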
int TestMain () {
    if( MinThread<1 ) {
        REPORT("ERROR: MinThread=%d, but must be at least 1\n",MinThread); MinThread = 1;
    }
#if !TBB_DEPRECATED
    TestIteratorTraits<tbb::concurrent_vector<Foo>::iterator,Foo>();
    TestIteratorTraits<tbb::concurrent_vector<Foo>::const_iterator,const Foo>();
    TestSequentialFor<FooWithAssign> ();
    TestResizeAndCopy();
    TestAssign();
#if HAVE_m128
    TestSSE();
#endif /* HAVE_m128 */    
#endif
    TestCapacity();
    ASSERT( !FooCount, NULL );
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        tbb::task_scheduler_init init( nthread );
        TestParallelFor( nthread );
        TestConcurrentGrowToAtLeast();
        TestConcurrentGrowBy( nthread );
    }
    ASSERT( !FooCount, NULL );
#if !TBB_DEPRECATED
    TestComparison();
#if !__TBB_FLOATING_POINT_BROKEN
    TestFindPrimes();
#endif
    TestSort();
#if __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN
    REPORT("Known issue: exception safety test is skipped.\n");
#elif TBB_USE_EXCEPTIONS
    TestExceptions();
#endif /* TBB_USE_EXCEPTIONS */
#endif /* !TBB_DEPRECATED */
    ASSERT( !FooCount, NULL );
    REMARK("sizeof(concurrent_vector<int>) == %d\n", (int)sizeof(tbb::concurrent_vector<int>));
    return Harness::Done;
}
void TestRehash() {
    REMARK("testing rehashing\n");
    MyTable w;
    w.insert( std::make_pair(MyKey::make(-5), MyData()) );
    w.rehash(); // without this, assertion will fail
    MyTable::iterator it = w.begin();
    int i = 0; // check for non-rehashed buckets
    for( ; it != w.end(); i++ )
        w.count( (it++)->first );
    ASSERT( i == 1, NULL );
    for( i=0; i<1000; i=(i<29 ? i+1 : i*2) ) {
        for( int j=max(256+i, i*2); j<10000; j*=3 ) {
            MyTable v;
            FillTable( v, i );
            ASSERT(int(v.size()) == i, NULL);
            ASSERT(int(v.bucket_count()) <= j, NULL);
            v.rehash( j );
            ASSERT(int(v.bucket_count()) >= j, NULL);
            CheckTable( v, i );
        }
    }
}
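// FillTable and CheckTable are defined elsewhere in the test. A minimal sketch of
// what they are assumed to do: insert keys 0..n-1 (using the MyKey::make and MyData
// helpers seen above) and verify that every inserted key is still reachable:
static void FillTable( MyTable& t, int n ) {
    for( int i=0; i<n; ++i )
        t.insert( std::make_pair( MyKey::make(i), MyData() ) );
}
static void CheckTable( const MyTable& t, int n ) {
    ASSERT( int(t.size())==n, NULL );
    for( int i=0; i<n; ++i )
        ASSERT( t.count( MyKey::make(i) )==1, NULL );
}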
    static void test() {
        TType v;
        source_type* all_source_nodes[MaxNSources];
        sink_node_helper<N,SType>::print_parallel_remark();
        REMARK(" >\n");
        for(int i=0; i < MaxPorts; ++i) {
            all_sink_nodes[i] = NULL;
        }
        // run the test for 1 .. MaxNSources sources
        for(int nInputs = 1; nInputs <= MaxNSources; ++nInputs) {
            tbb::flow::graph g;
            SType* my_split = makeSplit<N,SType>::create(g);

            // add sinks first so when sources start spitting out values they are there to catch them
            sink_node_helper<N, SType>::add_sink_nodes((*my_split), g);

            // now create nInputs source_nodes, each spitting out i, i+nInputs, i+2*nInputs ...
            // each element of the tuple is i*(n+1), where n is the tuple element index (1-N)
            for(int i = 0; i < nInputs; ++i) {
                // create source node
                source_type *s = new source_type(g, source_body<TType>(i, nInputs) );
                tbb::flow::make_edge(*s, *my_split);
                all_source_nodes[i] = s;
            }

            g.wait_for_all();

            // check that we got Count values in each output queue, and all the index values
            // are there.
            sink_node_helper<N, SType>::check_sink_values();

            sink_node_helper<N, SType>::remove_sink_nodes(*my_split);
            for(int i = 0; i < nInputs; ++i) {
                delete all_source_nodes[i];
            }
            makeSplit<N,SType>::destroy(my_split);
        }
    }
Example #5
int TestMain() {
    // Test with varying number of threads.
    for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) {
        // Initialize TBB task scheduler
        REMARK("\nTesting with nthread=%d\n", nthread);
        tbb::task_scheduler_init init(nthread);
        
        // Run test several times with different types
        run_function_spec();
        run_function<size_t,int>("size_t", "int");
        run_function<int,double>("int", "double");
        check_type_counter = 0;
        run_function<check_type,size_t>("check_type", "size_t");
        ASSERT(!check_type_counter, "Error in check_type creation/destruction");
        // check_type as the second type in the pipeline only works if check_type
        // is also the first type.  The middle_filter specialization for <check_type, check_type>
        // changes the state of the check_type items, and this is checked in the output_filter
        // specialization.
        run_function<check_type, check_type>("check_type", "check_type");
        ASSERT(!check_type_counter, "Error in check_type creation/destruction");
    }
    return Harness::Done;
}
Example #6
// The idea is to allocate a set of objects and then deallocate them in random
// order in parallel, forcing conflicts in the backend during coalescing.
// Thus, if the backend does not check the queue of postponed coalescing
// requests, it will not be able to unmap all memory and a memory leak will be
// observed.
void TestCleanAllBuffers() {
    const int num_threads = 8;
    // Clean up if something was allocated before the test
    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS,0);

    size_t memory_in_use_before = getMemSize();
    for ( int i=0; i<num_allocs; ++i ) {
        ptrs[i] = scalable_malloc( alloc_size );
        ASSERT( ptrs[i] != NULL, "scalable_malloc has returned zero." );
    }
    deallocs_counter = 0;
    TestCleanAllBuffersDeallocate::initBarrier(num_threads);
    NativeParallelFor(num_threads, TestCleanAllBuffersDeallocate());
    if ( defaultMemPool->extMemPool.backend.coalescQ.blocksToFree == NULL )
        REPORT( "Warning: The queue of postponed coalescing requests is empty. Unable to create the condition for bug reproduction.\n" );
    ASSERT( scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS,0) == TBBMALLOC_OK, "The cleanup request has not cleaned anything." );
    size_t memory_in_use_after = getMemSize();

    REMARK( "memory_in_use_before = %ld\nmemory_in_use_after = %ld\n", (long)memory_in_use_before, (long)memory_in_use_after );

    size_t memory_leak = memory_in_use_after - memory_in_use_before;
    ASSERT( memory_leak == 0, "The backend has not processed the queue of postponed coalescing requests during cleanup." );
}
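// TestCleanAllBuffersDeallocate is defined elsewhere. A rough sketch, assuming a
// NativeParallelFor body in which every thread waits on a shared barrier and then
// frees its own slice of ptrs[] so the deallocations run concurrently (the real
// test frees in random order to provoke coalescing conflicts; deallocs_counter is
// assumed to be an atomic counter declared with the other globals):
struct TestCleanAllBuffersDeallocate {
    static Harness::SpinBarrier barrier;
    static void initBarrier( int nthreads ) { barrier.initialize(nthreads); }
    void operator()( int id ) const {
        barrier.wait();                          // start all threads at once
        for ( int i=id; i<num_allocs; i+=8 ) {   // 8 == num_threads in the caller
            scalable_free( ptrs[i] );
            ++deallocs_counter;
        }
    }
};
Harness::SpinBarrier TestCleanAllBuffersDeallocate::barrier;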
Example #7
void TestReaderWriterLockOnNThreads(int nThreads) {
    // Stress-test all interfaces
    for (int pc=0; pc<=100; pc+=20) {
        REMARK("Testing with %d threads, percent of MAX_WORK=%d...", nThreads, pc);
        StressRWLBody myStressBody(nThreads, pc);
        NativeParallelFor(nThreads, myStressBody);
        REMARK(" OK.\n");
    }

    int i;
    n_tested__sim_readers = 0;
    REMARK("Testing with %d threads, direct/unscoped locking mode...", nThreads); // TODO: choose direct or unscoped?
    // TODO: refactor the following two for loops into a shared function 
    for( i=0; i<100; ++i ) {
        Harness::SpinBarrier bar0(nThreads);

        CorrectRWLBody myCorrectBody(nThreads,bar0);
        active_writers = active_readers = 0;
        sim_readers = false;
        NativeParallelFor(nThreads, myCorrectBody);

        if( sim_readers || nThreads==1 ) {
            if( ++n_tested__sim_readers>5 )
                break;
        }
    }
    ASSERT(i<100, "There were no simultaneous readers.");
    REMARK(" OK.\n");

    n_tested__sim_readers = 0;
    REMARK("Testing with %d threads, scoped locking mode...", nThreads);
    for( i=0; i<100; ++i ) {
        Harness::SpinBarrier bar0(nThreads);
        CorrectRWLScopedBody myCorrectScopedBody(nThreads, bar0);
        active_writers = active_readers = 0;
        sim_readers = false;
        NativeParallelFor(nThreads, myCorrectScopedBody);
        if( sim_readers || nThreads==1 ) {
            if( ++n_tested__sim_readers>5 )
                break;
        }
    }
    ASSERT(i<100, "There were no simultaneous readers.");
    REMARK(" OK.\n");
}
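// A minimal driver sketch (not part of this excerpt), assuming the usual harness
// entry point that sweeps the command-line thread range:
int TestMain () {
    for( int nthreads=MinThread; nthreads<=MaxThread; ++nthreads )
        TestReaderWriterLockOnNThreads(nthreads);
    return Harness::Done;
}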
void test_parallel_invoke()
{
    REMARK (__FUNCTION__);
    // Testing with pointers to functions
    for (int n = 2; n <=10; n++)
    {
        INIT_TEST;
        call_parallel_invoke(n, test_pointer0, test_pointer1, test_pointer2, test_pointer3, test_pointer4,
            test_pointer5, test_pointer6, test_pointer7, test_pointer8, test_pointer9, NULL);
        VALIDATE_INVOKE_RUN(n, "pointers to function");
    }

    // Testing parallel_invoke with functors
    for (int n = 2; n <=10; n++)
    {
        INIT_TEST;
        call_parallel_invoke(n, functor0, functor1, functor2, functor3, functor4,
            functor5, functor6, functor7, functor8, functor9, NULL);
        VALIDATE_INVOKE_RUN(n, "functors");
    }

#if __TBB_FUNCTION_BY_CONSTREF_IN_TEMPLATE_BROKEN
    // some old compilers can't cope with passing function name into parallel_invoke
#else
    // and some compile but generate broken code that does not call the function
    if (function_by_constref_in_template_codegen_broken())
        return;

    // Testing parallel_invoke with functions
    for (int n = 2; n <=10; n++)
    {
        INIT_TEST;
        call_parallel_invoke(n, test0, test1, test2, test3, test4, test5, test6, test7, test8, test9, NULL);
        VALIDATE_INVOKE_RUN(n, "functions");
    }
#endif
}
//! Test traversing the table with an iterator.
void TraverseTable( MyTable& table, size_t n, size_t expected_size ) {
    REMARK("testing traversal\n");
    size_t actual_size = table.size();
    ASSERT( actual_size==expected_size, NULL );
    size_t count = 0;
    bool* array = new bool[n];
    memset( array, 0, n*sizeof(bool) );
    const MyTable& const_table = table;
    MyTable::const_iterator ci = const_table.begin();
    for( MyTable::iterator i = table.begin(); i!=table.end(); ++i ) {
        // Check iterator
        int k = i->first.value_of();
        ASSERT( UseKey(k), NULL );
        ASSERT( (*i).first.value_of()==k, NULL );
        ASSERT( 0<=k && size_t(k)<n, "out of bounds key" );
        ASSERT( !array[k], "duplicate key" );
        array[k] = true;
        ++count;

        // Check lower/upper bounds
        std::pair<MyTable::iterator, MyTable::iterator> er = table.equal_range(i->first);
        std::pair<MyTable::const_iterator, MyTable::const_iterator> cer = const_table.equal_range(i->first);
        ASSERT(cer.first == er.first && cer.second == er.second, NULL);
        ASSERT(cer.first == i, NULL);
        ASSERT(std::distance(cer.first, cer.second) == 1, NULL);

        // Check const_iterator
        MyTable::const_iterator cic = ci++;
        ASSERT( cic->first.value_of()==k, NULL );
        ASSERT( (*cic).first.value_of()==k, NULL );
    }
    ASSERT( ci==const_table.end(), NULL );
    delete[] array;
    if( count!=expected_size ) {
        REPORT("Line %d: count=%ld but should be %ld\n",__LINE__,long(count),long(expected_size));
    }
}
Example #10
void
TestMultifunctionNode() {
    typedef tbb::flow::multifunction_node<int, tbb::flow::tuple<int, int>, P> multinode_type;
    REMARK("Testing multifunction_node");
    test_reversal<P,multinode_type> my_test;
    REMARK(":");
    tbb::flow::graph g;
    multinode_type mf(g, tbb::flow::serial, mf_body<multinode_type>(serial_fn_state0));
    tbb::flow::queue_node<int> qin(g);
    tbb::flow::queue_node<int> qodd_out(g);
    tbb::flow::queue_node<int> qeven_out(g);
    tbb::flow::make_edge(qin,mf);
    tbb::flow::make_edge(tbb::flow::output_port<0>(mf), qeven_out);
    tbb::flow::make_edge(tbb::flow::output_port<1>(mf), qodd_out);
    g.wait_for_all();
    for( int ii = 0; ii < 2 ; ++ii) {
        serial_fn_state0 = 0;
        if(ii == 0) REMARK(" reset preds"); else REMARK(" 2nd");
        qin.try_put(0);
        // wait for node to be active
        BACKOFF_WAIT(serial_fn_state0 == 0, "timed out waiting for first put");
        qin.try_put(1);
        BACKOFF_WAIT((!my_test(mf)), "Timed out waiting");
        ASSERT(my_test(mf), "fail second put test");
        g.my_root_task->cancel_group_execution();
        // release node
        serial_fn_state0 = 2;
        g.wait_for_all();
        ASSERT(my_test(mf), "fail cancel group test");
        if( ii == 1) {
            REMARK(" rf_clear_edges");
            g.reset(tbb::flow::rf_clear_edges);
            ASSERT(tbb::flow::output_port<0>(mf).my_successors.empty(), "output_port<0> not reset (rf_clear_edges)");
            ASSERT(tbb::flow::output_port<1>(mf).my_successors.empty(), "output_port<1> not reset (rf_clear_edges)");
        }
        else
        {
            g.reset();
        }
        ASSERT(mf.my_predecessors.empty(), "edge didn't reset");
        ASSERT((ii == 0 && !qin.my_successors.empty()) || (ii == 1 && qin.my_successors.empty()), "edge didn't reset");
    }
    REMARK(" done\n");
}
void RunPrioritySwitchBetweenTwoMasters ( int idx, uintptr_t opts ) {
    ASSERT( idx < NumTests, NULL );
    REMARK( "Config %d: idx=%i, opts=%u\r", ++g_CurConfig, idx, (unsigned)opts );
    NativeParallelFor ( 2, MasterBody<NodeType>(idx, opts) );
    Harness::Sleep(50);
}
Example #12
void TestBufferingNode(const char * name) {
    tbb::flow::graph g;
    B                bnode(g);
    tbb::flow::function_node<int,int,tbb::flow::rejecting> fnode(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0));
    REMARK("Testing %s:", name);
    for(int icnt = 0; icnt < 2; icnt++) {
        bool reverse_edge = (icnt & 0x1) != 0;  // exercise the reversed-edge path on the second pass
        serial_fn_state0 = 0;  // reset to waiting state.
        REMARK(" make_edge");
        tbb::flow::make_edge(bnode, fnode);
        ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after make_edge");
        REMARK(" try_put");
        bnode.try_put(1);  // will forward to the fnode
        BACKOFF_WAIT(serial_fn_state0 == 0, "Timed out waiting for first put");
        if(reverse_edge) {
            REMARK(" try_put2");
            bnode.try_put(2);  // will reverse the edge
            // cannot do a wait_for_all here; the function_node is still executing
            BACKOFF_WAIT(!bnode.my_successors.empty(), "Timed out waiting after 2nd put");
            // at this point the only task running is the one for the function_node.
            ASSERT(bnode.my_successors.empty(), "successor not removed");
        }
        else {
            ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after forwarding message");
        }
        serial_fn_state0 = 0;  // release the function_node.
        if(reverse_edge) {
            // have to do a second release because the function_node will get the 2nd item
            BACKOFF_WAIT( serial_fn_state0 == 0, "Timed out waiting after 2nd put");
            serial_fn_state0 = 0;  // release the function_node.
        }
        g.wait_for_all();
        REMARK(" remove_edge");
        tbb::flow::remove_edge(bnode, fnode);
        ASSERT(bnode.my_successors.empty(), "buffering node has a successor after remove_edge");
    }
    tbb::flow::join_node<tbb::flow::tuple<int,int>,tbb::flow::reserving> jnode(g);
    tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode));  // will spawn a task
    g.wait_for_all();
    ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after attaching to join");
    REMARK(" reverse");
    bnode.try_put(1);  // the edge should reverse
    g.wait_for_all();
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving");
    REMARK(" reset()");
    g.wait_for_all();
    g.reset();  // should be in forward direction again
    ASSERT(!bnode.my_successors.empty(), "buffering node has no successor after reset()");
    REMARK(" remove_edge");
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reset(rf_clear_edges)");
    tbb::flow::make_edge(bnode, tbb::flow::input_port<0>(jnode));  // add edge again
    // reverse edge by adding to buffer.
    bnode.try_put(1);  // the edge should reverse
    g.wait_for_all();
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reserving");
    REMARK(" remove_edge(reversed)");
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(bnode.my_successors.empty(), "buffering node has a successor after reset(rf_clear_edges)");
    ASSERT(tbb::flow::input_port<0>(jnode).my_predecessors.empty(), "predecessor not reset");
    REMARK("  done\n");
    g.wait_for_all();
}
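// serial_fn_body is shared by several of these whitebox tests. A minimal sketch,
// assuming the protocol implied above: if the shared flag is 0 on entry the body
// sets it to 1 ("started") and spins until the test changes it again (to 0 or 2);
// if the flag is already 2 the body runs straight through. serial_fn_state0 is
// assumed to be a tbb::atomic<int>.
template<typename T>
struct serial_fn_body {
    tbb::atomic<int>& my_state;
    serial_fn_body( tbb::atomic<int>& state ) : my_state(state) {}
    T operator()( const T& in ) {
        if( my_state == 0 ) {
            my_state = 1;               // signal that the body is executing
            while( my_state == 1 )      // wait until the test releases us
                __TBB_Yield();
        }
        return in;
    }
};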
Example #13
test_reversal() { REMARK("<rejecting>"); }
Example #14
void
TestLimiterNode() {
    int out_int;
    tbb::flow::graph g;
    tbb::flow::limiter_node<int> ln(g,1);
    REMARK("Testing limiter_node: preds and succs");
    ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count");
    ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count");
    ASSERT(ln.decrement.my_current_count == 0, "error in current count");
    ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors");
    ASSERT(ln.my_threshold == 1, "error in my_threshold");
    tbb::flow::queue_node<int> inq(g);
    tbb::flow::queue_node<int> outq(g);
    tbb::flow::broadcast_node<tbb::flow::continue_msg> bn(g);

    tbb::flow::make_edge(inq,ln);
    tbb::flow::make_edge(ln,outq);
    tbb::flow::make_edge(bn,ln.decrement);

    g.wait_for_all();
    ASSERT(!(ln.my_successors.empty()),"successors empty after make_edge");
    ASSERT(ln.my_predecessors.empty(), "input edge reversed");
    inq.try_put(1);
    g.wait_for_all();
    ASSERT(outq.try_get(out_int) && out_int == 1, "limiter_node didn't pass first value");
    ASSERT(ln.my_predecessors.empty(), "input edge reversed");
    inq.try_put(2);
    g.wait_for_all();
    ASSERT(!outq.try_get(out_int), "limiter_node incorrectly passed second input");
    ASSERT(!ln.my_predecessors.empty(), "input edge to limiter_node not reversed");
    bn.try_put(tbb::flow::continue_msg());
    g.wait_for_all();
    ASSERT(outq.try_get(out_int) && out_int == 2, "limiter_node didn't pass second value");
    g.wait_for_all();
    ASSERT(!ln.my_predecessors.empty(), "input edge was reversed(after try_get())");
    g.reset();
    ASSERT(ln.my_predecessors.empty(), "input edge not reset");
    inq.try_put(3);
    g.wait_for_all();
    ASSERT(outq.try_get(out_int) && out_int == 3, "limiter_node didn't pass third value");

    REMARK(" rf_clear_edges");
    // currently the limiter_node will not pass another message
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(ln.decrement.my_predecessor_count == 0, "error in pred count");
    ASSERT(ln.decrement.my_initial_predecessor_count == 0, "error in initial pred count");
    ASSERT(ln.decrement.my_current_count == 0, "error in current count");
    ASSERT(ln.init_decrement_predecessors == 0, "error in decrement predecessors");
    ASSERT(ln.my_threshold == 1, "error in my_threshold");
    ASSERT(ln.my_predecessors.empty(), "preds not reset(rf_clear_edges)");
    ASSERT(ln.my_successors.empty(), "succs not reset(rf_clear_edges)");
    ASSERT(inq.my_successors.empty(), "Arc not removed on reset(rf_clear_edges)");
    ASSERT(bn.my_successors.empty(), "control edge not removed on reset(rf_clear_edges)");
    tbb::flow::make_edge(inq,ln);
    tbb::flow::make_edge(ln,outq);
    inq.try_put(4);
    inq.try_put(5);
    g.wait_for_all();
    ASSERT(outq.try_get(out_int),"missing output after reset(rf_clear_edges)");
    ASSERT(out_int == 4, "input incorrect (4)");
    bn.try_put(tbb::flow::continue_msg());
    g.wait_for_all();
    ASSERT(!outq.try_get(out_int),"second output incorrectly passed (rf_clear_edges)");
    REMARK(" done\n");
}
Example #15
test_reversal() { REMARK("<queueing>"); }
Example #16
// continue_node has only a predecessor count;
// it does not track individual predecessors, only the count.
// successor edges cannot be reversed
void TestContinueNode() {
    tbb::flow::graph g;
    tbb::flow::function_node<int> fnode0(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0));
    tbb::flow::continue_node<int> cnode(g, 1, serial_continue_body<int>(serial_continue_state0));
    tbb::flow::function_node<int> fnode1(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state1));
    tbb::flow::make_edge(fnode0, cnode);
    tbb::flow::make_edge(cnode, fnode1);
    REMARK("Testing continue_node:");
    for( int icnt = 0; icnt < 2; ++icnt ) {
        REMARK( " initial%d", icnt);
        ASSERT(cnode.my_predecessor_count == 2, "predecessor addition didn't increment count");
        ASSERT(!cnode.successors().empty(), "successors empty though we added one");
        ASSERT(cnode.my_current_count == 0, "state of continue_receiver incorrect");
        serial_continue_state0 = 0;
        serial_fn_state0 = 0;
        serial_fn_state1 = 0;

        fnode0.try_put(1);  // start the first function node.
        BACKOFF_WAIT(!serial_fn_state0, "Timed out waiting for function_node to start");
        // Now the body of function_node 0 is executing.
        serial_fn_state0 = 0;  // release the node
        // wait for node to count the message (or for the node body to execute, which would be wrong)
        BACKOFF_WAIT(serial_continue_state0 == 0 && cnode.my_current_count == 0, "Timed out waiting for continue_state0 to change");
        ASSERT(serial_continue_state0 == 0, "Improperly released continue_node");
        ASSERT(cnode.my_current_count == 1, "state of continue_receiver incorrect");
        if(icnt == 0) {  // first time through, let the continue_node fire
            REMARK(" firing");
            fnode0.try_put(1);  // second message
            BACKOFF_WAIT(serial_fn_state0 == 0, "timeout waiting for continue_body to execute");
            // Now the body of function_node 0 is executing.
            serial_fn_state0 = 0;  // release the node

            BACKOFF_WAIT(!serial_continue_state0,"continue_node didn't start");  // now we wait for the continue_node.
            ASSERT(cnode.my_current_count == 0, " my_current_count not reset before body of continue_node started");
            serial_continue_state0 = 0;  // release the continue_node
            BACKOFF_WAIT(!serial_fn_state1,"successor function_node didn't start");    // wait for the successor function_node to enter body
            serial_fn_state1 = 0;  // release successor function_node.
            g.wait_for_all();

            // try a try_get()
            {
                int i;
                ASSERT(!cnode.try_get(i), "try_get not rejected");
            }

            REMARK(" reset");
            ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (before reset)");
            ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (before reset)");
            g.reset();  // should still be the same
            ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (after reset)" );
            ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (after reset)");
        }
        else {  // we're going to see if the rf_clear_edges resets things.
            g.wait_for_all();
            REMARK(" reset(rf_clear_edges)");
            ASSERT(!cnode.my_successors.empty(), "Empty successors in built graph (before reset)");
            ASSERT(cnode.my_predecessor_count == 2, "predecessor_count reset (before reset)");
            g.reset(tbb::flow::rf_clear_edges);  // should be in forward direction again
            ASSERT(cnode.my_current_count == 0, "state of continue_receiver incorrect after reset(rf_clear_edges)");
            ASSERT(cnode.my_successors.empty(), "continue_node has a successor after reset(rf_clear_edges)");
            ASSERT(cnode.my_predecessor_count == cnode.my_initial_predecessor_count, "predecessor count not reset");
        }
    }

    REMARK(" done\n");

}
Example #17
void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
    ASSERT( max_thread>=0, NULL );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    client.server = &server;
    MyTeam team(server,size_t(max_thread));
    MyServer::size_type n_thread = 0;
    for( int iteration=0; iteration<4; ++iteration ) {
        for( size_t i=0; i<team.max_thread; ++i )
            team.info[i].ran = false;
        switch( iteration ) {
            default:
                n_thread = int(max_thread);
                break;
            case 1:
                // No change in number of threads
                break;
            case 2:
                // Decrease number of threads.
                n_thread = int(max_thread)/2;
                break;
            // Case 3 uses the same code as the default, but has the effect of increasing the number of threads.
        }
        team.barrier = 0;
        REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
        server.independent_thread_number_changed( n_extra );
        if( checker ) {
            // Give RML time to respond to change in number of threads.
            Harness::Sleep(1);
        }
        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
        if( n_delivered<0 ) {
            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
            server.independent_thread_number_changed( -n_extra );
            n_delivered = 0;
        } else {
            team.n_thread = n_delivered;
            ::rml::job* job_array[JobArraySize];
            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
            server.get_threads( n_delivered, &team, job_array );
            __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL );
            for( int i=0; i<n_delivered; ++i ) {
                MyJob* j = static_cast<MyJob*>(job_array[i]);
                int s = j->state;
                ASSERT( s==MyJob::idle||s==MyJob::busy, NULL );
            }
            server.independent_thread_number_changed( -n_extra );
            REMARK("client %d: team size is %d\n", client.client_id(), n_delivered);
            if( checker ) {
                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
            }
            // Protocol requires that master wait until workers have called "done_processing"
            while( team.barrier!=n_delivered ) {
                ASSERT( team.barrier>=0, NULL );
                ASSERT( team.barrier<=n_delivered, NULL );
                __TBB_Yield();
            }
            REMARK("client %d: team completed\n", client.client_id() );
            for( int i=0; i<n_delivered; ++i ) {
                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
            }
        }
        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
        }
    }
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif
}
Example #18
void TestNullRWMutex( const char * name ) {
    REMARK("%s ",name);
    const int n = 100;
    M m;
    tbb::parallel_for(tbb::blocked_range<size_t>(0,n,10),NullUpgradeDowngrade<M>(m, name));
}
 static void print_parallel_remark() {
     REMARK("Parallel test of split_node< %s", name_of<IT>::name());
 }
 static void print_serial_remark() {
     REMARK("Serial test of split_node< %s", name_of<IT>::name());
 }
 MinimalAllocator() {
     REMARK("%p::ctor\n", this);
 }
int TestMain() {
    REMARK( "testing with tbb condvar\n" );
    DoCondVarTest<tbb::mutex,tbb::recursive_mutex>();
    return Harness::Done;
}
void TestPeriodicConcurrentActivities () {
    REMARK( "TestPeriodicConcurrentActivities: %s / %s \n", Low == tbb::priority_low ? "Low" : "Normal", High == tbb::priority_normal ? "Normal" : "High" );
    NativeParallelFor ( 2, PeriodicActivitiesBody() );
}
Example #24
// function_node has predecessors and successors
// try_get() rejects
// successor edges cannot be reversed
// predecessors will reverse (only rejecting will reverse)
void TestFunctionNode() {
    tbb::flow::graph g;
    tbb::flow::queue_node<int> qnode0(g);
    tbb::flow::function_node<int,int, tbb::flow::rejecting > fnode0(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0));
    // queueing function node
    tbb::flow::function_node<int,int> fnode1(g, tbb::flow::serial, serial_fn_body<int>(serial_fn_state0));

    tbb::flow::queue_node<int> qnode1(g);

    tbb::flow::make_edge(fnode0, qnode1);
    tbb::flow::make_edge(qnode0, fnode0);

    serial_fn_state0 = 2;  // just let it go
    // see if the darned thing will work....
    qnode0.try_put(1);
    g.wait_for_all();
    int ii;
    ASSERT(qnode1.try_get(ii) && ii == 1, "output not passed");
    tbb::flow::remove_edge(qnode0, fnode0);
    tbb::flow::remove_edge(fnode0, qnode1);

    tbb::flow::make_edge(fnode1, qnode1);
    tbb::flow::make_edge(qnode0, fnode1);

    serial_fn_state0 = 2;  // just let it go
    // see if the darned thing will work....
    qnode0.try_put(1);
    g.wait_for_all();
    ASSERT(qnode1.try_get(ii) && ii == 1, "output not passed");
    tbb::flow::remove_edge(qnode0, fnode1);
    tbb::flow::remove_edge(fnode1, qnode1);

    // rejecting
    serial_fn_state0 = 0;
    tbb::flow::make_edge(fnode0, qnode1);
    tbb::flow::make_edge(qnode0, fnode0);
    REMARK("Testing rejecting function_node:");
    ASSERT(!fnode0.my_queue, "node should have no queue");
    ASSERT(!fnode0.my_successors.empty(), "successor edge not added");
    qnode0.try_put(1);
    BACKOFF_WAIT(!serial_fn_state0,"rejecting function_node didn't start");
    qnode0.try_put(2);   // rejecting node should reject, reverse.
    BACKOFF_WAIT(fnode0.my_predecessors.empty(), "Missing predecessor ---");
    serial_fn_state0 = 2;   // release function_node body.
    g.wait_for_all();
    REMARK(" reset");
    g.reset();  // should reverse the edge from the input to the function node.
    ASSERT(!qnode0.my_successors.empty(), "empty successors after reset()");
    ASSERT(fnode0.my_predecessors.empty(), "predecessor not reversed");
    tbb::flow::remove_edge(qnode0, fnode0);
    tbb::flow::remove_edge(fnode0, qnode1);
    REMARK("\n");

    // queueing
    tbb::flow::make_edge(fnode1, qnode1);
    REMARK("Testing queueing function_node:");
    ASSERT(fnode1.my_queue, "node should have a queue");
    ASSERT(!fnode1.my_successors.empty(), "successor edge not added");
    REMARK(" add_pred");
    ASSERT(fnode1.register_predecessor(qnode0), "Cannot register as predecessor");
    ASSERT(!fnode1.my_predecessors.empty(), "Missing predecessor");
    REMARK(" reset");
    g.wait_for_all();
    g.reset();  // should reverse the edge from the input to the function node.
    ASSERT(!qnode0.my_successors.empty(), "empty successors after reset()");
    ASSERT(fnode1.my_predecessors.empty(), "predecessor not reversed");
    tbb::flow::remove_edge(qnode0, fnode1);
    tbb::flow::remove_edge(fnode1, qnode1);
    REMARK("\n");

    serial_fn_state0 = 0;  // make the function_node wait
    tbb::flow::make_edge(qnode0, fnode0);
    REMARK(" start_func");
    qnode0.try_put(1);
    BACKOFF_WAIT(serial_fn_state0 == 0, "Timed out waiting after 1st put");
    // now if we put an item to the queues the edges to the function_node will reverse.
    REMARK(" put_node(2)");
    qnode0.try_put(2);   // start queue node.
    // wait for the edges to reverse
    BACKOFF_WAIT(fnode0.my_predecessors.empty(), "Timed out waiting");
    ASSERT(!fnode0.my_predecessors.empty(), "function_node edge not reversed");
    g.my_root_task->cancel_group_execution();
    // release the function_node
    serial_fn_state0 = 2;
    g.wait_for_all();
    ASSERT(!fnode0.my_predecessors.empty() && qnode0.my_successors.empty(), "function_node edge not reversed");
    g.reset(tbb::flow::rf_clear_edges);
    ASSERT(fnode0.my_predecessors.empty() && qnode0.my_successors.empty(), "function_node edge not removed");
    ASSERT(fnode0.my_successors.empty(), "successor to fnode not removed");
    REMARK(" done\n");
}
 MinimalAllocator(const MinimalAllocator&s) : cnt_provider_t(s) {
     REMARK("%p::ctor(%p)\n", this, &s);
 }
HARNESS_EXPORT
int main(int argc, char* argv[]) {
    argC=argc;
    argV=argv;
    MaxThread = MinThread = 1;
    Tmalloc=scalable_malloc;
    Trealloc=scalable_realloc;
    Tcalloc=scalable_calloc;
    Tfree=scalable_free;
    Rposix_memalign=scalable_posix_memalign;
    Raligned_malloc=scalable_aligned_malloc;
    Raligned_realloc=scalable_aligned_realloc;
    Taligned_free=scalable_aligned_free;

    // check if we were called to test standard behavior
    for (int i=1; i< argc; i++) {
        if (strcmp((char*)*(argv+i),"-s")==0)
        {
            setSystemAllocs();
            argC--;
            break;
        }
    }

    ParseCommandLine( argC, argV );
#if __linux__
    /* According to man pthreads
       "NPTL threads do not share resource limits (fixed in kernel 2.6.10)".
       Use per-threads limits for affected systems.
     */
    if ( LinuxKernelVersion() < 2*1000000 + 6*1000 + 10)
        perProcessLimits = false;
#endif
    //-------------------------------------
#if __APPLE__
    /* Skipped due to lack of memory limit enforcement under OS X*. */
#else
    limitMem(200);
    ReallocParam();
    limitMem(0);
#endif

// On Linux and with a dynamic runtime, errno is used to check the allocator functions.
// Check whether the library was compiled with /MD(d) so that errno can be used.
#if _MSC_VER
#if defined(_MT) && defined(_DLL) //check errno if test itself compiled with /MD(d) only
    char*  version_info_block = NULL;
    int version_info_block_size;
    LPVOID comments_block = NULL;
    UINT comments_block_size;
#ifdef _DEBUG
#define __TBBMALLOCDLL "tbbmalloc_debug.dll"
#else  //_DEBUG
#define __TBBMALLOCDLL "tbbmalloc.dll"
#endif //_DEBUG
    version_info_block_size = GetFileVersionInfoSize( __TBBMALLOCDLL, (LPDWORD)&version_info_block_size );
    if( version_info_block_size
        && ((version_info_block = (char*)malloc(version_info_block_size)) != NULL)
        && GetFileVersionInfo(  __TBBMALLOCDLL, NULL, version_info_block_size, version_info_block )
        && VerQueryValue( version_info_block, "\\StringFileInfo\\000004b0\\Comments", &comments_block, &comments_block_size )
        && strstr( (char*)comments_block, "/MD" )
        ){
            __tbb_test_errno = true;
     }
     if( version_info_block ) free( version_info_block );
#endif // defined(_MT) && defined(_DLL)
#else  // _MSC_VER
    __tbb_test_errno = true;
#endif // _MSC_VER

    for( int p=MaxThread; p>=MinThread; --p ) {
        REMARK("testing with %d threads\n", p );
        Harness::SpinBarrier *barrier = new Harness::SpinBarrier(p);
        NativeParallelFor( p, RoundRobin(p, barrier, Verbose) );
        delete barrier;
    }
    if( !error_occurred )
        REPORT("done\n");
    return 0;
}
Example #27
void RunParallelScalarTests(const char *test_name) {

    tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);

    for (int p = MinThread; p <= MaxThread; ++p) {


        if (p == 0) continue;

        REMARK("Testing parallel %s on %d thread(s)... ", test_name, p);
        init.initialize(p);

        tbb::tick_count t0;

        T assign_sum(0);

        T combine_sum(0);

        T combine_ref_sum(0);

        T combine_each_sum(0);

        T combine_finit_sum(0);

        for (int t = -1; t < REPETITIONS; ++t) {
            if (Verbose && t == 0) t0 = tbb::tick_count::now();

            tbb::combinable<T> sums;
            FunctorAddFinit<T> my_finit_decl;
            tbb::combinable<T> finit_combinable(my_finit_decl);


            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBodyNoInit<T>( finit_combinable ) );
            tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBody<T>( sums ) );

            // Use combine
            combine_sum +=  sums.combine(my_combine<T>);
            combine_ref_sum +=  sums.combine(my_combine_ref<T>);

            CombineEachHelper<T> my_helper(combine_each_sum);
            sums.combine_each(my_helper);

            // test assignment
            tbb::combinable<T> assigned;
            assigned = sums;

            assign_sum +=  assigned.combine(my_combine<T>);

            combine_finit_sum += finit_combinable.combine(my_combine<T>);
        }

        ASSERT( EXPECTED_SUM == combine_sum, NULL);
        ASSERT( EXPECTED_SUM == combine_ref_sum, NULL);
        ASSERT( EXPECTED_SUM == assign_sum, NULL);
        ASSERT( EXPECTED_SUM == combine_finit_sum, NULL);

        REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, static_cast<double>(combine_sum),
                                                      ( tbb::tick_count::now() - t0).seconds());
        init.terminate();
    }
}
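// my_combine, my_combine_ref and CombineEachHelper are defined elsewhere in the
// test. A minimal sketch of what they are assumed to do: plain summing reducers
// used with combinable<T>::combine() and combine_each():
template<typename T>
T my_combine( T a, T b ) { return a+b; }

template<typename T>
T my_combine_ref( const T& a, const T& b ) { return a+b; }

template<typename T>
struct CombineEachHelper {
    CombineEachHelper( T& sum ) : my_sum(sum) {}
    void operator()( const T& x ) const { my_sum += x; }
    T& my_sum;
};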
 void *allocate(size_t sz) {
     void *p = cnt_provider_t::allocate(sz);
     REMARK("%p::allocate(%u) = %p\n", this, unsigned(sz), p);
     return p;
 }
Example #29
int TestMain () {
    for( int p=MinThread; p<=MaxThread; ++p ) {
        tbb::task_scheduler_init init( p );
        REMARK( "testing with %d workers\n", static_cast<int>(p) );
#if TBB_TEST_LOW_WORKLOAD
        // The amount of work is decreased in this mode to bring the length
        // of the runs under tools into the tolerable limits.
        const int n = 1;
#else
        const int n = 3;
#endif
        // Run each test several times.
        for( int i=0; i<n; ++i ) {
            TestNullMutex<tbb::null_mutex>( "Null Mutex" );
            TestNullMutex<tbb::null_rw_mutex>( "Null RW Mutex" );
            TestNullRWMutex<tbb::null_rw_mutex>( "Null RW Mutex" );
            Test<tbb::spin_mutex>( "Spin Mutex" );
#if _OPENMP
            Test<OpenMP_Mutex>( "OpenMP_Mutex" );
#endif /* _OPENMP */
            Test<tbb::queuing_mutex>( "Queuing Mutex" );
            Test<tbb::mutex>( "Wrapper Mutex" );
            Test<tbb::recursive_mutex>( "Recursive Mutex" );
            Test<tbb::queuing_rw_mutex>( "Queuing RW Mutex" );
            Test<tbb::spin_rw_mutex>( "Spin RW Mutex" );

            TestTryAcquire_OneThread<tbb::spin_mutex>("Spin Mutex");
            TestTryAcquire_OneThread<tbb::queuing_mutex>("Queuing Mutex");
#if USE_PTHREAD
            // under ifdef because on Windows tbb::mutex is re-entrant and the test will fail
            TestTryAcquire_OneThread<tbb::mutex>("Wrapper Mutex");
#endif /* USE_PTHREAD */
            TestTryAcquire_OneThread<tbb::recursive_mutex>( "Recursive Mutex" );
            TestTryAcquire_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex"); // only tests try_acquire for writers
            TestTryAcquire_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex"); // only tests try_acquire for writers
            TestTryAcquireReader_OneThread<tbb::spin_rw_mutex>("Spin RW Mutex");
            TestTryAcquireReader_OneThread<tbb::queuing_rw_mutex>("Queuing RW Mutex");

            TestReaderWriterLock<tbb::queuing_rw_mutex>( "Queuing RW Mutex" );
            TestReaderWriterLock<tbb::spin_rw_mutex>( "Spin RW Mutex" );

            TestRecursiveMutex<tbb::recursive_mutex>( "Recursive Mutex" );

            // Test ISO C++0x interface
            TestISO<tbb::spin_mutex>( "ISO Spin Mutex" );
            TestISO<tbb::mutex>( "ISO Mutex" );
            TestISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
            TestISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
            TestISO<tbb::critical_section>( "ISO Critical Section" );
            TestTryAcquire_OneThreadISO<tbb::spin_mutex>( "ISO Spin Mutex" );
#if USE_PTHREAD
            // under ifdef because on Windows tbb::mutex is re-entrant and the test will fail
            TestTryAcquire_OneThreadISO<tbb::mutex>( "ISO Mutex" );
#endif /* USE_PTHREAD */
            TestTryAcquire_OneThreadISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
            TestTryAcquire_OneThreadISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
            TestTryAcquire_OneThreadISO<tbb::critical_section>( "ISO Critical Section" );
            TestReaderWriterLockISO<tbb::spin_rw_mutex>( "ISO Spin RW Mutex" );
            TestRecursiveMutexISO<tbb::recursive_mutex>( "ISO Recursive Mutex" );
        }
        REMARK( "calling destructor for task_scheduler_init\n" );
    }
    return Harness::Done;
}
 void deallocate(void *p, size_t sz) {
     ASSERT(allocations>frees,0);
     REMARK("%p::deallocate(%p, %u)\n", this, p, unsigned(sz));
     cnt_provider_t::deallocate(cnt_provider_t::pointer(p), sz);
 }