void Run ( uint_t idx ) {
#if TBBTEST_USE_TBB
    tbb::task_scheduler_init init;
#endif
    AssertLive();
    if ( idx == 0 ) {
        // The first thread creates the shared task group and spawns the initial batch of tasks.
        ASSERT ( !m_taskGroup && !m_tasksSpawned, "SharedGroupBody must be reset before reuse");
        m_taskGroup = new Concurrency::task_group;
        Spawn( c_numTasks0 );
        Wait();
        if ( m_sharingMode & VagabondGroup )
            m_barrier.wait();
        else
            DeleteTaskGroup();
    }
    else {
        // Other threads wait until the group exists, then spawn their own tasks into it.
        while ( m_tasksSpawned == 0 )
            __TBB_Yield();
        ASSERT ( m_taskGroup, "Task group is not initialized");
        Spawn (c_numTasks1);
        if ( m_sharingMode & ParallelWait )
            Wait();
        if ( m_sharingMode & VagabondGroup ) {
            ASSERT ( idx == 1, "In vagabond mode SharedGroupBody must be used with 2 threads only" );
            m_barrier.wait();
            DeleteTaskGroup();
        }
    }
    AssertLive();
}
check_type& operator()( check_type &c) const {
    AssertLive();
    ASSERT(!c.my_id(), "bad id value");
    ASSERT(!c.is_ready(), "Already ready" );
    c.function();
    return c;
}
void function() {
    AssertLive();
    if( id == 0 ) {
        id = 1;
        am_ready = true;
    }
}
U operator()( tbb::flow_control& control ) const {
    AssertLive();
    if( --input_counter < 0 ) {
        control.stop();
    }
    return U();  // default constructed
}
void operator()(T c) const {
    AssertLive();
    ASSERT(output_my_id(c), "unset id value");
    ASSERT(output_is_ready(c), "not yet ready");
    ++non_pointer_specialized_calls;
    output_counter++;
}
void operator()() const {
    AssertLive();
    ASSERT(!pq.empty(), "queue should not be empty yet");
    int elem = pq.top();
    pq.pop();
    shared_data[elem]++;
}
check_type(const check_type& other) : Harness::NoAfterlife(other) {
    other.AssertLive();
    AssertLive();
    id = other.id;
    am_ready = other.am_ready;
    ++check_type_counter;
}
void operator()( tbb::flow_control& control ) const {
    AssertLive();
    if( --input_counter < 0 ) {
        control.stop();
    }
    else
        ++non_pointer_specialized_calls;
}
U operator()(T t) const {
    AssertLive();
    ASSERT(!middle_my_id(t), "bad id value");
    ASSERT(!middle_is_ready(t), "Already ready" );
    U out;
    my_function(out);
    ++non_pointer_specialized_calls;
    return out;
}
U operator()( tbb::flow_control& control ) const {
    AssertLive();
    if( --input_counter < 0 ) {
        control.stop();
    }
    else  // only count successful reads
        ++non_pointer_specialized_calls;
    return U();  // default constructed
}
void operator()(T* c) const {
    free_on_scope_exit<T> my_ptr(c);
    AssertLive();
    if(c) {
        ASSERT(output_my_id(*c), "unset id value");
        ASSERT(output_is_ready(*c), "not yet ready");
    }
    output_counter++;
    ++pointer_specialized_calls;
}
U* operator()(T my_storage) const {
    AssertLive();
    ASSERT(!middle_my_id(my_storage), "bad id value");
    ASSERT(!middle_is_ready(my_storage), "Already ready" );
    // allocate new space from buffers
    U* my_return = new(fetchNextBuffer()) U();
    my_function(*my_return);
    ++second_pointer_specialized_calls;
    return my_return;
}
int operator()(tbb::flow_control& control ) const {
    AssertLive();
    int oldval = --input_counter;
    if( oldval < 0 ) {
        control.stop();
    }
    else
        ++non_pointer_specialized_calls;
    return oldval+1;
}
U operator()(T* my_storage) const {
    free_on_scope_exit<T> my_ptr(my_storage);  // free_on_scope_exit marks the buffer available
    AssertLive();
    if(my_storage) {  // may have been passed in a NULL
        ASSERT(!middle_my_id(*my_storage), "bad id value");
        ASSERT(!middle_is_ready(*my_storage), "Already ready" );
    }
    ++first_pointer_specialized_calls;
    U out;
    my_function(out);
    return out;
}
U* operator()(tbb::flow_control& control) const {
    AssertLive();
    int ival = --input_counter;
    if(ival < 0) {
        control.stop();
        return NULL;
    }
    ++pointer_specialized_calls;
    if(ival == max_counter / 2) {
        return NULL;  // non-stop NULL
    }
    U* myReturn = new(fetchNextBuffer()) U();
    return myReturn;
}
void operator() () const {
    Harness::ConcurrencyTracker ct;
    AssertLive();
    if ( g_Throw ) {
        // Throwing mode: the SKIP_CHORES-th invocation throws; earlier ones just yield.
        if ( ++m_TaskCount == SKIP_CHORES )
            __TBB_THROW( test_exception(EXCEPTION_DESCR1) );
        __TBB_Yield();
    }
    else {
        // Cancellation mode: spin until the enclosing task group is cancelled.
        ++g_TaskCount;
        while( !Concurrency::is_current_task_group_canceling() )
            __TBB_Yield();
    }
}
void operator() () const {
    Harness::ConcurrencyTracker ct;
    AssertLive();
    if( m_Num < 2 ) {
        *m_pRes = m_Num;
    }
    else {
        uint_t y = ~0u;
        Concurrency::task_group tg;
        Concurrency::task_handle<FibTask_SpawnRightChildOnly> h = FibTask_SpawnRightChildOnly(&y, m_Num-1);
        tg.run( h );
        m_Num -= 2;
        tg.run_and_wait( *this );
        *m_pRes += y;
    }
}
U* operator()(T* my_storage) const {
    free_on_scope_exit<T> my_ptr(my_storage);  // free_on_scope_exit marks the buffer available
    AssertLive();
    if(my_storage) {  // may have been passed a NULL
        ASSERT(!middle_my_id(*my_storage), "bad id value");
        ASSERT(!middle_is_ready(*my_storage), "Already ready" );
    }
    ++pointer_specialized_calls;
    if(!my_storage)
        return NULL;
    ASSERT(!middle_my_id(*my_storage), "bad id value");
    ASSERT(!middle_is_ready(*my_storage), "Already ready" );
    U* my_return = new(fetchNextBuffer()) U();
    my_function(*my_return);
    return my_return;
}
void operator() () const {
    Harness::ConcurrencyTracker ct;
    AssertLive();
    if( m_Num < 2 ) {
        *m_pRes = m_Num;
    }
    else {
        uint_t x = ~0u,  // initialized only to suppress warning
               y = ~0u;
        task_group_type tg;
        Concurrency::task_handle<FibTask_SpawnBothChildren> h1 = FibTask_SpawnBothChildren(&y, m_Num-1),
                                                            h2 = FibTask_SpawnBothChildren(&x, m_Num-2);
        tg.run( h1 );
        tg.run( h2 );
        tg.wait();
        *m_pRes = x + y;
    }
}
void operator()(T) const { AssertLive(); output_counter++; }
void operator()() const { AssertLive(); pq.push(threadID); }
int operator()(int my_input) const {
    AssertLive();
    ++non_pointer_specialized_calls;
    return my_input*my_input;
}
void operator()(int my_input) const {
    AssertLive();
    ++non_pointer_specialized_calls;
    int myindx = output_counter++;
    intbuffer[myindx] = my_input;
}
void operator()( tbb::flow_control& control ) const {
    AssertLive();
    if( --input_counter < 0 ) {
        control.stop();
    }
}
U operator()(T /*my_storage*/) const { AssertLive(); return U(); }
bool is_ready() { AssertLive(); return am_ready; }
unsigned int my_id() { AssertLive(); return id; }
~check_type() { AssertLive(); --check_type_counter; }
void operator()(check_type &c) const {
    AssertLive();
    ASSERT(c.my_id(), "unset id value");
    ASSERT(c.is_ready(), "not yet ready");
    output_counter++;
}