void TestEnqueue( int p ) {
    REMARK("Testing task::enqueue for %d threads\n", p);
    for( int mode=0; mode<3; ++mode ) {
        tbb::task_scheduler_init init(p);
        EnqueuedTask::nCompletedPairs = EnqueuedTask::nOrderedPairs = 0;
        for( int i=0; i<nTracks; ++i ) {
            TaskTracks[i] = -1; // to account for the starting call
            EnqueuedTask::FireTwoTasks(TaskTracks+i);
        }
        ProgressMonitor pm;
        tbb::tbb_thread thr( pm );
        if( mode==1 ) {
            // do some parallel work in the meantime
            for( int i=0; i<10; i++ ) {
                TaskGenerator& g = *new( tbb::task::allocate_root() ) TaskGenerator(2,5);
                tbb::task::spawn_root_and_wait(g);
                TimedYield( 1E-6 );
            }
        }
        if( mode==2 ) {
            // Additionally enqueue a bunch of empty tasks. The goal is to test that tasks
            // allocated and enqueued by a thread are safe to use after the thread leaves TBB.
            tbb::task* root = new (tbb::task::allocate_root()) tbb::empty_task;
            root->set_ref_count(100);
            for( int i=0; i<100; ++i )
                tbb::task::enqueue( *new (root->allocate_child()) tbb::empty_task );
            init.terminate(); // master thread deregistered
        }
        thr.join();
        ASSERT(EnqueuedTask::nCompletedPairs==nTracks*PairsPerTrack, NULL);
        ASSERT(EnqueuedTask::nOrderedPairs<EnqueuedTask::nCompletedPairs,
               "all task pairs executed in enqueue order; de facto guarantee is too strong?");
    }
}
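// Illustrative driver sketch, not part of the original test: TestEnqueue() is normally
// invoked from the harness entry point once per thread count under test. MinThread,
// MaxThread and Harness::Done follow the usual harness.h conventions and are assumed
// here; the guard against p==0 is also an assumption, since task_scheduler_init needs
// at least one thread.
int TestMain () {
    for( int p=MinThread; p<=MaxThread; ++p )
        if( p>0 )
            TestEnqueue( p );
    return Harness::Done;
}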
void operator() ( ) {
    int track_snapshot[nTracks];
    int stall_count = 0, uneven_progress_count = 0, last_progress_mask = 0;
    for( int i=0; i<nTracks; ++i )
        track_snapshot[i] = 0;
    bool completed;
    do {
        // Yield repeatedly for at least 1 usec
        TimedYield( 1E-6 );
        int overall_progress = 0, progress_mask = 0;
        const int all_progressed = (1<<nTracks) - 1;
        completed = true;
        for( int i=0; i<nTracks; ++i ) {
            int ti = TaskTracks[i];
            int pi = ti - track_snapshot[i];
            if( pi ) progress_mask |= 1<<i;
            overall_progress += pi;
            completed = completed && ti==PairsPerTrack;
            track_snapshot[i] = ti;
        }
        // The constants in the next asserts are subjective and may need correction.
        if( overall_progress ) stall_count = 0;
        else {
            ++stall_count; // no progress; consider it dead.
            ASSERT(stall_count < stall_threshold,
                   "no progress on enqueued tasks; deadlock, or the machine is heavily oversubscribed?");
        }
        if( progress_mask==all_progressed || progress_mask^last_progress_mask ) {
            uneven_progress_count = 0;
            last_progress_mask = progress_mask;
        }
        else if( overall_progress > 2 ) {
            ++uneven_progress_count;
            // The threshold of 32 is 4x what was observed on an 8-core machine with oversubscription.
            ASSERT_WARNING(uneven_progress_count < 32,
                           "some enqueued tasks seem to be stalling; no simultaneous progress, or the machine is oversubscribed? Investigate if repeated");
        }
    } while( !completed );
}
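// Minimal standalone sketch of the monitor-thread pattern used above (all names in this
// block are illustrative assumptions, not definitions from the test): a copyable functor
// is handed to tbb::tbb_thread, polls shared progress state until the work is done, and
// is joined by the launching thread, which is how TestEnqueue drives ProgressMonitor.
#include "tbb/tbb_thread.h"
#include "tbb/atomic.h"

static tbb::atomic<int> g_work_left;   // hypothetical shared progress counter

struct PollingMonitor {
    void operator() ( ) {
        // Yield politely until all work has been accounted for.
        while( g_work_left > 0 )
            tbb::this_tbb_thread::yield();
    }
};

void RunMonitoredWork() {
    g_work_left = 100;
    PollingMonitor pm;
    tbb::tbb_thread monitor( pm );     // the functor is copied into the new thread
    for( int i=0; i<100; ++i )
        --g_work_left;                 // stand-in for the real enqueued tasks
    monitor.join();                    // wait for the monitor, as TestEnqueue does with thr
}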