tbb::task* execute() {
    // Two-phase tree sum using continuation passing: on the first pass the
    // task forks children and recycles itself as their continuation; on the
    // second pass (is_continuation set) it combines the children's results.
    tbb::task* next = NULL;
    if( !is_continuation ) {
        if( root->node_count<1000 ) {
            // Small subtree: not worth the task overhead.
            *sum = SerialSumTree(root);
        } else {
            // Allocate both children before spawning either of them.
            tbb::task* leftChild = NULL;
            tbb::task* rightChild = NULL;
            if( root->left )
                leftChild = new( allocate_child() ) OptimizedSumTask(root->left,&x);
            if( root->right )
                rightChild = new( allocate_child() ) OptimizedSumTask(root->right,&y);
            // Re-run this task as the continuation once the children finish.
            recycle_as_continuation();
            is_continuation = true;
            // A continuation's ref count is exactly the number of children:
            // no "+1" because nothing calls wait_for_all on it.
            set_ref_count( (leftChild!=NULL)+(rightChild!=NULL) );
            // Return one child for scheduler bypass; spawn the other normally.
            if( leftChild==NULL )
                leftChild = rightChild;     // at most one child exists
            else if( rightChild )
                spawn(*rightChild);
            next = leftChild;
        }
    } else {
        // Continuation pass: children are done; fold in their results.
        *sum = root->value;
        if( root->left ) *sum += x;
        if( root->right ) *sum += y;
    }
    return next;
}
task* execute() {
    // Computes the matrix sum c = a + b (n x n), recursing by quadrants and
    // adding the four quadrant pairs in parallel.
    if (n<=block) {
        // Base case: element-wise addition.
        // NOTE(review): i and j are not declared here, so they appear to be
        // members shared with the commented-out code elsewhere — confirm they
        // are per-task and not shared across tasks.
        double **p = a->d, **q = b->d, **r = c->d;
        for (i=0; i<n; i++)
            for (j=0; j<n; j++)
                r[i][j] = p[i][j] + q[i][j];
    } else {
        n/=2;
        RecAddTask& t1 = *new(tbb::task::allocate_child() ) RecAddTask(n, a11, b11, c11);
        RecAddTask& t2 = *new(tbb::task::allocate_child() ) RecAddTask(n, a12, b12, c12);
        RecAddTask& t3 = *new(tbb::task::allocate_child() ) RecAddTask(n, a21, b21, c21);
        // BUG FIX: the bottom-right quadrant must add a22 + b22 (was a12).
        RecAddTask& t4 = *new(tbb::task::allocate_child() ) RecAddTask(n, a22, b22, c22);
        // Four children plus one for the wait below.
        set_ref_count(5);
        tbb::task::spawn(t1);
        tbb::task::spawn(t2);
        tbb::task::spawn(t3);
        tbb::task::spawn(t4);
        tbb::task::wait_for_all();
    }
    return NULL;
}
virtual task *execute() {
    // Evaluate every per-location plug at m_scenePath, then recurse into
    // each child location with one task per child.
    ContextPtr locationContext = new Context( *m_context );
    locationContext->set( ScenePlug::scenePathContextName, m_scenePath );
    Context::Scope contextScope( locationContext );

    // Pull on all the per-location plugs so they get computed.
    m_scenePlug->transformPlug()->getValue();
    m_scenePlug->boundPlug()->getValue();
    m_scenePlug->attributesPlug()->getValue();
    m_scenePlug->objectPlug()->getValue();

    ConstInternedStringVectorDataPtr namesData = m_scenePlug->childNamesPlug()->getValue();
    const vector<InternedString> &names = namesData->readable();

    // One reference per child task, plus one for the wait_for_all below.
    set_ref_count( 1 + names.size() );

    ScenePlug::ScenePath descendantPath = m_scenePath;
    descendantPath.push_back( InternedString() ); // placeholder slot for each child's name

    const size_t nameIndex = m_scenePath.size();
    for( size_t i = 0; i < names.size(); ++i )
    {
        descendantPath[nameIndex] = names[i];
        spawn( *( new( allocate_child() ) SceneTraversalTask( m_scenePlug, m_context, descendantPath ) ) );
    }

    wait_for_all();
    return 0;
}
task* execute() {
    // Sums the tree rooted at `root` into *sum, forking a child task per
    // existing subtree and blocking until both finish.
    if( root->node_count<1000 ) {
        // Small subtree: the serial sum is cheaper than task overhead.
        *sum = SerialSumTree(root);
        return NULL;
    }
    Value x, y;
    tbb::task_list children;
    // spawn_and_wait_for_all expects an augmented ref count: children + 1.
    int refCount = 1;
    if( root->left ) {
        children.push_back( *new( allocate_child() ) SimpleSumTask(root->left,&x) );
        ++refCount;
    }
    if( root->right ) {
        children.push_back( *new( allocate_child() ) SimpleSumTask(root->right,&y) );
        ++refCount;
    }
    set_ref_count(refCount);
    spawn_and_wait_for_all(children);
    // Combine the children's partial sums with this node's value.
    *sum = root->value;
    if( root->left )
        *sum += x;
    if( root->right )
        *sum += y;
    return NULL;
}
task* execute () {
    // Spawn NumLeafTasks children carrying consecutive ids for this group,
    // then block until all of them complete.
    const count_t firstId = my_groupId * NumLeafTasks;
    // Children + 1 extra reference for the wait_for_all below.
    set_ref_count( NumLeafTasks + 1 );
    count_t id = firstId;
    for ( count_t spawned = 0; spawned < NumLeafTasks; ++spawned, ++id )
        spawn( *new( allocate_child() ) LeafTask( id ) );
    wait_for_all();
    return NULL;
}
/*override*/ tbb::task* execute() {
    // Fibonacci-shaped task tree used to exercise a scheduler observer.
    // The master's root task fans work out to the workers, optionally
    // rendezvousing with them via theGlobalBarrier; every task checks that
    // the observer set the expected per-thread flag before it ran.
    // Any flag recorded in this thread's local state must be one we expect.
    ASSERT( !(~theLocalState->m_flags & m_flag), NULL );
    if( N < 2 ) return NULL;
    bool globalBarrierActive = false;
    if ( theLocalState->m_isMaster ) {
        if ( theGlobalBarrierActive ) {
            // This is the root task. Its N is equal to the number of threads.
            // Spawn a task for each worker.
            // N-1 children (loop starts at 1) + 1 for wait_for_all == N.
            set_ref_count(N);
            for ( int i = 1; i < N; ++i )
                spawn( *new( allocate_child() ) FibTask(20, m_flag, m_observer) );
            if ( theTestMode & tmSynchronized ) {
                theGlobalBarrier.wait();
                ASSERT( m_observer.m_entries >= N, "Wrong number of on_entry calls after the first barrier" );
                // All the spawned tasks have been stolen by workers.
                // Now wait for workers to spawn some more tasks for this thread to steal back.
                theGlobalBarrier.wait();
                ASSERT( !theGlobalBarrierActive, "Workers are expected to have reset this flag" );
            } else
                theGlobalBarrierActive = false;
            wait_for_all();
            return NULL;
        }
    } else {
        // Worker thread: on its first task, join the synchronized rendezvous
        // (if enabled) and reset the global flag so the master's root branch
        // runs only once.
        if ( theGlobalBarrierActive ) {
            if ( theTestMode & tmSynchronized ) {
                theGlobalBarrier.wait();
                globalBarrierActive = true;
            }
            theGlobalBarrierActive = false;
        }
    }
    // Ordinary recursive step: two children + 1 for the wait_for_all below.
    set_ref_count(3);
    spawn( *new( allocate_child() ) FibTask(N-1, m_flag, m_observer) );
    spawn( *new( allocate_child() ) FibTask(N-2, m_flag, m_observer) );
    if ( globalBarrierActive ) {
        // It's the first task executed by a worker. Release the master thread.
        theGlobalBarrier.wait();
    }
    wait_for_all();
    return NULL;
}
/*override*/ tbb::task* execute() {
    // Every flag recorded in this thread's local state must be one this
    // task expects.
    ASSERT( !(~LocalState->MyFlags & flags), NULL );
    if( n < 2 )
        return NULL;
    // Two children plus one for the wait, as spawn_and_wait_for_all expects.
    set_ref_count(3);
    FibTask &left = *new( allocate_child() ) FibTask(n-1,flags);
    spawn(left);
    FibTask &right = *new( allocate_child() ) FibTask(n-2,flags);
    spawn_and_wait_for_all(right);
    return NULL;
}
task* execute() { // overrides tbb::task::execute
    // Computes fib(n) into *sum by forking two child tasks.
    if( n<CutOff) {
        // Below the cutoff the task overhead outweighs the parallelism.
        *sum = SerialFib(n);
        return NULL;
    }
    long leftResult, rightResult;
    FibTask& leftChild  = *new( allocate_child() ) FibTask(n-1, &leftResult);
    FibTask& rightChild = *new( allocate_child() ) FibTask(n-2, &rightResult);
    // Ref count is two children plus one for the wait.
    set_ref_count(3);
    // Spawn one child, then run the other and wait for both.
    spawn(rightChild);
    spawn_and_wait_for_all(leftChild);
    // Combine the children's results.
    *sum = leftResult + rightResult;
    return NULL;
}
/*override*/ task* execute() {
    // Fibonacci task used to test that creating a nested
    // task_scheduler_init inside a running task preserves TBB's
    // thread-local scheduler state ("sandwich" mode interleaves another
    // runtime via FibCilkSubtask).
    if( !g_sandwich && n<2 ) {
        result = n;
    } else {
        int x,y;
        // Deliberately construct a nested scheduler init with P_nested
        // threads while this task is executing — this is what the test
        // exercises.
        tbb::task_scheduler_init init(P_nested);
        // Capture the current task before and after the nested work to
        // verify the TLS still points at the same task afterwards.
        task* self0 = &task::self();
        // Two children + 1 for the wait_for_all below.
        set_ref_count( 3 );
        if ( g_sandwich ) {
            spawn (*new( allocate_child() ) FibCilkSubtask(x,n-1));
            spawn (*new( allocate_child() ) FibCilkSubtask(y,n-2));
        } else {
            spawn (*new( allocate_child() ) FibTask(x,n-1));
            spawn (*new( allocate_child() ) FibTask(y,n-2));
        }
        wait_for_all();
        task* self1 = &task::self();
        ASSERT( self0 == self1, "failed to preserve TBB TLS" );
        result = x+y;
    }
    return NULL;
}
tbb::task* execute () {
    // Spawn a single do-nothing child and wait for it: one child plus
    // one reference for the wait.
    set_ref_count(2);
    tbb::empty_task &child = *new(allocate_child()) tbb::empty_task;
    spawn ( child );
    wait_for_all();
    return NULL;
}
tbb::task* execute () {
    // Recursive parallel QR factorization over the partition interval
    // [P_first_, P_last_]: split the interval, factor both halves as child
    // tasks, then combine the two R factors with factor_pair(). Also
    // aggregates per-subtree min/max sequential timings.
    if (P_first_ > P_last_ || A_.empty())
        return NULL;
    else if (P_first_ == P_last_) {
        execute_base_case ();
        return NULL;
    }
    else {
        // Recurse on two intervals: [P_first, P_mid] and [P_mid+1, P_last]
        const size_t P_mid = (P_first_ + P_last_) / 2;
        split_t A_split = partitioner_.split (A_, P_first_, P_mid, P_last_,
                                              contiguous_cache_blocks_);
        // The partitioner may decide that the current block A_
        // has too few rows to be worth splitting.  In that case,
        // A_split.second (the bottom block) will be empty.  We
        // can deal with this by treating it as the base case.
        if (A_split.second.empty() || A_split.second.nrows() == 0) {
            execute_base_case ();
            return NULL;
        }

        double top_timing;
        double top_min_timing = 0.0;
        double top_max_timing = 0.0;
        double bot_timing;
        double bot_min_timing = 0.0;
        double bot_max_timing = 0.0;

        FactorTask& topTask = *new( allocate_child() )
            FactorTask (P_first_, P_mid, A_split.first, A_top_ptr_,
                        seq_outputs_, par_output_, seq_,
                        top_timing, top_min_timing, top_max_timing,
                        contiguous_cache_blocks_);
        // After the task finishes, A_bot will be set to the topmost
        // partition of A_split.second.  This will let us combine
        // the two subproblems (using factor_pair()) after their
        // tasks complete.
        mat_view A_bot;
        FactorTask& botTask = *new( allocate_child() )
            FactorTask (P_mid+1, P_last_, A_split.second, &A_bot,
                        seq_outputs_, par_output_, seq_,
                        bot_timing, bot_min_timing, bot_max_timing,
                        contiguous_cache_blocks_);
        set_ref_count (3); // 3 children (2 + 1 for the wait)
        spawn (topTask);
        spawn_and_wait_for_all (botTask);

        // Combine the two results
        factor_pair (P_first_, P_mid+1, *A_top_ptr_, A_bot);

        // Leaf subtrees report 0.0 as their min/max; substitute the
        // actual timing in that case.
        top_min_timing = (top_min_timing == 0.0) ? top_timing : top_min_timing;
        top_max_timing = (top_max_timing == 0.0) ? top_timing : top_max_timing;
        bot_min_timing = (bot_min_timing == 0.0) ? bot_timing : bot_min_timing;
        bot_max_timing = (bot_max_timing == 0.0) ? bot_timing : bot_max_timing;

        min_seq_timing_ = std::min (top_min_timing, bot_min_timing);
        // BUG FIX: the maximum must be aggregated with std::max
        // (the original used std::min, which under-reported it).
        max_seq_timing_ = std::max (top_max_timing, bot_max_timing);
        return NULL;
    }
}
task* execute() {
    // Computes c += a * b (n x n) by quadrant recursion: the 8 quadrant
    // products run as parallel children (4 into c, 4 into a temporary d),
    // then 4 parallel additions fold d back into c.
    matrix d;
    if (n<=block) {
        // Base case: cache-blocked (16x16 tile) multiply-accumulate.
        // NOTE(review): this accumulates with += — it relies on c having
        // been zeroed (or holding a partial product) before entry; confirm
        // against the callers.
        double **p = a->d, **q = b->d, **r = c->d;
        for(int jj=0;jj<n;jj+= 16){
            for(int kk=0; kk<n; kk+= 16){
                for(int i=0;i<n; i++){
                    for(int j = jj; j<((jj+16)>n ? n:(jj+16)); j++){
                        // BUG FIX: 'temp' had no visible declaration; a local
                        // keeps the accumulator private to this task.
                        double temp = 0;
                        for(int k = kk; k<((kk+16) > n ?n :(kk+16)); k++){
                            temp += p[i][k]*q[k][j];
                        }
                        r[i][j] += temp;
                    }
                }
            }
        }
    } else {
        // Temporary matrix holding the 4 partial products that cannot be
        // written straight into c.
        // NOTE(review): d is never released — this leaks one matrix per
        // recursion level; confirm the matrix API's free routine and add it.
        d=newmatrix(n);
        n/=2;
        // c11 = a11*b11 + a12*b21, c12 = a11*b12 + a12*b22,
        // c21 = a21*b11 + a22*b21, c22 = a21*b12 + a22*b22.
        // First the 8 products: dXY gets the first term, cXY the second.
        RecMultTask& t1 = *new(tbb::task::allocate_child() ) RecMultTask(n, a11, b11, d11);
        RecMultTask& t2 = *new(tbb::task::allocate_child() ) RecMultTask(n, a12, b21, c11);
        RecMultTask& t3 = *new(tbb::task::allocate_child() ) RecMultTask(n, a11, b12, d12);
        RecMultTask& t4 = *new(tbb::task::allocate_child() ) RecMultTask(n, a12, b22, c12);
        RecMultTask& t5 = *new(tbb::task::allocate_child() ) RecMultTask(n, a21, b11, d21);
        RecMultTask& t6 = *new(tbb::task::allocate_child() ) RecMultTask(n, a22, b21, c21);
        RecMultTask& t7 = *new(tbb::task::allocate_child() ) RecMultTask(n, a21, b12, d22);
        RecMultTask& t8 = *new(tbb::task::allocate_child() ) RecMultTask(n, a22, b22, c22);
        // Eight children + 1 for the wait.
        set_ref_count(9);
        tbb::task::spawn(t1);
        tbb::task::spawn(t2);
        tbb::task::spawn(t3);
        tbb::task::spawn(t4);
        tbb::task::spawn(t5);
        tbb::task::spawn(t6);
        tbb::task::spawn(t7);
        tbb::task::spawn(t8);
        tbb::task::wait_for_all();
        // Then combine: cXY = cXY + dXY.
        // BUG FIX: RecAddTask(n, a, b, c) computes c = a + b, so the output
        // argument must be the c quadrant. The original passed
        // (n, cXY, cXY, dXY), which doubled cXY into the soon-discarded d
        // and never added the dXY partial products at all.
        RecAddTask& t9  = *new(tbb::task::allocate_child() ) RecAddTask(n, c11, d11, c11);
        RecAddTask& t10 = *new(tbb::task::allocate_child() ) RecAddTask(n, c12, d12, c12);
        RecAddTask& t11 = *new(tbb::task::allocate_child() ) RecAddTask(n, c21, d21, c21);
        RecAddTask& t12 = *new(tbb::task::allocate_child() ) RecAddTask(n, c22, d22, c22);
        // Four children + 1 for the wait.
        set_ref_count(5);
        tbb::task::spawn(t9);
        tbb::task::spawn(t10);
        tbb::task::spawn(t11);
        tbb::task::spawn(t12);
        tbb::task::wait_for_all();
    }
    return NULL;
}