tbb::task* execute() {
     tbb::task* next = NULL;
     if( !is_continuation ) {
         if( root->node_count<1000 ) {
             *sum = SerialSumTree(root);
         } else {
             // Create tasks before spawning any of them.
             tbb::task* a = NULL;
             tbb::task* b = NULL;
             if( root->left )
                 a = new( allocate_child() ) OptimizedSumTask(root->left,&x);
             if( root->right )
                 b = new( allocate_child() ) OptimizedSumTask(root->right,&y);
             // Recycle this task as the continuation that will sum the
             // partial results once both children complete.
             recycle_as_continuation();
             is_continuation = true;
             // No "+1 for the wait": the continuation never calls
             // wait_for_all, so ref_count counts only the live children.
             set_ref_count( (a!=NULL)+(b!=NULL) );
             // Spawn one child and return the other for scheduler bypass.
             if( a ) {
                 if( b ) spawn(*b);
             } else 
                 a = b;
             next = a;
         }
     } else {
         // Second (continuation) pass: combine the partial sums.
         *sum = root->value;
         if( root->left ) *sum += x;
         if( root->right ) *sum += y;
     } 
     return next;
 }
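For context, a sketch of the task class this execute() belongs to, modeled on the OptimizedSumTask example in the TBB tutorial; the field names are assumed from the body above.

class OptimizedSumTask: public tbb::task {
    TreeNode* root;
    float* sum;
    float x, y;            // partial sums written by the child tasks
    bool is_continuation;  // false on the first pass, true once recycled
public:
    OptimizedSumTask( TreeNode* root_, float* sum_ ) :
        root(root_), sum(sum_), is_continuation(false)
    {}
    tbb::task* execute();  // the method shown above
};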
 task* execute() {
     if( root->node_count<1000 ) {
         *sum = SerialSumTree(root);
     } else {
         Value x, y;
         int count = 1; 
         tbb::task_list list;
         if( root->left ) {
             ++count;
             list.push_back( *new( allocate_child() ) SimpleSumTask(root->left,&x) );
         }
         if( root->right ) {
             ++count;
             list.push_back( *new( allocate_child() ) SimpleSumTask(root->right,&y) );
         }
         // Argument to set_ref_count is one more than size of the list,
         // because spawn_and_wait_for_all expects an augmented ref_count.
         set_ref_count(count);
         spawn_and_wait_for_all(list);
         *sum = root->value;
         if( root->left ) *sum += x;
         if( root->right ) *sum += y;
     }
     return NULL;
 }
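A minimal driver for this blocking-style version, in the spirit of the TBB tutorial; SimpleSumTask, its (root, &sum) constructor, and the Value payload type are assumed from the body above.

Value ParallelSum( TreeNode* root ) {
    Value sum;
    SimpleSumTask& a = *new( tbb::task::allocate_root() ) SimpleSumTask(root, &sum);
    tbb::task::spawn_root_and_wait(a);
    return sum;
}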
 /*override*/ tbb::task* execute() {
     ASSERT( !(~LocalState->MyFlags & flags), NULL );
     if( n>=2 ) {
         set_ref_count(3); // two children + 1 for the wait
         spawn(*new( allocate_child() ) FibTask(n-1,flags));
         spawn_and_wait_for_all(*new( allocate_child() ) FibTask(n-2,flags));
     }
     return NULL;
 }
Example 4
		virtual task *execute()
		{				
			
			ContextPtr context = new Context( *m_context );
			context->set( ScenePlug::scenePathContextName, m_scenePath );
			Context::Scope scopedContext( context );
			
			// Pull on each plug to force its value to be computed at this location.
			m_scenePlug->transformPlug()->getValue();
			m_scenePlug->boundPlug()->getValue();
			m_scenePlug->attributesPlug()->getValue();
			m_scenePlug->objectPlug()->getValue();
				
			ConstInternedStringVectorDataPtr childNamesData = m_scenePlug->childNamesPlug()->getValue();
			const vector<InternedString> &childNames = childNamesData->readable();
			
			set_ref_count( 1 + childNames.size() ); // 1 for the wait plus the children
			
			ScenePlug::ScenePath childPath = m_scenePath;
			childPath.push_back( InternedString() ); // space for the child name
			for( vector<InternedString>::const_iterator it = childNames.begin(), eIt = childNames.end(); it != eIt; it++ )
			{
				childPath[m_scenePath.size()] = *it;
				SceneTraversalTask *t = new( allocate_child() ) SceneTraversalTask( m_scenePlug, m_context, childPath );
				spawn( *t );
			}
			
			wait_for_all();
			
			return 0;
		}
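A sketch of how a traversal task like this is typically kicked off; the root-task idiom is standard TBB, while the surrounding scenePlug and context variables are assumptions.

// Assumed setup: scenePlug and context are valid; start at the scene root.
ScenePlug::ScenePath rootPath;
SceneTraversalTask *task = new( tbb::task::allocate_root() )
	SceneTraversalTask( scenePlug, context, rootPath );
tbb::task::spawn_root_and_wait( *task );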
Example 5
 task* execute () {
     count_t base = my_groupId * NumLeafTasks;
     set_ref_count(NumLeafTasks + 1); // leaf children + 1 for the wait
     for ( count_t i = 0; i < NumLeafTasks; ++i )
         spawn( *new(allocate_child()) LeafTask(base + i) );
     wait_for_all();
     return NULL;
 }
 /*override*/ tbb::task* execute() {
     ASSERT( !(~theLocalState->m_flags & m_flag), NULL );
     if( N < 2 )
         return NULL;
     bool globalBarrierActive = false;
     if ( theLocalState->m_isMaster ) {
         if ( theGlobalBarrierActive ) {
             // This is the root task. Its N is equal to the number of threads.
             // Spawn a task for each worker.
             set_ref_count(N); // N-1 children + 1 for the wait
             for ( int i = 1; i < N; ++i )
                 spawn( *new( allocate_child() ) FibTask(20, m_flag, m_observer) );
             if ( theTestMode & tmSynchronized ) {
                 theGlobalBarrier.wait();
                 ASSERT( m_observer.m_entries >= N, "Wrong number of on_entry calls after the first barrier" );
                 // All the spawned tasks have been stolen by workers.
                 // Now wait for workers to spawn some more tasks for this thread to steal back.
                 theGlobalBarrier.wait();
                 ASSERT( !theGlobalBarrierActive, "Workers are expected to have reset this flag" );
             }
             else
                 theGlobalBarrierActive = false;
             wait_for_all();
             return NULL;
         }
     }
     else {
         if ( theGlobalBarrierActive ) {
             if ( theTestMode & tmSynchronized ) {
                 theGlobalBarrier.wait();
                 globalBarrierActive = true;
             }
             theGlobalBarrierActive = false;
         }
     }
     set_ref_count(3);
     spawn( *new( allocate_child() ) FibTask(N-1, m_flag, m_observer) );
     spawn( *new( allocate_child() ) FibTask(N-2, m_flag, m_observer) );
     if ( globalBarrierActive ) {
         // It's the first task executed by a worker. Release the master thread.
         theGlobalBarrier.wait();
     }
     wait_for_all();
     return NULL;
 }
	task* execute() {	// Overrides virtual function task::execute
		if( n<CutOff) {
			*sum = SerialFib(n);
		} else {
			long x, y;
			FibTask& a = *new( allocate_child() ) FibTask(n-1, &x);
			FibTask& b = *new( allocate_child() ) FibTask(n-2, &y);
			//Set ref_count to "two children plus one for the wait".
			set_ref_count(3);
			// Start b running.
			spawn(b);
			// Start a and wait for all children (a and b).
			spawn_and_wait_for_all(a);
			// Do the sum.
			*sum = x+y;
		}
		return NULL;
	}
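The classic driver that pairs with this example in the TBB tutorial, assuming FibTask's constructor takes n and a result pointer, as the body above implies.

long ParallelFib( long n ) {
    long sum;
    FibTask& a = *new( tbb::task::allocate_root() ) FibTask(n, &sum);
    tbb::task::spawn_root_and_wait(a);
    return sum;
}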
Example 8
 /*override*/ task* execute() {
     if( !g_sandwich && n<2 ) {
         result = n;
     } else {
         int x,y;
         tbb::task_scheduler_init init(P_nested);
         task* self0 = &task::self();
         set_ref_count( 3 );
         if ( g_sandwich ) {
             spawn (*new( allocate_child() ) FibCilkSubtask(x,n-1));
             spawn (*new( allocate_child() ) FibCilkSubtask(y,n-2));
         }
         else {
             spawn (*new( allocate_child() ) FibTask(x,n-1));
             spawn (*new( allocate_child() ) FibTask(y,n-2));
         }
         wait_for_all(); 
         task* self1 = &task::self();
         ASSERT( self0 == self1, "failed to preserve TBB TLS" );
         result = x+y;
     }
     return NULL;
 }
 tbb::task* execute () {
     set_ref_count(2); // one child + 1 for the wait
     spawn ( *new(allocate_child()) tbb::empty_task );
     wait_for_all();
     return NULL;
 }
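To make this fragment runnable on its own, a minimal sketch with a hypothetical wrapper class (BarrierTask is an assumed name) and the standard root-task launch:

#include "tbb/task.h"
#include "tbb/task_scheduler_init.h"

struct BarrierTask : public tbb::task {
    tbb::task* execute() {
        set_ref_count(2); // one child + 1 for the wait
        spawn( *new(allocate_child()) tbb::empty_task );
        wait_for_all();
        return NULL;
    }
};

int main() {
    tbb::task_scheduler_init init;  // explicit init for older TBB versions
    BarrierTask& t = *new( tbb::task::allocate_root() ) BarrierTask;
    tbb::task::spawn_root_and_wait(t);
    return 0;
}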
Example 10
      tbb::task* execute () 
      {
	if (P_first_ > P_last_ || A_.empty())
	  return NULL;
	else if (P_first_ == P_last_)
	  {
	    execute_base_case ();
	    return NULL;
	  }
	else
	  {
	    // Recurse on two intervals: [P_first, P_mid] and [P_mid+1, P_last]
	    const size_t P_mid = (P_first_ + P_last_) / 2;
	    split_t A_split = 
	      partitioner_.split (A_, P_first_, P_mid, P_last_,
				  contiguous_cache_blocks_);
	    // The partitioner may decide that the current block A_
	    // has too few rows to be worth splitting.  In that case,
	    // A_split.second (the bottom block) will be empty.  We
	    // can deal with this by treating it as the base case.
	    if (A_split.second.empty() || A_split.second.nrows() == 0)
	      {
		execute_base_case ();
		return NULL;
	      }

	    double top_timing;
	    double top_min_timing = 0.0;
	    double top_max_timing = 0.0;
	    double bot_timing;
	    double bot_min_timing = 0.0;
	    double bot_max_timing = 0.0;

	    FactorTask& topTask = *new( allocate_child() )
	      FactorTask (P_first_, P_mid, A_split.first, A_top_ptr_, 
			  seq_outputs_, par_output_, seq_,
			  top_timing, top_min_timing, top_max_timing,
			  contiguous_cache_blocks_);
	    // After the task finishes, A_bot will be set to the topmost
	    // partition of A_split.second.  This will let us combine
	    // the two subproblems (using factor_pair()) after their
	    // tasks complete.
	    mat_view A_bot;
	    FactorTask& botTask = *new( allocate_child() )
	      FactorTask (P_mid+1, P_last_, A_split.second, &A_bot, 
			  seq_outputs_, par_output_, seq_,
			  bot_timing, bot_min_timing, bot_max_timing,
			  contiguous_cache_blocks_);
	    set_ref_count (3); // 2 children + 1 for the wait
	    spawn (topTask);
	    spawn_and_wait_for_all (botTask);
	    
	    // Combine the two results
	    factor_pair (P_first_, P_mid+1, *A_top_ptr_, A_bot);

	    top_min_timing = (top_min_timing == 0.0) ? top_timing : top_min_timing;
	    top_max_timing = (top_max_timing == 0.0) ? top_timing : top_max_timing;

	    bot_min_timing = (bot_min_timing == 0.0) ? bot_timing : bot_min_timing;
	    bot_max_timing = (bot_max_timing == 0.0) ? bot_timing : bot_max_timing;

	    min_seq_timing_ = std::min (top_min_timing, bot_min_timing);
	    max_seq_timing_ = std::max (top_max_timing, bot_max_timing);

	    return NULL;
	  }
      }