void Dispatcher::batchTasksWalk( Dispatcher::TaskBatchPtr parent, const ExecutableNode::Task &task, BatchMap &currentBatches, TaskToBatchMap &tasksToBatches, std::set<const TaskBatch *> &ancestors )
{
	TaskBatchPtr batch = acquireBatch( task, currentBatches, tasksToBatches );

	// Add the batch as a requirement of the parent, unless it is there
	// already, checking for cyclic dependencies first.
	TaskBatches &parentRequirements = parent->requirements();
	if ( std::find( parentRequirements.begin(), parentRequirements.end(), batch ) == parentRequirements.end() )
	{
		if ( ancestors.find( batch.get() ) != ancestors.end() )
		{
			throw IECore::Exception( ( boost::format( "Dispatched nodes cannot have cyclic dependencies. %s and %s are involved in a cycle." ) % batch->node()->relativeName( batch->node()->scriptNode() ) % parent->node()->relativeName( parent->node()->scriptNode() ) ).str() );
		}

		parentRequirements.push_back( batch );
	}

	// Recurse into the requirements of this task, recording the parent
	// as an ancestor for cycle detection.
	ExecutableNode::Tasks taskRequirements;
	task.node()->requirements( task.context(), taskRequirements );

	ancestors.insert( parent.get() );
	for ( ExecutableNode::Tasks::const_iterator it = taskRequirements.begin(); it != taskRequirements.end(); ++it )
	{
		batchTasksWalk( batch, *it, currentBatches, tasksToBatches, ancestors );
	}
	ancestors.erase( parent.get() );
}
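// A minimal sketch of how this walk might be seeded from batchTasks(), which
// dispatch() below calls to build the root batch. The default TaskBatch
// constructor for the root and the local variable names are assumptions for
// illustration, not taken from the source above.
Dispatcher::TaskBatchPtr Dispatcher::batchTasks( const ExecutableNode::Tasks &tasks )
{
	TaskBatchPtr rootBatch = new TaskBatch; // assumed default constructor for the root batch
	BatchMap currentBatches;
	TaskToBatchMap tasksToBatches;
	std::set<const TaskBatch *> ancestors;

	for ( ExecutableNode::Tasks::const_iterator it = tasks.begin(); it != tasks.end(); ++it )
	{
		batchTasksWalk( rootBatch, *it, currentBatches, tasksToBatches, ancestors );
	}

	return rootBatch;
}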
TaskBatchPtr batchTasksWalk( const TaskNode::Task &task, const std::set<const TaskBatch *> &ancestors = std::set<const TaskBatch *>() )
{
	// Acquire a batch with this task placed in it,
	// and check that we haven't discovered a cyclic
	// dependency.
	TaskBatchPtr batch = acquireBatch( task );
	if( ancestors.find( batch.get() ) != ancestors.end() )
	{
		throw IECore::Exception( ( boost::format( "Dispatched tasks cannot have cyclic dependencies but %s is involved in a cycle." ) % batch->plug()->relativeName( batch->plug()->ancestor<ScriptNode>() ) ).str() );
	}

	// Ask the task what preTasks and postTasks it would like.
	TaskNode::Tasks preTasks;
	TaskNode::Tasks postTasks;
	{
		Context::Scope scopedTaskContext( task.context() );
		task.plug()->preTasks( preTasks );
		task.plug()->postTasks( postTasks );
	}

	// Collect all the batches the postTasks belong in.
	// We grab these first because they need to be included
	// in the ancestors for cycle detection when getting
	// the preTask batches.
	TaskBatches postBatches;
	for( TaskNode::Tasks::const_iterator it = postTasks.begin(); it != postTasks.end(); ++it )
	{
		postBatches.push_back( batchTasksWalk( *it ) );
	}

	// Collect all the batches the preTasks belong in,
	// and add them as preTasks for our batch.
	std::set<const TaskBatch *> preTaskAncestors( ancestors );
	preTaskAncestors.insert( batch.get() );
	for( TaskBatches::const_iterator it = postBatches.begin(), eIt = postBatches.end(); it != eIt; ++it )
	{
		preTaskAncestors.insert( it->get() );
	}
	for( TaskNode::Tasks::const_iterator it = preTasks.begin(); it != preTasks.end(); ++it )
	{
		addPreTask( batch.get(), batchTasksWalk( *it, preTaskAncestors ) );
	}

	// As far as TaskBatch and doDispatch() are concerned, there
	// is no such thing as a postTask, so we emulate them by making
	// this batch a preTask of each of the postTask batches. We also
	// add the postTask batches as preTasks for the root, so that they
	// are reachable from doDispatch().
	for( TaskBatches::const_iterator it = postBatches.begin(), eIt = postBatches.end(); it != eIt; ++it )
	{
		addPreTask( it->get(), batch, /* forPostTask = */ true );
		addPreTask( m_rootBatch.get(), *it );
	}

	return batch;
}
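// A minimal usage sketch: each top-level task handed to the dispatcher would
// be walked like this, parenting its batch under the root so that doDispatch()
// can reach it. The surrounding Batcher-style class and the addTask() name are
// assumptions for illustration; only m_rootBatch, addPreTask() and
// batchTasksWalk() come from the code above.
void Batcher::addTask( const TaskNode::Task &task )
{
	addPreTask( m_rootBatch.get(), batchTasksWalk( task ) );
}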
Dispatcher::TaskBatchPtr Dispatcher::acquireBatch( const ExecutableNode::Task &task, BatchMap &currentBatches, TaskToBatchMap &tasksToBatches )
{
	// If we've seen this exact task before, reuse the batch it was placed in.
	MurmurHash taskHash = task.hash();
	TaskToBatchMap::iterator it = tasksToBatches.find( taskHash );
	if ( it != tasksToBatches.end() )
	{
		return it->second;
	}

	// Otherwise, try to add the task to an existing batch with a matching
	// batch hash, provided the node requires sequence execution or the
	// batch isn't full yet.
	MurmurHash hash = batchHash( task );
	BatchMap::iterator bIt = currentBatches.find( hash );
	if ( bIt != currentBatches.end() )
	{
		TaskBatchPtr batch = bIt->second;

		std::vector<float> &frames = batch->frames();
		const CompoundPlug *dispatcherPlug = task.node()->dispatcherPlug();
		const IntPlug *batchSizePlug = dispatcherPlug->getChild<const IntPlug>( g_batchSize );
		size_t batchSize = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;

		if ( task.node()->requiresSequenceExecution() || ( frames.size() < batchSize ) )
		{
			// Record the frame for this task, keeping frames sorted when
			// sequence execution is required. No-op tasks (empty hash)
			// contribute no frame.
			if ( task.hash() != MurmurHash() )
			{
				float frame = task.context()->getFrame();
				if ( std::find( frames.begin(), frames.end(), frame ) == frames.end() )
				{
					if ( task.node()->requiresSequenceExecution() )
					{
						frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
					}
					else
					{
						frames.push_back( frame );
					}
				}
			}

			if ( taskHash != MurmurHash() )
			{
				tasksToBatches[taskHash] = batch;
			}

			return batch;
		}
	}

	// No suitable batch exists, so start a new one.
	TaskBatchPtr batch = new TaskBatch( task );
	currentBatches[hash] = batch;
	if ( taskHash != MurmurHash() )
	{
		tasksToBatches[taskHash] = batch;
	}

	return batch;
}
void Dispatcher::dispatch( const std::vector<NodePtr> &nodes ) const
{
	// clear job directory, so that if our node validation fails,
	// jobDirectory() won't return the result from the previous dispatch.
	m_jobDirectory = "";

	// validate the nodes we've been given

	if ( nodes.empty() )
	{
		throw IECore::Exception( getName().string() + ": Must specify at least one node to dispatch." );
	}

	std::vector<ExecutableNodePtr> executables;
	const ScriptNode *script = (*nodes.begin())->scriptNode();
	for ( std::vector<NodePtr>::const_iterator nIt = nodes.begin(); nIt != nodes.end(); ++nIt )
	{
		const ScriptNode *currentScript = (*nIt)->scriptNode();
		if ( !currentScript || currentScript != script )
		{
			throw IECore::Exception( getName().string() + ": Dispatched nodes must all belong to the same ScriptNode." );
		}

		if ( ExecutableNode *executable = runTimeCast<ExecutableNode>( nIt->get() ) )
		{
			executables.push_back( executable );
		}
		else if ( const Box *box = runTimeCast<const Box>( nIt->get() ) )
		{
			for ( RecursiveOutputPlugIterator plugIt( box ); plugIt != plugIt.end(); ++plugIt )
			{
				Node *sourceNode = plugIt->get()->source<Plug>()->node();
				if ( ExecutableNode *executable = runTimeCast<ExecutableNode>( sourceNode ) )
				{
					executables.push_back( executable );
				}
			}
		}
		else
		{
			throw IECore::Exception( getName().string() + ": Dispatched nodes must be ExecutableNodes or Boxes containing ExecutableNodes." );
		}
	}

	// create the job directory now, so it's available in preDispatchSignal().

	const Context *context = Context::current();
	m_jobDirectory = createJobDirectory( context );

	// this object calls this->preDispatchSignal() in its constructor and this->postDispatchSignal()
	// in its destructor, thereby guaranteeing that we always call this->postDispatchSignal().

	DispatcherSignalGuard signalGuard( this, executables );
	if ( signalGuard.cancelledByPreDispatch() )
	{
		return;
	}

	std::vector<FrameList::Frame> frames;
	FrameListPtr frameList = frameRange( script, context );
	frameList->asList( frames );

	size_t i = 0;
	ExecutableNode::Tasks tasks;
	tasks.reserve( executables.size() * frames.size() );
	for ( std::vector<FrameList::Frame>::const_iterator fIt = frames.begin(); fIt != frames.end(); ++fIt )
	{
		for ( std::vector<ExecutableNodePtr>::const_iterator nIt = executables.begin(); nIt != executables.end(); ++nIt, ++i )
		{
			ContextPtr frameContext = new Context( *context, Context::Borrowed );
			frameContext->setFrame( *fIt );
			tasks.push_back( ExecutableNode::Task( *nIt, frameContext ) );
		}
	}

	TaskBatchPtr rootBatch = batchTasks( tasks );

	if ( !rootBatch->requirements().empty() )
	{
		doDispatch( rootBatch.get() );
	}

	// inform the guard that the process has been completed, so it can pass this info to
	// postDispatchSignal():
	signalGuard.success();
}
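// A rough sketch of the RAII guard described in the comments above: fire
// preDispatchSignal() on construction and guarantee postDispatchSignal() on
// destruction, reporting success only if success() was called. The member
// names and the exact signal signatures here are assumptions for illustration,
// not confirmed by the source above.
class DispatcherSignalGuard
{
	public :

		DispatcherSignalGuard( const Dispatcher *dispatcher, const std::vector<ExecutableNodePtr> &executables )
			:	m_dispatchSuccessful( false ), m_executables( executables ), m_dispatcher( dispatcher )
		{
			// preDispatchSignal() returning true indicates cancellation.
			m_cancelledByPreDispatch = m_dispatcher->preDispatchSignal()( m_dispatcher, m_executables );
		}

		~DispatcherSignalGuard()
		{
			// Always emitted, whether dispatch succeeded, was cancelled, or threw.
			m_dispatcher->postDispatchSignal()( m_dispatcher, m_executables, m_dispatchSuccessful );
		}

		bool cancelledByPreDispatch() const
		{
			return m_cancelledByPreDispatch;
		}

		void success()
		{
			m_dispatchSuccessful = true;
		}

	private :

		bool m_cancelledByPreDispatch;
		bool m_dispatchSuccessful;
		const std::vector<ExecutableNodePtr> &m_executables;
		const Dispatcher *m_dispatcher;

};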
TaskBatchPtr acquireBatch( const TaskNode::Task &task )
{
	// See if we've previously visited this task, and therefore
	// have placed it in a batch already, which we can return
	// unchanged. The `taskToBatchMapHash` is used as the unique
	// identity of a task.
	MurmurHash taskToBatchMapHash = task.hash();
	// Prevent identical tasks from different nodes from being
	// coalesced.
	taskToBatchMapHash.append( (uint64_t)task.node() );
	if( task.hash() == IECore::MurmurHash() )
	{
		// Prevent no-ops from coalescing into a single batch, as this
		// would break parallelism - see `DispatcherTest.testNoOpDoesntBreakFrameParallelism()`
		taskToBatchMapHash.append( contextHash( task.context() ) );
	}
	const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
	if( it != m_tasksToBatches.end() )
	{
		return it->second;
	}

	// We haven't seen this task before, so we need to find
	// an appropriate batch to put it in. This may be one of
	// our current batches, or we may need to make a new one
	// entirely if the current batch is full.

	const bool requiresSequenceExecution = task.plug()->requiresSequenceExecution();

	TaskBatchPtr batch = nullptr;
	const MurmurHash batchMapHash = batchHash( task );
	BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
	if( bIt != m_currentBatches.end() )
	{
		TaskBatchPtr candidateBatch = bIt->second;
		// Unfortunately we have to track batch size separately from `batch->frames().size()`,
		// because no-ops don't update `frames()`, but _do_ count towards batch size.
		IntDataPtr batchSizeData = candidateBatch->blindData()->member<IntData>( g_sizeBlindDataName );
		const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
		const int batchSizeLimit = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
		if( requiresSequenceExecution || ( batchSizeData->readable() < batchSizeLimit ) )
		{
			batch = candidateBatch;
			batchSizeData->writable()++;
		}
	}

	if( !batch )
	{
		batch = new TaskBatch( task.plug(), task.context() );
		batch->blindData()->writable()[g_sizeBlindDataName] = new IntData( 1 );
		m_currentBatches[batchMapHash] = batch;
	}

	// Now we have an appropriate batch, update it to include
	// the frame for our task, and any other relevant information.

	if( task.hash() != MurmurHash() )
	{
		float frame = task.context()->getFrame();
		std::vector<float> &frames = batch->frames();
		if( requiresSequenceExecution )
		{
			frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
		}
		else
		{
			frames.push_back( frame );
		}
	}

	const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
	if( immediatePlug && immediatePlug->getValue() )
	{
		/// \todo Should we be scoping a context for this, to allow the plug to
		/// have expressions on it? If so, should we be doing the same before
		/// calling requiresSequenceExecution()? Or should we instead require that
		/// they always be constant?
		batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
	}

	// Remember which batch we stored this task in, for
	// the next time someone asks for it.
	m_tasksToBatches[taskToBatchMapHash] = batch;

	return batch;
}
TaskBatchPtr acquireBatch( const TaskNode::Task &task )
{
	// See if we've previously visited this task, and therefore
	// have placed it in a batch already, which we can return
	// unchanged.
	MurmurHash taskToBatchMapHash = task.hash();
	taskToBatchMapHash.append( (uint64_t)task.node() );
	if( task.hash() == MurmurHash() )
	{
		// Make sure we don't coalesce all no-ops into a single
		// batch. See comments in batchHash().
		taskToBatchMapHash.append( task.context()->getFrame() );
	}
	const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
	if( it != m_tasksToBatches.end() )
	{
		return it->second;
	}

	// We haven't seen this task before, so we need to find
	// an appropriate batch to put it in. This may be one of
	// our current batches, or we may need to make a new one
	// entirely.

	TaskBatchPtr batch = NULL;
	const MurmurHash batchMapHash = batchHash( task );
	BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
	if( bIt != m_currentBatches.end() )
	{
		TaskBatchPtr candidateBatch = bIt->second;
		const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
		const size_t batchSize = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
		if( task.plug()->requiresSequenceExecution() || ( candidateBatch->frames().size() < batchSize ) )
		{
			batch = candidateBatch;
		}
	}

	if( !batch )
	{
		batch = new TaskBatch( task.plug(), task.context() );
		m_currentBatches[batchMapHash] = batch;
	}

	// Now we have an appropriate batch, update it to include
	// the frame for our task, and any other relevant information.

	if( task.hash() != MurmurHash() )
	{
		float frame = task.context()->getFrame();
		std::vector<float> &frames = batch->frames();
		if( std::find( frames.begin(), frames.end(), frame ) == frames.end() )
		{
			if( task.plug()->requiresSequenceExecution() )
			{
				frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
			}
			else
			{
				frames.push_back( frame );
			}
		}
	}

	const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
	if( immediatePlug && immediatePlug->getValue() )
	{
		/// \todo Should we be scoping a context for this, to allow the plug to
		/// have expressions on it? If so, should we be doing the same before
		/// calling requiresSequenceExecution()? Or should we instead require that
		/// they always be constant?
		batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
	}

	// Remember which batch we stored this task in, for
	// the next time someone asks for it.
	m_tasksToBatches[taskToBatchMapHash] = batch;

	return batch;
}
void Dispatcher::dispatch( const std::vector<NodePtr> &nodes ) const
{
	if ( nodes.empty() )
	{
		throw IECore::Exception( getName().string() + ": Must specify at least one node to dispatch." );
	}

	std::vector<ExecutableNodePtr> executables;
	const ScriptNode *script = (*nodes.begin())->scriptNode();
	for ( std::vector<NodePtr>::const_iterator nIt = nodes.begin(); nIt != nodes.end(); ++nIt )
	{
		const ScriptNode *currentScript = (*nIt)->scriptNode();
		if ( !currentScript || currentScript != script )
		{
			throw IECore::Exception( getName().string() + ": Dispatched nodes must all belong to the same ScriptNode." );
		}

		if ( ExecutableNode *executable = runTimeCast<ExecutableNode>( nIt->get() ) )
		{
			executables.push_back( executable );
		}
		else if ( const Box *box = runTimeCast<const Box>( nIt->get() ) )
		{
			for ( RecursiveOutputPlugIterator plugIt( box ); plugIt != plugIt.end(); ++plugIt )
			{
				Node *sourceNode = plugIt->get()->source<Plug>()->node();
				if ( ExecutableNode *executable = runTimeCast<ExecutableNode>( sourceNode ) )
				{
					executables.push_back( executable );
				}
			}
		}
		else
		{
			throw IECore::Exception( getName().string() + ": Dispatched nodes must be ExecutableNodes or Boxes containing ExecutableNodes." );
		}
	}

	if ( preDispatchSignal()( this, executables ) )
	{
		/// \todo: communicate the cancellation to the user
		return;
	}

	const Context *context = Context::current();
	std::vector<FrameList::Frame> frames;
	FrameListPtr frameList = frameRange( script, context );
	frameList->asList( frames );

	size_t i = 0;
	ExecutableNode::Tasks tasks;
	tasks.reserve( executables.size() * frames.size() );
	for ( std::vector<FrameList::Frame>::const_iterator fIt = frames.begin(); fIt != frames.end(); ++fIt )
	{
		for ( std::vector<ExecutableNodePtr>::const_iterator nIt = executables.begin(); nIt != executables.end(); ++nIt, ++i )
		{
			ContextPtr frameContext = new Context( *context, Context::Borrowed );
			frameContext->setFrame( *fIt );
			tasks.push_back( ExecutableNode::Task( *nIt, frameContext ) );
		}
	}

	TaskBatchPtr rootBatch = batchTasks( tasks );

	if ( !rootBatch->requirements().empty() )
	{
		doDispatch( rootBatch.get() );
	}

	postDispatchSignal()( this, executables );
}
bool TaskQueue::process(float dt, TaskPtr task)
{
	if(task->isDone())
		return true;

	if(task->getType() == Task::BATCH)
	{
		// Process every child of the batch, erasing the ones that finish.
		TaskBatchPtr t = boost::static_pointer_cast<TaskBatch>(task);
		for(std::list<TaskPtr>::iterator i = t->list.begin(); i != t->list.end();)
		{
			if(process(dt, *i))
				i = t->list.erase(i);
			else
				i++;
		}
		return t->isDone();
	}
	else if(task->getType() == Task::SEQUENCE)
	{
		// Process only the head of the sequence, popping it once it completes.
		TaskSequencePtr t = boost::static_pointer_cast<TaskSequence>(task);
		for(int j = 0; j < MAX_CURRENT_JOBS && !t->isDone(); j++)
		{
			TaskPtr head = t->list.front();
			if(process(dt, head))
			{
				if(t->list.size())
					t->list.pop_front();
			}
			else
			{
				break;
			}
		}
		return t->isDone();
	}
	else if(task->getType() == Task::ANIM)
	{
		TaskAnimPtr anim = boost::static_pointer_cast<TaskAnim>(task);
		if(!anim->node->isRunning())
			return true;

		if(!anim->hasStarted())
		{
			anim->launch();
		}
		else if(anim->wait)
		{
			// insidious hack
			int ref = (int)anim->node->getUserData();
			if(ref != anim->runningCount)
			{
				LOGD("overwriting previous animation %d %d\n", ref, anim->runningCount);
				return true;
			}
		}
		return anim->isDone();
	}
	else if(task->getType() == Task::SOUND)
	{
		TaskSoundPtr sound = boost::static_pointer_cast<TaskSound>(task);
		if(!sound->isDone())
			sound->launch();
		return true;
	}
	else if(task->getType() == Task::LAMBDA)
	{
		TaskLambdaPtr t = boost::static_pointer_cast<TaskLambda>(task);
		t->func();
		return true;
	}
	else if(task->getType() == Task::EMPTY || task->getType() == Task::IGNORE)
	{
		return true;
	}

	LOGD("unknown task %s\n", task->getTag().c_str());
	phassert(false && "Unknown task occurred");
	return true;
}
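// A minimal driver sketch, assuming the queue owns a top-level task list named
// m_tasks and is ticked once per frame with the elapsed time; the update() name
// and the m_tasks member are illustrative, not from the source above.
void TaskQueue::update(float dt)
{
	for(std::list<TaskPtr>::iterator it = m_tasks.begin(); it != m_tasks.end();)
	{
		if(process(dt, *it))
			it = m_tasks.erase(it); // finished tasks are removed, mirroring the BATCH branch
		else
			++it;
	}
}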