// Hash used to determine how to coalesce tasks into batches.
// If `batchHash( task1 ) == batchHash( task2 )` then the two
// tasks can be placed in the same batch.
IECore::MurmurHash batchHash( const TaskNode::Task &task )
{
	MurmurHash result;
	result.append( (uint64_t)task.node() );
	// We ignore the frame because the whole point of batching
	// is to allow multiple frames to be placed in the same
	// batch if the context is otherwise identical.
	result.append( contextHash( task.context(), /* ignoreFrame = */ true ) );
	return result;
}
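// The `contextHash()` helper used above isn't shown in this section.
// What follows is a minimal hypothetical sketch of it, assuming it
// encapsulates the same filtering as the inline loop in the variant
// of `batchHash()` below (skip `ui:` entries, optionally skip the
// frame); the default argument matches the one-argument call site
// in `acquireBatch()` :
IECore::MurmurHash contextHash( const Context *context, bool ignoreFrame = false )
{
	MurmurHash result;
	std::vector<IECore::InternedString> names;
	context->names( names );
	for( const auto &name : names )
	{
		// UI values are irrelevant to execution.
		if( name.string().compare( 0, 3, "ui:" ) == 0 )
		{
			continue;
		}
		// Skip the frame only when requested, so callers can choose
		// between per-frame and frame-independent hashes.
		if( ignoreFrame && name == g_frame )
		{
			continue;
		}
		result.append( name );
		context->get<const IECore::Data>( name )->hash( result );
	}
	return result;
}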
// Hash used to determine how to coalesce tasks into batches.
// If `batchHash( task1 ) == batchHash( task2 )` then the two
// tasks can be placed in the same batch.
IECore::MurmurHash batchHash( const TaskNode::Task &task )
{
	MurmurHash result;
	result.append( (uint64_t)task.node() );

	const Context *context = task.context();
	std::vector<IECore::InternedString> names;
	context->names( names );
	for( std::vector<IECore::InternedString>::const_iterator it = names.begin(); it != names.end(); ++it )
	{
		// Ignore the UI values since they should be irrelevant
		// to execution.
		if( it->string().compare( 0, 3, "ui:" ) == 0 )
		{
			continue;
		}
		// Ignore the frame, since the whole point of batching
		// is to allow multiple frames to be placed in the same
		// batch if the context is otherwise identical.
		//
		// There is one exception to this though - if the task is
		// a no-op, then we don't want to coalesce, because then
		// every single frame of the no-op would be placed in the
		// same batch, and all downstream frames would then be forced
		// to depend unnecessarily on all upstream frames.
		if( *it == g_frame && task.hash() != MurmurHash() )
		{
			continue;
		}
		result.append( *it );
		context->get<const IECore::Data>( *it )->hash( result );
	}
	return result;
}
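// To see why ignoring the frame makes coalescing work, here is a
// small self-contained sketch (plain C++ with hypothetical stand-in
// types, not the real TaskNode/Context classes) that groups per-frame
// tasks into batches keyed on everything except the frame :
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>
#include <vector>

struct StandInTask
{
	std::uint64_t node;        // stand-in for task.node()
	std::uint64_t contextHash; // stand-in for the frame-less context hash
	float frame;
};

int main()
{
	// Three frames of the same node/context, plus one task with a
	// different context - only the first three should coalesce.
	std::vector<StandInTask> tasks = {
		{ 1, 0xabc, 1.0f }, { 1, 0xabc, 2.0f }, { 1, 0xabc, 3.0f },
		{ 1, 0xdef, 1.0f },
	};

	// Key on (node, context hash) as batchHash() does, ignoring the
	// frame, so per-frame tasks land in the same batch.
	std::map<std::pair<std::uint64_t, std::uint64_t>, std::vector<float>> batches;
	for( const StandInTask &task : tasks )
	{
		batches[{ task.node, task.contextHash }].push_back( task.frame );
	}

	for( const auto &batch : batches )
	{
		std::cout << "batch with " << batch.second.size() << " frame(s)\n";
	}
	// Prints "batch with 3 frame(s)" then "batch with 1 frame(s)".
}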
TaskBatchPtr acquireBatch( const TaskNode::Task &task )
{
	// See if we've previously visited this task, and therefore
	// have placed it in a batch already, which we can return
	// unchanged. The `taskToBatchMapHash` is used as the unique
	// identity of a task.
	MurmurHash taskToBatchMapHash = task.hash();
	// Prevent identical tasks from different nodes from being
	// coalesced.
	taskToBatchMapHash.append( (uint64_t)task.node() );
	if( task.hash() == IECore::MurmurHash() )
	{
		// Prevent no-ops from coalescing into a single batch, as this
		// would break parallelism - see `DispatcherTest.testNoOpDoesntBreakFrameParallelism()`
		taskToBatchMapHash.append( contextHash( task.context() ) );
	}
	const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
	if( it != m_tasksToBatches.end() )
	{
		return it->second;
	}

	// We haven't seen this task before, so we need to find
	// an appropriate batch to put it in. This may be one of
	// our current batches, or we may need to make a new one
	// entirely if the current batch is full.

	const bool requiresSequenceExecution = task.plug()->requiresSequenceExecution();

	TaskBatchPtr batch = nullptr;
	const MurmurHash batchMapHash = batchHash( task );
	BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
	if( bIt != m_currentBatches.end() )
	{
		TaskBatchPtr candidateBatch = bIt->second;
		// Unfortunately we have to track batch size separately from `batch->frames().size()`,
		// because no-ops don't update `frames()`, but _do_ count towards batch size.
		IntDataPtr batchSizeData = candidateBatch->blindData()->member<IntData>( g_sizeBlindDataName );
		const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
		const int batchSizeLimit = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
		if( requiresSequenceExecution || ( batchSizeData->readable() < batchSizeLimit ) )
		{
			batch = candidateBatch;
			batchSizeData->writable()++;
		}
	}

	if( !batch )
	{
		batch = new TaskBatch( task.plug(), task.context() );
		batch->blindData()->writable()[g_sizeBlindDataName] = new IntData( 1 );
		m_currentBatches[batchMapHash] = batch;
	}

	// Now we have an appropriate batch, update it to include
	// the frame for our task, and any other relevant information.

	if( task.hash() != MurmurHash() )
	{
		float frame = task.context()->getFrame();
		std::vector<float> &frames = batch->frames();
		if( requiresSequenceExecution )
		{
			frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
		}
		else
		{
			frames.push_back( frame );
		}
	}

	const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
	if( immediatePlug && immediatePlug->getValue() )
	{
		/// \todo Should we be scoping a context for this, to allow the plug to
		/// have expressions on it? If so, should we be doing the same before
		/// calling requiresSequenceExecution()? Or should we instead require that
		/// they always be constant?
		batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
	}

	// Remember which batch we stored this task in, for
	// the next time someone asks for it.
	m_tasksToBatches[taskToBatchMapHash] = batch;

	return batch;
}
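// Because no-ops count towards a batch's size without adding to its
// frames(), frames().size() alone under-counts. A tiny stand-alone
// illustration of that bookkeeping (hypothetical stand-in for the
// real TaskBatch and its g_sizeBlindDataName blind data) :
#include <iostream>
#include <vector>

struct StandInBatch
{
	std::vector<float> frames;
	int size = 0; // stand-in for the size blind data
};

int main()
{
	StandInBatch batch;

	// Simulate one real task and one no-op joining the batch. Both
	// count towards the size limit, but only the real task records
	// a frame.
	struct { bool isNoOp; float frame; } tasks[] = { { false, 1.0f }, { true, 2.0f } };
	for( const auto &task : tasks )
	{
		++batch.size;
		if( !task.isNoOp )
		{
			batch.frames.push_back( task.frame );
		}
	}

	// frames().size() reports 1, but the batch really holds 2 tasks,
	// which is why the size limit check uses the separate counter.
	std::cout << "frames : " << batch.frames.size() << ", size : " << batch.size << "\n";
}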
TaskBatchPtr acquireBatch( const TaskNode::Task &task )
{
	// See if we've previously visited this task, and therefore
	// have placed it in a batch already, which we can return
	// unchanged.
	MurmurHash taskToBatchMapHash = task.hash();
	taskToBatchMapHash.append( (uint64_t)task.node() );
	if( task.hash() == MurmurHash() )
	{
		// Make sure we don't coalesce all no-ops into a single
		// batch. See comments in batchHash().
		taskToBatchMapHash.append( task.context()->getFrame() );
	}
	const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
	if( it != m_tasksToBatches.end() )
	{
		return it->second;
	}

	// We haven't seen this task before, so we need to find
	// an appropriate batch to put it in. This may be one of
	// our current batches, or we may need to make a new one
	// entirely.

	TaskBatchPtr batch = nullptr;
	const MurmurHash batchMapHash = batchHash( task );
	BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
	if( bIt != m_currentBatches.end() )
	{
		TaskBatchPtr candidateBatch = bIt->second;
		const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
		const size_t batchSize = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
		if( task.plug()->requiresSequenceExecution() || ( candidateBatch->frames().size() < batchSize ) )
		{
			batch = candidateBatch;
		}
	}

	if( !batch )
	{
		batch = new TaskBatch( task.plug(), task.context() );
		m_currentBatches[batchMapHash] = batch;
	}

	// Now we have an appropriate batch, update it to include
	// the frame for our task, and any other relevant information.

	if( task.hash() != MurmurHash() )
	{
		float frame = task.context()->getFrame();
		std::vector<float> &frames = batch->frames();
		if( std::find( frames.begin(), frames.end(), frame ) == frames.end() )
		{
			if( task.plug()->requiresSequenceExecution() )
			{
				frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
			}
			else
			{
				frames.push_back( frame );
			}
		}
	}

	const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
	if( immediatePlug && immediatePlug->getValue() )
	{
		/// \todo Should we be scoping a context for this, to allow the plug to
		/// have expressions on it? If so, should we be doing the same before
		/// calling requiresSequenceExecution()? Or should we instead require that
		/// they always be constant?
		batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
	}

	// Remember which batch we stored this task in, for
	// the next time someone asks for it.
	m_tasksToBatches[taskToBatchMapHash] = batch;

	return batch;
}
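// The frame bookkeeping above relies on std::lower_bound to keep a
// sequence batch's frame list sorted even when frames arrive out of
// order. A tiny self-contained illustration :
#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
	std::vector<float> frames;
	// Frames can be visited in any order during dispatch; inserting
	// at std::lower_bound keeps the list sorted, which is what a node
	// requiring sequence execution expects.
	for( float frame : { 3.0f, 1.0f, 2.0f } )
	{
		frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
	}
	for( float frame : frames )
	{
		std::cout << frame << " "; // prints "1 2 3 "
	}
	std::cout << "\n";
}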