示例#1
0
// Returns the batch that `task` should execute in, creating a new one if no
// existing batch can accept it. Successful placements are recorded in
// `tasksToBatches` so that revisiting the same task is a single map lookup.
Dispatcher::TaskBatchPtr Dispatcher::acquireBatch( const ExecutableNode::Task &task, BatchMap &currentBatches, TaskToBatchMap &tasksToBatches )
{
	// If we've placed this exact task before, reuse its batch.
	MurmurHash taskHash = task.hash();
	TaskToBatchMap::iterator it = tasksToBatches.find( taskHash );
	if ( it != tasksToBatches.end() )
	{
		return it->second;
	}

	// Look for a compatible batch - tasks share a batch when
	// batchHash() considers them equivalent.
	MurmurHash hash = batchHash( task );
	BatchMap::iterator bIt = currentBatches.find( hash );
	if ( bIt != currentBatches.end() )
	{
		TaskBatchPtr batch = bIt->second;

		std::vector<float> &frames = batch->frames();
		const CompoundPlug *dispatcherPlug = task.node()->dispatcherPlug();
		const IntPlug *batchSizePlug = dispatcherPlug->getChild<const IntPlug>( g_batchSize );
		size_t batchSize = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;

		// Nodes requiring sequence execution must receive all their frames
		// in a single batch, so the size limit doesn't apply to them.
		if ( task.node()->requiresSequenceExecution() || ( frames.size() < batchSize ) )
		{
			// A default-constructed hash marks a no-op task; no-ops
			// contribute no frame and are not recorded in the map.
			// Note : use the cached `taskHash` rather than recomputing
			// `task.hash()`, which may be expensive.
			if ( taskHash != MurmurHash() )
			{
				float frame = task.context()->getFrame();
				if ( std::find( frames.begin(), frames.end(), frame ) == frames.end() )
				{
					if ( task.node()->requiresSequenceExecution() )
					{
						// Keep frames sorted for sequence execution.
						frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
					}
					else
					{
						frames.push_back( frame );
					}
				}
				tasksToBatches[taskHash] = batch;
			}

			return batch;
		}
	}

	// No suitable batch exists - start a fresh one.
	TaskBatchPtr batch = new TaskBatch( task );
	currentBatches[hash] = batch;
	if ( taskHash != MurmurHash() )
	{
		tasksToBatches[taskHash] = batch;
	}

	return batch;
}
示例#2
0
		/// Returns the batch that `task` should execute in, creating a new
		/// one if no current batch can accept it. The chosen batch is
		/// remembered in `m_tasksToBatches` so revisiting the same task is
		/// a single map lookup.
		TaskBatchPtr acquireBatch( const TaskNode::Task &task )
		{
			// See if we've previously visited this task, and therefore
			// have placed it in a batch already, which we can return
			// unchanged. The `taskToBatchMapHash` is used as the unique
			// identity of a task.
			MurmurHash taskToBatchMapHash = task.hash();
			// Prevent identical tasks from different nodes from being
			// coalesced. Named cast rather than a C-style cast - the
			// intent is a pointer-identity mix-in, not a value conversion.
			taskToBatchMapHash.append( reinterpret_cast<uint64_t>( task.node() ) );
			if( task.hash() == IECore::MurmurHash() )
			{
				// Prevent no-ops from coalescing into a single batch, as this
				// would break parallelism - see `DispatcherTest.testNoOpDoesntBreakFrameParallelism()`
				taskToBatchMapHash.append( contextHash( task.context() ) );
			}
			const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
			if( it != m_tasksToBatches.end() )
			{
				return it->second;
			}

			// We haven't seen this task before, so we need to find
			// an appropriate batch to put it in. This may be one of
			// our current batches, or we may need to make a new one
			// entirely if the current batch is full.

			const bool requiresSequenceExecution = task.plug()->requiresSequenceExecution();

			TaskBatchPtr batch = nullptr;
			const MurmurHash batchMapHash = batchHash( task );
			BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
			if( bIt != m_currentBatches.end() )
			{
				TaskBatchPtr candidateBatch = bIt->second;
				// Unfortunately we have to track batch size separately from `batch->frames().size()`,
				// because no-ops don't update `frames()`, but _do_ count towards batch size.
				IntDataPtr batchSizeData = candidateBatch->blindData()->member<IntData>( g_sizeBlindDataName );
				const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
				const int batchSizeLimit = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
				// Sequence-execution tasks must all land in one batch, so
				// the size limit only applies to the non-sequence case.
				if( requiresSequenceExecution || ( batchSizeData->readable() < batchSizeLimit ) )
				{
					batch = candidateBatch;
					batchSizeData->writable()++;
				}
			}

			if( !batch )
			{
				batch = new TaskBatch( task.plug(), task.context() );
				batch->blindData()->writable()[g_sizeBlindDataName] = new IntData( 1 );
				m_currentBatches[batchMapHash] = batch;
			}

			// Now we have an appropriate batch, update it to include
			// the frame for our task, and any other relevant information.

			// A default-constructed hash marks a no-op task, which
			// contributes no frame.
			if( task.hash() != MurmurHash() )
			{
				float frame = task.context()->getFrame();
				std::vector<float> &frames = batch->frames();
				if( requiresSequenceExecution )
				{
					// Keep frames sorted so the sequence executes in order.
					frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
				}
				else
				{
					frames.push_back( frame );
				}
			}

			const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
			if( immediatePlug && immediatePlug->getValue() )
			{
				/// \todo Should we be scoping a context for this, to allow the plug to
				/// have expressions on it? If so, should we be doing the same before
				/// calling requiresSequenceExecution()? Or should we instead require that
				/// they always be constant?
				batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
			}

			// Remember which batch we stored this task in, for
			// the next time someone asks for it.
			m_tasksToBatches[taskToBatchMapHash] = batch;

			return batch;
		}
示例#3
0
    /// Returns the batch that `task` should execute in, creating a new one
    /// if no current batch can accept it. The chosen batch is remembered in
    /// `m_tasksToBatches` so revisiting the same task is a single map lookup.
    TaskBatchPtr acquireBatch( const TaskNode::Task &task )
    {
        // See if we've previously visited this task, and therefore
        // have placed it in a batch already, which we can return
        // unchanged.
        MurmurHash taskToBatchMapHash = task.hash();
        // Mix in the node's address so identical tasks from different
        // nodes aren't coalesced. Named cast rather than a C-style cast.
        taskToBatchMapHash.append( reinterpret_cast<uint64_t>( task.node() ) );
        if( task.hash() == MurmurHash() )
        {
            // Make sure we don't coalesce all no-ops into a single
            // batch. See comments in batchHash().
            taskToBatchMapHash.append( task.context()->getFrame() );
        }
        const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
        if( it != m_tasksToBatches.end() )
        {
            return it->second;
        }

        // We haven't seen this task before, so we need to find
        // an appropriate batch to put it in. This may be one of
        // our current batches, or we may need to make a new one
        // entirely.

        TaskBatchPtr batch = nullptr;
        const MurmurHash batchMapHash = batchHash( task );
        BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
        if( bIt != m_currentBatches.end() )
        {
            TaskBatchPtr candidateBatch = bIt->second;
            const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
            const size_t batchSize = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
            // Sequence-execution tasks must all land in one batch, so the
            // size limit only applies to the non-sequence case.
            if( task.plug()->requiresSequenceExecution() || ( candidateBatch->frames().size() < batchSize ) )
            {
                batch = candidateBatch;
            }
        }

        if( !batch )
        {
            batch = new TaskBatch( task.plug(), task.context() );
            m_currentBatches[batchMapHash] = batch;
        }

        // Now we have an appropriate batch, update it to include
        // the frame for our task, and any other relevant information.

        // A default-constructed hash marks a no-op task, which
        // contributes no frame.
        if( task.hash() != MurmurHash() )
        {
            float frame = task.context()->getFrame();
            std::vector<float> &frames = batch->frames();
            if( std::find( frames.begin(), frames.end(), frame ) == frames.end() )
            {
                if( task.plug()->requiresSequenceExecution() )
                {
                    // Keep frames sorted so the sequence executes in order.
                    frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
                }
                else
                {
                    frames.push_back( frame );
                }
            }
        }

        const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
        if( immediatePlug && immediatePlug->getValue() )
        {
            /// \todo Should we be scoping a context for this, to allow the plug to
            /// have expressions on it? If so, should we be doing the same before
            /// calling requiresSequenceExecution()? Or should we instead require that
            /// they always be constant?
            batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
        }

        // Remember which batch we stored this task in, for
        // the next time someone asks for it.
        m_tasksToBatches[taskToBatchMapHash] = batch;

        return batch;
    }