Пример #1
0
		/// Adds `preTask` to `batch`'s list of preTasks, unless it is
		/// already present. When `forPostTask` is true, the preTask is
		/// inserted ahead of the batch's standard preTasks rather than
		/// appended.
		void addPreTask( TaskBatch *batch, TaskBatchPtr preTask, bool forPostTask = false )
		{
			TaskBatches &preTasks = batch->preTasks();
			if( std::find( preTasks.begin(), preTasks.end(), preTask ) != preTasks.end() )
			{
				// Already registered; nothing to do.
				return;
			}

			if( !forPostTask )
			{
				preTasks.push_back( preTask );
				return;
			}

			// We're adding the preTask because the batch is a postTask
			// of it, but the batch may already have its own standard
			// preTasks. There's no strict requirement that we separate
			// out these two types of preTasks (indeed a good dispatcher might
			// execute them in parallel), but for simple dispatchers
			// it's more intuitive to users if we separate them so the
			// standard preTasks come second.
			//
			// See `DispatcherTest.testPostTaskWithPreTasks()` for an
			// example.
			//
			// The insertion point is tracked in the batch's blind data so
			// that successive "post task" insertions keep their order.
			IntDataPtr postTaskIndex = batch->blindData()->member<IntData>(
				g_postTaskIndexBlindDataName, /* throwExceptions = */ false, /* createIfMissing = */ true
			);
			preTasks.insert( preTasks.begin() + postTaskIndex->readable(), preTask );
			postTaskIndex->writable()++;
		}
Пример #2
0
bool ParameterisedHolder<B>::setParameterisedValuesWalk( bool lazy, IECore::ParameterPtr parameter, MStatus &status )
{
	// Recursively transfers values from the Maya plugs associated with
	// `parameter` (and all of its descendants) into the parameters
	// themselves. When `lazy` is true, only parameters recorded in
	// `m_dirtyParameters` are transferred. Failures are reported via
	// `msg()` and accumulated into `status`, but the walk continues so
	// that as many values as possible are set. Returns true if this
	// parameter or any descendant had its value set.

	MFnDependencyNode fnDN( B::thisMObject() );

	// Traverse child parameters first if we have them.

	bool childParametersWereSet = false;
	if( parameter->isInstanceOf( CompoundParameter::staticTypeId() ) )
	{
		CompoundParameterPtr compoundParameter = boost::static_pointer_cast<CompoundParameter>( parameter );
		const CompoundParameter::ParameterVector &childParameters = compoundParameter->orderedParameters();
		for( CompoundParameter::ParameterVector::const_iterator cIt=childParameters.begin(); cIt!=childParameters.end(); ++cIt )
		{
			// Call unconditionally (no short-circuiting) so that every
			// child is visited even after one has already been set.
			bool b = setParameterisedValuesWalk( lazy, *cIt, status );
			childParametersWereSet = childParametersWereSet || b;
		}
	}

	// Then set this parameter if necessary. The nameless root parameter
	// has no corresponding plug, so it is skipped.

	bool thisParameterWasSet = false;
	if( parameter->name()!="" && (!lazy || m_dirtyParameters.find( parameter )!=m_dirtyParameters.end()) )
	{
		ParameterToAttributeNameMap::const_iterator nIt = m_parametersToAttributeNames.find( parameter );
		if( nIt==m_parametersToAttributeNames.end() )
		{
			msg( Msg::Error, "ParameterisedHolder::setParameterisedValues", boost::format( "Unable to find plug name for parameter %s" ) % parameter->name() );
			status = MS::kFailure;
		}
		else
		{

			MPlug p = fnDN.findPlug( nIt->second );
			if( p.isNull() )
			{
				msg( Msg::Error, "ParameterisedHolder::setParameterisedValues", boost::format( "Unable to find plug for parameter %s" ) %  parameter->name() );
				status = MS::kFailure;
			}
			else
			{
				try
				{
					MStatus s = ParameterHandler::setValue( p, parameter );
					if( !s )
					{
						msg( Msg::Error, "ParameterisedHolder::setParameterisedValues", boost::format( "Failed to set parameter value from %s" ) % p.name().asChar() );
						status = s;
					}
					else
					{
						// Successful transfer - the parameter is clean again.
						m_dirtyParameters.erase( parameter );
						thisParameterWasSet = true;
					}
				}
				// Catch by const reference to avoid slicing and a needless copy.
				catch( const std::exception &e )
				{
					msg( Msg::Error, "ParameterisedHolder::setParameterisedValues", boost::format( "Caught exception while setting parameter value from %s : %s" ) % p.name().asChar() % e.what());
					status = MS::kFailure;
				}
				catch( ... )
				{
					msg( Msg::Error, "ParameterisedHolder::setParameterisedValues", boost::format( "Caught unknown exception while setting parameter value from %s" ) % p.name().asChar() );
					status = MS::kFailure;
				}
			}
		}
	}

	// Increment the updateCount in the parameter's userData if anything
	// changed, creating it (initialised to 0) on the first update.

	if( thisParameterWasSet || childParametersWereSet )
	{
		CompoundObjectPtr userData = parameter->userData();
		IntDataPtr updateCount = userData->member<IntData>( "updateCount" );
		if( !updateCount )
		{
			updateCount = new IntData( 0 );
			userData->members()["updateCount"] = updateCount;
		}
		else
		{
			updateCount->writable()++;
		}
	}

	return childParametersWereSet || thisParameterWasSet;
}
Пример #3
0
		/// Returns the batch responsible for executing `task`, either an
		/// existing batch the task was already placed in, a current batch
		/// with spare capacity, or a newly created one. Also records the
		/// task's frame on the batch and propagates the "immediate" flag
		/// from the node's dispatcher plug into the batch's blind data.
		TaskBatchPtr acquireBatch( const TaskNode::Task &task )
		{
			// See if we've previously visited this task, and therefore
			// have placed it in a batch already, which we can return
			// unchanged. The `taskToBatchMapHash` is used as the unique
			// identity of a task.
			MurmurHash taskToBatchMapHash = task.hash();
			// Prevent identical tasks from different nodes from being
			// coalesced.
			taskToBatchMapHash.append( (uint64_t)task.node() );
			if( task.hash() == IECore::MurmurHash() )
			{
				// Prevent no-ops from coalescing into a single batch, as this
				// would break parallelism - see `DispatcherTest.testNoOpDoesntBreakFrameParallelism()`
				taskToBatchMapHash.append( contextHash( task.context() ) );
			}
			const TaskToBatchMap::const_iterator it = m_tasksToBatches.find( taskToBatchMapHash );
			if( it != m_tasksToBatches.end() )
			{
				return it->second;
			}

			// We haven't seen this task before, so we need to find
			// an appropriate batch to put it in. This may be one of
			// our current batches, or we may need to make a new one
			// entirely if the current batch is full.

			const bool requiresSequenceExecution = task.plug()->requiresSequenceExecution();

			TaskBatchPtr batch = nullptr;
			const MurmurHash batchMapHash = batchHash( task );
			BatchMap::iterator bIt = m_currentBatches.find( batchMapHash );
			if( bIt != m_currentBatches.end() )
			{
				TaskBatchPtr candidateBatch = bIt->second;
				// Unfortunately we have to track batch size separately from `batch->frames().size()`,
				// because no-ops don't update `frames()`, but _do_ count towards batch size.
				IntDataPtr batchSizeData = candidateBatch->blindData()->member<IntData>( g_sizeBlindDataName );
				const IntPlug *batchSizePlug = task.node()->dispatcherPlug()->getChild<const IntPlug>( g_batchSize );
				// When no batch size plug exists, fall back to a batch size of 1.
				const int batchSizeLimit = ( batchSizePlug ) ? batchSizePlug->getValue() : 1;
				// Sequence-executed tasks ignore the size limit, because all
				// their frames must run together in a single batch.
				if( requiresSequenceExecution || ( batchSizeData->readable() < batchSizeLimit ) )
				{
					batch = candidateBatch;
					batchSizeData->writable()++;
				}
			}

			if( !batch )
			{
				// No suitable existing batch - start a new one of size 1.
				batch = new TaskBatch( task.plug(), task.context() );
				batch->blindData()->writable()[g_sizeBlindDataName] = new IntData( 1 );
				m_currentBatches[batchMapHash] = batch;
			}

			// Now we have an appropriate batch, update it to include
			// the frame for our task, and any other relevant information.

			// No-ops (default hash) contribute no frames.
			if( task.hash() != MurmurHash() )
			{
				float frame = task.context()->getFrame();
				std::vector<float> &frames = batch->frames();
				if( requiresSequenceExecution )
				{
					// Keep the frame list sorted for sequence execution.
					frames.insert( std::lower_bound( frames.begin(), frames.end(), frame ), frame );
				}
				else
				{
					frames.push_back( frame );
				}
			}

			const BoolPlug *immediatePlug = task.node()->dispatcherPlug()->getChild<const BoolPlug>( g_immediatePlugName );
			if( immediatePlug && immediatePlug->getValue() )
			{
				/// \todo Should we be scoping a context for this, to allow the plug to
				/// have expressions on it? If so, should we be doing the same before
				/// calling requiresSequenceExecution()? Or should we instead require that
				/// they always be constant?
				batch->blindData()->writable()[g_immediateBlindDataName] = g_trueBoolData;
			}

			// Remember which batch we stored this task in, for
			// the next time someone asks for it.
			m_tasksToBatches[taskToBatchMapHash] = batch;

			return batch;
		}
Пример #4
0
	/// Exercises the computation cache: pool sharing, computation limits,
	/// cache misses/hits under the ObjectPool memory limit, erasure,
	/// clearing, explicit `set()` with reference/copy storage policies,
	/// and recomputation when the pool has dropped a stored value.
	void test()
	{
		ConstObjectPtr res;
		IntDataPtr v = new IntData(1);

		/// limit the pool to fit only one integer.
		ObjectPoolPtr pool = new ObjectPool( v->Object::memoryUsage() );

		Cache cache( get, hash, 1000, pool );

		// The cache should expose the pool it was constructed with.
		BOOST_CHECK_EQUAL( pool.get(), cache.objectPool() );

		BOOST_CHECK_EQUAL( size_t(1000), cache.getMaxComputations() );
		cache.setMaxComputations( 100 );
		BOOST_CHECK_EQUAL( size_t(100), cache.getMaxComputations() );
		BOOST_CHECK_EQUAL( size_t(0), cache.cachedComputations() );

		/// cache should return NULL on never computed values (hash is unknown)
		res = cache.get( ComputationParams(2), Cache::NullIfMissing );
		BOOST_CHECK( !res );
		// this is weird, but because we are using LRUCache with a dummy and we don't
		/// want to do two queries, it ends up registering that computation with the default hash...
		BOOST_CHECK_EQUAL( size_t(1), cache.cachedComputations() );

		/// computes a value (default value)
		res = cache.get( ComputationParams(2) );
		BOOST_CHECK( res );
		BOOST_CHECK_EQUAL( size_t(1), cache.cachedComputations() );

		/// computes a value (explicit)
		res = cache.get( ComputationParams(3), Cache::ComputeIfMissing );
		BOOST_CHECK( res );
		BOOST_CHECK_EQUAL( size_t(2), cache.cachedComputations() );

		/// cache should not contain the value 2 due to memory limit (object not in ObjectPool)
		res = cache.get( ComputationParams(2), Cache::NullIfMissing );
		BOOST_CHECK( !res );
		
		// but should have the latest computed still
		res = cache.get( ComputationParams(3), Cache::NullIfMissing );
		BOOST_CHECK( res );
		BOOST_CHECK_EQUAL( size_t(2), cache.cachedComputations() );
		
		// erase the latest result
		cache.erase( ComputationParams(3) );
		BOOST_CHECK_EQUAL( size_t(1), cache.cachedComputations() );

		// confirm it's gone...
		res = cache.get( ComputationParams(3), Cache::NullIfMissing );
		BOOST_CHECK( !res );
		/// again... the miss will count as cached unfortunately...
		BOOST_CHECK_EQUAL( size_t(2), cache.cachedComputations() );
		
		/// now increase memory limit to two IntData objects
		pool->setMaxMemoryUsage( v->Object::memoryUsage() * 2 );

		/// computes two new values
		cache.get( ComputationParams(4) );
		cache.get( ComputationParams(5) );
		BOOST_CHECK_EQUAL( size_t(4), cache.cachedComputations() );
		BOOST_CHECK( cache.get( ComputationParams(4), Cache::NullIfMissing ) );
		BOOST_CHECK( cache.get( ComputationParams(5), Cache::NullIfMissing ) );
		
		/// clears all the values
		cache.clear();
		BOOST_CHECK_EQUAL( size_t(0), cache.cachedComputations() );

		// set some values on the cache
		cache.set( ComputationParams(1), v, ObjectPool::StoreReference );
		BOOST_CHECK_EQUAL( size_t(1), cache.cachedComputations() );
		// StoreReference keeps the exact same object...
		BOOST_CHECK_EQUAL( v, cache.get( ComputationParams(1), Cache::NullIfMissing ) );
		cache.set( ComputationParams(1), v, ObjectPool::StoreCopy );
		BOOST_CHECK_EQUAL( size_t(1), cache.cachedComputations() );
		// ...and because the object was already in the pool, StoreCopy
		// still returns the same instance here.
		BOOST_CHECK_EQUAL( v, cache.get( ComputationParams(1), Cache::NullIfMissing ) );
		cache.clear();
		v = new IntData(41);
		cache.set( ComputationParams(1), v, ObjectPool::StoreCopy );
		BOOST_CHECK_EQUAL( size_t(1), cache.cachedComputations() );
		// StoreCopy of a new object stores an equal but distinct copy...
		BOOST_CHECK( *v == *cache.get( ComputationParams(1), Cache::NullIfMissing ) );
		v->writable() = 42;
		// ...so mutating the original does not affect the cached copy.
		BOOST_CHECK( *v != *cache.get( ComputationParams(1), Cache::NullIfMissing ) );

		// test when the computation function does not match the already registered computation hash....
		IntDataPtr weirdValue = new IntData(666);
		cache.clear();
		cache.set( ComputationParams(1), weirdValue, ObjectPool::StoreReference );
		ConstObjectPtr v0 = cache.get( ComputationParams(1) );
		BOOST_CHECK( *weirdValue == *cache.get( ComputationParams(1), Cache::NullIfMissing ) );
		// Once the pool is cleared the stored object is lost, forcing a
		// real recomputation via the `get` function...
		pool->clear();
		int c1 = ComputationCacheTest::getCount;
		ConstObjectPtr v1 = cache.get( ComputationParams(1) );
		int c2 = ComputationCacheTest::getCount;
		BOOST_CHECK_EQUAL( 1, static_cast< const IntData * >(v1.get())->readable() );
		BOOST_CHECK_EQUAL( c1 + 1, c2 );
		ConstObjectPtr v2 = cache.get( ComputationParams(1) );
		int c3 = ComputationCacheTest::getCount;
		BOOST_CHECK_EQUAL( 1, static_cast< const IntData * >(v2.get())->readable() );
		/// guarantee that there was no recomputation.
		BOOST_CHECK_EQUAL( c2, c3 );
	}