Example #1
IECore::ImagePrimitivePtr IECoreRI::SXRendererImplementation::shadePlaneToImage( const V2i &resolution ) const
{
	IECore::CompoundDataPtr result = shadePlane( resolution );
	
	Box2i window = Box2i( V2i( 0, 0 ), V2i( resolution[0] - 1, resolution[1] - 1 ) );
	
	IECore::ImagePrimitivePtr img = new IECore::ImagePrimitive( window, window );
	IECore::FloatVectorDataPtr rData = img->createChannel<float>( "R" );
	IECore::FloatVectorDataPtr gData = img->createChannel<float>( "G" );
	IECore::FloatVectorDataPtr bData = img->createChannel<float>( "B" );
	IECore::FloatVectorDataPtr aData = img->createChannel<float>( "A" );

	std::vector<float> &r = rData->writable();
	std::vector<float> &g = gData->writable();
	std::vector<float> &b = bData->writable();
	std::vector<float> &a = aData->writable();

	unsigned numPoints = resolution[0] * resolution[1];

	r.resize( numPoints );
	g.resize( numPoints );
	b.resize( numPoints );
	a.resize( numPoints );
	
	IECore::Color3fVectorDataPtr cData = result->member<Color3fVectorData>( "Ci", false );
	IECore::Color3fVectorDataPtr oData = result->member<Color3fVectorData>( "Oi", false );
	if( !cData || !oData )
	{
		throw( Exception( "The renderer didn't return Ci/Oi when shading the points." ) );
	}
	
	const std::vector<Color3f> &c = cData->readable();
	const std::vector<Color3f> &o = oData->readable();

	if( c.size() != numPoints )
	{
		throw( Exception( boost::str( 
			boost::format( "The renderer didn't return the right number of shaded points. (%d but should be %d)." )
		 	% c.size() % numPoints
		) ) );
	}

	for( std::vector<Color3f>::size_type i=0; i<c.size(); i++ )
	{
		r[i] = c[i][0];
		g[i] = c[i][1];
		b[i] = c[i][2];
		a[i] = ( o[i][0] + o[i][1] + o[i][2] ) / 3.0f;
	}
	
	return img;
}
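A minimal usage sketch for the result of the function above, assuming "image" holds the returned ImagePrimitivePtr and "resolution" is the same V2i that was passed in (both names are placeholders, not from the source). Channels created with createChannel() are stored as primitive variables, and the copy loop preserves the ordering of the shaded point arrays, so pixel (x, y) sits at flat index y * resolution.x + x.

// Hypothetical helper (not from the source) reading back the "R" channel
// written by shadePlaneToImage(); (x, y) is a pixel within the original resolution.
float redAt( const IECore::ImagePrimitivePtr &image, const Imath::V2i &resolution, int x, int y )
{
	IECore::FloatVectorData *red = IECore::runTimeCast<IECore::FloatVectorData>( image->variables["R"].data.get() );
	return red->readable()[ y * resolution.x + x ];
}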
Example #2
IECore::ObjectPtr FromMayaSkinClusterConverter::doConversion( const MObject &object, IECore::ConstCompoundObjectPtr operands ) const
{
	MStatus stat;

	// our data storage objects
	IECore::StringVectorDataPtr influenceNamesData = new IECore::StringVectorData();
	IECore::M44fVectorDataPtr influencePoseData  = new IECore::M44fVectorData();
	IECore::IntVectorDataPtr pointIndexOffsetsData  = new IECore::IntVectorData();
	IECore::IntVectorDataPtr pointInfluenceCountsData = new IECore::IntVectorData();
	IECore::IntVectorDataPtr pointInfluenceIndicesData = new IECore::IntVectorData();
	IECore::FloatVectorDataPtr pointInfluenceWeightsData = new IECore::FloatVectorData();

	// get a skin cluster fn
	MFnSkinCluster skinClusterFn(object);

	MDagPathArray influencePaths;
	skinClusterFn.influenceObjects(influencePaths);

	// get the influence names
	int influencesCount = influencePaths.length();
	influenceNamesData->writable().reserve( influencesCount );

	InfluenceName in = (InfluenceName)m_influenceNameParameter->getNumericValue();
	switch( in )
	{
		case Partial :
		{
			for (int i=0; i < influencesCount; i++)
			{
				influenceNamesData->writable().push_back( influencePaths[i].partialPathName(&stat).asChar() );
			}
			break;
		}
		case Full :
		{
			for (int i=0; i < influencesCount; i++)
			{
				influenceNamesData->writable().push_back( influencePaths[i].fullPathName(&stat).asChar() );
			}
			break;
		}
	}

	// extract bind pose
	MFnDependencyNode skinClusterNodeFn( object );

	MPlug bindPreMatrixArrayPlug = skinClusterNodeFn.findPlug( "bindPreMatrix", true, &stat );

	for (int i=0; i < influencesCount; i++)
	{
		MPlug bindPreMatrixElementPlug = bindPreMatrixArrayPlug.elementByLogicalIndex(
				skinClusterFn.indexForInfluenceObject( influencePaths[i], NULL ), &stat);
		MObject matObj;
		bindPreMatrixElementPlug.getValue( matObj );
		MFnMatrixData matFn( matObj, &stat );
		MMatrix mat = matFn.matrix();
		Imath::M44f cmat = IECore::convert<Imath::M44f>( mat );

		influencePoseData->writable().push_back( cmat );
	}

	// extract the skinning information

	// get the first input geometry to the skin cluster
	// TODO: if needed, extend this to retrieve more than one output geometry
	MObjectArray outputGeoObjs;
	stat = skinClusterFn.getOutputGeometry( outputGeoObjs );

	if (! stat)
	{
		throw IECore::Exception( "FromMayaSkinClusterConverter: skinCluster node does not have any output geometry!" );
	}

	// get the dag path to the first object
	MFnDagNode dagFn( outputGeoObjs[0] );
	MDagPath geoPath;
	dagFn.getPath( geoPath );

	// generate a geo iterator for the components
	MItGeometry geoIt( outputGeoObjs[0] );
	int currentOffset = 0;

	// loop through all the points of the geometry to extract their bind information
	for ( ; !geoIt.isDone(); geoIt.next() )
	{
		MObject pointObj = geoIt.currentItem( &stat );
		MDoubleArray weights;
		unsigned int weightsCount;

		skinClusterFn.getWeights( geoPath, pointObj, weights, weightsCount );
		int pointInfluencesCount = 0;

		for ( int influenceId = 0; influenceId < int( weightsCount ); influenceId++ )
		{
			// ignore zero weights; we store only the non-zero entries to produce a compressed representation of the weights
			/// \todo: use a parameter to specify a threshold value rather than 0.0
			if ( weights[influenceId] != 0.0 )
			{
				pointInfluencesCount++;
				pointInfluenceWeightsData->writable().push_back( float( weights[influenceId] ) );
				pointInfluenceIndicesData->writable().push_back( influenceId );

			}
		}

		pointIndexOffsetsData->writable().push_back( currentOffset );
		pointInfluenceCountsData->writable().push_back( pointInfluencesCount );
		currentOffset += pointInfluencesCount;
	}

	// put all our results in a smooth skinning data object
	return new IECore::SmoothSkinningData( influenceNamesData, influencePoseData, pointIndexOffsetsData,
										pointInfluenceCountsData, pointInfluenceIndicesData, pointInfluenceWeightsData  );
}
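A minimal read-back sketch for the compressed layout built above, assuming "ssd" is the returned object cast to a SmoothSkinningDataPtr and that SmoothSkinningData exposes accessors named after the six arrays passed to its constructor (an assumption; check the headers). For point i, its non-zero weights occupy the flat index/weight arrays starting at pointIndexOffsets[i] and running for pointInfluenceCounts[i] entries.

// Hypothetical traversal of the per-point weights; accessor names are assumed
// to mirror the constructor arguments used above.
const std::vector<int> &offsets = ssd->pointIndexOffsets()->readable();
const std::vector<int> &counts = ssd->pointInfluenceCounts()->readable();
const std::vector<int> &indices = ssd->pointInfluenceIndices()->readable();
const std::vector<float> &weights = ssd->pointInfluenceWeights()->readable();

for( size_t p = 0; p < offsets.size(); ++p )
{
	for( int j = 0; j < counts[p]; ++j )
	{
		int influence = indices[ offsets[p] + j ];   // index into influenceNames / influencePose
		float weight = weights[ offsets[p] + j ];    // weight of that influence on point p
	}
}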
Example #3
void LensDistortOp::begin( const CompoundObject * operands )
{
	// Get the lens model parameters.
	IECore::CompoundObjectPtr lensModelParams( runTimeCast<CompoundObject>( lensParameter()->getValue() ) );
	
	// Load the lens object.
	m_lensModel = LensModel::create( lensModelParams );
	m_lensModel->validate();
	
	// Get the distortion mode.
	m_mode = m_modeParameter->getNumericValue();
	
	// Get our image information.
	assert( runTimeCast< ImagePrimitive >(inputParameter()->getValue()) );
	ImagePrimitive *inputImage = static_cast<ImagePrimitive *>( inputParameter()->getValue() );
	
	Imath::Box2i dataWindow( inputImage->getDataWindow() );
	Imath::Box2i displayWindow( inputImage->getDisplayWindow() );
	double displayWH[2] = { static_cast<double>( displayWindow.size().x + 1 ), static_cast<double>( displayWindow.size().y + 1 ) };
	double displayOrigin[2] = { static_cast<double>( displayWindow.min[0] ), static_cast<double>( displayWindow.min[1] ) };
	
	// Get the distorted window.
	// As the LensModel::bounds() method requires that the display window has its origin at (0,0) in the bottom left of the image, while the IECore::ImagePrimitive has its origin in the top left,
	// convert to the correct image space and offset it by the display window's origin if it is non-zero.
	Imath::Box2i distortionSpaceBox(
		Imath::V2i( dataWindow.min[0] - displayWindow.min[0], displayWindow.size().y - ( dataWindow.max[1] - displayWindow.min[1] ) ),
		Imath::V2i( dataWindow.max[0] - displayWindow.min[0], displayWindow.size().y - ( dataWindow.min[1] - displayWindow.min[1] ) )
	);

	// Calculate the distorted data window.
	Imath::Box2i distortedWindow = m_lensModel->bounds( m_mode, distortionSpaceBox, ( displayWindow.size().x + 1 ), ( displayWindow.size().y + 1 ) );

	// Convert the distorted data window back to the same image space as IECore::ImagePrimitive.
	m_distortedDataWindow = Imath::Box2i(
		Imath::V2i( distortedWindow.min[0] + displayWindow.min[0], ( displayWindow.size().y - distortedWindow.max[1] ) + displayWindow.min[1] ),
		Imath::V2i( distortedWindow.max[0] + displayWindow.min[0], ( displayWindow.size().y - distortedWindow.min[1] ) + displayWindow.min[1] )
	);
	
	// Compute a 2D cache of the warped points for use in the warp() method.
	IECore::FloatVectorDataPtr cachePtr = new IECore::FloatVectorData;
	std::vector<float> &cache( cachePtr->writable() );
	cache.resize( ( m_distortedDataWindow.size().x + 1 ) * ( m_distortedDataWindow.size().y + 1 ) * 2 ); // We interleave the X and Y vector components within the cache.

	for( int y = distortedWindow.max.y, pixelIndex = 0; y >= distortedWindow.min.y; --y )
	{
		for( int x = distortedWindow.min.x; x <= distortedWindow.max.x; ++x )
		{
			// Convert to UV space with the origin in the bottom left.	
			Imath::V2f p( Imath::V2f( x, y ) );
			Imath::V2d uv( p[0] / displayWH[0], p[1] / displayWH[1] );

			// Get the distorted uv coordinate.
			Imath::V2d duv( m_mode == kDistort ? m_lensModel->distort( uv ) : m_lensModel->undistort( uv ) );

			// Transform it to image space.
			p = Imath::V2f(
				duv[0] * displayWH[0] + displayOrigin[0], ( ( displayWH[1] - 1. ) - ( duv[1] * displayWH[1] ) ) + displayOrigin[1] 
			);

			cache[pixelIndex++] = p[0];
			cache[pixelIndex++] = p[1];
		}
	}

	m_cachePtr = cachePtr;
}
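The space conversion in the inner loop above is worth isolating: the lens model works in normalised UV coordinates with the origin at the bottom left, while IECore::ImagePrimitive pixel coordinates have their origin at the top left and may be offset by the display window's minimum. A small helper equivalent to the transform on the lines that fill the cache might look like this (purely illustrative, not part of LensDistortOp):

// Map a distorted UV coordinate back to ImagePrimitive pixel space, mirroring
// the transform used when filling the cache above. displayWH and displayOrigin
// have the same meaning as the locals in begin().
inline Imath::V2f uvToImageSpace( const Imath::V2d &uv, const double displayWH[2], const double displayOrigin[2] )
{
	return Imath::V2f(
		uv[0] * displayWH[0] + displayOrigin[0],                                 // scale U back to pixels and re-apply the x offset
		( ( displayWH[1] - 1. ) - ( uv[1] * displayWH[1] ) ) + displayOrigin[1]  // flip Y back to a top-left origin and re-apply the y offset
	);
}

The cache itself stores these results with the X and Y components interleaved, two floats per output pixel, which is the layout the warp() method is expected to consume.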