コード例 #1
0
IECoreScene::PrimitiveVariable FromMayaMeshConverter::uvs( const MString &uvSet, const std::vector<int> &vertsPerFace ) const
{
	MFnMesh fnMesh( object() );

	// Build the face-varying index list for the UV primitive variable.
	IntVectorDataPtr indexData = new IntVectorData;
	vector<int> &indices = indexData->writable();
	indices.reserve( fnMesh.numFaceVertices() );

	// Maya provides a per-polygon UV count plus one flat list of UV ids.
	MIntArray uvCounts, uvIds;
	fnMesh.getAssignedUVs( uvCounts, uvIds, &uvSet );

	int nextUvId = 0;
	const size_t numFaces = vertsPerFace.size();
	for( size_t face = 0; face < numFaces; ++face )
	{
		const int faceVertCount = vertsPerFace[face];
		if( uvCounts[face] == 0 )
		{
			// This polygon has no UVs assigned - pad with index 0 so the
			// index list keeps the full face-varying length.
			indices.insert( indices.end(), faceVertCount, 0 );
		}
		else
		{
			for( int v = 0; v < faceVertCount; ++v )
			{
				indices.push_back( uvIds[ nextUvId++ ] );
			}
		}
	}

	V2fVectorDataPtr uvData = new V2fVectorData;
	uvData->setInterpretation( GeometricData::UV );
	std::vector<Imath::V2f> &uvs = uvData->writable();

	MFloatArray uArray, vArray;
	fnMesh.getUVs( uArray, vArray, &uvSet );

	const unsigned numUvs = uArray.length();
	if( numUvs == 0 )
	{
		// No UV values at all - provide a zero UV for every index.
		uvs.resize( indices.size(), Imath::V2f( .0f ) );
	}
	else
	{
		uvs.reserve( numUvs );
		for( unsigned i = 0; i < numUvs; ++i )
		{
			uvs.emplace_back( uArray[i], vArray[i] );
		}
	}

	return PrimitiveVariable( PrimitiveVariable::FaceVarying, uvData, indexData );
}
コード例 #2
0
ファイル: MapProjection.cpp プロジェクト: ImageEngine/gaffer
IECore::ConstObjectPtr MapProjection::computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::ConstObjectPtr inputObject ) const
{
	// Only primitives carrying a "P" variable can be projected; anything
	// else passes through untouched.
	const Primitive *primitive = runTimeCast<const Primitive>( inputObject.get() );
	if( !primitive )
	{
		return inputObject;
	}

	const V3fVectorData *positionData = primitive->variableData<V3fVectorData>( "P" );
	if( !positionData )
	{
		return inputObject;
	}

	// Nothing to do unless a uv set name has been provided.
	const string uvSetName = uvSetPlug()->getValue();
	if( uvSetName.empty() )
	{
		return inputObject;
	}

	// Locate the projection camera, passing through if there isn't one.
	ScenePath cameraPath;
	ScenePlug::stringToPath( cameraPlug()->getValue(), cameraPath );

	ConstCameraPtr camera = runTimeCast<const Camera>( inPlug()->object( cameraPath ) );
	if( !camera )
	{
		return inputObject;
	}

	const M44f worldToCamera = inPlug()->fullTransform( cameraPath ).inverse();
	const M44f objectToWorld = inPlug()->fullTransform( path );
	const M44f objectToCamera = objectToWorld * worldToCamera;

	const bool perspective = camera->getProjection() == "perspective";

	Box2f screenWindow;
	if( camera->hasResolution() )
	{
		screenWindow = camera->frustum();
	}
	else
	{
		// We don't know what resolution the camera is meant to render with,
		// so take the whole aperture as the screen window.
		screenWindow = camera->frustum( Camera::Distort );
	}

	// Project every point into normalised screen space and store the
	// result as a vertex-interpolated uv primitive variable.
	PrimitivePtr outputPrimitive = primitive->copy();

	V2fVectorDataPtr uvData = new V2fVectorData();
	uvData->setInterpretation( GeometricData::UV );

	outputPrimitive->variables[uvSetName] = PrimitiveVariable( PrimitiveVariable::Vertex, uvData );

	const vector<V3f> &points = positionData->readable();
	vector<V2f> &uvs = uvData->writable();
	uvs.reserve( points.size() );

	for( const V3f &point : points )
	{
		const V3f cameraSpace = point * objectToCamera;
		V2f screen( cameraSpace.x, cameraSpace.y );
		if( perspective )
		{
			// Perspective divide; the camera looks down -z.
			screen /= -cameraSpace.z;
		}
		uvs.emplace_back(
			lerpfactor( screen.x, screenWindow.min.x, screenWindow.max.x ),
			lerpfactor( screen.y, screenWindow.min.y, screenWindow.max.y )
		);
	}

	return outputPrimitive;
}
コード例 #3
0
ファイル: MeshFromNuke.cpp プロジェクト: ImageEngine/cortex
IECore::ObjectPtr MeshFromNuke::doConversion( IECore::ConstCompoundObjectPtr operands ) const
{
	// Converts the Nuke geometry held in m_geo to an IECore MeshPrimitive,
	// transferring topology, points, uvs and normals.

	// topology
	IntVectorDataPtr verticesPerFaceData = new IntVectorData;
	IntVectorDataPtr vertexIdsData = new IntVectorData;
	std::vector<int> &verticesPerFace = verticesPerFaceData->writable();
	std::vector<int> &vertexIds = vertexIdsData->writable();

	unsigned numPrimitives = m_geo->primitives();
	const DD::Image::Primitive **primitives = m_geo->primitive_array();
	std::vector<unsigned> tmpFaceVertices; // reused per face to avoid reallocation
	for( unsigned primIndex=0; primIndex<numPrimitives; primIndex++ )
	{
		const DD::Image::Primitive *prim = primitives[primIndex];

		unsigned numFaces = prim->faces();
		for( unsigned faceIndex=0; faceIndex<numFaces; faceIndex++ )
		{
			unsigned numFaceVertices = prim->face_vertices( faceIndex );
			verticesPerFace.push_back( numFaceVertices );
			tmpFaceVertices.resize( numFaceVertices );
			prim->get_face_vertices( faceIndex, &(tmpFaceVertices[0]) );
			for( unsigned i=0; i<numFaceVertices; i++ )
			{
				// map the face-local vertex index to the primitive's point index
				vertexIds.push_back( prim->vertex( tmpFaceVertices[i] ) );
			}
		}
	}

	MeshPrimitivePtr result = new MeshPrimitive( verticesPerFaceData, vertexIdsData, "linear" );

	// points
	if( const DD::Image::PointList *pl = m_geo->point_list() )
	{
		V3fVectorDataPtr p = new V3fVectorData();
		p->writable().resize( pl->size() );
		std::transform( pl->begin(), pl->end(), p->writable().begin(), IECore::convert<Imath::V3f, DD::Image::Vector3> );
		result->variables["P"] = PrimitiveVariable( PrimitiveVariable::Vertex, p );
	}

	// uvs - prefer a per-point attribute, falling back to per-vertex
	// (face-varying) uvs if there isn't one
	PrimitiveVariable::Interpolation uvInterpolation = PrimitiveVariable::Vertex;
	const DD::Image::Attribute *uvAttr = m_geo->get_typed_group_attribute( DD::Image::Group_Points, "uv", DD::Image::VECTOR4_ATTRIB );
	if( !uvAttr )
	{
		uvAttr = m_geo->get_typed_group_attribute( DD::Image::Group_Vertices, "uv", DD::Image::VECTOR4_ATTRIB );
		uvInterpolation = PrimitiveVariable::FaceVarying;
	}

	if( uvAttr )
	{
		V2fVectorDataPtr uvData = new V2fVectorData();
		uvData->setInterpretation( GeometricData::UV );
		std::vector<Imath::V2f> &uvs = uvData->writable();
		const unsigned numUVs = uvAttr->size();
		uvs.reserve( numUVs );
		for( unsigned i=0; i<numUVs; i++ )
		{
			// fetch the attribute value once per iteration rather than twice
			const DD::Image::Vector4 &uv = uvAttr->vector4( i );
			// as of Cortex 10, we take a UDIM centric approach
			// to UVs, which clashes with Nuke, so we must flip
			// the v values during conversion.
			uvs.emplace_back( uv.x, 1.0f - uv.y );
		}
		result->variables["uv"] = PrimitiveVariable( uvInterpolation, uvData );
	}

	// normals - prefer a per-point attribute, falling back to per-vertex
	// (face-varying) normals if there isn't one
	PrimitiveVariable::Interpolation nInterpolation = PrimitiveVariable::Vertex;
	const DD::Image::Attribute *nAttr = m_geo->get_typed_group_attribute( DD::Image::Group_Points, "N", DD::Image::NORMAL_ATTRIB );
	if( !nAttr )
	{
		nAttr = m_geo->get_typed_group_attribute( DD::Image::Group_Vertices, "N", DD::Image::NORMAL_ATTRIB );
		nInterpolation = PrimitiveVariable::FaceVarying;
	}

	if( nAttr )
	{
		V3fVectorDataPtr nd = new V3fVectorData();
		std::vector<Imath::V3f> &n = nd->writable();
		n.resize( nAttr->size() );
		for( unsigned i=0; i<n.size(); i++ )
		{
			n[i] = IECore::convert<Imath::V3f, DD::Image::Vector3>( nAttr->normal( i ) );
		}
		result->variables["N"] = PrimitiveVariable( nInterpolation, nd );
	}

	return result;
}
コード例 #4
0
ObjectPtr MedianCutSampler::doOperation( const CompoundObject * operands )
{
	// Median-cut samples an image's luminance channel, returning a
	// CompoundObject containing "centroids" (V2fVectorData) and "areas"
	// (Box2iVectorData) describing the cut regions.
	ImagePrimitivePtr image = static_cast<ImagePrimitive *>( imageParameter()->getValue() )->copy();
	Box2i dataWindow = image->getDataWindow();

	// find the right channel
	const std::string &channelName = m_channelNameParameter->getTypedValue();
	FloatVectorDataPtr luminance = image->getChannel<float>( channelName );
	if( !luminance )
	{
		throw Exception( str( format( "No FloatVectorData channel named \"%s\"." ) % channelName ) );
	}

	// if the projection requires it, weight the luminances so they're less
	// important towards the poles of the sphere
	Projection projection = (Projection)m_projectionParameter->getNumericValue();
	if( projection==LatLong )
	{
		// dataWindow is inclusive, so size() + 1 is the pixel count per column
		float radiansPerPixel = M_PI / (dataWindow.size().y + 1);
		// latitude of the centre of the topmost scanline
		float angle = ( M_PI - radiansPerPixel ) / 2.0f;

		float *p = &(luminance->writable()[0]);

		// scale each scanline by cos(latitude) to compensate for the area
		// distortion of the lat-long projection; p walks the whole buffer
		// scanline by scanline
		for( int y=dataWindow.min.y; y<=dataWindow.max.y; y++ )
		{
			float *pEnd = p + dataWindow.size().x + 1;
			float w = cosf( angle );
			while( p < pEnd )
			{
				*p *= w;
				p++;
			}
			angle -= radiansPerPixel;
		}

	}

	// make a summed area table for speed
	// summedLuminance aliases the channel data stored in `image`, which the
	// SummedAreaOp below converts in place (copy == false); `luminance` keeps
	// an untouched copy of the weighted values for the centroid computation.
	FloatVectorDataPtr summedLuminance = luminance;
	luminance = luminance->copy(); // we need this for the centroid computation

	SummedAreaOpPtr summedAreaOp = new SummedAreaOp();
	summedAreaOp->inputParameter()->setValue( image );
	summedAreaOp->copyParameter()->setTypedValue( false );
	// NOTE(review): the op is hard-coded to process channel "Y"; if
	// channelName is anything else, summedLuminance is never actually
	// summed - confirm callers only ever use the "Y" channel here.
	summedAreaOp->channelNamesParameter()->getTypedValue().clear();
	summedAreaOp->channelNamesParameter()->getTypedValue().push_back( "Y" );
	summedAreaOp->operate();

	// do the median cut thing
	CompoundObjectPtr result = new CompoundObject;
	V2fVectorDataPtr centroids = new V2fVectorData;
	Box2iVectorDataPtr areas = new Box2iVectorData;
	result->members()["centroids"] = centroids;
	result->members()["areas"] = areas;

	// remap the data window so indexing starts at (0,0)
	dataWindow.max -= dataWindow.min;
	dataWindow.min -= dataWindow.min; // let's start indexing from 0 shall we?
	// wrap the flat channel buffers as 2d multi_arrays; fortran storage
	// order matches the scanline layout of the image data
	Array2D array( &(luminance->writable()[0]), extents[dataWindow.size().x+1][dataWindow.size().y+1], fortran_storage_order() );
	Array2D summedArray( &(summedLuminance->writable()[0]), extents[dataWindow.size().x+1][dataWindow.size().y+1], fortran_storage_order() );
	medianCut( array, summedArray, projection, dataWindow, areas->writable(), centroids->writable(), 0, subdivisionDepthParameter()->getNumericValue() );

	return result;
}