Example 1
void Transformable::getGradientTransParams(MMatrix& matGrad) const
{
	assert(matGrad.sizeCol() == 1 && matGrad.sizeRow() == getNumParams());
	
	getGradientParams(matGrad);
	size_t unParamsNum = getNumTransform();
	for(size_t i = 0; i < unParamsNum; i++)  
	{
		double tdVal  = matGrad.get(i);
		double tdValX = getParam(i);
		matGrad.assign(tdVal * m_vTransform[i]->Gradient(tdValX),i);
	}
}
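The loop above is a chain-rule step: the gradient with respect to each raw (untransformed) parameter is the gradient with respect to the transformed parameter scaled by the transform's derivative at that parameter. A minimal, self-contained sketch of the same idea, assuming a hypothetical exponential transform rather than the library's Transform classes:

#include <cstddef>
#include <vector>

// Hypothetical case: parameters are optimised as log-values, so the
// model-space parameter is p = exp(raw). By the chain rule,
// df/draw = (df/dp) * (dp/draw), and dp/draw = exp(raw) = p.
std::vector<double> gradientWrtRawParams(const std::vector<double>& gradP,
                                         const std::vector<double>& params)
{
	std::vector<double> gradRaw(gradP.size());
	for (std::size_t i = 0; i < gradP.size(); ++i)
		gradRaw[i] = gradP[i] * params[i]; // derivative of exp at raw equals p
	return gradRaw;
}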
Example 2
void Kernel::getGradTransParams(MMatrix& g, const MMatrix& X, const MMatrix& cvGrd, bool regularise) const
{
	assert(g.sizeRow()==1);
	assert(g.sizeCol()== mhps.size());
 
	getGradientParams(g, X,cvGrd, regularise);
   
	for(size_t i = 0; i < getNumTransform(); i++)
	{
		double val = g.get(i);
 		double param = getParam(i);
 		g.assign(val * getGradientTransform(param, i),i);
	}
}
Example 3
MMatrix MotionSyn::syc()
{
	int subject =0;

	MMatrix stylemat(1,mFactors[2]->sizeCol());
	stylemat.copyRowRow(0,*mFactors[2],0);
	
	stylemat.scale(0.5);
	stylemat.axpyRowRow(0,*mFactors[2],1,0.5);
	std::cout << stylemat << std::endl;
	assert(mFactors.size() == 3);
	//assert(subject < mFactors[1]->sizeRow() && style < mFactors[2]->sizeRow());

	MMatrix mat(1,mFactors[1]->sizeCol() * mFactors[2]->sizeCol());

	for(size_t t = 0; t < mFactors[1]->sizeCol(); t++)
	{
		for(size_t k = 0; k < mFactors[2]->sizeCol(); k++)
		{
			double val = mFactors[1]->get(subject,t) * stylemat.get(0,k);
			mat.assign(val,t * stylemat.sizeCol() + k);
		}
	}

	MMatrix val = mGPM->predict(mat);
	double step  = val.get(0);
	int length = int(2*3.141592653589/step);

	std::vector<MMatrix> X(3);
	X[0].resize(length,2);
	X[1].resize(length, mFactors[1]->sizeCol());
	X[2].resize(length, mFactors[2]->sizeCol());

	for(int i = 0; i < length; i++)
	{
		X[0].assign(cos(double(i * step)), i, 0);
		X[0].assign(sin(double(i * step)), i, 1);
   		X[1].copyRowRow(i, *mFactors[1], subject);
		X[2].copyRowRow(i, stylemat, 0);
	}

	return meanPrediction(X,CVector3D<double>(0,mInitY.get(subject,1),0));
}
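The nested loop above lays out the Kronecker (outer) product of the subject row of mFactors[1] and the blended style row as a single row vector, which is then fed to mGPM->predict(). A standalone sketch of that layout with plain std::vector, independent of MMatrix (names are illustrative only):

#include <cstddef>
#include <vector>

// Row-vector Kronecker product: out[t * b.size() + k] = a[t] * b[k].
std::vector<double> kronRow(const std::vector<double>& a,
                            const std::vector<double>& b)
{
	std::vector<double> out(a.size() * b.size());
	for (std::size_t t = 0; t < a.size(); ++t)
		for (std::size_t k = 0; k < b.size(); ++k)
			out[t * b.size() + k] = a[t] * b[k];
	return out;
}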
Example 4
/* virtual */
MStatus cgfxVector::compute( const MPlug& plug, MDataBlock& data )
{
	MStatus status;

	MFnData::Type dataType = MFnData::kInvalid;
 
	if( plug == sWorldVector ||
		plug == sWorldVectorX ||
		plug == sWorldVectorY ||
		plug == sWorldVectorZ ||
		plug == sWorldVectorW)
	{
		// We do isDirection first simply because if there is an
		// error, the isDirection error is more legible than the
		// vector or matrix error.
		//
		MDataHandle dhIsDirection = data.inputValue(sIsDirection, &status);
		if (!status)
		{
			status.perror("cgfxVector: isDirection handle");
			return status;
		}

		dataType = dhIsDirection.type();

		MDataHandle dhVector = data.inputValue(sVector, &status);
		if (!status)
		{
			status.perror("cgfxVector: vector handle");
			return status;
		}

		dataType = dhVector.type();

		MMatrix matrix;

		MPlug matrixPlug(thisMObject(), sMatrix);
		if (matrixPlug.isNull())
		{
			OutputDebugString("matrixPlug is NULL!\n");
		}

		// TODO: Fix this kludge.
		//
		// We should not have to do this but for some reason, 
		// using data.inputValue() fails for the sMatrix attribute.
		// Instead, we get a plug to the attribute and then get
		// the value directly.
		//
		MObject oMatrix;

		matrixPlug.getValue(oMatrix);

		MFnMatrixData fndMatrix(oMatrix, &status);
		if (!status)
		{
			status.perror("cgfxVector: matrix data");
		}

		matrix= fndMatrix.matrix(&status);
		if (!status)
		{
			status.perror("cgfxVector: get matrix");
		}

#if 0
		// TODO: This is how we are supposed to do it.  (I think).
		//
		MDataHandle dhMatrix = data.inputValue(sMatrix, &status);
		if (!status)
		{
			status.perror("cgfxVector: matrix handle");
		}

		dataType = dhMatrix.type();

		oMatrix			= dhMatrix.data();
		MFnMatrixData fnMatrix(oMatrix, &status);
		if (!status)
		{
			status.perror("cgfxVector: matrix function set");
		}

		matrix = fnMatrix.matrix();
#endif /* 0 */

		bool	 isDirection	= dhIsDirection.asBool();
		double3& vector			= dhVector.asDouble3();

		double mat[4][4];
		matrix.get(mat);

		double ix, iy, iz, iw;	// Input vector
		float  ox, oy, oz, ow;	// Output vector

		ix = vector[0];
		iy = vector[1];
		iz = vector[2];
		iw = isDirection ? 0.0 : 1.0;

		ox = (float)(mat[0][0] * ix +
					 mat[1][0] * iy +
					 mat[2][0] * iz +
					 mat[3][0] * iw);

		oy = (float)(mat[0][1] * ix +
					 mat[1][1] * iy +
					 mat[2][1] * iz +
					 mat[3][1] * iw);

		oz = (float)(mat[0][2] * ix +
					 mat[1][2] * iy +
					 mat[2][2] * iz +
					 mat[3][2] * iw);

		ow = (float)(mat[0][3] * ix +
					 mat[1][3] * iy +
					 mat[2][3] * iz +
					 mat[3][3] * iw);

		MDataHandle dhWVector = data.outputValue(sWorldVector, &status);
		if (!status)
		{
			status.perror("cgfxVector: worldVector handle");
			return status;
		}

		MDataHandle dhWVectorW = data.outputValue(sWorldVectorW, &status);
		if (!status)
		{
			status.perror("cgfxVector: worldVectorW handle");
			return status;
		}

		dhWVector.set(ox, oy, oz);
		dhWVectorW.set(ow);
		data.setClean(sWorldVector);
		data.setClean(sWorldVectorW);
	}
	else
	{
		return MS::kUnknownParameter;
	}

	return MS::kSuccess;
}
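The multiplication above treats the input as a row vector (out = in * M), with w = 0 for directions so translation is ignored and w = 1 for points. A standalone sketch of the same convention (a hypothetical helper, not part of the plug-in):

// Row-vector convention: out = in * M, with M stored as mat[row][col].
// w = 0 transforms a direction (drops translation); w = 1 transforms a point.
static void transformRowVector(const double mat[4][4],
                               double ix, double iy, double iz, double iw,
                               double out[4])
{
	for (int col = 0; col < 4; ++col)
		out[col] = mat[0][col] * ix + mat[1][col] * iy +
		           mat[2][col] * iz + mat[3][col] * iw;
}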
/** Create a RIB-compatible representation of a Maya polygon mesh.
 */
liqRibMeshData::liqRibMeshData( MObject mesh )
: numFaces( 0 ),
  numPoints ( 0 ),
  numNormals ( 0 ),
  nverts(),
  verts(),
  vertexParam(NULL),
  normalParam(NULL)
{
	CM_TRACE_FUNC("liqRibMeshData::liqRibMeshData("<<MFnDagNode(mesh).fullPathName().asChar()<<")");

	unsigned int i;
	unsigned int j;
  areaLight = false;
  LIQDEBUGPRINTF( "-> creating mesh\n" );
  MFnMesh fnMesh( mesh );
  objDagPath = fnMesh.dagPath();
  MStatus astatus;
  
  name = fnMesh.name();
  areaLight =( liquidGetPlugValue( fnMesh, "areaIntensity", areaIntensity, astatus ) == MS::kSuccess )? true : false ; 

  if ( areaLight ) 
  {
    MDagPath meshDagPath;
    meshDagPath = fnMesh.dagPath();
    MTransformationMatrix worldMatrix = meshDagPath.inclusiveMatrix();
    MMatrix worldMatrixM = worldMatrix.asMatrix();
    worldMatrixM.get( transformationMatrix );
  }

  numPoints = fnMesh.numVertices();
  numNormals = fnMesh.numNormals();

  // UV sets -------------------
  //
  //const unsigned numSTs( fnMesh.numUVs() );
  const unsigned numUVSets( fnMesh.numUVSets() );
  MString currentUVSetName;
  MStringArray extraUVSetNames;
  fnMesh.getCurrentUVSetName( currentUVSetName );
  {
    MStringArray UVSetNames;
    fnMesh.getUVSetNames( UVSetNames );

    for ( unsigned i( 0 ); i<numUVSets; i++ ) 
      if ( UVSetNames[i] != currentUVSetName ) 
        extraUVSetNames.append( UVSetNames[i] );
  }

  numFaces = fnMesh.numPolygons();
  const unsigned numFaceVertices( fnMesh.numFaceVertices() );

	if ( numPoints < 1 )
	{
//		MGlobal::displayInfo( MString( "fnMesh: " ) + fnMesh.name() );
//		cerr << "Liquid : Could not export degenerate mesh '"<< fnMesh.fullPathName( &astatus ).asChar() << "'" << endl << flush;
		return;
	}

  unsigned face = 0;
  unsigned faceVertex = 0;
  unsigned count;
  unsigned vertex;
  unsigned normal;
  float S;
  float T;
  MPoint point;
  liqTokenPointer pointsPointerPair;
  liqTokenPointer normalsPointerPair;
  liqTokenPointer pFaceVertexSPointer;
  liqTokenPointer pFaceVertexTPointer;

  // Allocate memory and tokens
  nverts = shared_array< liqInt >( new liqInt[ numFaces ] );
  verts = shared_array< liqInt >( new liqInt[ numFaceVertices ] );

  pointsPointerPair.set( "P", rPoint, numPoints );
  pointsPointerPair.setDetailType( rVertex );

  if ( numNormals == numPoints ) 
  {
    normalsPointerPair.set( "N", rNormal, numPoints );
    normalsPointerPair.setDetailType( rVertex );
  } 
  else 
  {
    normalsPointerPair.set( "N", rNormal, numFaceVertices );
    normalsPointerPair.setDetailType( rFaceVarying );
  }
  	
  // uv
  std::vector<liqTokenPointer> UVSetsArray;
  UVSetsArray.reserve( 1 + extraUVSetNames.length() );

  liqTokenPointer currentUVSetUPtr;
  liqTokenPointer currentUVSetVPtr;
  liqTokenPointer currentUVSetNamePtr;
  liqTokenPointer extraUVSetsUPtr;
  liqTokenPointer extraUVSetsVPtr;
  liqTokenPointer extraUVSetsNamePtr;
  if(liqglo.liqglo_outputMeshAsRMSArrays)
  {
	  currentUVSetUPtr.set( "s", rFloat, numFaceVertices );
	  currentUVSetUPtr.setDetailType( rFaceVarying );

	  currentUVSetVPtr.set( "t", rFloat, numFaceVertices );
	  currentUVSetVPtr.setDetailType( rFaceVarying );

	  currentUVSetNamePtr.set( "currentUVSet", rString, 1 );
	  currentUVSetNamePtr.setDetailType( rConstant );

	  if( numUVSets > 1 )
	  {
		  extraUVSetsUPtr.set( "u_uvSet", rFloat, numFaceVertices, numUVSets-1 );
		  extraUVSetsUPtr.setDetailType( rFaceVarying );

		  extraUVSetsVPtr.set( "v_uvSet", rFloat, numFaceVertices, numUVSets-1 );
		  extraUVSetsVPtr.setDetailType( rFaceVarying );

		  extraUVSetsNamePtr.set( "extraUVSets", rString, numUVSets-1 );
		  extraUVSetsNamePtr.setDetailType( rConstant );
	  }
  }
  else
  {
	  if ( numUVSets > 0 ) 
	  {
		liqTokenPointer pFaceVertexPointerPair;

		pFaceVertexPointerPair.set( "st", rFloat, numFaceVertices, 2 );
		pFaceVertexPointerPair.setDetailType( rFaceVarying );

		UVSetsArray.push_back( pFaceVertexPointerPair );

		for ( unsigned j( 0 ); j<extraUVSetNames.length(); j++) 
		{
		  liqTokenPointer pFaceVertexPointerPair;

		  pFaceVertexPointerPair.set( extraUVSetNames[j].asChar(), rFloat, numFaceVertices, 2 );
		  pFaceVertexPointerPair.setDetailType( rFaceVarying );

		  UVSetsArray.push_back( pFaceVertexPointerPair );
		}

		if( liqglo.liqglo_outputMeshUVs ) 
		{
		  // Match MTOR, which also outputs face-varying STs as well for some reason - Paul
		  // not anymore - Philippe
		  pFaceVertexSPointer.set( "u", rFloat, numFaceVertices );
		  pFaceVertexSPointer.setDetailType( rFaceVarying );

		  pFaceVertexTPointer.set( "v", rFloat, numFaceVertices );
		  pFaceVertexTPointer.setDetailType( rFaceVarying );
		}
	  }
  }

  vertexParam = pointsPointerPair.getTokenFloatArray();
  normalParam = normalsPointerPair.getTokenFloatArray();

  // Read the mesh from Maya
  MFloatVectorArray normals;
  fnMesh.getNormals( normals );

  for ( MItMeshPolygon polyIt ( mesh ); polyIt.isDone() == false; polyIt.next() ) 
  {
    count = polyIt.polygonVertexCount();
    nverts[face] = count;
	for( i=0; i<count; i++ )    // loop over the vertices of the face
    {
      vertex = polyIt.vertexIndex( i );
      verts[faceVertex] = vertex;
      point = polyIt.point( i, MSpace::kObject );
      pointsPointerPair.setTokenFloat( vertex, point.x, point.y, point.z );
      normal = polyIt.normalIndex( i );

      if( numNormals == numPoints ) 
        normalsPointerPair.setTokenFloat( vertex, normals[normal].x, normals[normal].y, normals[normal].z );
      else 
        normalsPointerPair.setTokenFloat( faceVertex, normals[normal].x, normals[normal].y, normals[normal].z );

	  if( liqglo.liqglo_outputMeshAsRMSArrays )
	  {
		  for( j=0; j<numUVSets; j++ )
		  {
			  if(j==0)
			  {
				  MString uvSetName = currentUVSetName;
				  // set uvSet name
				  currentUVSetNamePtr.setTokenString( 0, currentUVSetName.asChar() );
				  // set uv values
				  fnMesh.getPolygonUV( face, i, S, T, &uvSetName );

				  currentUVSetUPtr.setTokenFloat( faceVertex, S );
				  currentUVSetVPtr.setTokenFloat( faceVertex, 1-T );
			  }
			  else
			  {
				  MString uvSetName = extraUVSetNames[j-1];
				  // set uvSet name
				  extraUVSetsNamePtr.setTokenString( j-1, extraUVSetNames[j-1].asChar() );
				  // set uv values
				  fnMesh.getPolygonUV( face, i, S, T, &uvSetName );
				  extraUVSetsUPtr.setTokenFloat( (numFaceVertices*(j-1)) + faceVertex, S );
				  extraUVSetsVPtr.setTokenFloat( (numFaceVertices*(j-1)) + faceVertex, 1-T );
			  }
		  }
	  }
	  else
	  {
		  if ( numUVSets ) 
		  {
			  for( j=0; j<numUVSets; j++ )
			  {
				  MString uvSetName;
				  if(j==0)
				  {
					  uvSetName = currentUVSetName;
				  }
				  else
				  {
					  uvSetName = extraUVSetNames[j-1];
				  }
				  fnMesh.getPolygonUV( face, i, S, T, &uvSetName );
				  UVSetsArray[j].setTokenFloat( faceVertex, 0, S );
				  UVSetsArray[j].setTokenFloat( faceVertex, 1, 1-T );
				  //printf("V%d  %s : %f %f  =>  %f %f \n", i, uvSetName.asChar(), S, T, S, 1-T);

				  if( liqglo.liqglo_outputMeshUVs && j==0)
				  {
					  // Match MTOR, which always outputs face-varying STs as well for some reason - Paul
					  pFaceVertexSPointer.setTokenFloat( faceVertex, S );
					  pFaceVertexTPointer.setTokenFloat( faceVertex, 1-T );
				  }
			  }
		  }
		}
      // printf( "[%d] faceVertex = %d  vertex = %d\n", i, faceVertex, vertex );

      ++faceVertex;
    }
    ++face;
  }
  // Add tokens to array and clean up after
  tokenPointerArray.push_back( pointsPointerPair );
  tokenPointerArray.push_back( normalsPointerPair );

  if(liqglo.liqglo_outputMeshAsRMSArrays)
  {
	  tokenPointerArray.push_back( currentUVSetNamePtr );
	  tokenPointerArray.push_back( currentUVSetUPtr );
	  tokenPointerArray.push_back( currentUVSetVPtr );
	  if( numUVSets > 1 )
	  {
		  tokenPointerArray.push_back( extraUVSetsNamePtr );
		  tokenPointerArray.push_back( extraUVSetsUPtr );
		  tokenPointerArray.push_back( extraUVSetsVPtr );
	  }
  }
  else
  {
	  if( UVSetsArray.size() ) 
		  tokenPointerArray.insert( tokenPointerArray.end(), UVSetsArray.begin(), UVSetsArray.end() );

	  if( liqglo.liqglo_outputMeshUVs ) 
	  {
		  tokenPointerArray.push_back( pFaceVertexSPointer );
		  tokenPointerArray.push_back( pFaceVertexTPointer );
	  }
  }

  addAdditionalSurfaceParameters( mesh );
}
Example 6
MMatrix MotionSyn::synTrainsiton(const std::size_t identity, const std::size_t content1,const std::size_t content2,
								const std::size_t length, CVector3D<double> initPos, double &curState)
{
	std::vector<MMatrix> xStar(3);
  
	xStar[0].resize(length,2);
	xStar[1].resize(length, mFactors[1]->sizeCol());
	xStar[2].resize(length, mFactors[2]->sizeCol());
	
	MMatrix actor = mFactors[1]->subMMatrix(identity,0,1,mFactors[1]->sizeCol());

	/*std::vector<double> steps;
	double total = 0.0;
	for (std::size_t i = 0; i < length; i++)
	{
		MMatrix newContent(1,mFactors[2]->sizeCol());

		for(std::size_t t = 0; t < newContent.sizeCol(); t++)
		{
				double val =  (1 - double(t)/length) * mFactors[2]->get(content1,t) 
			  				  +		double(t)/length * mFactors[2]->get(content2,t);
				newContent.assign(val,t);
		}
		MMatrix kron = actor.kron(newContent);
		MMatrix val = mGPM->predict(kron);
		total += val.get(0);
		steps.push_back(val.get(0));
		xStar[1].copyRowRow(i,*mFactors[1],identity);
		xStar[2].copyRowRow(i,newContent,0);
	}
	total = 6.28 - total;
	for (std::size_t i = 0; i < length; i++)
	{
		total += steps[i];
		 xStar[0].assign(cos(6.28-steps[i]*(length-i)), i, 0);
		 xStar[0].assign(sin(6.28-steps[i]*(length-i)), i, 1);
	}*/
 
	for (std::size_t i = 0; i < length; i++)
	{
	 	if(i < 50)
		{
			MMatrix newContent(1,mFactors[2]->sizeCol());
			for(std::size_t t = 0; t < newContent.sizeCol(); t++)
			{
				// Blend weight follows the frame index i (cf. generate() below), not the column index.
				double val =  (1 - double(i)/50) * mFactors[2]->get(content1,t) 
								+ double(i)/50 * mFactors[2]->get(content2,t);
				newContent.assign(val,t);
			}
	 		MMatrix kron = actor.kron(newContent);
			MMatrix val = mGPM->predict(kron);
			double step  = val.get(0);
			curState += step;
			xStar[2].copyRowRow(i,newContent,0);


		/*	MMatrix content = mFactors[2]->subMMatrix(content1,0,1,mFactors[2]->sizeCol());
			MMatrix kron = actor.kron(content);
			MMatrix val = mGPM->predict(kron);
			double step  = val.get(0);
			curState += step;
			xStar[2].copyRowRow(i,content,0);*/

 		}
		else
		{
			MMatrix content = mFactors[2]->subMMatrix(content2,0,1,mFactors[2]->sizeCol());
			MMatrix kron = actor.kron(content);
			MMatrix val = mGPM->predict(kron);
			double step  = val.get(0);
			curState += step;
			xStar[2].copyRowRow(i,content,0);
		}

		xStar[0].assign(cos(curState), i, 0);
		xStar[0].assign(sin(curState), i, 1);
		xStar[1].copyRowRow(i,*mFactors[1],identity);
		
	}
	return meanPrediction(xStar,initPos);
}
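During the first 50 frames the function blends the two content (style) rows linearly before forming the Kronecker product with the identity row. A standalone sketch of that blend, with illustrative names only:

#include <cstddef>
#include <vector>

// Linear blend of two equally sized rows: alpha = 0 returns 'from', alpha = 1 returns 'to'.
std::vector<double> blendRows(const std::vector<double>& from,
                              const std::vector<double>& to, double alpha)
{
	std::vector<double> out(from.size());
	for (std::size_t k = 0; k < from.size(); ++k)
		out[k] = (1.0 - alpha) * from[k] + alpha * to[k];
	return out;
}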
Example 7
MMatrix MotionSyn::generate(std::size_t identity,vector<std::size_t> contents,std::size_t interval)
{
	std::size_t state_size = contents.size() * 2 - 1;
 	std::size_t length = interval * state_size;
	
	MMatrix motion(length,mInitY.sizeCol());
	
	std::vector<MMatrix> xStar(3);
	xStar[0].resize(length, 2);
	xStar[1].resize(length, mFactors[1]->sizeCol());
	xStar[2].resize(length, mFactors[2]->sizeCol());
 
	double current_state = 0;
	for (std::size_t i = 0; i < state_size; i++)
	{
		MMatrix kron(1,mFactors[1]->sizeCol() * mFactors[2]->sizeCol());
		if (i % 2 == 0)
		{
 			MMatrix mat1 = mFactors[1]->subMMatrix(identity, 0, 1, mFactors[1]->sizeCol());
			MMatrix mat2 = mFactors[2]->subMMatrix(contents[i/2], 0, 1, mFactors[2]->sizeCol());
 
			kron = mat1.kron(mat2);
 			MMatrix val = mGPM->predict(kron);

			double step = val.get(0);

			for (std::size_t t = 0; t < interval; t++)
			{
				xStar[0].assign(cos(current_state + double(t*step)), t + i * interval, 0);
				xStar[0].assign(sin(current_state + double(t*step)), t + i * interval, 1);
  				xStar[1].copyRowRow(t + i * interval, *mFactors[1], identity);
				xStar[2].copyRowRow(t + i * interval, *mFactors[2], contents[i/2]);
			}
			current_state += step * interval;
 		}
		else
		{
			for (std::size_t t = 0; t < interval; t++)
			{
				MMatrix linearIpconent(1,mFactors[2]->sizeCol());
				for (std::size_t k = 0; k < linearIpconent.sizeCol(); k++)
				{
					double val = (1 - double(t) / interval) * mFactors[2]->get(contents[(i-1)/2], k) 
								   + (double(t) / interval) * mFactors[2]->get(contents[(i+1)/2], k); 

					linearIpconent.assign(val, 0, k);
				}
		 
 				MMatrix mat = mFactors[1]->subMMatrix(identity, 0, 1, mFactors[1]->sizeCol());
				
				kron = mat.kron(linearIpconent);
 
				MMatrix val = mGPM->predict(kron);
				double step = val.get(0);

				current_state += step;

				xStar[0].assign(cos(current_state), t + i * interval, 0);
				xStar[0].assign(sin(current_state), t + i * interval, 1);

				xStar[1].copyRowRow(t + i * interval, *mFactors[1], identity);
				xStar[2].copyRowRow(t + i * interval, linearIpconent, 0);
			}
		}
	}
 	return meanPrediction(xStar,CVector3D<double>(0,mInitY.get(identity,1),0));
}
MStatus viewRenderUserOperation::execute( const MHWRender::MDrawContext & drawContext )
{
	// Sample code to debug pass information
	static const bool debugPassInformation = false;
	if (debugPassInformation)
	{
		const MHWRender::MPassContext & passCtx = drawContext.getPassContext();
		const MString & passId = passCtx.passIdentifier();
		const MStringArray & passSem = passCtx.passSemantics();
		printf("viewRenderUserOperation: drawing in pass[%s], semantic[", passId.asChar());
		for (unsigned int i=0; i<passSem.length(); i++)
			printf(" %s", passSem[i].asChar());
		printf("\n");
	}

	// Example code to find the active override.
	// This is not necessary if the operations just keep a reference
	// to the override, but this demonstrates how this
	// contextual information can be extracted.
	//
	MHWRender::MRenderer *theRenderer = MHWRender::MRenderer::theRenderer();
	const MHWRender::MRenderOverride *overridePtr = NULL;
	if (theRenderer)
	{
		const MString & overrideName = theRenderer->activeRenderOverride();
		overridePtr = theRenderer->findRenderOverride( overrideName );
	}

	// Some sample code to debug lighting information in the MDrawContext
	//
	if (fDebugLightingInfo)
	{
		viewRenderOverrideUtilities::printDrawContextLightInfo( drawContext );
	}

	// Some sample code to debug other MDrawContext information
	//
	if (fDebugDrawContext)
	{
		MStatus status;
		MMatrix matrix = drawContext.getMatrix(MHWRender::MFrameContext::kWorldMtx, &status);
		double dest[4][4];
		status = matrix.get(dest);
		printf("World matrix is:\n");
		printf("\t%f, %f, %f, %f\n", dest[0][0], dest[0][1], dest[0][2], dest[0][3]);
		printf("\t%f, %f, %f, %f\n", dest[1][0], dest[1][1], dest[1][2], dest[1][3]);
		printf("\t%f, %f, %f, %f\n", dest[2][0], dest[2][1], dest[2][2], dest[2][3]);
		printf("\t%f, %f, %f, %f\n", dest[3][0], dest[3][1], dest[3][2], dest[3][3]);

		MDoubleArray viewDirection = drawContext.getTuple(MHWRender::MFrameContext::kViewDirection, &status);
		printf("Viewdirection is: %f, %f, %f\n", viewDirection[0], viewDirection[1], viewDirection[2]);

		MBoundingBox box = drawContext.getSceneBox(&status);
		printf("Screen box is:\n");
		printf("\twidth=%f, height=%f, depth=%f\n", box.width(), box.height(), box.depth());
		float center[4];
		box.center().get(center);
		printf("\tcenter=(%f, %f, %f, %f)\n", center[0], center[1], center[2], center[3]);


		int originX, originY, width, height;
		status = drawContext.getViewportDimensions(originX, originY, width, height);
		printf("Viewport dimension: center(%d, %d), width=%d, heigh=%d\n", originX, originY, width, height);
	}

	//  Draw some additional things for the scene draw
	//
	M3dView mView;
	if (mPanelName.length() &&
		(M3dView::getM3dViewFromModelPanel(mPanelName, mView) == MStatus::kSuccess))
	{
		// Get the current viewport and scale it relative to that
		//
		int targetW, targetH;
		drawContext.getRenderTargetSize( targetW, targetH );

		if (fDrawLabel && overridePtr)   // guard against a null override pointer
		{
			MString testString("Drawing with override: ");
			testString += overridePtr->name();
			MPoint pos(0.0,0.0,0.0);
			glColor3f( 1.0f, 1.0f, 1.0f );
			mView.drawText( testString, pos);
		}

		// Some user drawing of scene bounding boxes
		//
		if (fDrawBoundingBoxes)
		{
			MDagPath cameraPath;
			mView.getCamera( cameraPath);
			MCustomSceneDraw userDraw;
			userDraw.draw( cameraPath, targetW, targetH );
		}
	}
	return MStatus::kSuccess;
}
Example 9
//  ========== DtCameraGetMatrix ==========
//
//  SYNOPSIS
//	Return the camera transformation matrix. This matrix
// 	includes the camera rotation, translation, and scale
//	transforms. This function also sets the valid bits
//	for DT_CAMERA_POSITION and DT_CAMERA_ORIENTATION.
//	The matrix is in row-major order.
//
//  From PA DT:
//          Not implemented: returns a pointer to an identity matrix
//          under the OpenModel implementation.
//
//  For Maya DT:
//          This function returns the camera's global transformation matrix.
//
int DtCameraGetMatrix( int cameraID,
					   float** matrix )
{
	// static float mtx[4][4];
	static float globalMtx[4][4];
	static float localMtx[4][4]; 

    // Check for error.
    //
	if( ( cameraID < 0) || ( cameraID >= local->camera_ct ) )
	{
		*matrix = NULL;
		return( 0 );
	}

    // Set the valid flag.
    //
	// local->cameras[cameraID].valid_bits|=(DT_VALID_BIT_MASK&DT_CAMERA_MATRIX);

    // Get transformations.
    //
#if 0
	mtx[0][0]=1.0;mtx[0][1]=0.0;mtx[0][2]=0.0;mtx[0][3]=0.0;
	mtx[1][0]=0.0;mtx[1][1]=1.0;mtx[1][2]=0.0;mtx[1][3]=0.0;
	mtx[2][0]=0.0;mtx[2][1]=0.0;mtx[2][2]=1.0;mtx[2][3]=0.0;
	mtx[3][0]=0.0;mtx[3][1]=0.0;mtx[3][2]=0.0;mtx[3][3]=1.0;
#endif

	// Camera transformation matrix is set on the transform node.
	//	
	MStatus returnStatus = MS::kSuccess;
	MFnDagNode fnTransNode( local->cameras[cameraID].transformNode, &returnStatus );
	MDagPath dagPath;
	returnStatus = fnTransNode.getPath( dagPath );
	if( MS::kSuccess == returnStatus )
	{
		if( DtExt_Debug() & DEBUG_CAMERA )
		{
			cerr << "Got the dagPath\n";
			cerr << "length of the dagpath is " << dagPath.length() << endl;
		}
	}

	MFnDagNode fnDagPath( dagPath, &returnStatus );

	MMatrix localMatrix;
	MMatrix globalMatrix;

	localMatrix = fnTransNode.transformationMatrix ( &returnStatus );
	globalMatrix = dagPath.inclusiveMatrix();

	localMatrix.get( localMtx );
	globalMatrix.get( globalMtx );

	if( DtExt_Debug() & DEBUG_CAMERA )
	{
		int i = 0;
		int j = 0;

		cerr << "camera's global transformation matrix:\n";
		
		for( i = 0; i < 4; i++ )
		{
			for( j = 0; j < 4; j++ )
			{
				cerr << globalMtx[i][j] << " ";
			}
			cerr << endl;
		}	

		cerr << "camera's local transformation matrix:\n";

		for( i = 0; i < 4; i++ )
		{
			for( j = 0; j < 4; j++ )
			{
				cerr << localMtx[i][j] << " ";
			}
			cerr << endl;
		}	
	}

	// *matrix = (float*)&mtx;
	*matrix = (float*)&globalMtx;

	return(1);
}  // DtCameraGetMatrix //
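A hedged call-site sketch: how a caller might retrieve and print the row-major matrix returned above. The camera ID handling is an assumption for illustration; real IDs come from the translator's camera enumeration.

// Illustrative caller; cameraID is assumed to be valid.
static void printCameraMatrix( int cameraID )
{
	float* camMatrix = NULL;
	if ( DtCameraGetMatrix( cameraID, &camMatrix ) && ( NULL != camMatrix ) )
	{
		// The matrix comes back in row-major order as 16 consecutive floats.
		for ( int row = 0; row < 4; row++ )
		{
			for ( int col = 0; col < 4; col++ )
			{
				cerr << camMatrix[row * 4 + col] << " ";
			}
			cerr << endl;
		}
	}
}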
Example 10
File: utils.cpp Project: JT-a/USD
/* static */
bool
px_vp20Utils::RenderBoundingBox(
        const MBoundingBox& bounds,
        const GfVec4f& color,
        const MMatrix& worldViewMat,
        const MMatrix& projectionMat)
{
    static const GfVec3f cubeLineVertices[24] = {
        // Vertical edges
        GfVec3f(-0.5f, -0.5f, 0.5f),
        GfVec3f(-0.5f, 0.5f, 0.5f),

        GfVec3f(0.5f, -0.5f, 0.5f),
        GfVec3f(0.5f, 0.5f, 0.5f),

        GfVec3f(0.5f, -0.5f, -0.5f),
        GfVec3f(0.5f, 0.5f, -0.5f),

        GfVec3f(-0.5f, -0.5f, -0.5f),
        GfVec3f(-0.5f, 0.5f, -0.5f),

        // Top face edges
        GfVec3f(-0.5f, 0.5f, 0.5f),
        GfVec3f(0.5f, 0.5f, 0.5f),

        GfVec3f(0.5f, 0.5f, 0.5f),
        GfVec3f(0.5f, 0.5f, -0.5f),

        GfVec3f(0.5f, 0.5f, -0.5f),
        GfVec3f(-0.5f, 0.5f, -0.5f),

        GfVec3f(-0.5f, 0.5f, -0.5f),
        GfVec3f(-0.5f, 0.5f, 0.5f),

        // Bottom face edges
        GfVec3f(-0.5f, -0.5f, 0.5f),
        GfVec3f(0.5f, -0.5f, 0.5f),

        GfVec3f(0.5f, -0.5f, 0.5f),
        GfVec3f(0.5f, -0.5f, -0.5f),

        GfVec3f(0.5f, -0.5f, -0.5f),
        GfVec3f(-0.5f, -0.5f, -0.5f),

        GfVec3f(-0.5f, -0.5f, -0.5f),
        GfVec3f(-0.5f, -0.5f, 0.5f),
    };

    static const std::string vertexShaderSource(
        "#version 140\n"
        "\n"
        "in vec3 position;\n"
        "uniform mat4 mvpMatrix;\n"
        "\n"
        "void main()\n"
        "{\n"
        "    gl_Position = vec4(position, 1.0) * mvpMatrix;\n"
        "}\n");

    static const std::string fragmentShaderSource(
        "#version 140\n"
        "\n"
        "uniform vec4 color;\n"
        "out vec4 outColor;\n"
        "\n"
        "void main()\n"
        "{\n"
        "    outColor = color;\n"
        "}\n");

    PxrMayaGLSLProgram renderBoundsProgram;

    if (!renderBoundsProgram.CompileShader(GL_VERTEX_SHADER,
                                           vertexShaderSource)) {
        MGlobal::displayError("Failed to compile bounding box vertex shader");
        return false;
    }

    if (!renderBoundsProgram.CompileShader(GL_FRAGMENT_SHADER,
                                           fragmentShaderSource)) {
        MGlobal::displayError("Failed to compile bounding box fragment shader");
        return false;
    }

    if (!renderBoundsProgram.Link()) {
        MGlobal::displayError("Failed to link bounding box render program");
        return false;
    }

    if (!renderBoundsProgram.Validate()) {
        MGlobal::displayError("Failed to validate bounding box render program");
        return false;
    }

    GLuint renderBoundsProgramId = renderBoundsProgram.GetProgramId();

    glUseProgram(renderBoundsProgramId);

    // Populate an array buffer with the cube line vertices.
    GLuint cubeLinesVBO;
    glGenBuffers(1, &cubeLinesVBO);
    glBindBuffer(GL_ARRAY_BUFFER, cubeLinesVBO);
    glBufferData(GL_ARRAY_BUFFER,
                 sizeof(cubeLineVertices),
                 cubeLineVertices,
                 GL_STATIC_DRAW);

    // Create a transformation matrix from the bounding box's center and
    // dimensions.
    MTransformationMatrix bboxTransformMatrix = MTransformationMatrix::identity;
    bboxTransformMatrix.setTranslation(bounds.center(), MSpace::kTransform);
    const double scales[3] = { bounds.width(), bounds.height(), bounds.depth() };
    bboxTransformMatrix.setScale(scales, MSpace::kTransform);

    const MMatrix mvpMatrix =
        bboxTransformMatrix.asMatrix() * worldViewMat * projectionMat;

    GLfloat mvpMatrixArray[4][4];
    mvpMatrix.get(mvpMatrixArray);

    // Populate the shader variables.
    GLuint mvpMatrixLocation = glGetUniformLocation(renderBoundsProgramId, "mvpMatrix");
    glUniformMatrix4fv(mvpMatrixLocation, 1, GL_TRUE, &mvpMatrixArray[0][0]);

    GLuint colorLocation = glGetUniformLocation(renderBoundsProgramId, "color");
    glUniform4fv(colorLocation, 1, color.data());

    // Enable the position attribute and draw.
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    // The count parameter is the number of vertices (24 line endpoints),
    // not the byte size of the vertex array.
    glDrawArrays(GL_LINES, 0, sizeof(cubeLineVertices) / sizeof(cubeLineVertices[0]));
    glDisableVertexAttribArray(0);

    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glDeleteBuffers(1, &cubeLinesVBO);

    glUseProgram(0);

    return true;
}
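A minimal usage sketch, assuming the caller already has the bounding box plus the world-view and projection matrices from the current draw context (the wireframe colour is an arbitrary choice for illustration):

// Illustrative wrapper: draw a green wireframe box around 'bounds'.
static bool drawGreenBounds(const MBoundingBox& bounds,
                            const MMatrix& worldViewMat,
                            const MMatrix& projectionMat)
{
    const GfVec4f wireColor(0.0f, 1.0f, 0.0f, 1.0f);
    return px_vp20Utils::RenderBoundingBox(bounds, wireColor,
                                           worldViewMat, projectionMat);
}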
Example 11
bool SceneShapeUI::snap( MSelectInfo &snapInfo ) const
{
	MStatus s;

	if( snapInfo.displayStatus() != M3dView::kHilite )
	{
		MSelectionMask meshMask( MSelectionMask::kSelectMeshes );
		if( !snapInfo.selectable( meshMask ) )
		{
			return false;
		}
	}

	// early out if we have no scene to draw
	SceneShape *sceneShape = static_cast<SceneShape *>( surfaceShape() );
	const IECore::SceneInterface *sceneInterface = sceneShape->getSceneInterface().get();
	if( !sceneInterface )
	{
		return false;
	}

	IECoreGL::ConstScenePtr scene = sceneShape->glScene();
	if( !scene )
	{
		return false;
	}

	// Get the viewport that the snapping operation is taking place in.
	M3dView view = snapInfo.view();

	// Use an IECoreGL::Selector to find the point in world space that we wish to snap to.
	// We do this by first getting the origin of the selection ray and transforming it into
	// NDC space using the OpenGL projection and transformation matrices. Once we have the
	// point in NDC we can use it to define the viewport that the IECoreGL::Selector will use.

	MPoint localRayOrigin;
	MVector localRayDirection;
	snapInfo.getLocalRay( localRayOrigin, localRayDirection );
	
	Imath::V3d org( localRayOrigin[0], localRayOrigin[1], localRayOrigin[2] );
	MDagPath camera;
	view.getCamera( camera );
	MMatrix localToCamera = snapInfo.selectPath().inclusiveMatrix() * camera.inclusiveMatrix().inverse();
	
	view.beginSelect();
		Imath::M44d projectionMatrix;
		glGetDoublev( GL_PROJECTION_MATRIX, projectionMatrix.getValue() );
	view.endSelect();

	double v[4][4];
	localToCamera.get( v ); 
	Imath::M44d cam( v );
	Imath::V3d ndcPt3d = ( (org * cam ) * projectionMatrix + Imath::V3d( 1. ) ) * Imath::V3d( .5 );
	Imath::V2d ndcPt( std::max( std::min( ndcPt3d[0], 1. ), 0. ), 1. - std::max( std::min( ndcPt3d[1], 1. ), 0. ) );

	view.beginGL();
	
		glMatrixMode( GL_PROJECTION );
		glLoadMatrixd( projectionMatrix.getValue() );
		
		float radius = .001; // The radius of the selection area in NDC.
		double aspect = double( view.portWidth() ) / view.portHeight();
		Imath::V2f selectionWH( radius, radius * aspect );
		
		std::vector<IECoreGL::HitRecord> hits;
		{
			IECoreGL::Selector selector( Imath::Box2f( ndcPt - selectionWH, ndcPt + selectionWH ), IECoreGL::Selector::IDRender, hits );
				
			IECoreGL::State::bindBaseState();
			selector.baseState()->bind();
			scene->render( selector.baseState() );			
		}
				
	view.endGL();

	if( hits.empty() )
	{
		return false;
	}

	// Get the closest mesh hit.	
	float depthMin = std::numeric_limits<float>::max();
	int depthMinIndex = -1;
	for( unsigned int i=0, e = hits.size(); i < e; i++ )
	{		
		if( hits[i].depthMin < depthMin )
		{
			depthMin = hits[i].depthMin;
			depthMinIndex = i;
		}
	}

	// Get the absolute path of the hit object.
	IECore::SceneInterface::Path objPath;
	std::string objPathStr;
	sceneInterface->path( objPath );
	IECore::SceneInterface::pathToString( objPath, objPathStr );
	
	objPathStr += IECoreGL::NameStateComponent::nameFromGLName( hits[depthMinIndex].name );
	IECore::SceneInterface::stringToPath( objPathStr, objPath );

	// Validate the hit selection.
	IECore::ConstSceneInterfacePtr childInterface;
	try
	{
		childInterface = sceneInterface->scene( objPath );
	}
	catch(...)
	{
		return false;
	}

	if( !childInterface )
	{
		return false;
	}
	
	if( !childInterface->hasObject() )
	{
		return false;
	}

	// Get the mesh primitive so that we can query its vertices.
	double time = sceneShape->time();
	IECore::ConstObjectPtr object = childInterface->readObject( time );
	IECore::ConstMeshPrimitivePtr meshPtr = IECore::runTimeCast<const IECore::MeshPrimitive>( object.get() );
	
	if ( !meshPtr )
	{
		return false;
	}
	
	// Calculate the snap point in object space.
	MPoint worldIntersectionPoint;
	selectionRayToWorldSpacePoint( camera, snapInfo, depthMin, worldIntersectionPoint );
	Imath::V3f pt( worldIntersectionPoint[0], worldIntersectionPoint[1], worldIntersectionPoint[2] );
	Imath::M44f objToWorld( worldTransform( childInterface.get(), time ) );
	pt = pt * objToWorld.inverse();

	// Get the list of vertices in the mesh.
	IECore::V3fVectorData::ConstPtr pointData( meshPtr->variableData<IECore::V3fVectorData>( "P", IECore::PrimitiveVariable::Vertex ) ); 
	const std::vector<Imath::V3f> &vertices( pointData->readable() ); 
	
	// Find the vertex that is closest to the snap point.
	Imath::V3d closestVertex;
	float closestDistance = std::numeric_limits<float>::max(); 
	
	for( std::vector<Imath::V3f>::const_iterator it( vertices.begin() ); it != vertices.end(); ++it )
	{
		Imath::V3d vert( *it );
		float d( ( pt - vert ).length() ); // Calculate the distance between the vertex and the snap point.
		if( d < closestDistance )
		{
			closestDistance = d;
			closestVertex = vert;
		}
	}

	// Snap to the vertex.
	closestVertex *= objToWorld;
	snapInfo.setSnapPoint( MPoint( closestVertex[0], closestVertex[1], closestVertex[2] ) );
	return true;
}
Example 12
void liqWriteArchive::writeObjectToRib(const MDagPath &objDagPath, bool writeTransform)
{
  if (!isObjectVisible(objDagPath)) {
    return;
  }

  if (debug) { cout << "liquidWriteArchive: writing object: " << objDagPath.fullPathName().asChar() << endl; }
  if (objDagPath.node().hasFn(MFn::kShape) || MFnDagNode( objDagPath ).typeName() == "liquidCoorSys") {
    // we're looking at a shape node, so write out the geometry to the RIB
    outputObjectName(objDagPath);

    liqRibNode ribNode;
    ribNode.set(objDagPath, 0, MRT_Unknown);

    // don't write out clipping planes
    if ( ribNode.object(0)->type == MRT_ClipPlane ) return;

    if ( ribNode.rib.box != "" && ribNode.rib.box != "-" ) {
      RiArchiveRecord( RI_COMMENT, "Additional RIB:\n%s", ribNode.rib.box.asChar() );
    }
    if ( ribNode.rib.readArchive != "" && ribNode.rib.readArchive != "-" ) {
      // the following test prevents a really nasty infinite loop !!
      if ( ribNode.rib.readArchive != outputFilename )
        RiArchiveRecord( RI_COMMENT, "Read Archive Data: \nReadArchive \"%s\"", ribNode.rib.readArchive.asChar() );
    }
    if ( ribNode.rib.delayedReadArchive != "" && ribNode.rib.delayedReadArchive != "-" ) {
      // the following test prevents a really nasty infinite loop !!
      if ( ribNode.rib.delayedReadArchive != outputFilename )
        RiArchiveRecord( RI_COMMENT, "Delayed Read Archive Data: \nProcedural \"DelayedReadArchive\" [ \"%s\" ] [ %f %f %f %f %f %f ]", ribNode.rib.delayedReadArchive.asChar(), ribNode.bound[0], ribNode.bound[3], ribNode.bound[1], ribNode.bound[4], ribNode.bound[2], ribNode.bound[5]);
    }

    // If it's a curve we should write the basis function
    if ( ribNode.object(0)->type == MRT_NuCurve ) {
      RiBasis( RiBSplineBasis, 1, RiBSplineBasis, 1 );
    }

    if ( !ribNode.object(0)->ignore ) {
      ribNode.object(0)->writeObject();
    }
  } else {
    // we're looking at a transform node
    bool wroteTransform = false;
    if (writeTransform && (objDagPath.apiType() == MFn::kTransform)) {
      if (debug) { cout << "liquidWriteArchive: writing transform: " << objDagPath.fullPathName().asChar() << endl; }
      // push the transform onto the RIB stack
      outputObjectName(objDagPath);
      MFnDagNode mfnDag(objDagPath);
      MMatrix tm = mfnDag.transformationMatrix();
      if (true) { // (!tm.isEquivalent(MMatrix::identity)) {
        RtMatrix riTM;
        tm.get(riTM);
        wroteTransform = true;
        outputIndentation();
        RiAttributeBegin();
        indentLevel++;
        outputIndentation();
        RiConcatTransform(riTM);
      }
    }
    // go through all the children of this node and deal with each of them
    int nChildren = objDagPath.childCount();
    if (debug) { cout << "liquidWriteArchive: object " << objDagPath.fullPathName().asChar() << "has " << nChildren << " children" << endl; }
    for(int i=0; i<nChildren; ++i) {
      if (debug) { cout << "liquidWriteArchive: writing child number " << i << endl; }
      MDagPath childDagNode;
      MStatus stat = MDagPath::getAPathTo(objDagPath.child(i), childDagNode);
      if (stat) {
        writeObjectToRib(childDagNode, outputChildTransforms);
      } else {
        MGlobal::displayWarning("error getting a dag path to child node of object " + objDagPath.fullPathName());
      }
    }
    if (wroteTransform) {
      indentLevel--;
      outputIndentation();
      RiAttributeEnd();
    }
  }
  if (debug) { cout << "liquidWriteArchive: finished writing object: " << objDagPath.fullPathName().asChar() << endl; }
}
Example 13
/**
 *	Export Transform node (and related animations)
 */
osg::ref_ptr<osg::Group> Transform::exporta(MObject &obj)
{
	MFnDependencyNode dn(obj);
	MFnTransform trfn(obj);
	MMatrix mat = trfn.transformationMatrix();

	osg::ref_ptr<osg::MatrixTransform> trans = new osg::MatrixTransform();
	osg::Matrix osgmat;
	mat.get( (double(*)[4]) osgmat.ptr() );
	trans->setMatrix(osgmat);

	
	if ( Config::instance()->getAnimTransformType() == Config::OSG_ANIMATION )  {
		Animation::mapInputConnections(obj , trans ) ;
	}

	else  {

		/// Check if there is any animation connected to this Transform
		MFn::Type anim_type;
		if( Config::instance()->getExportAnimations() && hasAnimation(obj,anim_type)){
#ifdef _DEBUG
			std::cout << "Transform " << dn.name().asChar() << " is animated" << std::endl;
#endif

			// Check the Transform parameters
			double shear[3];
			trfn.getShear(shear);
	//		std::cout << "SHEAR: " << shear[0] << " " << shear[1] << " " << shear[2] << std::endl;
			MPoint sp = trfn.scalePivot(MSpace::kTransform);
	//		std::cout << "SCALE PIVOT: " << sp.x << " " << sp.y << " " << sp.z << " " << sp.w << std::endl;
			MVector spt = trfn.scalePivotTranslation(MSpace::kTransform);
	//		std::cout << "SCALE PIVOT TRANSLATION: " << spt.x << " " << spt.y << " " << spt.z << std::endl;
			MPoint rp = trfn.rotatePivot(MSpace::kTransform);
	//		std::cout << "ROTATE PIVOT: " << rp.x << " " << rp.y << " " << rp.z << " " << rp.w << std::endl;
			MVector rpt = trfn.rotatePivotTranslation(MSpace::kTransform);
	//		std::cout << "ROTATE PIVOT TRANSLATION: " << rpt.x << " " << rpt.y << " " << rpt.z << std::endl;

			if( shear[0]!=0 || shear[1]!=0 || shear[2]!=0 ){
				std::cerr << "WARNING (" << dn.name().asChar() << ") - shear value not supported in animated Transforms (" << 
					shear[0] << ", " << shear[1] << ", " << shear[2] << ")" << std::endl;
			}
			if( sp.x != 0  || sp.y != 0 || sp.z != 0 || sp.w != 1 ){
				std::cerr << "WARNING (" << dn.name().asChar() << ") - scale pivot not supported in animated Transforms ; SP=(" << 
					sp.x << ", " << sp.y << ", " << sp.z << ", " << sp.w << ")" << std::endl;
			}
			if( spt.x != 0  || spt.y != 0 || spt.z != 0 ){
				std::cerr << "WARNING (" << dn.name().asChar() << ") - scale pivot translation not supported in animated Transforms ; SPT=(" << 
					spt.x << ", " << spt.y << ", " << spt.z << ")" << std::endl;
			}
			if( rp.x != 0  || rp.y != 0 || rp.z != 0 || rp.w != 1 ){
				std::cerr << "WARNING (" << dn.name().asChar() << ") - rotate pivot not supported in animated Transforms ; RP=(" << 
					rp.x << ", " << rp.y << ", " << rp.z << ", " << rp.w << ")" << std::endl;
			}
			if( rpt.x != 0  || rpt.y != 0 || rpt.z != 0 ){
				std::cerr << "WARNING (" << dn.name().asChar() << ") - rotate pivot translation not supported in animated Transforms ; RPT=(" << 
					rpt.x << ", " << rpt.y << ", " << rpt.z << ")" << std::endl;
			}

			// Create a callback to bind the animation to this transform
			osg::ref_ptr< osg::AnimationPath > ap;
#ifdef GENERIC_EXPORTER
			// New code to create the animation path. Independent of kind of animation.
			// It bakes any animation, not only animCurves and motionPaths, but also expressions or whatever
			ap = animatedTransform2AnimationPath(obj);
#else
			switch(anim_type){
				case MFn::kAnimCurve:
					ap = animCurve2AnimationPath(obj);
					break;
				case MFn::kMotionPath:
					ap = motionPath2AnimationPath(obj);
					break;
			}
#endif
			trans->setUpdateCallback( new osg::AnimationPathCallback( ap.get() ));
		}

	}	// ANIMATION_PATH


	return (osg::Group *)trans.get();
}