Example 1
0
void CameraController::frame( const Imath::Box3f &box )
{
	// Frame the box using the camera's current orientation : derive the
	// view direction and up vector from the existing camera transform and
	// delegate to the more general overload.
	V3f viewDirection( 0, 0, -1 );
	V3f upVector( 0, 1, 0 );
	const M44f &cameraMatrix = m_data->transform->matrix;
	cameraMatrix.multDirMatrix( viewDirection, viewDirection );
	cameraMatrix.multDirMatrix( upVector, upVector );
	frame( box, viewDirection, upVector );
}
Example 2
0
IECore::LineSegment3f ViewportGadget::rasterToGadgetSpace( const Imath::V2f &position, const Gadget *gadget ) const
{
	// Build a line through the given raster position by unprojecting it
	// via the camera controller.
	/// \todo The CameraController::unproject() method should be const.
	LineSegment3f line;
	IECore::CameraController &cameraController = const_cast<IECore::CameraController &>( m_cameraController );
	cameraController.unproject( V2i( (int)position.x, (int)position.y ), line.p0, line.p1 );
	// When a gadget is supplied, map the line from world space into the
	// gadget's local space.
	if( gadget )
	{
		M44f worldToGadget = gadget->fullTransform();
		worldToGadget.invert( true );
		line = line * worldToGadget;
	}
	return line;
}
Example 3
0
void GraphGadget::updateNodeGadgetTransform( NodeGadget *nodeGadget )
{
	// Position the gadget from the node's "__uiPosition" plug, falling
	// back to the identity transform when the plug is absent.
	M44f transform;
	if( Gaffer::V2fPlug *positionPlug = nodeGadget->node()->getChild<Gaffer::V2fPlug>( "__uiPosition" ) )
	{
		const V2f position = positionPlug->getValue();
		transform.translate( V3f( position.x, position.y, 0 ) );
	}
	nodeGadget->setTransform( transform );
}
Example 4
0
void CameraController::track( const Imath::V2f &p )
{
	// Pan the camera within its own XY plane, based on the drag from the
	// motion start point.
	const V2i resolution = m_data->resolution->readable();
	const Box2f screenWindow = m_data->screenWindow->readable();
	const V2f drag = p - m_data->motionStart;

	// Convert the pixel drag into screen-window units. X is negated so
	// the scene follows the cursor; Y compensates for the inverted
	// raster axis.
	V3f translate(
		-screenWindow.size().x * drag.x / (float)resolution.x,
		screenWindow.size().y * drag.y / (float)resolution.y,
		0.0f
	);
	if( m_data->projection->readable()=="perspective" && m_data->fov )
	{
		// Scale with the distance to the centre of interest so tracking
		// speed matches the apparent size of the scene.
		translate *= tan( M_PI * m_data->fov->readable() / 360.0f ) * (float)m_data->centreOfInterest;
	}
	M44f transform = m_data->motionMatrix;
	transform.translate( translate );
	m_data->transform->matrix = transform;
}
Example 5
0
Imath::M44f AimConstraint::computeConstraint( const Imath::M44f &fullTargetTransform, const Imath::M44f &fullInputTransform ) const
{
	// Split the input transform into its scale, shear, rotation and
	// translation components.
	V3f scale, shear, rotation, translation;
	extractSHRT( fullInputTransform, scale, shear, rotation, translation );

	// Build a rotation aiming the configured axis at the target, keeping
	// the configured up direction as vertical as possible.
	const V3f aimDirection = ( fullTargetTransform.translation() - translation ).normalized();
	const M44f aim = rotationMatrixWithUpDir( aimPlug()->getValue(), aimDirection, upPlug()->getValue() );

	// Reassemble the transform, substituting the aim rotation for the
	// original rotation component.
	M44f constrained;
	constrained.translate( translation );
	constrained.shear( shear );
	constrained = aim * constrained;
	constrained.scale( scale );

	return constrained;
}
void ofxAlembic::IXform::updateWithTimeInternal(double time, Imath::M44f& transform)
{
	ISampleSelector ss(time, ISampleSelector::kNearIndex);

	M44f mat;
	M44d m = m_xform.getSchema().getValue(ss).getMatrix();
	double *src = m.getValue();
	float *dst = mat.getValue();

	for (int i = 0; i < 16; i++)
		dst[i] = src[i];

	transform = mat * transform;
	
	for (int i = 0; i < 16; i++)
	{
		xform.local_matrix.getPtr()[i] = mat.getValue()[i];
		xform.global_matrix.getPtr()[i] = transform.getValue()[i];
	}
}
void MatrixMotionTransform::load( LoadContextPtr context )
{
	// Restore base class state, then rebuild the time -> matrix snapshot
	// map from the serialised container.
	Transform::load( context );
	unsigned int version = m_ioVersion;

	ConstIndexedIOPtr snapshots = context->container( staticTypeName(), version );
	snapshots = snapshots->subdirectory( g_snapshotsEntry );

	m_snapshots.clear();
	IndexedIO::EntryIDList entryNames;
	snapshots->entryIds( entryNames, IndexedIO::Directory );
	for( IndexedIO::EntryIDList::const_iterator nIt = entryNames.begin(); nIt != entryNames.end(); ++nIt )
	{
		// Each subdirectory holds one snapshot : a time value and a 4x4
		// matrix stored as 16 floats.
		ConstIndexedIOPtr snapshot = snapshots->subdirectory( *nIt );
		float time;
		snapshot->read( g_timeEntry, time );
		M44f matrix;
		float *values = matrix.getValue();
		snapshot->read( g_matrixEntry, values, 16 );
		m_snapshots[time] = matrix;
	}
}
Example 8
0
Imath::M44f Instancer::computeBranchTransform( const ScenePath &parentPath, const ScenePath &branchPath, const Gaffer::Context *context ) const
{
	// Start from the transform of the instance scene itself, evaluated in
	// the per-instance context.
	M44f result;
	if( ContextPtr instanceScope = instanceContext( context, branchPath ) )
	{
		Context::Scope scopedContext( instanceScope );
		result = instancePlug()->transformPlug()->getValue();
	}

	// At the root of each instance branch, offset by the position of the
	// corresponding source point.
	if( branchPath.size() == 1 )
	{
		const int index = instanceIndex( branchPath );
		ConstV3fVectorDataPtr points = sourcePoints( parentPath );
		if( points && (size_t)index < points->readable().size() )
		{
			M44f offset;
			offset.translate( points->readable()[index] );
			result *= offset;
		}
	}
	return result;
}
Example 9
0
void CameraController::dolly( const Imath::V2f &p )
{
	// Dolly amount combines the horizontal and vertical drag, normalised
	// by the viewport resolution.
	const V2i resolution = m_data->resolution->readable();
	const V2f normalisedDrag = V2f( (p - m_data->motionStart) ) / resolution;
	const float amount = normalisedDrag.x - normalisedDrag.y;

	if( m_data->projection->readable()=="perspective" )
	{
		// Perspective : move the camera along its view axis, scaling the
		// centre of interest exponentially so the motion feels uniform at
		// any distance.
		m_data->centreOfInterest = m_data->motionCentreOfInterest * expf( -1.9f * amount );

		M44f cameraTransform = m_data->motionMatrix;
		cameraTransform.translate( V3f( 0, 0, m_data->centreOfInterest - m_data->motionCentreOfInterest ) );

		m_data->transform->matrix = cameraTransform;
	}
	else
	{
		// Orthographic : zoom by scaling the screen window about the
		// point that was under the cursor when the motion started.
		Box2f screenWindow = m_data->motionScreenWindow;

		const V2f centreNDC = V2f( m_data->motionStart ) / resolution;
		const V2f centre(
			lerp( screenWindow.min.x, screenWindow.max.x, centreNDC.x ),
			lerp( screenWindow.max.y, screenWindow.min.y, centreNDC.y )
		);

		// Clamp the new width so the window can never collapse to zero.
		float newWidth = m_data->motionScreenWindow.size().x * expf( -1.9f * amount );
		newWidth = std::max( newWidth, 0.01f );

		const float scale = newWidth / screenWindow.size().x;

		screenWindow.min = (screenWindow.min - centre) * scale + centre;
		screenWindow.max = (screenWindow.max - centre) * scale + centre;
		m_data->screenWindow->writable() = screenWindow;
	}
}
Example 10
0
// Positions the camera so that `box` is wholly visible when looking along
// `viewDirection` with `upVector` up. Updates the camera transform, the
// centre of interest, and (for orthographic cameras) the screen window.
void CameraController::frame( const Imath::Box3f &box, const Imath::V3f &viewDirection, const Imath::V3f &upVector )
{
	// make a matrix to centre the camera on the box, with the appropriate view direction
	M44f cameraMatrix = rotationMatrixWithUpDir( V3f( 0, 0, -1 ), viewDirection, upVector );
	M44f translationMatrix;
	translationMatrix.translate( box.center() );
	cameraMatrix *= translationMatrix;

	// translate the camera back until the box is completely visible. cBox is
	// the box expressed in the camera's local space.
	M44f inverseCameraMatrix = cameraMatrix.inverse();
	Box3f cBox = transform( box, inverseCameraMatrix );

	Box2f screenWindow = m_data->screenWindow->readable();
	if( m_data->projection->readable()=="perspective" )
	{
		// perspective. leave the field of view and screen window as is and translate
		// back till the box is wholly visible. this currently assumes the screen window
		// is centred about the camera axis.
		float z0 = cBox.size().x / screenWindow.size().x;
		float z1 = cBox.size().y / screenWindow.size().y;

		// distance needed to fit the larger of the two dimensions within the
		// field of view, pushed back past the near clipping plane.
		m_data->centreOfInterest = std::max( z0, z1 ) / tan( M_PI * m_data->fov->readable() / 360.0 ) + cBox.max.z +
			m_data->clippingPlanes->readable()[0];

		cameraMatrix.translate( V3f( 0.0f, 0.0f, m_data->centreOfInterest ) );
	}
	else
	{
		// orthographic. translate to front of box and set screen window
		// to frame the box, maintaining the aspect ratio of the screen window.
		m_data->centreOfInterest = cBox.max.z + m_data->clippingPlanes->readable()[0] + 0.1; // 0.1 is a fudge factor
		cameraMatrix.translate( V3f( 0.0f, 0.0f, m_data->centreOfInterest ) );

		// scale the window uniformly so both axes of the box fit.
		float xScale = cBox.size().x / screenWindow.size().x;
		float yScale = cBox.size().y / screenWindow.size().y;
		float scale = std::max( xScale, yScale );

		V2f newSize = screenWindow.size() * scale;
		screenWindow.min.x = cBox.center().x - newSize.x / 2.0f;
		screenWindow.min.y = cBox.center().y - newSize.y / 2.0f;
		screenWindow.max.x = cBox.center().x + newSize.x / 2.0f;
		screenWindow.max.y = cBox.center().y + newSize.y / 2.0f;
	}

	m_data->transform->matrix = cameraMatrix;
	m_data->screenWindow->writable() = screenWindow;
}
Example 11
0
void CameraController::tumble( const Imath::V2f &p )
{
	// Orbit the camera about its centre of interest, driven by the drag
	// from the motion start point.
	const V2f drag = p - m_data->motionStart;

	// Pivot point and horizontal rotation axis, in world space, at the
	// time the motion began.
	V3f pivot = V3f( 0, 0, -m_data->centreOfInterest ) * m_data->motionMatrix;
	V3f xAxis = V3f( 1, 0, 0 );
	m_data->motionMatrix.multDirMatrix( xAxis, xAxis );
	xAxis.normalize();

	// Rotate about the world Y axis for horizontal drag and about the
	// camera's X axis for vertical drag, pivoting around the centre of
	// interest.
	M44f rotation;
	rotation.translate( pivot );

	rotation.rotate( V3f( 0, -drag.x / 100.0f, 0 ) );

	M44f verticalRotation;
	verticalRotation.setAxisAngle( xAxis, -drag.y / 100.0f );
	rotation = verticalRotation * rotation;

	rotation.translate( -pivot );

	m_data->transform->matrix = m_data->motionMatrix * rotation;
}
Example 12
0
// Projects each vertex of the input primitive through a camera, writing the
// resulting normalised screen positions into a UV primitive variable named
// by `uvSetPlug()`. Returns the input unchanged when it isn't a primitive
// with "P", when no uv set name is given, or when the camera can't be found.
IECore::ConstObjectPtr MapProjection::computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::ConstObjectPtr inputObject ) const
{
	// early out if it's not a primitive with a "P" variable
	const Primitive *inputPrimitive = runTimeCast<const Primitive>( inputObject.get() );
	if( !inputPrimitive )
	{
		return inputObject;
	}

	const V3fVectorData *pData = inputPrimitive->variableData<V3fVectorData>( "P" );
	if( !pData )
	{
		return inputObject;
	}

	// early out if the uv set name hasn't been provided

	const string uvSet = uvSetPlug()->getValue();

	if( uvSet == "" )
	{
		return inputObject;
	}

	// get the camera and early out if we can't find one

	ScenePath cameraPath;
	ScenePlug::stringToPath( cameraPlug()->getValue(), cameraPath );

	ConstCameraPtr constCamera = runTimeCast<const Camera>( inPlug()->object( cameraPath ) );
	if( !constCamera )
	{
		return inputObject;
	}

	// matrix taking points from object space into the camera's local space
	M44f cameraMatrix = inPlug()->fullTransform( cameraPath );
	M44f objectMatrix = inPlug()->fullTransform( path );
	M44f objectToCamera = objectMatrix * cameraMatrix.inverse();

	bool perspective = constCamera->getProjection() == "perspective";

	Box2f normalizedScreenWindow;
	if( constCamera->hasResolution() )
	{
		normalizedScreenWindow = constCamera->frustum();
	}
	else
	{
		// We don't know what resolution the camera is meant to render with, so take the whole aperture
		// as the screen window
		normalizedScreenWindow = constCamera->frustum( Camera::Distort );
	}

	// do the work

	PrimitivePtr result = inputPrimitive->copy();

	V2fVectorDataPtr uvData = new V2fVectorData();
	uvData->setInterpretation( GeometricData::UV );

	result->variables[uvSet] = PrimitiveVariable( PrimitiveVariable::Vertex, uvData );

	const vector<V3f> &p = pData->readable();
	vector<V2f> &uv = uvData->writable();
	uv.reserve( p.size() );

	for( size_t i = 0, e = p.size(); i < e; ++i )
	{
		// transform into camera space, then apply the perspective divide
		// if needed, and finally remap the screen window to the 0-1 range.
		V3f pCamera = p[i] * objectToCamera;
		V2f pScreen = V2f( pCamera.x, pCamera.y );
		if( perspective )
		{
			pScreen /= -pCamera.z;
		}
		uv.push_back(
			V2f(
				lerpfactor( pScreen.x, normalizedScreenWindow.min.x, normalizedScreenWindow.max.x ),
				lerpfactor( pScreen.y, normalizedScreenWindow.min.y, normalizedScreenWindow.max.y )
			)
		);
	}

	return result;
}
Example 13
0
// Responds to the user moving the viewport camera by writing the equivalent
// translate/rotate values back onto the selected camera's transform plug,
// with undo support that also restores the centre of interest.
void CameraTool::viewportCameraChanged()
{
	const TransformTool::Selection &selection = cameraSelection();
	if( !selection.transformPlug )
	{
		return;
	}

	if( !view()->viewportGadget()->getCameraEditable() )
	{
		return;
	}

	// Figure out the offset from where the camera is in the scene
	// to where the user has just moved the viewport camera.

	const M44f viewportCameraTransform = view()->viewportGadget()->getCameraTransform();
	M44f cameraTransform;
	{
		Context::Scope scopedContext( selection.context.get() );
		cameraTransform = selection.scene->fullTransform( selection.path );
	}

	// Nothing to do if the viewport camera already matches the scene camera.
	if( cameraTransform == viewportCameraTransform )
	{
		return;
	}

	const M44f offset = cameraTransform.inverse() * viewportCameraTransform;

	// This offset is measured in the downstream world space.
	// Transform it into the space the transform is applied in.
	// This requires a "change of basis" because it is a transformation
	// matrix.

	const M44f sceneToTransformSpace = selection.sceneToTransformSpace();
	const M44f transformSpaceOffset = sceneToTransformSpace.inverse() * offset * sceneToTransformSpace;

	// Now apply this offset to the current value on the transform plug.

	M44f plugTransform;
	{
		Context::Scope scopedContext( selection.upstreamContext.get() );
		plugTransform = selection.transformPlug->matrix();
	}
	plugTransform = plugTransform * transformSpaceOffset;
	const V3f t = plugTransform.translation();

	// Extract Euler angles, choosing the solution nearest the plug's
	// current rotation to avoid gimbal flips in the keyed values.
	Eulerf e; e.extract( plugTransform );
	e.makeNear( degreesToRadians( selection.transformPlug->rotatePlug()->getValue() ) );
	const V3f r = radiansToDegrees( V3f( e ) );

	UndoScope undoScope( selection.transformPlug->ancestor<ScriptNode>(), UndoScope::Enabled, m_undoGroup );

	for( int i = 0; i < 3; ++i )
	{
		setValueOrAddKey( selection.transformPlug->rotatePlug()->getChild( i ), selection.context->getTime(), r[i] );
		setValueOrAddKey( selection.transformPlug->translatePlug()->getChild( i ), selection.context->getTime(), t[i] );
	}

	// Create an action to save/restore the current center of interest, so that
	// when the user undos a framing action, they get back to the old center of
	// interest as well as the old transform.
	Action::enact(
		selection.transformPlug,
		// Do
		boost::bind(
			&CameraTool::setCameraCenterOfInterest,
			CameraToolPtr( this ), selection.path,
			view()->viewportGadget()->getCenterOfInterest()
		),
		// Undo
		boost::bind(
			&CameraTool::setCameraCenterOfInterest,
			CameraToolPtr( this ), selection.path,
			getCameraCenterOfInterest( selection.path )
		)
	);
}
Example 14
0
Imath::M44f Instancer::instanceTransform( const IECore::V3fVectorData *p, int instanceId ) const
{
	// The instance transform is a pure translation to the source point
	// for this instance id.
	M44f translation;
	translation.translate( p->readable()[instanceId] );
	return translation;
}
Example 15
0
// Lays out the visible children along the orientation axis, aligning them
// on the remaining axes, and applies the result via each child's transform.
// Does nothing when the cached layout is already clean.
void LinearContainer::calculateChildTransforms() const
{
	if( m_clean )
	{
		return;
	}

	int axis = m_orientation - 1;

	// First pass : accumulate the total size along the layout axis and the
	// maximum extent on the other axes, remembering each visible child's
	// bound (empty bounds are remembered too, so indices line up below).
	V3f size( 0 );
	vector<Box3f> bounds;
	for( ChildContainer::const_iterator it=children().begin(); it!=children().end(); it++ )
	{
		const Gadget *child = static_cast<const Gadget *>( it->get() );
		if( !child->getVisible() )
		{
			continue;
		}

		Box3f b = child->bound();
		if( !b.isEmpty() )
		{
			for( int a=0; a<3; a++ )
			{
				if( a==axis )
				{
					size[a] += b.size()[a];
				}
				else
				{
					size[a] = max( size[a], b.size()[a] );
				}
			}
		}
		bounds.push_back( b );
	}
	// Guard against unsigned underflow : with no visible children,
	// `bounds.size() - 1` would wrap around to a huge value.
	if( !bounds.empty() )
	{
		size[axis] += (bounds.size() - 1) * m_spacing;
	}

	// Starting offset places the run of children symmetrically about the
	// origin; direction decides which end we start from.
	float offset = size[axis] / 2.0f  * ( m_direction==Increasing ? -1.0f : 1.0f );

	// Second pass : position each visible child.
	int i = 0;
	for( ChildContainer::const_iterator it=children().begin(); it!=children().end(); it++ )
	{
		Gadget *child = static_cast<Gadget *>( it->get() );
		if( !child->getVisible() )
		{
			continue;
		}

		const Box3f &b = bounds[i++];

		V3f childOffset( 0 );
		if( !b.isEmpty() )
		{
			for( int a=0; a<3; a++ )
			{
				if( a==axis )
				{
					// Shift so the child's leading edge sits at the current offset.
					childOffset[a] = offset - ( m_direction==Increasing ? b.min[a] : b.max[a] );
				}
				else
				{
					// Cross-axis alignment.
					switch( m_alignment )
					{
						case Min :
							childOffset[a] = -size[a]/2.0f - b.min[a];
							break;
						case Centre :
							childOffset[a] = -b.center()[a];
							break;
						default :
							// max
							childOffset[a] = size[a]/2.0f - b.max[a];
					}
				}
			}
			offset += b.size()[axis] * ( m_direction==Increasing ? 1.0f : -1.0f );
		}
		offset += m_spacing * ( m_direction==Increasing ? 1.0f : -1.0f );

		M44f m; m.translate( childOffset );
		child->setTransform( m );
	}

	m_clean = true;
}