Beispiel #1
0
// Build, into m, a row-major 4x4 matrix rotating `radians` about the axis
// through points p1 and p2 (rotation about an arbitrary line).
// Composed as m = T(p1) * R * T(-p1): move the axis to the origin, rotate,
// move back.  The pure rotation R comes from the quaternion
// (w = cos(a/2), v = sin(a/2) * unit_axis) expanded to matrix form.
// NOTE: p1 and p2 must be distinct; a zero-length axis divides by zero.
void SetRotateMatrix(float* m, vertex p1, vertex p2, float radians)
{
	float R[16], T[16];
	float length=sqrt(	(p2.x-p1.x)*(p2.x-p1.x)+
						(p2.y-p1.y)*(p2.y-p1.y)+
						(p2.z-p1.z)*(p2.z-p1.z) );
	// Quaternion components: w = cos(angle/2), (a,b,c) = sin(angle/2) * unit axis.
	float cosa=cos(radians/2.0);
	float sina=sin(radians/2.0);
	float a=sina*(p2.x-p1.x)/length;
	float b=sina*(p2.y-p1.y)/length;
	float c=sina*(p2.z-p1.z)/length;
	SetTranslateMatrix2(m, -p1.x, -p1.y, -p1.z);	// m = T(-p1)
	SetTranslateMatrix2(T, p1.x, p1.y, p1.z);		// T = T(+p1)
	// Standard quaternion-to-matrix expansion (upper-left 3x3 of R).
	// The earlier redundant SetIdentityMatrix(R) before computing `length`
	// was removed: R is fully initialized right here before first use.
	SetIdentityMatrix(R);
	R[0]=1.0-2*b*b-2*c*c;
	R[1]=2*a*b-2*cosa*c;
	R[2]=2*a*c+2*cosa*b;
	R[4]=2*a*b+2*cosa*c;
	R[5]=1.0-2*a*a-2*c*c;
	R[6]=2*b*c-2*cosa*a;
	R[8]=2*a*c-2*cosa*b;
	R[9]=2*b*c+2*cosa*a;
	R[10]=1.0-2*a*a-2*b*b;
	Multiply44(R, m);	// m = R * T(-p1)
	Multiply44(T, m);	// m = T(p1) * R * T(-p1)
}
Beispiel #2
0
// Build, into m, a row-major rotation about the origin from per-axis Euler
// angles (radians.x / radians.y / radians.z), applied in z, then x, then y
// order via successive Multiply44 calls.
// Redundant stores removed: SetIdentityMatrix already writes the diagonal,
// so re-assigning fRx[0], fRx[15], fRy[5], fRy[15], fRz[10], fRz[15] to 1
// was dead code.
void SetOriginRotateMatrix(float* m, vertex radians)
{
	float fRx[16], fRy[16], fRz[16];
	SetIdentityMatrix(fRx);
	SetIdentityMatrix(fRy);
	SetIdentityMatrix(fRz);
	SetIdentityMatrix(m);

	// Rotation about X: only rows/cols 1..2 differ from identity.
	fRx[5]=cos(radians.x);
	fRx[6]=-1*sin(radians.x);
	fRx[9]=-1*fRx[6];
	fRx[10]=fRx[5];
	// Rotation about Y: only rows/cols 0 and 2 differ from identity.
	fRy[0]=cos(radians.y); 
	fRy[2]=sin(radians.y);
	fRy[8]=-1*fRy[2];
	fRy[10]=fRy[0];
	// Rotation about Z: only rows/cols 0..1 differ from identity.
	fRz[0]=cos(radians.z); 
	fRz[1]=-1*sin(radians.z);
	fRz[4]=-1*fRz[1];
	fRz[5]=fRz[0];
	// Must follow z/x/y order due to phantom handle design;
	Multiply44(fRz, m);
	Multiply44(fRx, m);
	Multiply44(fRy, m);
}
// add  Properties->Debugging ->environment  PATH = %PATH%; D:\VTK_bin\bin\Debug
// Construct the Kinect Fusion depth-sensor wrapper with default settings:
// 640x480 depth stream, a 2m x 1.5m x 2m reconstruction volume at 256
// voxels/m, default depth clipping thresholds, and identity camera/volume
// transforms.  Allocates the RGBX display buffer and the volume lock.
DepthSensor::DepthSensor()
    : mNuiSensor(NULL)
    , mNextDepthFrameEvent(INVALID_HANDLE_VALUE)
    , mDepthStreamHandle(INVALID_HANDLE_VALUE)
    , mDrawDepth(NULL)
    , mdepthImageResolution(NUI_IMAGE_RESOLUTION_640x480)
    , m_pVolume(NULL)
    , cDepthImagePixels(0)
    , m_pDepthImagePixelBuffer(NULL)
    , m_pDepthFloatImage(NULL)
    , m_pPointCloud(NULL)
    , m_pShadedSurface(NULL)
    , m_bMirrorDepthFrame(false)
    , m_bTranslateResetPoseByMinDepthThreshold(true)
    , m_cLostFrameCounter(0)
    , m_bTrackingFailed(false)
    , m_bAutoResetReconstructionWhenLost(false)
    , m_bAutoResetReconstructionOnTimeout(true)
    , m_fStartTime(0)
    , m_isInit(false)
    , m_saveMeshFormat(Stl)
    , filename()
{
    // Get the depth frame size from the NUI_IMAGE_RESOLUTION enum
    DWORD WIDTH = 0, HEIGHT = 0;
    NuiImageResolutionToSize(mdepthImageResolution, WIDTH, HEIGHT);
    cDepthWidth = WIDTH;
    cDepthHeight = HEIGHT;
    cDepthImagePixels = cDepthWidth*cDepthHeight;

    //create heap storage for depth pixel data in RGBX format
    m_depthRGBX = new BYTE[cDepthWidth*cDepthHeight*cBytesPerPixel];

    // Define a cubic Kinect Fusion reconstruction volume,
    // with the Kinect at the center of the front face and the volume directly in front of Kinect.
    reconstructionParams.voxelsPerMeter = 256;	// 1000mm / 256vpm = ~3.9mm/voxel
    reconstructionParams.voxelCountX = 512;		// 512 / 256vpm = 2m wide reconstruction
    reconstructionParams.voxelCountY = 384;		// Memory = 512*384*512 * 4bytes per voxel
    reconstructionParams.voxelCountZ = 512;		// Require 512MB GPU memory

    // These parameters are for optionally clipping the input depth image 
    m_fMinDepthThreshold = NUI_FUSION_DEFAULT_MINIMUM_DEPTH;   // min depth in meters
    m_fMaxDepthThreshold = NUI_FUSION_DEFAULT_MAXIMUM_DEPTH;    // max depth in meters

    // This parameter is the temporal averaging parameter for depth integration into the reconstruction
    m_cMaxIntegrationWeight = NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT;	// Reasonable for static scenes   

    // Start with identity poses; the real transforms are produced once
    // reconstruction begins / is reset.
    SetIdentityMatrix(m_worldToCameraTransform);
    SetIdentityMatrix(m_defaultWorldToVolumeTransform);

    m_cLastDepthFrameTimeStamp.QuadPart = 0;

    // Initialize synchronization objects
    InitializeCriticalSection(&m_lockVolume);
}
Beispiel #4
0
// Write a row-major 4x4 translation matrix for offset t into m.
// Only the last column differs from identity.
void SetTranslateMatrix(float* m, vertex t)
{
	SetIdentityMatrix(m);
	m[11] = t.z;
	m[7]  = t.y;
	m[3]  = t.x;
}
Beispiel #5
0
// Write a row-major 4x4 translation matrix into m from the individual
// x/y/z offsets (component-wise variant of SetTranslateMatrix).
void SetTranslateMatrix2(float* m, float x, float y, float z)
{
	SetIdentityMatrix(m);
	m[11] = z;
	m[7]  = y;
	m[3]  = x;
}
Beispiel #6
0
// Write a row-major 4x4 scale matrix into m; the per-axis scale factors
// occupy the diagonal, everything else stays identity.
void SetScaleMatrix(float* m, float x, float y, float z)
{
	SetIdentityMatrix(m);
	m[10] = z;
	m[5]  = y;
	m[0]  = x;
}
Beispiel #7
0
// Draw `texture` as a full-viewport 2D quad (two triangles), bypassing the
// depth test so it always covers the screen.  State is saved/restored via
// PushMatrix/PopMatrix; the vertex layout is 5 floats per vertex
// (x, y, z, u, v) with 6 vertices.
void Device_OpenGL::Draw2DTextureFullViewport(Texture* texture)
{
    DisableDepthTest();
    PushMatrix();
    SetIdentityMatrix();
    // Client-side vertex arrays are used below, so no VBO may stay bound.
    UnbindVBO(VBO::VBO_TARGET_ARRAY_BUFFER);
    //-------------------------------------------
    // Quad spans [-aspect, aspect] x [-1, 1] at z = -1 in the logical 2D space.
    float x = Screen::GetInstance()->GetRatio();
    float y = 1.0f;
    float z = -1.0f;
    float vertices[30] = { -x, y, z, 0.0f, 1.0f,
                           -x, -y, z, 0.0f, 0.0f,
                           x, -y, z, 1.0f, 0.0f,
                           -x, y, z, 0.0f, 1.0f,
                           x, -y, z, 1.0f, 0.0f,
                           x, y, z, 1.0f, 1.0f
                         };
    // Interleaved attributes: position at offset 0, UVs at offset 3 floats,
    // both with a stride of 5 floats.
    SetAttributePointer(ATTRIBUTE_FLOAT3_POSITION, vertices, sizeof(float) * 5);
    SetAttributePointer(ATTRIBUTE_FLOAT2_TEXCOORD_DIFFUSE, vertices + 3, sizeof(float) * 5);
    Device_Base::GetInstance()->UpdateVertexAttribPointer();
    Device_Base::GetInstance()->SetUniformTexture(UNIFORM_TEXTURE_DIFFUSE, texture);
    Device_Base::GetInstance()->SetUniformMatrix(UNIFORM_MATRIX_MODEL, Device_Base::GetInstance()->GetMatrixWorld());
    Device_Base::GetInstance()->SetUniformMatrix(UNIFORM_MATRIX_PROJECTION, m_MatrixProjection2DLogical);

    Device_Base::GetInstance()->DrawArray(Device_Base::PRIMITIVE_TRIANGLE_LIST, 0, 6);
    //-------------------------------------------
    EnableDepthTest();
    PopMatrix();

}
Beispiel #8
0
// Instantiate one physics constraint per cached ragdoll-constraint entry,
// copying the cached limit axes (with optional joint-friction scaling)
// and attaching child to parent within the ragdoll's constraint group.
static void RagdollAddConstraints( IPhysicsEnvironment *pPhysEnv, ragdoll_t &ragdoll, const ragdollparams_t &params, const cache_ragdollconstraint_t *pConstraints, int constraintCount )
{
	constraint_ragdollparams_t constraint;
	for ( int nCons = 0; nCons < constraintCount; nCons++ )
	{
		const cache_ragdollconstraint_t &cached = pConstraints[nCons];

		constraint.Defaults();
		V_memcpy( constraint.axes, cached.axes, sizeof(constraint.axes) );
		if ( params.jointFrictionScale > 0 )
		{
			// Scale the rotational friction on all three limit axes.
			for ( int nAxis = 0; nAxis < 3; nAxis++ )
			{
				constraint.axes[nAxis].torque *= params.jointFrictionScale;
			}
		}
		const int parent = cached.parentIndex;
		const int child = cached.childIndex;
		constraint.parentIndex = parent;
		constraint.childIndex = child;
		constraint.useClockwiseRotations = true;
		constraint.constraintToAttached = cached.constraintToAttached;
		// UNDONE: We could transform the constraint limit axes relative to the bone space
		// using this data.  Do we need that feature?
		SetIdentityMatrix( constraint.constraintToReference );
		ragdoll.list[child].pConstraint = pPhysEnv->CreateRagdollConstraint( ragdoll.list[child].pObject, ragdoll.list[parent].pObject, ragdoll.pGroup, constraint );
	}
}
// Renderable transform for this effect: identity rotation positioned at
// the current render origin.
// NOTE(review): returns a reference to a function-local static, so each
// call clobbers the previous result and this is not thread-safe —
// presumably fine for the single-threaded render path; verify.
const matrix3x4_t & C_EnvelopeFX::RenderableToWorldTransform()
{
	static matrix3x4_t mat;
	SetIdentityMatrix( mat );
	PositionMatrix( GetRenderOrigin(), mat );
	return mat;
}
Beispiel #10
0
// Renderable transform for the particle binding: identity rotation at the
// render origin.  Same static-buffer caveat as the other
// RenderableToWorldTransform overrides in this codebase: the returned
// reference is invalidated by the next call.
const matrix3x4_t &	CParticleEffectBinding::RenderableToWorldTransform()
{
	static matrix3x4_t mat;
	SetIdentityMatrix( mat );
	PositionMatrix( GetRenderOrigin(), mat );
	return mat;
}
// Attach an idle-event action to the sprite container that repeatedly
// translates the sprite down by 1 pixel per idle, wrapping between 1/4 and
// 3/4 of the track height (delta matrix + wraparound min/max bounds).
OSErr QTWired_AddWraparoundMatrixOnIdle (QTAtomContainer theContainer)
{
	MatrixRecord 	myMinMatrix, myMaxMatrix, myDeltaMatrix;
	long			myFlags = kActionFlagActionIsDelta | kActionFlagParameterWrapsAround;
	QTAtom			myActionAtom;
	OSErr			myErr = noErr;
	
	// Initialize min/max bound matrices so every element is unconstrained:
	// 0xffffffff (-1) as the minimum and 0x7fffffff (INT_MAX) as the maximum,
	// stored big-endian as the wire format requires.
	myMinMatrix.matrix[0][0] = myMinMatrix.matrix[0][1] = myMinMatrix.matrix[0][2] = EndianS32_NtoB(0xffffffff);
	myMinMatrix.matrix[1][0] = myMinMatrix.matrix[1][1] = myMinMatrix.matrix[1][2] = EndianS32_NtoB(0xffffffff);
	myMinMatrix.matrix[2][0] = myMinMatrix.matrix[2][1] = myMinMatrix.matrix[2][2] = EndianS32_NtoB(0xffffffff);

	myMaxMatrix.matrix[0][0] = myMaxMatrix.matrix[0][1] = myMaxMatrix.matrix[0][2] = EndianS32_NtoB(0x7fffffff);
	myMaxMatrix.matrix[1][0] = myMaxMatrix.matrix[1][1] = myMaxMatrix.matrix[1][2] = EndianS32_NtoB(0x7fffffff);
	myMaxMatrix.matrix[2][0] = myMaxMatrix.matrix[2][1] = myMaxMatrix.matrix[2][2] = EndianS32_NtoB(0x7fffffff);
	
	// Constrain only the vertical translation element [2][1]: wrap between
	// 1/4 and 3/4 of the track height, centered on the sprite.
	myMinMatrix.matrix[2][1] = EndianS32_NtoB(Long2Fix((1 * kSpriteTrackHeight / 4) - (kPenguinHeight / 2)));
	myMaxMatrix.matrix[2][1] = EndianS32_NtoB(Long2Fix((3 * kSpriteTrackHeight / 4) - (kPenguinHeight / 2)));

	// Per-idle delta: translate down by one pixel (fixed-point 1.0).
	// NOTE(review): the delta matrix is left in native byte order while the
	// min/max matrices are byte-swapped — presumably
	// WiredUtils_AddSpriteSetMatrixAction converts it; confirm.
	SetIdentityMatrix(&myDeltaMatrix);
	myDeltaMatrix.matrix[2][1] = Long2Fix(1);
	
	// change location
	myErr = WiredUtils_AddSpriteSetMatrixAction(theContainer, kParentAtomIsContainer, kQTEventIdle, 0, NULL, 0, 0, NULL, &myDeltaMatrix, &myActionAtom);
	if (myErr != noErr)
		goto bail;

	myErr = WiredUtils_AddActionParameterOptions(theContainer, myActionAtom, 1, myFlags, sizeof(myMinMatrix), &myMinMatrix, sizeof(myMaxMatrix), &myMaxMatrix);

bail:
	return(myErr);
}
Beispiel #12
0
	// Create the Kinect Fusion color reconstruction volume (2m x 1.5m x 2m at
	// 256 voxels/m, AMP processor) plus the working image frames used by the
	// processing pipeline.  ERROR_CHECK throws/aborts on failed HRESULTs.
	void initializeFusion()
	{
		// Set the reconstruction parameters
		reconstructionParameters.voxelsPerMeter = 256;
		reconstructionParameters.voxelCountX = 512;
		reconstructionParameters.voxelCountY = 384;
		reconstructionParameters.voxelCountZ = 512;

		// Create the reconstruction (camera pose starts at identity)
		SetIdentityMatrix( worldToCameraTransform );
		ERROR_CHECK( NuiFusionCreateColorReconstruction( &reconstructionParameters, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE::NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &reconstruction ) );

		// Set the camera parameters (normalized Kinect depth intrinsics)
		cameraParameters.focalLengthX = NUI_KINECT_DEPTH_NORM_FOCAL_LENGTH_X;
		cameraParameters.focalLengthY = NUI_KINECT_DEPTH_NORM_FOCAL_LENGTH_Y;
		cameraParameters.principalPointX = NUI_KINECT_DEPTH_NORM_PRINCIPAL_POINT_X;
		cameraParameters.principalPointY = NUI_KINECT_DEPTH_NORM_PRINCIPAL_POINT_Y;

		// Create the image frames (raw/smoothed depth, color, point cloud, shaded surface)
		ERROR_CHECK( NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE::NUI_FUSION_IMAGE_TYPE_FLOAT, depthWidth, depthHeight, &cameraParameters, &depthImageFrame ) );
		ERROR_CHECK( NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE::NUI_FUSION_IMAGE_TYPE_FLOAT, depthWidth, depthHeight, &cameraParameters, &smoothDepthImageFrame ) );
		ERROR_CHECK( NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE::NUI_FUSION_IMAGE_TYPE_COLOR, depthWidth, depthHeight, &cameraParameters, &colorImageFrame ) );
		ERROR_CHECK( NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE::NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, depthWidth, depthHeight, &cameraParameters, &pointCloudImageFrame ) );
		ERROR_CHECK( NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE::NUI_FUSION_IMAGE_TYPE_COLOR, depthWidth, depthHeight, &cameraParameters, &surfaceImageFrame ) );
		/*ERROR_CHECK( NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE::NUI_FUSION_IMAGE_TYPE_COLOR, depthWidth, depthHeight, &cameraParameters, &normalImageFrame ) );*/
	}
// Create the physics constraint for a single parsed ragdoll joint,
// rejecting self-referential joints and computing the child-to-parent bone
// transform across any bones collapsed out of the simulation.
// Note: `constraint` is an in-out parameter — indices may be invalidated
// (set to -1) and the torque axes are scaled in place.
static void RagdollAddConstraint( IPhysicsEnvironment *pPhysEnv, ragdoll_t &ragdoll, const ragdollparams_t &params, constraint_ragdollparams_t &constraint )
{
	// A joint connecting a bone to itself is malformed; neutralize it.
	if( constraint.childIndex == constraint.parentIndex )
	{
		DevMsg( 1, "Bogus constraint on ragdoll %s\n", params.pStudioHdr->pszName() );
		constraint.childIndex = -1;
		constraint.parentIndex = -1;
	}
	if ( constraint.childIndex >= 0 && constraint.parentIndex >= 0 )
	{
		Assert(constraint.childIndex<ragdoll.listCount);


		ragdollelement_t &childElement = ragdoll.list[constraint.childIndex];
		// save parent index
		childElement.parentIndex = constraint.parentIndex;

		if ( params.jointFrictionScale > 0 )
		{
			// Scale the rotational friction on all three limit axes.
			for ( int k = 0; k < 3; k++ )
			{
				constraint.axes[k].torque *= params.jointFrictionScale;
			}
		}
		// this parent/child pair is not usually a parent/child pair in the skeleton.  There
		// are often bones in between that are collapsed for simulation.  So we need to compute
		// the transform.
		Studio_CalcBoneToBoneTransform( params.pStudioHdr, ragdoll.boneIndex[constraint.childIndex], ragdoll.boneIndex[constraint.parentIndex], constraint.constraintToAttached );
		MatrixGetColumn( constraint.constraintToAttached, 3, childElement.originParentSpace );
		// UNDONE: We could transform the constraint limit axes relative to the bone space
		// using this data.  Do we need that feature?
		SetIdentityMatrix( constraint.constraintToReference );
		childElement.pConstraint = pPhysEnv->CreateRagdollConstraint( childElement.pObject, ragdoll.list[constraint.parentIndex].pObject, ragdoll.pGroup, constraint );
	}
}
// Renderable transform for a bullet: identity rotation at the render
// origin.  As with the sibling overrides, the returned reference aliases a
// function-local static and is overwritten by the next call.
const matrix3x4_t& CBulletManager::CBullet::RenderableToWorldTransform()
{
	static matrix3x4_t mat;
	SetIdentityMatrix( mat );
	PositionMatrix( GetRenderOrigin(), mat );
	return mat;
}
Beispiel #15
0
// Reset the Kinect Fusion reconstruction: restore the camera pose to
// identity and clear the accumulated volume (default world-to-volume
// transform is kept by passing nullptr).
inline void Kinect::reset()
{
    // Set Identity Matrix
    SetIdentityMatrix( worldToCameraTransform );

    // Reset Reconstruction
    ERROR_CHECK( reconstruction->ResetReconstruction( &worldToCameraTransform, nullptr ) );
}
Beispiel #16
0
/*
  call-seq: reset_transformations()
  
  Revert any transformations (scale, translate, rotate) performed on this track.
*/
static VALUE track_reset_transformations(VALUE obj)
{
  /* Overwrite the track's transformation matrix with the identity,
   * discarding any scale/translate/rotate previously applied.
   * The old GetTrackMatrix() call was dead code: SetIdentityMatrix()
   * rewrites every field of the MatrixRecord, so the fetched value was
   * never used. */
  MatrixRecord matrix;
  SetIdentityMatrix(&matrix);
  SetTrackMatrix(TRACK(obj), &matrix);
  return obj;
}
////////////////////////////////////////////////////////////////////////////////
// virtual
// Size the QuickTime movie box: start from the movie's natural bounds,
// apply any user-requested size / auto-scale-to-texture adjustment, clamp
// to the configured min/max, then install a movie matrix that scales to
// the final size and flips vertically (OpenGL's bottom-up image layout).
// `rect` is both input (natural bounds) and output (final 0,0-based rect).
bool LLMediaImplQuickTime::setMovieBoxEnhanced( Rect* rect )
{
	// get movie rect
	GetMovieNaturalBoundsRect( mMovieHandle, rect );

	int natural_width  = ( rect->right - rect->left );
	int natural_height = ( rect->bottom - rect->top );

	int width  = natural_width;
	int height = natural_height;
	
	// if the user has requested a specific size, use it:
	if ((mMediaRequestedWidth != 0) && (mMediaRequestedHeight != 0))
	{
		width = mMediaRequestedWidth;
		height = mMediaRequestedHeight;
	}

	// if the user has requested, resize media to exactly fit texture
	if (mAutoScaled)
	{
		width = LLMediaManager::textureWidthFromMediaWidth( width );
		height = LLMediaManager::textureHeightFromMediaHeight( height );
	}

	// make sure it falls in valid range
	if ( width < mMinWidth )
		width = mMinWidth;

	if ( width > mMaxWidth )
		width = mMaxWidth;

	if ( height < mMinHeight )
		height = mMinHeight;

	if ( height > mMaxHeight )
		height = mMaxHeight;

	
	// scale movie to fit rect and invert vertically to match opengl image format
	MatrixRecord transform;
	SetIdentityMatrix( &transform );	// transforms are additive so start from identity matrix
	double scaleX = (double) width / natural_width;
	double scaleY = -1.0 * (double) height / natural_height;	// negative Y = vertical flip
	double centerX = width / 2.0;
	double centerY = height / 2.0;
	ScaleMatrix( &transform, X2Fix ( scaleX ), X2Fix ( scaleY ), X2Fix ( centerX ), X2Fix ( centerY ) );
	SetMovieMatrix( mMovieHandle, &transform );

	// return the new rect
	rect->right = width;
	rect->bottom = height;
	rect->left = 0;
	rect->top = 0;

	return true;
}
Beispiel #18
0
// Row-major scale about an arbitrary center point, written out directly as
// the composition T(center) * S(x,y,z) * T(-center).
void SetScaleMatrix(float* m, float x, float y, float z, vertex center)
{
	SetIdentityMatrix(m);
	// Diagonal carries the per-axis scale factors.
	m[0]  = x;
	m[5]  = y;
	m[10] = z;
	// Translation column compensates so `center` stays fixed under the scale.
	m[3]  = (1 - x) * center.x;
	m[7]  = (1 - y) * center.y;
	m[11] = (1 - z) * center.z;
}
Beispiel #19
0
///
// Sets the eye (viewpoint) transformation matrix
///
// Build the world-to-eye viewing transformation from the viewpoint's
// spherical coordinates (theta, phi, rho — precomputed sines/cosines are
// read from view_point_ref).  Rows not assigned below keep their identity
// values from SetIdentityMatrix.
void SetViewTransformation(ViewPointRec *view_point_ref, Matrix  view_transformation){
	SetIdentityMatrix(view_transformation);
	view_transformation[0][0] = (-view_point_ref->sin_theta);
	view_transformation[0][1] = (-view_point_ref->cos_theta) * view_point_ref->cos_phi;
	view_transformation[0][2] = (-view_point_ref->cos_theta) * view_point_ref->sin_phi;
	view_transformation[1][0] = view_point_ref->cos_theta;
	view_transformation[1][1] = (-view_point_ref->sin_theta) * view_point_ref->cos_phi;
	view_transformation[1][2] = (-view_point_ref->sin_theta) * view_point_ref->sin_phi;
	view_transformation[2][1] = view_point_ref->sin_phi;
	view_transformation[2][2] = (-view_point_ref->cos_phi);
	// rho = distance from the eye to the origin along the view direction.
	view_transformation[3][2] = view_point_ref->rho;

}
Beispiel #20
0
//-----------------------------------------------------------------------------
// Purpose: 
// Input  : entityIndex - 
//			attachmentIndex - 
//			&transform - 
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Purpose: Matrix-output wrapper around the origin/angles overload.
//          Returns true with the attachment transform, or false with an
//          identity transform when the entity/attachment doesn't exist.
//-----------------------------------------------------------------------------
bool FX_GetAttachmentTransform( ClientEntityHandle_t hEntity, int attachmentIndex, matrix3x4_t &transform )
{
	Vector	origin;
	QAngle	angles;

	if ( !FX_GetAttachmentTransform( hEntity, attachmentIndex, &origin, &angles ) )
	{
		// Entity doesn't exist
		SetIdentityMatrix( transform );
		return false;
	}

	AngleMatrix( angles, origin, transform );
	return true;
}
Beispiel #21
0
/******************************************************************
* init_board
*******************************************************************/
// Allocate and initialize the board object: a static (non-rotating) quad
// of 4 vertices / 2 triangles with an identity model matrix.
// NOTE(review): calloc/malloc results are not checked for NULL.
// NOTE(review): color_buffer_data is sized num_vertx * 2 floats while the
// other per-vertex buffers use 3 — looks suspicious; confirm the intended
// color component count.
object_gl* init_board(){
	object_gl *board = calloc(1, sizeof(object_gl));
	board->num_vertx = 4;
	board->num_vectr = 2;
	board->vertx_per_vectr = 3;
	board->rotation_spd = 0;
	board->rotation_dir = 1;
	board->alpha = 0.75;
	board->vertx_buffer_data = malloc(board->num_vertx * 3 * sizeof(GLfloat));
	board->normal_buffer_data = malloc(board->num_vertx * board->vertx_per_vectr * sizeof(GLfloat));
	board->color_buffer_data = malloc(board->num_vertx * 2 * sizeof(GLfloat));
	board->index_buffer_data = malloc(board->num_vectr * board->vertx_per_vectr * sizeof(GLushort));
	SetIdentityMatrix(board->model_matrix);

	return board;
}
Beispiel #22
0
// Bind this sky mesh to its geometry and model, taking a reference on the
// model, and reset the render state (identity transform, fully opaque,
// first motion, frame 0).
// NOTE(review): pGeometry is stored without AddRef while pModel is
// ref-counted — presumably the geometry is owned elsewhere; verify.
BOOL CoSkyMesh::Initialize(CoGeometry* pGeometry,I3DModel* pModel,DWORD dwFlag)
{
	m_pGeometry = pGeometry;
	
	m_pModel = pModel;
	pModel->AddRef();
	
	m_dwRefIndex = pModel->Reference();
		
	SetIdentityMatrix(&m_matTransform);
	m_dwAlpha = 255;
	m_dwCurrentMotionIndex = 1;
	m_dwCurrentFrame = 0;
	
	return TRUE;
}
Beispiel #23
0
/// <summary>
/// Reset the reconstruction camera pose and clear the volume.
/// </summary>
/// <returns>S_OK on success, otherwise failure code</returns>
HRESULT CKinectFusion::ResetReconstruction()
{
    // Nothing to reset without a reconstruction volume.
    if (nullptr == m_pVolume)
    {
        return E_FAIL;
    }

    // Camera pose restarts at identity.
    SetIdentityMatrix(m_worldToCameraTransform);

    HRESULT hr = S_OK;

    // Translate the reconstruction volume location away from the world origin by an amount equal
    // to the minimum depth threshold. This ensures that some depth signal falls inside the volume.
    // If set false, the default world origin is set to the center of the front face of the
    // volume, which has the effect of locating the volume directly in front of the initial camera
    // position with the +Z axis into the volume along the initial camera direction of view.
    if (!m_bTranslateResetPoseByMinDepthThreshold)
    {
        hr = m_pVolume->ResetReconstruction(&m_worldToCameraTransform, nullptr);
    }
    else
    {
        Matrix4 worldToVolume = m_defaultWorldToVolumeTransform;

        // Shift the volume along Z by the smaller of the two depth
        // thresholds, converted from meters to voxels.
        float minDist = m_fMinDepthThreshold;
        if (m_fMaxDepthThreshold < minDist)
        {
            minDist = m_fMaxDepthThreshold;
        }
        worldToVolume.M43 -= (minDist * m_reconstructionParams.voxelsPerMeter);

        hr = m_pVolume->ResetReconstruction(&m_worldToCameraTransform, &worldToVolume);
    }

    // Tracking statistics start over after any reset attempt.
    m_cLostFrameCounter = 0;
    m_cFrameCounter = 0;

    if (FAILED(hr))
    {
        std::cout << "Failed to reset reconstruction." << std::endl;
    }
    else
    {
        m_bTrackingFailed = false;

        std::cout << "Reconstruction has been reset." << std::endl;
    }

    return hr;
}
Beispiel #24
0
// Transform from collision space to world space.  For entity-space bounds
// this is just the entity's own transform; otherwise it is a pure
// translation to the collision origin.
// A ring of four static matrices lets up to four returned references stay
// valid simultaneously (each call rotates to the next slot).
// NOTE(review): the static ring is not thread-safe — presumably called
// only from the main/game thread; verify.
const matrix3x4_t& CCollisionProperty::CollisionToWorldTransform() const
{
	static matrix3x4_t s_matTemp[4];
	static int s_nIndex = 0;

	matrix3x4_t &matResult = s_matTemp[s_nIndex];
	s_nIndex = (s_nIndex+1) & 0x3;

	if ( IsBoundsDefinedInEntitySpace() )
	{
		return m_pOuter->EntityToWorldTransform();
	}

	SetIdentityMatrix( matResult );
	MatrixSetColumn( GetCollisionOrigin(), 3, matResult );
	return matResult;
}
// Attachment lookup override: for the three hard-coded mount attachments
// (left/right gun build points and the third-person camera origin) a local
// offset is synthesized and transformed into world space; anything else
// falls through to the normal model attachment path.
bool C_WalkerStrider::GetAttachment( int iAttachment, matrix3x4_t &attachmentToWorld )
{
	//
	//
	// This is a TOTAL hack, but we don't have any nodes that work well at all for mounted guns.
	//
	//
	studiohdr_t *pStudioHdr = GetModelPtr( );
	// Attachment indices are 1-based here; reject anything out of range.
	if ( !pStudioHdr || iAttachment < 1 || iAttachment > pStudioHdr->numattachments )
	{
		return false;
	}

	Vector vLocalPos( 0, 0, 0 );
	mstudioattachment_t *pAttachment = pStudioHdr->pAttachment( iAttachment-1 );
	if ( stricmp( pAttachment->pszName(), "build_point_left_gun" ) == 0 )
	{
		vLocalPos.y = sideDist;
	}
	else if ( stricmp( pAttachment->pszName(), "build_point_right_gun" ) == 0 )
	{
		vLocalPos.y = -sideDist;
	}
	else if ( stricmp( pAttachment->pszName(), "ThirdPersonCameraOrigin" ) == 0 )
	{
		// Camera origin uses no lateral offset (only the crouch drop below).
	}
	else
	{
		// Ok, it's not one of our magical attachments. Use the regular attachment setup stuff.
		return BaseClass::GetAttachment( iAttachment, attachmentToWorld );
	}

	if ( m_bCrouched )
	{
		vLocalPos.z += downDist;
	}

	// Now build the output matrix.
	matrix3x4_t localMatrix;
	SetIdentityMatrix( localMatrix );
	PositionMatrix( vLocalPos, localMatrix );

	ConcatTransforms( EntityToWorldTransform(), localMatrix, attachmentToWorld );
	return true;
}
// Parse the ragdoll's vphysics key-values and populate `ragdoll`: one
// physics object per "solid" block (posed from the current bone matrices)
// and one constraint per "ragdollconstraint" block.  Bails out early,
// leaving an empty list, when there is no collide data or too many solids.
static void RagdollCreateObjects( IPhysicsCollision *pPhysCollision, IPhysicsEnvironment *pPhysEnv, IPhysicsSurfaceProps *pSurfaceDatabase, ragdoll_t &ragdoll, const ragdollparams_t &params )
{
	ragdoll.listCount = 0;
	ragdoll.pGroup = NULL;
	memset( ragdoll.list, 0, sizeof(ragdoll.list) );
	if ( !params.pCollide || params.pCollide->solidCount > RAGDOLL_MAX_ELEMENTS )
		return;

	IVPhysicsKeyParser *pParse = pPhysCollision->VPhysicsKeyParserCreate( params.pCollide->pKeyValues );
	ragdoll.pGroup = pPhysEnv->CreateConstraintGroup();
	while ( !pParse->Finished() )
	{
		const char *pBlock = pParse->GetCurrentBlockName();
		if ( !strcmpi( pBlock, "solid" ) )
		{
			solid_t solid;
			// collisions off by default
			pParse->ParseSolid( &solid, NULL );
			if ( solid.index >= 0 && solid.index < params.pCollide->solidCount)
			{
				// Solids are expected in index order so listCount tracks them.
				Assert( ragdoll.listCount == solid.index );
				int boneIndex = Studio_BoneIndexByName( params.pStudioHdr, solid.name );
				ragdoll.boneIndex[ragdoll.listCount] = boneIndex;

				if ( boneIndex >= 0 )
				{
					solid.params.rotInertiaLimit = 0.5;
					solid.params.pGameData = params.pGameData;
					int surfaceData = pSurfaceDatabase->GetSurfaceIndex( solid.surfaceprop );
					
					// Fall back to the default surface if the prop's is unknown.
					if ( surfaceData < 0 )
						surfaceData = pSurfaceDatabase->GetSurfaceIndex( "default" );

					solid.params.pName = params.pStudioHdr->name;
					ragdoll.list[ragdoll.listCount].pObject = pPhysEnv->CreatePolyObject( params.pCollide->solids[solid.index], surfaceData, vec3_origin, vec3_angle, &solid.params );
					// Pose the new object at the bone's current world transform.
					ragdoll.list[ragdoll.listCount].pObject->SetPositionMatrix( params.pCurrentBones[boneIndex], true );
					ragdoll.list[ragdoll.listCount].parentIndex = -1;

					ragdoll.listCount++;
				}
				else
				{
					Msg( "CRagdollProp::CreateObjects:  Couldn't Lookup Bone %s\n",
						solid.name );
				}
			}
		}
		else if ( !strcmpi( pBlock, "ragdollconstraint" ) )
		{
			constraint_ragdollparams_t constraint;
			pParse->ParseRagdollConstraint( &constraint, NULL );
			if ( constraint.childIndex >= 0 && constraint.parentIndex >= 0 )
			{
				Assert(constraint.childIndex<ragdoll.listCount);

				ragdollelement_t &childElement = ragdoll.list[constraint.childIndex];
				// save parent index
				childElement.parentIndex = constraint.parentIndex;

				if ( params.jointFrictionScale > 0 )
				{
					// Scale the rotational friction on all three limit axes.
					for ( int k = 0; k < 3; k++ )
					{
						constraint.axes[k].torque *= params.jointFrictionScale;
					}
				}
				// this parent/child pair is not usually a parent/child pair in the skeleton.  There
				// are often bones in between that are collapsed for simulation.  So we need to compute
				// the transform.
				Studio_CalcBoneToBoneTransform( params.pStudioHdr, ragdoll.boneIndex[constraint.childIndex], ragdoll.boneIndex[constraint.parentIndex], constraint.constraintToAttached );
				MatrixGetColumn( constraint.constraintToAttached, 3, childElement.originParentSpace );
				// UNDONE: We could transform the constraint limit axes relative to the bone space
				// using this data.  Do we need that feature?
				SetIdentityMatrix( constraint.constraintToReference );
				// Jointed pairs must not collide with each other.
				pPhysEnv->DisableCollisions( ragdoll.list[constraint.parentIndex].pObject, childElement.pObject );
				childElement.pConstraint = pPhysEnv->CreateRagdollConstraint( childElement.pObject, ragdoll.list[constraint.parentIndex].pObject, ragdoll.pGroup, constraint );
			}
		}
		else
		{
			pParse->SkipBlock();
		}
	}
	pPhysCollision->VPhysicsKeyParserDestroy( pParse );
}
// One-time scene setup: GL state, geometry buffers, shaders, lighting,
// material, projection/view matrices, and the initial model transforms.
// NOTE(review): fixed-function lighting (glLightfv/glMaterialfv) is set up
// alongside a shader program — whether the shaders consume this state
// cannot be determined from here; verify.
void Initialize(void)
{   
    /* Set background (clear) color to dark blue */ 
    glClearColor(0.0, 0.0, 0.4, 0.0);

    /* Enable depth testing */
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LESS);    
    
    /* Transform matrix for the bar in the middle*/
    
    /*copy objects into buffer*/
    memcpy(vertex_buffer_data1, vertex_octagon, sizeof(vertex_octagon));
    memcpy(vertex_buffer_data2, vertex_pyramid, sizeof(vertex_pyramid));
    memcpy(vertex_buffer_data3, vertex_mainBar, sizeof(vertex_mainBar));
    memcpy(vertex_buffer_data4, vertex_cube, sizeof(vertex_cube));    
    memcpy(vertex_buffer_data5, vertex_cube, sizeof(vertex_cube));
    memcpy(vertex_buffer_data6, vertex_cube, sizeof(vertex_cube));
    memcpy(vertex_buffer_data7, vertex_cube, sizeof(vertex_cube));
/*
    memcpy(color_buffer_data1, color_octagon, sizeof(color_octagon));
    memcpy(color_buffer_data2, color_pyramid, sizeof(color_pyramid));
    memcpy(color_buffer_data3, color_mainBar, sizeof(color_mainBar));
    memcpy(color_buffer_data4, color_cube, sizeof(color_cube));    
    memcpy(color_buffer_data5, color_cube, sizeof(color_cube));
    memcpy(color_buffer_data6, color_cube, sizeof(color_cube));
    memcpy(color_buffer_data7, color_cube, sizeof(color_cube));
*/
    memcpy(index_buffer_data1, index_octagon, sizeof(index_octagon));
    memcpy(index_buffer_data2, index_pyramid, sizeof(index_pyramid));
    memcpy(index_buffer_data3, index_mainBar, sizeof(index_mainBar));
    memcpy(index_buffer_data4, index_cube, sizeof(index_cube));
    memcpy(index_buffer_data5, index_cube, sizeof(index_cube));
    memcpy(index_buffer_data6, index_cube, sizeof(index_cube));
    memcpy(index_buffer_data7, index_cube, sizeof(index_cube));

    /* Setup vertex, color, and index buffer objects */
    SetupDataBuffers();

    /* Setup shaders and shader program */
    CreateShaderProgram();  

    /* Initialize matrices */
    SetIdentityMatrix(ProjectionMatrix);
    SetIdentityMatrix(ViewMatrix);
    int i = 0;
    for(;i<OBJECTS;i++) {
		SetIdentityMatrix(ModelMatrix[i]);
	}
	
	printf("DEBUG: adding light-source ...\n");

	/* Add lighting source to the code */
	
	glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);
	glEnable(GL_LIGHTING);
	glEnable(GL_COLOR_MATERIAL);
	glEnable(GL_LIGHT0); // adds first light-source to the scene

	GLfloat ambientLight[] = {0.2, 0.2, 0.2, 1.0};
	GLfloat diffuseLight[] = {0.8, 0.8, 0.8, 1.0};
	GLfloat specularLight[]= {1.0, 1.0, 1.0, 1.0};
	GLfloat positionLight[]= {5.0, 0.0, 0.0, 0.0};
	
	//glShadeModel(GL_SMOOTH);
	
	glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);
	glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight);
	glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight);
	glLightfv(GL_LIGHT0, GL_POSITION, positionLight);
	
	GLfloat black[] = {0.0, 0.0, 0.0, 1.0};
	GLfloat green[] = {0.0, 1.0, 0.0, 1.0};
	GLfloat white[] = {1.0, 1.0, 1.0, 1.0};
	
	glMaterialfv(GL_FRONT, GL_AMBIENT, green);
	glMaterialfv(GL_FRONT, GL_DIFFUSE, green);
	glMaterialfv(GL_FRONT, GL_SPECULAR, white);
	glMaterialf(GL_FRONT, GL_SHININESS, 60.0);
	
	printf("DEBUG: finished adding light-source\n");

    /* Set projection transform */
    float fovy = 45.0;
    float aspect = 1.0; 
    float nearPlane = 1.0; 
    float farPlane = 50.0;
    SetPerspectiveMatrix(fovy, aspect, nearPlane, farPlane, ProjectionMatrix);

    /* Set viewing transform */
    float camera_disp = -10.0;
    SetTranslation(0.0, 0.0, camera_disp, ViewMatrix);

    /* Translate and rotate cube */
    SetTranslation(0, 0, 0, TranslateOrigin);
    SetRotationX(0.0, RotateX);
    SetRotationZ(0.0, RotateZ);	

    /* Translate down */	
    SetTranslation(0, -sqrtf(sqrtf(2.0) * 1.0), 0, TranslateDown);

    /* Initial transformation matrix */
    MultiplyMatrix(RotateX, TranslateOrigin, InitialTransform);
    MultiplyMatrix(RotateZ, InitialTransform, InitialTransform);
}
// Write the supplied sprite properties into the sprite atom container.
// Every parameter is optional (NULL = leave that property untouched); each
// property atom is inserted if absent or updated in place if present, and
// all values are byte-swapped to big-endian as the container format
// requires.  On any error all children of theSprite are removed.
// NOTE(review): the layer lookup/insert uses a literal 0 parent while the
// other properties use kParentAtomIsContainer — presumably the constant is
// 0 so they are equivalent; verify.
OSErr SpriteUtils_SetSpriteData (QTAtomContainer theSprite, Point *theLocation, short *theVisible, short *theLayer, short *theImageIndex, ModifierTrackGraphicsModeRecord *theGraphicsMode, StringPtr theSpriteName, QTAtomContainer theActionAtoms)
{
	QTAtom				myPropertyAtom;
	OSErr				myErr = noErr;
	
	if (theSprite == NULL)
		return(paramErr);
		
	// set the sprite location data
	if (theLocation != NULL) {
		MatrixRecord	myMatrix;
		
		// Identity matrix with the location packed into the translation
		// elements as 16.16 fixed point.
		SetIdentityMatrix(&myMatrix);
		myMatrix.matrix[2][0] = ((long)theLocation->h << 16);
		myMatrix.matrix[2][1] = ((long)theLocation->v << 16);
		EndianUtils_MatrixRecord_NtoB(&myMatrix);

		myPropertyAtom = QTFindChildByIndex(theSprite, kParentAtomIsContainer, kSpritePropertyMatrix, 1, NULL);
		if (myPropertyAtom == 0)
			myErr = QTInsertChild(theSprite, kParentAtomIsContainer, kSpritePropertyMatrix, 1, 0, sizeof(MatrixRecord), &myMatrix, NULL);
		else
			myErr = QTSetAtomData(theSprite, myPropertyAtom, sizeof(MatrixRecord), &myMatrix);
			
		if (myErr != noErr)
			goto bail;
	}
	
	// set the sprite visibility state
	if (theVisible != NULL) {
		short 			myVisible = *theVisible;
		
		myVisible = EndianS16_NtoB(myVisible);
		
		myPropertyAtom = QTFindChildByIndex(theSprite, kParentAtomIsContainer, kSpritePropertyVisible, 1, NULL);
		if (myPropertyAtom == 0)
			myErr = QTInsertChild(theSprite, kParentAtomIsContainer, kSpritePropertyVisible, 1, 0, sizeof(short), &myVisible, NULL);
		else
			myErr = QTSetAtomData(theSprite, myPropertyAtom, sizeof(short), &myVisible);
			
		if (myErr != noErr)
			goto bail;
	}
	
	// set the sprite layer
	if (theLayer != NULL) {
		short 			myLayer = *theLayer;
		
		myLayer = EndianS16_NtoB(myLayer);

		myPropertyAtom = QTFindChildByIndex(theSprite, 0, kSpritePropertyLayer, 1, NULL);
		if (myPropertyAtom == 0)
			myErr = QTInsertChild(theSprite, 0, kSpritePropertyLayer, 1, 0, sizeof(short), &myLayer, NULL);
		else
			myErr = QTSetAtomData(theSprite, myPropertyAtom, sizeof(short), &myLayer);
			
		if (myErr != noErr)
			goto bail;
	}
	
	// set the sprite image index
	if (theImageIndex != NULL) {
		short 			myImageIndex = *theImageIndex;

		myImageIndex = EndianS16_NtoB(myImageIndex);
		
		myPropertyAtom = QTFindChildByIndex(theSprite, kParentAtomIsContainer, kSpritePropertyImageIndex, 1, NULL);
		if (myPropertyAtom == 0)
			myErr = QTInsertChild(theSprite, kParentAtomIsContainer, kSpritePropertyImageIndex, 1, 0, sizeof(short), &myImageIndex, NULL);
		else
			myErr = QTSetAtomData(theSprite, myPropertyAtom, sizeof(short), &myImageIndex);
			
		if (myErr != noErr)
			goto bail;
	}
	
	// set the sprite graphics mode
	if (theGraphicsMode != NULL) {
		ModifierTrackGraphicsModeRecord		myGraphicsMode;
		
		myGraphicsMode.graphicsMode = EndianU32_NtoB(theGraphicsMode->graphicsMode);
		myGraphicsMode.opColor.red = EndianU16_NtoB(theGraphicsMode->opColor.red);
		myGraphicsMode.opColor.green = EndianU16_NtoB(theGraphicsMode->opColor.green);
		myGraphicsMode.opColor.blue = EndianU16_NtoB(theGraphicsMode->opColor.blue);

		myPropertyAtom = QTFindChildByIndex(theSprite, kParentAtomIsContainer, kSpritePropertyGraphicsMode, 1, NULL);
		if (myPropertyAtom == 0)
			myErr = QTInsertChild(theSprite, kParentAtomIsContainer, kSpritePropertyGraphicsMode, 1, 0, sizeof(myGraphicsMode), &myGraphicsMode, NULL);
		else
			myErr = QTSetAtomData(theSprite, myPropertyAtom, sizeof(myGraphicsMode), &myGraphicsMode);
			
		if (myErr != noErr)
			goto bail;
	}
	
	// set the sprite name
	if (theSpriteName != NULL) {
		QTAtom 		mySpriteNameAtom;
		
		// theSpriteName is a Pascal string: length byte + characters.
		mySpriteNameAtom = QTFindChildByIndex(theSprite, kParentAtomIsContainer, kSpriteNameAtomType, 1, NULL);
		if (mySpriteNameAtom == 0)
			myErr = QTInsertChild(theSprite, kParentAtomIsContainer, kSpriteNameAtomType, 1, 0, theSpriteName[0] + 1, theSpriteName, NULL);
		else
			myErr = QTSetAtomData(theSprite, mySpriteNameAtom, theSpriteName[0] + 1, theSpriteName);

		if (myErr != noErr)
			goto bail;
	}
	
	// set the action atoms
	if (theActionAtoms != NULL)
		myErr = QTInsertChildren(theSprite, kParentAtomIsContainer, theActionAtoms);
	
bail:
	if ((myErr != noErr) && (theSprite != NULL))
		QTRemoveChildren(theSprite, 0);

	return(myErr);
}
Beispiel #29
0
// Reset the Kinect Fusion reconstruction volume and restart tracking from an
// identity camera pose.
//
// @param pReconstruction  Reconstruction volume to reset (must not be null).
// @param pMat             Camera pose matrix; overwritten with identity and
//                         passed to the SDK as the new world-to-camera transform.
void ResetReconstruction( INuiFusionReconstruction* pReconstruction, Matrix4* pMat )
{
	// Guard against null arguments instead of dereferencing them.
	if( pReconstruction == nullptr || pMat == nullptr ){
		std::cerr << "Error : ResetReconstruction() called with null argument" << std::endl;
		return;
	}

	std::cout << "Reset Reconstruction" << std::endl;
	SetIdentityMatrix( *pMat );

	// ResetReconstruction returns an HRESULT; the original code ignored it,
	// silently swallowing reset failures. Report it so callers can see why
	// tracking never recovers.
	HRESULT hr = pReconstruction->ResetReconstruction( pMat, nullptr );
	if( FAILED( hr ) ){
		std::cerr << "Error : INuiFusionReconstruction::ResetReconstruction()" << std::endl;
	}
}
Beispiel #30
0
// Entry point: acquires depth frames from a Kinect v2 sensor, feeds them into a
// Kinect Fusion reconstruction volume, and displays the raw depth plus shaded
// surface/normal renderings in OpenCV windows until ESC is pressed ('r' resets
// the reconstruction).
//
// NOTE(review): every early `return -1` below leaks the COM interfaces and
// NUI_FUSION_IMAGE_FRAMEs acquired up to that point; only the normal exit path
// releases them. Acceptable for a sample, but worth RAII wrappers in real code.
int _tmain( int argc, _TCHAR* argv[] )
{
	cv::setUseOptimized( true );

	// Sensor
	IKinectSensor* pSensor;
	HRESULT hResult = S_OK;
	hResult = GetDefaultKinectSensor( &pSensor );
	if( FAILED( hResult ) ){
		std::cerr << "Error : GetDefaultKinectSensor" << std::endl;
		return -1;
	}

	hResult = pSensor->Open();
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::Open()" << std::endl;
		return -1;
	}

	// Source
	IDepthFrameSource* pDepthSource;
	hResult = pSensor->get_DepthFrameSource( &pDepthSource );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_DepthFrameSource()" << std::endl;
		return -1;
	}

	// Reader
	IDepthFrameReader* pDepthReader;
	hResult = pDepthSource->OpenReader( &pDepthReader );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::OpenReader()" << std::endl;
		return -1;
	}

	// Description
	IFrameDescription* pDescription;
	hResult = pDepthSource->get_FrameDescription( &pDescription );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IDepthFrameSource::get_FrameDescription()" << std::endl;
		return -1;
	}

	int width = 0;
	int height = 0;
	pDescription->get_Width( &width ); // 512
	pDescription->get_Height( &height ); // 424
	unsigned int bufferSize = width * height * sizeof( unsigned short );

	// bufferMat: 16-bit depth in millimeters; depthMat: 8-bit visualization.
	cv::Mat bufferMat( height, width, CV_16UC1 );
	cv::Mat depthMat( height, width, CV_8UC1 );
	cv::namedWindow( "Depth" );

	// Coordinate Mapper
	ICoordinateMapper* pCoordinateMapper;
	hResult = pSensor->get_CoordinateMapper( &pCoordinateMapper );
	if( FAILED( hResult ) ){
		std::cerr << "Error : IKinectSensor::get_CoordinateMapper()" << std::endl;
		return -1;
	}

	// Create Reconstruction
	// Volume: 256 voxels/m, 512x384x512 voxels => 2m x 1.5m x 2m of world space.
	NUI_FUSION_RECONSTRUCTION_PARAMETERS reconstructionParameter;
	reconstructionParameter.voxelsPerMeter = 256;
	reconstructionParameter.voxelCountX = 512;
	reconstructionParameter.voxelCountY = 384;
	reconstructionParameter.voxelCountZ = 512;

	Matrix4 worldToCameraTransform;
	SetIdentityMatrix( worldToCameraTransform );
	INuiFusionReconstruction* pReconstruction;
	// -1 selects the default AMP (GPU) device.
	hResult = NuiFusionCreateReconstruction( &reconstructionParameter, NUI_FUSION_RECONSTRUCTION_PROCESSOR_TYPE_AMP, -1, &worldToCameraTransform, &pReconstruction );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateReconstruction()" << std::endl;
		return -1;
	}

	// Create Image Frame
	// Depth
	NUI_FUSION_IMAGE_FRAME* pDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// SmoothDepth
	NUI_FUSION_IMAGE_FRAME* pSmoothDepthFloatImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_FLOAT, width, height, nullptr, &pSmoothDepthFloatImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( FLOAT )" << std::endl;
		return -1;
	}

	// Point Cloud
	NUI_FUSION_IMAGE_FRAME* pPointCloudImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_POINT_CLOUD, width, height, nullptr, &pPointCloudImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( POINT_CLOUD )" << std::endl;
		return -1;
	}

	// Surface
	NUI_FUSION_IMAGE_FRAME* pSurfaceImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pSurfaceImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	// Normal
	NUI_FUSION_IMAGE_FRAME* pNormalImageFrame;
	hResult = NuiFusionCreateImageFrame( NUI_FUSION_IMAGE_TYPE_COLOR, width, height, nullptr, &pNormalImageFrame );
	if( FAILED( hResult ) ){
		std::cerr << "Error : NuiFusionCreateImageFrame( COLOR )" << std::endl;
		return -1;
	}

	cv::namedWindow( "Surface" );
	cv::namedWindow( "Normal" );

	while( 1 ){
		// Frame
		IDepthFrame* pDepthFrame = nullptr;
		hResult = pDepthReader->AcquireLatestFrame( &pDepthFrame );

		if( SUCCEEDED( hResult ) ){
			// NOTE(review): this repoints bufferMat.data at the frame's internal
			// buffer (zero-copy); after SafeRelease(pDepthFrame) below that
			// pointer dangles. It is only read before the release here, and the
			// Mat's own allocation is still freed by its refcounted header, but
			// this is fragile — confirm intent before reusing the pattern.
			hResult = pDepthFrame->AccessUnderlyingBuffer( &bufferSize, reinterpret_cast<UINT16**>( &bufferMat.data ) );
			if( SUCCEEDED( hResult ) ){
				// Map 0..8000mm inversely onto 255..0 (near = bright) for display.
				bufferMat.convertTo( depthMat, CV_8U, -255.0f / 8000.0f, 255.0f );
				hResult = pReconstruction->DepthToDepthFloatFrame( reinterpret_cast<UINT16*>( bufferMat.data ), width * height * sizeof( UINT16 ), pDepthFloatImageFrame, NUI_FUSION_DEFAULT_MINIMUM_DEPTH/* 0.5[m] */, NUI_FUSION_DEFAULT_MAXIMUM_DEPTH/* 8.0[m] */, true );
				if( FAILED( hResult ) ){
					std::cerr << "Error :INuiFusionReconstruction::DepthToDepthFloatFrame()" << std::endl;
					return -1;
				}
			}
		}
		SafeRelease( pDepthFrame );

		// Smoothing Depth Image Frame (1 pass, 0.04m edge threshold)
		hResult = pReconstruction->SmoothDepthFloatFrame( pDepthFloatImageFrame, pSmoothDepthFloatImageFrame, 1, 0.04f );
		if( FAILED( hResult ) ){
			std::cerr << "Error :INuiFusionReconstruction::SmoothDepthFloatFrame" << std::endl;
			return -1;
		}

		// Reconstruction Process: align the new frame and integrate it into the volume.
		pReconstruction->GetCurrentWorldToCameraTransform( &worldToCameraTransform );
		hResult = pReconstruction->ProcessFrame( pSmoothDepthFloatImageFrame, NUI_FUSION_DEFAULT_ALIGN_ITERATION_COUNT, NUI_FUSION_DEFAULT_INTEGRATION_WEIGHT, nullptr, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			// NOTE(review): errorCount is never cleared on success, so the reset
			// fires after 100 cumulative (not consecutive) tracking failures —
			// presumably consecutive was intended; verify.
			static int errorCount = 0;
			errorCount++;
			if( errorCount >= 100 ) {
				errorCount = 0;
				ResetReconstruction( pReconstruction, &worldToCameraTransform );
			}
		}

		// Calculate Point Cloud by ray-casting the volume from the current pose.
		hResult = pReconstruction->CalculatePointCloud( pPointCloudImageFrame, &worldToCameraTransform );
		if( FAILED( hResult ) ){
			std::cerr << "Error : CalculatePointCloud" << std::endl;
			return -1;
		}

		// Shading Point Cloud: worldToBGRTransform maps world coordinates into
		// the volume-normalized [0,1] range so position can be rendered as color
		// (0.5 offsets center x/y on the volume).
		Matrix4 worldToBGRTransform = { 0.0f };
		worldToBGRTransform.M11 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountX;
		worldToBGRTransform.M22 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountY;
		worldToBGRTransform.M33 = reconstructionParameter.voxelsPerMeter / reconstructionParameter.voxelCountZ;
		worldToBGRTransform.M41 = 0.5f;
		worldToBGRTransform.M42 = 0.5f;
		worldToBGRTransform.M43 = 0.0f;
		worldToBGRTransform.M44 = 1.0f;

		hResult = NuiFusionShadePointCloud( pPointCloudImageFrame, &worldToCameraTransform, &worldToBGRTransform, pSurfaceImageFrame, pNormalImageFrame );
		if( FAILED( hResult ) ){
			std::cerr << "Error : NuiFusionShadePointCloud" << std::endl;
			return -1;
		}

		// Wrap the fusion frame buffers as BGRA Mats without copying (non-owning views).
		cv::Mat surfaceMat( height, width, CV_8UC4, pSurfaceImageFrame->pFrameBuffer->pBits );
		cv::Mat normalMat( height, width, CV_8UC4, pNormalImageFrame->pFrameBuffer->pBits );

		cv::imshow( "Depth", depthMat );
		cv::imshow( "Surface", surfaceMat );
		cv::imshow( "Normal", normalMat );

		// ESC quits; 'r' manually resets the reconstruction volume.
		int key = cv::waitKey( 30 );
		if( key == VK_ESCAPE ){
			break;
		}
		else if( key == 'r' ){
			ResetReconstruction( pReconstruction, &worldToCameraTransform );
		}
	}

	// Normal-exit cleanup (see leak note above for the error paths).
	SafeRelease( pDepthSource );
	SafeRelease( pDepthReader );
	SafeRelease( pDescription );
	SafeRelease( pCoordinateMapper );
	SafeRelease( pReconstruction );
	NuiFusionReleaseImageFrame( pDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pSmoothDepthFloatImageFrame );
	NuiFusionReleaseImageFrame( pPointCloudImageFrame );
	NuiFusionReleaseImageFrame( pSurfaceImageFrame );
	NuiFusionReleaseImageFrame( pNormalImageFrame );
	if( pSensor ){
		pSensor->Close();
	}
	SafeRelease( pSensor );
	cv::destroyAllWindows();

	return 0;
}