/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage relevant OS events; the user has
				access to these events through an abstraction layer provided
				by PVRShell.
******************************************************************************/
bool OGLES2IntroducingPOD::RenderScene()
{
	// Clear the color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Use shader program
	glUseProgram(m_ShaderProgram.uiId);

	/*
		Calculates the frame number to animate in a time-based manner.
		Uses the shell function PVRShellGetTime() to get the time in milliseconds.
	*/
	int iTime = PVRShellGetTime();
	int iDeltaTime = iTime - m_iTimePrev;
	m_iTimePrev	= iTime;
	m_fFrame += (float)iDeltaTime * g_fDemoFrameRate;
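	// Wrap the frame counter so the animation loops once the last frame of the POD scene is reached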
	if (m_fFrame > m_Scene.nNumFrame - 1) m_fFrame = 0;

	// Sets the scene animation to this frame
	m_Scene.SetFrame(m_fFrame);

	/*
		Get the direction of the first light from the scene.
	*/
	PVRTVec4 vLightDirection;
	vLightDirection = m_Scene.GetLightDirection(0);
	// For direction vectors, w should be 0
	vLightDirection.w = 0.0f;

	/*
		Set up the view and projection matrices from the camera
	*/
	PVRTMat4 mView, mProjection;
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Setup the camera

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;
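	// e.g. in a scene with 3 mesh nodes and 1 light, camera 0 corresponds to pNode[4], whose nIdx indexes into pCamera[]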

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

	// We can build the model view matrix from the camera position, target and an up vector.
	// For this we use PVRTMat4::LookAtRH()
	mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);
	mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

	/*
		A scene is composed of nodes. There are 3 types of nodes:
		- MeshNodes:
			each references a mesh in pMesh[]. These nodes come first in the
			pNode[] array and there are nNumMeshNode of them. This allows the
			.pod format to instantiate the same mesh several times with
			different attributes.
		- lights
		- cameras
		To draw a scene, go through all the MeshNodes and draw the referenced meshes.
	*/
	for (unsigned int i = 0; i < m_Scene.nNumMeshNode; ++i)
	{
		SPODNode& Node = m_Scene.pNode[i];

		// Get the node model matrix
		PVRTMat4 mWorld;
		mWorld = m_Scene.GetWorldMatrix(Node);

		// Pass the model-view-projection matrix (MVP) to the shader to transform the vertices
		PVRTMat4 mModelView, mMVP;
		mModelView = mView * mWorld;
		mMVP = mProjection * mModelView;
		glUniformMatrix4fv(m_ShaderProgram.uiMVPMatrixLoc, 1, GL_FALSE, mMVP.f);

		// Pass the light direction in model space to the shader
		PVRTVec4 vLightDir;
		vLightDir = mWorld.inverse() * vLightDirection;
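		// Multiplying by the inverse world matrix takes the world-space direction into
		// this node's model space; since w is 0, the matrix's translation has no effect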

		PVRTVec3 vLightDirModel = *(PVRTVec3*)&vLightDir;
		vLightDirModel.normalize();

		glUniform3fv(m_ShaderProgram.uiLightDirLoc, 1, &vLightDirModel.x);

		// Load the correct texture using our texture lookup table
		GLuint uiTex = 0;

		if(Node.nIdxMaterial != -1)
			uiTex = m_puiTextureIDs[Node.nIdxMaterial];

		glBindTexture(GL_TEXTURE_2D, uiTex);

		/*
			Now that the model-view matrix is set and the materials ready,
			call another function to actually draw the mesh.
		*/
		DrawMesh(i);
	}

	// Displays the demo name using the tools. For a detailed explanation, see the training course IntroducingPVRTools
	m_Print3D.DisplayDefaultTitle("IntroducingPOD", "", ePVRTPrint3DLogoIMG);
	m_Print3D.Flush();

	return true;
}
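
/*
	DrawMesh() is called above but its implementation is not listed in this
	example. The sketch below shows, under stated assumptions, what a minimal
	indexed VBO draw looks like in OpenGL ES 2.0. The attribute location
	VERTEX_ARRAY and the uiVbo/uiIndexVbo/i32NumFaces parameters are
	illustrative assumptions, not members of the class above; the real
	DrawMesh() would typically read the vertex layout from the loaded POD mesh.
*/
void DrawMeshSketch(GLuint uiVbo, GLuint uiIndexVbo, int i32NumFaces)
{
	// Assumed vertex attribute location, bound before linking the shader program
	const GLuint VERTEX_ARRAY = 0;

	// Bind the vertex and index buffers for this mesh
	glBindBuffer(GL_ARRAY_BUFFER, uiVbo);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, uiIndexVbo);

	// Describe the position attribute: 3 floats per vertex, tightly packed
	glEnableVertexAttribArray(VERTEX_ARRAY);
	glVertexAttribPointer(VERTEX_ARRAY, 3, GL_FLOAT, GL_FALSE, 0, 0);

	// Draw the mesh as an indexed triangle list
	glDrawElements(GL_TRIANGLES, i32NumFaces * 3, GL_UNSIGNED_SHORT, 0);

	// Unbind to leave a clean state
	glDisableVertexAttribArray(VERTEX_ARRAY);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}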
Example #2
/*!****************************************************************************
 @Function		InitView
 @Return		bool		true if no error occurred
 @Description	Code in InitView() will be called by PVRShell upon
				initialization or after a change in the rendering context.
				Used to initialize variables that are dependent on the rendering
				context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES2FilmTV::InitView()
{
	CPVRTString ErrorStr;

	//	Initialize VBO data
	if(!LoadVbos(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Load textures
	if(!LoadTextures(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Load and compile the shaders & link programs
	if(!LoadShaders(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Initialize Print3D
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	//Set OpenGL ES render states needed for this demo

	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);

	glEnable(GL_DEPTH_TEST);

	glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

	// Find the largest square power of two texture that fits into the viewport
	m_i32TexSize = 1;
	int iSize = PVRT_MIN(PVRShellGet(prefWidth), PVRShellGet(prefHeight));
	while (m_i32TexSize * 2 < iSize) m_i32TexSize *= 2;
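	// e.g. for a 640x480 window, iSize is 480 and the loop leaves m_i32TexSize at 256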

	// Get the currently bound frame buffer object. On most platforms this just gives 0.
	glGetIntegerv(GL_FRAMEBUFFER_BINDING, &m_i32OriginalFB);

	for(int i = 0; i < 2; ++i)
	{
		// Create texture for the FBO
		glGenTextures(1, &m_uiTexture[i]);
		glBindTexture(GL_TEXTURE_2D, m_uiTexture[i]);
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_i32TexSize, m_i32TexSize, 0, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, 0);
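		// A 16-bit RGB565 texture keeps the render target's memory footprint small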

		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

		// Create FBO
		glGenFramebuffers(1, &m_uiFbo[i]);
		glBindFramebuffer(GL_FRAMEBUFFER, m_uiFbo[i]);
		glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_uiTexture[i], 0);

		glGenRenderbuffers(1, &m_uiDepthBuffer[i]);
		glBindRenderbuffer(GL_RENDERBUFFER, m_uiDepthBuffer[i]);

		glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, m_i32TexSize, m_i32TexSize);
		glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_uiDepthBuffer[i]);

        // Check that our FBO creation was successful
        GLuint uStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);

        if(uStatus != GL_FRAMEBUFFER_COMPLETE)
        {
            m_bFBOsCreated = false;
            PVRShellOutputDebug("ERROR: Failed to initialise FBO");
            break;
        }

		// Clear the colour buffer for this FBO
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	}

	glBindFramebuffer(GL_FRAMEBUFFER, m_i32OriginalFB);

	// Setup the main camera
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

	m_View = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	PVRTMat4 mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);
	m_ViewProjection = mProjection * m_View;

	// Check to see if the GL_EXT_discard_framebuffer extension is supported
    if(m_bFBOsCreated && (m_bDiscard = CPVRTgles2Ext::IsGLExtensionSupported("GL_EXT_discard_framebuffer")) != false)
	{
		m_Extensions.LoadExtensions();
		m_bDiscard = m_Extensions.glDiscardFramebufferEXT != 0;
	}
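
	/*
		If the extension is present, the depth attachment of each FBO can be
		discarded once a render-to-texture pass has finished, hinting to the
		driver that its contents do not need to be written back to memory.
		A minimal sketch, assuming m_Extensions was loaded as above:

			const GLenum aDiscards[] = { GL_DEPTH_ATTACHMENT };
			m_Extensions.glDiscardFramebufferEXT(GL_FRAMEBUFFER, 1, aDiscards);
	*/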

	return true;
}
Example #3
void CGroundLine::SetParameters(
	const Vector &vStart, 
	const Vector &vEnd, 
	const Vector &vStartColor,	// Color values 0-1
	const Vector &vEndColor,
	float alpha,
	float lineWidth
	)
{
	m_vStart = vStart;
	m_vEnd = vEnd;
	m_vStartColor = vStartColor;
	m_vEndColor = vEndColor;
	m_Alpha = alpha;
	m_LineWidth = lineWidth;

	Vector vTo( vEnd.x - vStart.x, vEnd.y - vStart.y, 0 );
	float flXYLen = vTo.Length();

	// Recalculate our segment list.
	unsigned int nSteps = (int)flXYLen / XY_PER_SEGMENT;
	nSteps = clamp( nSteps, 8, MAX_GROUNDLINE_SEGMENTS ) & ~1;
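	// clamp() keeps the segment count within [8, MAX_GROUNDLINE_SEGMENTS] and "& ~1"
	// rounds it down to an even number, so the every-other-point scheme below works out.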
	unsigned int nMaxSteps = nSteps / 2;

	// First generate the sequence. Only every other point is generated here so that
	// fixup points can be inserted later to prevent the line from crossing world geometry.
	Vector pt[MAX_GROUNDLINE_SEGMENTS];
	Vector vStep = (Vector(m_vEnd[0], m_vEnd[1], 0) - Vector(m_vStart[0], m_vStart[1], 0)) / (nMaxSteps-1);

	pt[0] = FindBestSurfacePoint(m_vStart);

	unsigned int i;
	for(i=1; i < nMaxSteps; i++)
		pt[i<<1] = FindBestSurfacePoint(pt[(i-1)<<1] + vStep);
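	// The generated points occupy the even indices; the odd indices are left free
	// for the fixup points that may be inserted below.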


	CBitVec<MAX_GROUNDLINE_SEGMENTS> pointsUsed;
	pointsUsed.ClearAll();

	// Now try to make sure they don't intersect the geometry.
	for(i=0; i < nMaxSteps-1; i++)
	{
		Vector &a = pt[i<<1];
		Vector &b = pt[(i+1)<<1];

		trace_t trace;
		UTIL_TraceLine(a, b, MASK_SOLID_BRUSHONLY, NULL, COLLISION_GROUP_NONE, &trace);
		if(trace.fraction < 1)
		{
			int cIndex = (i<<1)+1;
			Vector &c = pt[cIndex];

			// Ok, this line segment intersects the world. Do a binary search to try to find the
			// point of intersection.
			Vector hi, lo;
			if(a.z < b.z)
			{
				hi = b;
				lo = a;
			}
			else
			{
				hi = a;
				lo = b;
			}

			if(BinSearchSegments(lo, hi, Vector(lo[0],lo[1],hi[2]), 15, &c))
			{
				pointsUsed.Set( cIndex );
			}
			else if(BinSearchSegments(lo, hi, Vector(hi[0],hi[1],hi[2]+500), 15, &c))
			{
				pointsUsed.Set( cIndex );
			}
		}
	}

	// Export the points.
	m_nPoints = 0;
	for(i=0; i < nSteps; i++)
	{
		// Every other point is always active.
		if( pointsUsed.Get( i ) || !(i & 1) )
		{
			m_Points[m_nPoints] = pt[i];
			++m_nPoints;
		}
	}
}
Example #4
/*!****************************************************************************
 @Function		RenderScene
 @Return		bool		true if no error occurred
 @Description	Main rendering loop function of the program. The shell will
				call this function every frame.
				eglSwapBuffers() will be performed by PVRShell automatically.
				PVRShell will also manage relevant OS events; the user has
				access to these events through an abstraction layer provided
				by PVRShell.
******************************************************************************/
bool OGLES3PhantomMask::RenderScene()
{
    if(PVRShellIsKeyPressed(PVRShellKeyNameACTION1))
        m_bEnableSH = !m_bEnableSH;

    // Clear the colour and depth buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Draw the background
    m_Background.Draw(m_ui32TexBackground);

    // Enable culling
    glEnable(GL_CULL_FACE);

    // Enable depth testing
    glEnable(GL_DEPTH_TEST);

    // Use shader program
    GLuint ProgramID, MVPLoc, ModelLoc;

    if(m_bEnableSH)
    {
        ProgramID = m_SHShaderProgram.uiId;
        MVPLoc	  = m_SHShaderProgram.auiLoc[eSHMVPMatrix];
        ModelLoc  = m_SHShaderProgram.auiLoc[eSHModel];
    }
    else
    {
        ProgramID = m_DiffuseShaderProgram.uiId;
        MVPLoc	  = m_DiffuseShaderProgram.auiLoc[eDifMVPMatrix];
        ModelLoc  = m_DiffuseShaderProgram.auiLoc[eDifModel];
    }

    glUseProgram(ProgramID);

    /*
    	Calculates the frame number to animate in a time-based manner.
    	Uses the shell function PVRShellGetTime() to get the time in milliseconds.
    */
    unsigned long ulTime = PVRShellGetTime();

    if(ulTime > m_ulTimePrev)
    {
        unsigned long ulDeltaTime = ulTime - m_ulTimePrev;
        m_fFrame += (float)ulDeltaTime * g_fDemoFrameRate;

        if(m_fFrame > m_Scene.nNumFrame - 1)
            m_fFrame = 0;

        // Sets the scene animation to this frame
        m_Scene.SetFrame(m_fFrame);
    }

    m_ulTimePrev = ulTime;

    /*
    	Set up the view and projection matrices from the camera
    */
    PVRTMat4 mView, mProjection;
    PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
    float fFOV;

    // Setup the camera
    bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

    // Camera nodes are after the mesh and light nodes in the array
    int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

    // Get the camera position, target and field of view (fov)
    if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
        fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
    else
        fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

    fFOV *= bRotate ? (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight) : (float)PVRShellGet(prefHeight)/(float)PVRShellGet(prefWidth);
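    // Scale the FOV to account for the screen's aspect ratio; the ratio is inverted when the display is rotated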

    // We can build the model view matrix from the camera position, target and an up vector.
    // For this we use PVRTMat4::LookAtRH()
    mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

    // Calculate the projection matrix
    mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

    SPODNode& Node = m_Scene.pNode[0];

    // Get the node model matrix
    PVRTMat4 mWorld;
    mWorld = m_Scene.GetWorldMatrix(Node);

    // Set the model inverse transpose matrix
    PVRTMat3 mMat3 = PVRTMat3(mWorld);
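    // Only the upper 3x3 of the world matrix is taken here; for a purely rigid
    // (rotation + translation) transform this is equal to its inverse transpose.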

    if(m_bEnableSH)
        mMat3 *= PVRTMat3::RotationY(-1.047197f);

    glUniformMatrix3fv(ModelLoc, 1, GL_FALSE, mMat3.f);

    // Pass the model-view-projection matrix (MVP) to the shader to transform the vertices
    PVRTMat4 mModelView, mMVP;
    mModelView = mView * mWorld;
    mMVP = mProjection * mModelView;
    glUniformMatrix4fv(MVPLoc, 1, GL_FALSE, mMVP.f);

    glBindTexture(GL_TEXTURE_2D, m_ui32TexMask);
    DrawMesh(Node.nIdx);

    // Print text on screen

    if(m_bEnableSH)
    {
        // Base
        m_Print3D.DisplayDefaultTitle("PhantomMask", "Spherical Harmonics Lighting", ePVRTPrint3DSDKLogo);
    }
    else
    {
        // Base
        m_Print3D.DisplayDefaultTitle("PhantomMask", "Vertex Lighting", ePVRTPrint3DSDKLogo);
    }

    m_Print3D.Flush();

    return true;
}
Example #5
/*!****************************************************************************
 @Function		InitView
 @Return		bool		true if no error occurred
 @Description	Code in InitView() will be called by PVRShell upon
				initialization or after a change in the rendering context.
				Used to initialize variables that are dependent on the rendering
				context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES3MagicLantern::InitView()
{
	CPVRTString ErrorStr;

	// At this point m_Scene should have been already processed by InitApplication()
	// and all the POD data properly loaded, but let's do a little test just in case.
	if (!m_Scene.IsLoaded())
	{
		PVRShellSet(prefExitMessage, "ERROR: POD file has not been loaded correctly. Cannot continue. \n");
		return false;
	}

	// Initialize VBO data
	if(!LoadVbos())
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	//	Load and compile the shaders, link programs and load textures.
	if(!LoadPFX())
	{
		return false;
	}

	// Initialize Print3D
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);
	glEnable(GL_DEPTH_TEST);

	// Black as clear colour
	glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

	// Disable blending
	glDisable(GL_BLEND);
	
	// Set up the view and projection matrices from the camera.
	// The camera does not move, so these matrices only need to be 
	// calculated once.
	// If you want to make the camera dynamic, re-calculate the view matrix 
	// every frame.
	PVRTVec3 vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Camera nodes are after the mesh and light nodes in the array.
	// We grab camera num 0 (the only one in the scene)
	const int g_ui32Camera = 0;
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position and target 
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node.
	else
		m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation.

	// Calculate the FOV depending on the screen dimensions so that everything fits in view
	// regardless of whether the screen is rotated or not.
	// If the aspect ratio differs from 640x480, adapt the FOV so the scene still looks correct.
	float fRatioWoverH = (480.0f/640.0f) * ((!bRotate) ? (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight) :  (float)PVRShellGet(prefHeight)/(float)PVRShellGet(prefWidth));
	
	fFOV = m_Scene.pCamera[i32CamID].fFOV / fRatioWoverH;
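	// e.g. for a non-rotated 640x480 screen: fRatioWoverH = (480/640) * (640/480) = 1.0,
	// so the FOV stored in the POD camera is used unchanged.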

	// We can build the model view matrix from the camera position, target and an up vector.
	m_mView = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix.
	m_mProjection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), 	m_Scene.pCamera[i32CamID].fNear, m_Scene.pCamera[i32CamID].fFar, PVRTMat4::OGL, bRotate);

	return true;
}
Example #6
/*!****************************************************************************
 @Function		InitView
 @Return		bool		true if no error occurred
 @Description	Code in InitView() will be called by PVRShell upon
				initialization or after a change in the rendering context.
				Used to initialize variables that are dependent on the rendering
				context (e.g. textures, vertex buffers, etc.)
******************************************************************************/
bool OGLES3DisplacementMap::InitView()
{
	CPVRTString ErrorStr;

	/*
		Initialize VBO data
	*/
	if(!LoadVbos(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	/*
		Load textures
	*/
	if(!LoadTextures(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	/*
		Load and compile the shaders & link programs
	*/
	if(!LoadShaders(&ErrorStr))
	{
		PVRShellSet(prefExitMessage, ErrorStr.c_str());
		return false;
	}

	/*
		Initialize Print3D
	*/
	bool bRotate = PVRShellGet(prefIsRotated) && PVRShellGet(prefFullScreen);

	if(m_Print3D.SetTextures(0,PVRShellGet(prefWidth),PVRShellGet(prefHeight), bRotate) != PVR_SUCCESS)
	{
		PVRShellSet(prefExitMessage, "ERROR: Cannot initialise Print3D\n");
		return false;
	}

	/*
		Set OpenGL ES render states needed for this training course
	*/
	// Enable backface culling and depth test
	glCullFace(GL_BACK);
	glEnable(GL_CULL_FACE);

	glEnable(GL_DEPTH_TEST);

	// Use a nice bright blue as clear colour
	glClearColor(0.6f, 0.8f, 1.0f, 1.0f);

	//Get the direction of the first light from the scene.
	m_LightDir = m_Scene.GetLightDirection(0);

	// For direction vectors, w should be 0
	m_LightDir.w = 0.0f;


	//	Set up the view and projection matrices from the camera
	PVRTVec3	vFrom, vTo(0.0f), vUp(0.0f, 1.0f, 0.0f);
	float fFOV;

	// Setup the camera

	// Camera nodes are after the mesh and light nodes in the array
	int i32CamID = m_Scene.pNode[m_Scene.nNumMeshNode + m_Scene.nNumLight + g_ui32Camera].nIdx;

	// Get the camera position, target and field of view (fov)
	if(m_Scene.pCamera[i32CamID].nIdxTarget != -1) // Does the camera have a target?
		fFOV = m_Scene.GetCameraPos( vFrom, vTo, g_ui32Camera); // vTo is taken from the target node
	else
		fFOV = m_Scene.GetCamera( vFrom, vTo, vUp, g_ui32Camera); // vTo is calculated from the rotation

	// We can build the model view matrix from the camera position, target and an up vector.
	// For this we use PVRTMat4::LookAtRH()
	m_View = PVRTMat4::LookAtRH(vFrom, vTo, vUp);

	// Calculate the projection matrix
	m_Projection = PVRTMat4::PerspectiveFovRH(fFOV, (float)PVRShellGet(prefWidth)/(float)PVRShellGet(prefHeight), g_fCameraNear, g_fCameraFar, PVRTMat4::OGL, bRotate);

	// Initialize variables used for the animation
	m_ulTimePrev = PVRShellGetTime();

	return true;
}