void RiftAppSkeleton::_drawSceneMono() const
{
    _resetGLState();

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    const int w = m_Cfg.OGL.Header.RTSize.w;
    const int h = m_Cfg.OGL.Header.RTSize.h;

    const glm::vec3 EyePos(m_chassisPos.x, m_chassisPos.y, m_chassisPos.z);
    const glm::vec3 LookVec(0.0f, 0.0f, -1.0f);
    const glm::vec3 up(0.0f, 1.0f, 0.0f);

    ovrPosef eyePose;
    eyePose.Orientation = OVR::Quatf();
    eyePose.Position = OVR::Vector3f();
    const OVR::Matrix4f view = _MakeModelviewMatrix(
        eyePose,
        OVR::Vector3f(0.0f),
        m_chassisYaw,
        m_chassisPos);

    const glm::mat4 persp = glm::perspective(
        90.0f,
        static_cast<float>(w) / static_cast<float>(h),
        0.004f,
        500.0f);

    ovrRecti rvp = {0, 0, w, h};
    _DrawScenes(&view.Transposed().M[0][0], glm::value_ptr(persp), rvp);
}
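// The &matrix.Transposed().M[0][0] idiom above recurs throughout this listing: OVR::Matrix4f
// stores its elements row-major in M[4][4], while glm and glUniformMatrix4fv (with
// transpose = GL_FALSE) expect column-major data, so the matrix is transposed before its
// storage is handed off. A minimal helper sketch of that conversion follows; ovrToGlm is not
// part of the original code, only an illustration of the same idiom.
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

inline glm::mat4 ovrToGlm(const OVR::Matrix4f& m)
{
    // Transposed() reorders the row-major storage into the column-major layout glm expects.
    return glm::make_mat4(&m.Transposed().M[0][0]);
}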
void OVRScene::RenderForOneEye(const float* pMview, const float* pPersp) const
{
    printf("Rendering in OVRScene!\n");
    if (m_bDraw == false)
        return;
    if (pMview == NULL)
        return;
    if (pPersp == NULL)
        return;

    const glm::mat4 modelview = glm::make_mat4(pMview);
    const glm::mat4 projection = glm::make_mat4(pPersp);

    // Assemble the modelview matrix to lock the camera in with real-world geometry:
    // we still have to use the assembled HMD stereo modelview matrices from
    // OVRSDK05AppSkeleton, but we undo the effects of chassis yaw and position
    // so the frustum follows the viewer.
    if (m_pHmd != NULL)
    {
        const ovrTrackingState ts = ovrHmd_GetTrackingState(m_pHmd, ovr_GetTimeInSeconds());
        const ovrPosef& cp = ts.CameraPose;

        OVR::Matrix4f camMtx = OVR::Matrix4f();
        camMtx *= OVR::Matrix4f::Translation(cp.Position)
            * OVR::Matrix4f(OVR::Quatf(cp.Orientation));

        const glm::mat4 ogmat = glm::make_mat4(&camMtx.Transposed().M[0][0]);

        DrawScene(modelview * ogmat, projection);
    }
}
void RiftAppSkeleton::RenderThumbnails()
{
    std::vector<Pane*>& panes = m_paneScene.m_panes;
    for (std::vector<Pane*>::iterator it = panes.begin(); it != panes.end(); ++it)
    {
        // dynamic_cast (rather than reinterpret_cast) so the NULL check below is meaningful.
        ShaderPane* pP = dynamic_cast<ShaderPane*>(*it);
        if (pP == NULL)
            continue;
        ShaderToy* pSt = pP->m_pShadertoy;

        // Render a view of the shader to the FBO.
        // We must keep the previously bound FBO and restore it afterwards.
        GLint bound_fbo = 0;
        glGetIntegerv(GL_FRAMEBUFFER_BINDING, &bound_fbo);
        bindFBO(pP->m_paneRenderBuffer);
        //pP->DrawToFBO();
        {
            const glm::vec3 hp = pSt->GetHeadPos();
            const glm::vec3 LookVec(0.0f, 0.0f, -1.0f);
            const glm::vec3 up(0.0f, 1.0f, 0.0f);

            ovrPosef eyePose;
            eyePose.Orientation = OVR::Quatf();
            eyePose.Position = OVR::Vector3f();
            const OVR::Matrix4f view = _MakeModelviewMatrix(
                eyePose,
                OVR::Vector3f(0.0f),
                static_cast<float>(M_PI),
                OVR::Vector3f(hp.x, hp.y, hp.z));

            const glm::mat4 persp = glm::perspective(
                90.0f,
                static_cast<float>(pP->m_paneRenderBuffer.w) / static_cast<float>(pP->m_paneRenderBuffer.h),
                0.004f,
                500.0f);

            const bool wasDrawing = m_shaderToyScene.m_bDraw;
            m_shaderToyScene.m_bDraw = true;
            m_shaderToyScene.SetShaderToy(pSt);
            m_shaderToyScene.RenderForOneEye(&view.Transposed().M[0][0], glm::value_ptr(persp));
            m_shaderToyScene.m_bDraw = wasDrawing;
            m_shaderToyScene.SetShaderToy(NULL);
        }
        unbindFBO();
        glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, bound_fbo);
    }
}
void Entity_DrawChildren( const OVR::Matrix4f &view, const SxTransform& xform, SRef first )
{
    SRef            ref;
    SEntity         *entity;
    SxTransform     entityXform;
    SxTransform     childXform;
    OVR::Matrix4f   m;

    for ( ref = first; ref != S_NULL_REF; ref = entity->parentLink.next )
    {
        entity = Registry_GetEntity( ref );
        assert( entity );

        if ( entity->visibility <= 0.0f )
            continue;

        OrientationToTransform( entity->orientation, &entityXform );
        ConcatenateTransforms( xform, entityXform, &childXform );

        // if ( strstr( entity->id, "vnc" ) )
        // {
        //     S_Log( "entityXform %s:", entity->id );
        //     S_Log( "xAxis: %f %f %f", entityXform.axes.x.x, entityXform.axes.x.y, entityXform.axes.x.z );
        //     S_Log( "yAxis: %f %f %f", entityXform.axes.y.x, entityXform.axes.y.y, entityXform.axes.y.z );
        //     S_Log( "zAxis: %f %f %f", entityXform.axes.z.x, entityXform.axes.z.y, entityXform.axes.z.z );
        //     S_Log( "origin: %f %f %f", entityXform.origin.x, entityXform.origin.y, entityXform.origin.z );
        //     S_Log( "scale: %f %f %f", entityXform.scale.x, entityXform.scale.y, entityXform.scale.z );
        //     S_Log( "childXform %s:", entity->id );
        //     S_Log( "xAxis: %f %f %f", childXform.axes.x.x, childXform.axes.x.y, childXform.axes.x.z );
        //     S_Log( "yAxis: %f %f %f", childXform.axes.y.x, childXform.axes.y.y, childXform.axes.y.z );
        //     S_Log( "zAxis: %f %f %f", childXform.axes.z.x, childXform.axes.z.y, childXform.axes.z.z );
        //     S_Log( "origin: %f %f %f", childXform.origin.x, childXform.origin.y, childXform.origin.z );
        //     S_Log( "scale: %f %f %f", childXform.scale.x, childXform.scale.y, childXform.scale.z );
        // }

        m = OVR::Matrix4f(
            childXform.axes.x.x * childXform.scale.x, childXform.axes.x.y * childXform.scale.x, childXform.axes.x.z * childXform.scale.x, 0.0f,
            childXform.axes.y.x * childXform.scale.y, childXform.axes.y.y * childXform.scale.y, childXform.axes.y.z * childXform.scale.y, 0.0f,
            childXform.axes.z.x * childXform.scale.z, childXform.axes.z.y * childXform.scale.z, childXform.axes.z.z * childXform.scale.z, 0.0f,
            childXform.origin.x, childXform.origin.y, childXform.origin.z, 1.0f );

        Entity_DrawEntity( entity, view * m.Transposed() );

        if ( entity->firstChild != S_NULL_REF )
        {
            // S_Log( "%s has children", entity->id );
            Entity_DrawChildren( view, childXform, entity->firstChild );
        }
    }
}
void RiftAppSkeleton::_initPresentFbo()
{
    m_presentFbo.bindVAO();

    const float verts[] = {
        -1, -1,
         1, -1,
         1,  1,
        -1,  1
    };
    const float texs[] = {
        0, 0,
        1, 0,
        1, 1,
        0, 1,
    };

    GLuint vertVbo = 0;
    glGenBuffers(1, &vertVbo);
    m_presentFbo.AddVbo("vPosition", vertVbo);
    glBindBuffer(GL_ARRAY_BUFFER, vertVbo);
    glBufferData(GL_ARRAY_BUFFER, 4*2*sizeof(GLfloat), verts, GL_STATIC_DRAW);
    glVertexAttribPointer(m_presentFbo.GetAttrLoc("vPosition"), 2, GL_FLOAT, GL_FALSE, 0, NULL);

    GLuint texVbo = 0;
    glGenBuffers(1, &texVbo);
    m_presentFbo.AddVbo("vTex", texVbo);
    glBindBuffer(GL_ARRAY_BUFFER, texVbo);
    glBufferData(GL_ARRAY_BUFFER, 4*2*sizeof(GLfloat), texs, GL_STATIC_DRAW);
    glVertexAttribPointer(m_presentFbo.GetAttrLoc("vTex"), 2, GL_FLOAT, GL_FALSE, 0, NULL);

    glEnableVertexAttribArray(m_presentFbo.GetAttrLoc("vPosition"));
    glEnableVertexAttribArray(m_presentFbo.GetAttrLoc("vTex"));

    glUseProgram(m_presentFbo.prog());
    {
        OVR::Matrix4f id = OVR::Matrix4f::Identity();
        glUniformMatrix4fv(m_presentFbo.GetUniLoc("mvmtx"), 1, false, &id.Transposed().M[0][0]);
        glUniformMatrix4fv(m_presentFbo.GetUniLoc("prmtx"), 1, false, &id.Transposed().M[0][0]);
    }
    glUseProgram(0);

    glBindVertexArray(0);
}
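// For reference, a hypothetical pass-through vertex shader consistent with the attribute and
// uniform names bound in _initPresentFbo() above (vPosition, vTex, mvmtx, prmtx). The real
// shader source is not part of this listing; this is only a sketch of what it plausibly contains.
static const char* s_presentVertSrcSketch =
    "#version 330\n"
    "in vec2 vPosition;\n"
    "in vec2 vTex;\n"
    "uniform mat4 mvmtx;\n"
    "uniform mat4 prmtx;\n"
    "out vec2 vfTex;\n"
    "void main()\n"
    "{\n"
    "    vfTex = vTex;\n"
    "    gl_Position = prmtx * mvmtx * vec4(vPosition, 0.0, 1.0);\n"
    "}\n";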
// Calculate the transformation matrix needed for the Oculus Rift display.
glm::mat4 RetinaManager::CalcTransMatrix(ovrEyeType Eye)
{
    glm::mat4 projMat;
    glm::mat4 modelViewMat;

    // Get the projection matrix from the device
    OVR::Matrix4f projectionMatrix = ovrMatrix4f_Projection(
        this->eyeRenderDesc[Eye].Fov, 0.3f, 1000.0f, true);

    // Convert the matrix into OpenGL (column-major) form
    memcpy(glm::value_ptr(projMat), &(projectionMatrix.Transposed().M[0][0]), sizeof(projectionMatrix));

    modelViewMat = glm::mat4(1.0); // Identity matrix for model-view

    // Adjust IPD and the distance from FOV
    glm::mat4 translateIPD = glm::translate(glm::mat4(1.0),
        glm::vec3(this->eyeRenderDesc[Eye].ViewAdjust.x,
                  this->eyeRenderDesc[Eye].ViewAdjust.y,
                  this->eyeRenderDesc[Eye].ViewAdjust.z));
    glm::mat4 translateBack = glm::translate(glm::mat4(1.0),
        glm::vec3(0, 0, this->paramManager.getTranslateBackOffset()));

    // Compose and return the transformed matrix
    return projMat * modelViewMat * translateBack * translateIPD;
}
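// Hypothetical usage of CalcTransMatrix: upload the composed matrix to a shader program.
// This helper and the uniform name "mvpMatrix" are assumptions for illustration only,
// not taken from the original source.
void UploadEyeTransform(RetinaManager& retinaManager, GLuint prog, ovrEyeType eye)
{
    const glm::mat4 mvp = retinaManager.CalcTransMatrix(eye);
    glUseProgram(prog);
    glUniformMatrix4fv(glGetUniformLocation(prog, "mvpMatrix"), 1, GL_FALSE, glm::value_ptr(mvp));
}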
int main(int argc, char **argv)
{
    // initialize everything
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS) < 0)
    {
        return 1;
    }

    if (!g_oculusVR.InitVR())
    {
        SDL_Quit();
        return 1;
    }

    ovrSizei hmdResolution = g_oculusVR.GetResolution();
    ovrSizei windowSize = { hmdResolution.w / 2, hmdResolution.h / 2 };

    g_renderContext.Init("Oculus Rift IR Camera Bounds Renderer", 100, 100, windowSize.w, windowSize.h);
    SDL_ShowCursor(SDL_DISABLE);

    if (glewInit() != GLEW_OK)
    {
        g_renderContext.Destroy();
        g_oculusVR.DestroyVR();
        SDL_Quit();
        return 1;
    }

    if (!g_oculusVR.InitVRBuffers(windowSize.w, windowSize.h))
    {
        g_renderContext.Destroy();
        g_oculusVR.DestroyVR();
        SDL_Quit();
        return 1;
    }

    ShaderManager::GetInstance()->LoadShaders();
    g_application.OnStart();

    while (g_application.Running())
    {
        // handle key presses
        processEvents();

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glClearColor(0.2f, 0.2f, 0.6f, 0.0f);

        g_oculusVR.OnRenderStart();

        for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
        {
            OVR::Matrix4f MVPMatrix = g_oculusVR.OnEyeRender(eyeIndex);

            // update MVP in both shaders
            const ShaderProgram &shader = ShaderManager::GetInstance()->UseShaderProgram(ShaderManager::BasicShader);
            glUniformMatrix4fv(shader.uniforms[ModelViewProjectionMatrix], 1, GL_FALSE, &MVPMatrix.Transposed().M[0][0]);

            const ShaderProgram &shader2 = ShaderManager::GetInstance()->UseShaderProgram(ShaderManager::OVRFrustumShader);
            glUniformMatrix4fv(shader2.uniforms[ModelViewProjectionMatrix], 1, GL_FALSE, &MVPMatrix.Transposed().M[0][0]);

            g_application.OnRender();
            g_oculusVR.RenderTrackerFrustum();
            g_oculusVR.OnEyeRenderFinish(eyeIndex);
        }

        g_oculusVR.SubmitFrame();
        g_oculusVR.BlitMirror();

        SDL_GL_SwapWindow(g_renderContext.window);
    }

    g_renderContext.Destroy();
    g_oculusVR.DestroyVR();
    SDL_Quit();

    return 0;
}
/// Set up view matrices, then draw scene
void OculusAppSkeleton::display(bool useOculus) const
{
    /// This may save us some frame rate
    if (!useOculus && !m_displaySceneInControl)
    {
        glClearColor(0, 0, 0, 0);
        glClear(GL_COLOR_BUFFER_BIT);
        return;
    }

    glEnable(GL_DEPTH_TEST);

    m_ok.BindRenderBuffer();
    {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        const int fboWidth  = m_ok.GetRenderBufferWidth();
        const int fboHeight = m_ok.GetRenderBufferHeight();
        const int halfWidth = fboWidth / 2;

        if (useOculus)
        {
            const OVR::HMDInfo& hmd = m_ok.GetHMD();

            // Compute aspect ratio. Stereo mode cuts width in half.
            const float aspectRatio = float(hmd.HResolution * 0.5f) / float(hmd.VResolution);

            // Compute vertical FOV based on distance.
            const float halfScreenDistance = hmd.VScreenSize / 2;
            const float yfov = 2.0f * atan(halfScreenDistance / hmd.EyeToScreenDistance);

            // Post-projection viewport coordinates range from (-1.0, 1.0), with the
            // center of the left viewport falling at (1/4) of horizontal screen size.
            // We need to shift this projection center to match with the lens center.
            // We compute this shift in physical units (meters) to correct
            // for different screen sizes and then rescale to viewport coordinates.
            const float viewCenterValue = hmd.HScreenSize * 0.25f;
            const float eyeProjectionShift = viewCenterValue - hmd.LensSeparationDistance * 0.5f;
            const float projectionCenterOffset = 4.0f * eyeProjectionShift / hmd.HScreenSize;

            // Projection matrix for the "center eye", which the left/right matrices are based on.
            const OVR::Matrix4f projCenter = OVR::Matrix4f::PerspectiveRH(yfov, aspectRatio, 0.3f, 1000.0f);
            const OVR::Matrix4f projLeft   = OVR::Matrix4f::Translation( projectionCenterOffset, 0, 0) * projCenter;
            const OVR::Matrix4f projRight  = OVR::Matrix4f::Translation(-projectionCenterOffset, 0, 0) * projCenter;

            // m_oculusView transformation translation in world units.
            const float halfIPD = hmd.InterpupillaryDistance * 0.5f;
            const OVR::Matrix4f viewLeft  = OVR::Matrix4f::Translation( halfIPD, 0, 0) * m_oculusView;
            const OVR::Matrix4f viewRight = OVR::Matrix4f::Translation(-halfIPD, 0, 0) * m_oculusView;

            glViewport(0, 0, (GLsizei)halfWidth, (GLsizei)fboHeight);
            glScissor (0, 0, (GLsizei)halfWidth, (GLsizei)fboHeight);
            m_scene.RenderForOneEye(viewLeft, projLeft);

            glViewport(halfWidth, 0, (GLsizei)halfWidth, (GLsizei)fboHeight);
            glScissor (halfWidth, 0, (GLsizei)halfWidth, (GLsizei)fboHeight);
            m_scene.RenderForOneEye(viewRight, projRight);
        }
        else
        {
            /// Set up our 3D transformation matrices.
            /// Remember that DX and OpenGL use transposed conventions (and DX traditionally
            /// uses left-handed coordinates).
            const OVR::Matrix4f mview = m_controlView;
            const OVR::Matrix4f persp = OVR::Matrix4f::PerspectiveRH(
                m_viewAngleDeg * M_PI / 180.0f,
                (float)m_windowWidth / (float)m_windowHeight,
                0.004f,
                500.0f);

            glViewport(0, 0, (GLsizei)fboWidth, (GLsizei)fboHeight);
            m_scene.RenderForOneEye(mview, persp);

            /// Render avatar of Oculus user
            //if (UseFollowCam)
            const GLuint prog = m_avatarProg;
            glUseProgram(prog);
            {
                const OVR::Matrix4f rollPitchYaw = GetRollPitchYaw();
                const OVR::Matrix4f eyetx = mview
                    * OVR::Matrix4f::Translation(EyePos.x, EyePos.y, EyePos.z)
                    * rollPitchYaw;

                glUniformMatrix4fv(getUniLoc(prog, "mvmtx"), 1, false, &eyetx.Transposed().M[0][0]);
                glUniformMatrix4fv(getUniLoc(prog, "prmtx"), 1, false, &persp.Transposed().M[0][0]);

                glLineWidth(4.0f);
                DrawOrigin2();
                const float aspect = (float)GetOculusWidth() / (float)GetOculusHeight();
                DrawViewFrustum(aspect);
                glLineWidth(1.0f);
            }
        }
    }
    m_ok.UnBindRenderBuffer();

    glDisable(GL_LIGHTING);
    glDisable(GL_DEPTH_TEST);

    const OVRkill::PostProcessType post = useOculus ?
        OVRkill::PostProcess_Distortion :
        OVRkill::PostProcess_None;
    m_ok.PresentFbo(post);
}
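// The projection center offset computed in display() above simplifies algebraically, using only
// the expressions already present in that function (no additional data):
//
//   projectionCenterOffset = 4 * (HScreenSize/4 - LensSeparationDistance/2) / HScreenSize
//                          = 1 - 2 * LensSeparationDistance / HScreenSize
//
// A standalone sketch of that simplified form (this helper is not in the original code):
inline float ProjectionCenterOffset(float hScreenSize, float lensSeparationDistance)
{
    return 1.0f - 2.0f * lensSeparationDistance / hScreenSize;
}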
void Entity_DrawEntity( SEntity *entity, const OVR::Matrix4f &view )
{
    STexture    *texture;
    SGeometry   *geometry;
    uint        geometryIndex;
    uint        textureIndex;
    GLuint      texId;
    float       uScale;
    float       vScale;
    GLuint      vertexArrayObject;
    int         triCount;
    int         indexOffset;
    int         batchTriCount;
    int         triCountLeft;

    Prof_Start( PROF_DRAW_ENTITY );

    assert( entity );

    OVR::GL_CheckErrors( "before Entity_DrawEntity" );

    geometry = Registry_GetGeometry( entity->geometryRef );
    assert( geometry );

    geometryIndex = geometry->drawIndex % BUFFER_COUNT;
    vertexArrayObject = geometry->vertexArrayObjects[geometryIndex];
    if ( !vertexArrayObject )
    {
        Prof_Stop( PROF_DRAW_ENTITY );
        return;
    }

    glUseProgram( s_ent.shader.program );

    glUniformMatrix4fv( s_ent.shader.uMvp, 1, GL_FALSE, view.Transposed().M[0] );

    glBindVertexArrayOES_( vertexArrayObject );

    glActiveTexture( GL_TEXTURE0 );

    if ( entity->textureRef != S_NULL_REF )
    {
        texture = Registry_GetTexture( entity->textureRef );
        assert( texture );

        textureIndex = texture->drawIndex % BUFFER_COUNT;
        texId = texture->texId[textureIndex];
        glBindTexture( GL_TEXTURE_2D, texId );

        if ( texId )
        {
            assert( texture->texWidth[textureIndex] );
            assert( texture->texHeight[textureIndex] );

            uScale = (float)texture->width / texture->texWidth[textureIndex];
            vScale = (float)texture->height / texture->texHeight[textureIndex];
            glUniform4f( s_ent.shader.uColor, uScale, vScale, 1.0f, 1.0f );
        }
        else
        {
            glUniform4f( s_ent.shader.uColor, 1.0f, 1.0f, 1.0f, 1.0f );
        }

        if ( texture->format == SxTextureFormat_R8G8B8A8 ||
             texture->format == SxTextureFormat_R8G8B8A8_SRGB )
        {
            glEnable( GL_BLEND );
            glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
        }
    }
    else
    {
        glBindTexture( GL_TEXTURE_2D, 0 );
        glUniform4f( s_ent.shader.uColor, 1.0f, 1.0f, 1.0f, 1.0f );
        glDisable( GL_BLEND );
    }

    indexOffset = 0;
    triCount = geometry->indexCounts[geometryIndex] / 3;
    triCountLeft = triCount;

    while ( triCountLeft )
    {
#if USE_SPLIT_DRAW
        batchTriCount = S_Min( triCountLeft, S_Max( 1, triCount / 10 ) );
#else // #if USE_SPLIT_DRAW
        batchTriCount = triCount;
#endif // #else // #if USE_SPLIT_DRAW

        glDrawElements( GL_TRIANGLES, batchTriCount * 3, GL_UNSIGNED_SHORT, (void *)indexOffset );

        indexOffset += batchTriCount * sizeof( ushort ) * 3;
        triCountLeft -= batchTriCount;
    }

    glBindVertexArrayOES_( 0 );
    glBindTexture( GL_TEXTURE_2D, 0 );
    glDisable( GL_BLEND );

    OVR::GL_CheckErrors( "after Entity_DrawEntity" );

    Prof_Stop( PROF_DRAW_ENTITY );
}
void RiftAppSkeleton::display_sdk() //const
{
    ovrHmd hmd = m_Hmd;
    if (hmd == NULL)
        return;

    //const ovrFrameTiming hmdFrameTiming = ovrHmd_BeginFrame(m_Hmd, 0);

    bindFBO(m_renderBuffer);

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // For passing to EndFrame once rendering is done
    ovrPosef renderPose[2];
    ovrTexture eyeTexture[2];

    for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
    {
        const ovrEyeType eye = hmd->EyeRenderOrder[eyeIndex];
        const ovrPosef eyePose = ovrHmd_GetEyePose(m_Hmd, eye);
        m_eyeOri = eyePose.Orientation; // cache this for movement direction
        _StoreHmdPose(eyePose);

        const ovrGLTexture& otex = l_EyeTexture[eye];
        const ovrRecti& rvp = otex.OGL.Header.RenderViewport;
        glViewport(
            rvp.Pos.x, rvp.Pos.y,
            rvp.Size.w, rvp.Size.h);

        const OVR::Matrix4f proj = ovrMatrix4f_Projection(
            m_EyeRenderDesc[eye].Fov,
            0.01f, 10000.0f, true);

        const OVR::Matrix4f view = _MakeModelviewMatrix(
            eyePose,
            -OVR::Vector3f(m_EyeRenderDesc[eye].ViewAdjust), // not sure why negative...
            m_chassisYaw,
            m_chassisPos);

        const OVR::Matrix4f scaledView = _MakeModelviewMatrix(
            eyePose,
            -OVR::Vector3f(m_EyeRenderDesc[eye].ViewAdjust), // not sure why negative...
            m_chassisYaw,
            m_chassisPos,
            m_headSize);

        _resetGLState();

        _DrawScenes(
            &view.Transposed().M[0][0],
            &proj.Transposed().M[0][0],
            rvp,
            &scaledView.Transposed().M[0][0]);

        renderPose[eyeIndex] = eyePose;
        eyeTexture[eyeIndex] = l_EyeTexture[eye].Texture;
    }
    unbindFBO();

    ovrHmd_EndFrame(m_Hmd, renderPose, eyeTexture);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glUseProgram(0);
}
///@todo Even though this function shares most of its code with client rendering,
/// which appears to work fine, the stereo pair is non-convergable. It appears that
/// the projection matrices for each eye are too far apart; it could also be the modelview.
void RiftAppSkeleton::display_stereo_undistorted() //const
{
    ovrHmd hmd = m_Hmd;
    if (hmd == NULL)
        return;

    //ovrFrameTiming hmdFrameTiming = ovrHmd_BeginFrameTiming(hmd, 0);

    bindFBO(m_renderBuffer, m_fboScale);

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
    {
        const ovrEyeType eye = hmd->EyeRenderOrder[eyeIndex];
        const ovrPosef eyePose = ovrHmd_GetEyePose(hmd, eye);

        const ovrGLTexture& otex = l_EyeTexture[eye];
        const ovrRecti& rvp = otex.OGL.Header.RenderViewport;
        const ovrRecti rsc = {
            static_cast<int>(m_fboScale * rvp.Pos.x),
            static_cast<int>(m_fboScale * rvp.Pos.y),
            static_cast<int>(m_fboScale * rvp.Size.w),
            static_cast<int>(m_fboScale * rvp.Size.h)
        };
        glViewport(rsc.Pos.x, rsc.Pos.y, rsc.Size.w, rsc.Size.h);

        const OVR::Quatf orientation = OVR::Quatf(eyePose.Orientation);
        const OVR::Matrix4f proj = ovrMatrix4f_Projection(
            m_EyeRenderDesc[eye].Fov,
            0.01f, 10000.0f, true);

        //m_EyeRenderDesc[eye].DistortedViewport;

        const OVR::Vector3f EyePos = m_chassisPos;
        const OVR::Matrix4f view =
            OVR::Matrix4f(orientation.Inverted())
            * OVR::Matrix4f::RotationY(m_chassisYaw)
            * OVR::Matrix4f::Translation(-EyePos);
        const OVR::Matrix4f eyeview =
            OVR::Matrix4f::Translation(m_EyeRenderDesc[eye].ViewAdjust) * view;

        _resetGLState();

        _DrawScenes(&eyeview.Transposed().M[0][0], &proj.Transposed().M[0][0], rvp);
    }
    unbindFBO();

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);

    // Present FBO to screen
    const GLuint prog = m_presentFbo.prog();
    glUseProgram(prog);
    m_presentFbo.bindVAO();
    {
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, m_renderBuffer.tex);
        glUniform1i(m_presentFbo.GetUniLoc("fboTex"), 0);

        // This is the only uniform that changes per-frame
        glUniform1f(m_presentFbo.GetUniLoc("fboScale"), m_fboScale);

        glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
    }
    glBindVertexArray(0);
    glUseProgram(0);

    ovrHmd_EndFrameTiming(hmd);
}
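// _MakeModelviewMatrix is called throughout this listing but its body is not shown. The explicit
// composition in display_stereo_undistorted() above suggests what it likely does; the sketch
// below is an inference from that code (the real implementation may also use eyePose.Position
// and a head-size scale), not the actual function.
OVR::Matrix4f MakeModelviewMatrixSketch(
    const ovrPosef& eyePose,
    const OVR::Vector3f& viewAdjust,
    float chassisYaw,
    const OVR::Vector3f& chassisPos)
{
    // World-to-chassis transform: undo head orientation, then chassis yaw, then chassis position.
    const OVR::Matrix4f view =
        OVR::Matrix4f(OVR::Quatf(eyePose.Orientation).Inverted())
        * OVR::Matrix4f::RotationY(chassisYaw)
        * OVR::Matrix4f::Translation(-chassisPos);

    // Per-eye offset applied last, as in the undistorted path above.
    return OVR::Matrix4f::Translation(viewAdjust) * view;
}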
void RiftAppSkeleton::display_client() //const
{
    ovrHmd hmd = m_Hmd;
    if (hmd == NULL)
        return;

    //ovrFrameTiming hmdFrameTiming = ovrHmd_BeginFrameTiming(hmd, 0);

    bindFBO(m_renderBuffer, m_fboScale);

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
    {
        const ovrEyeType eye = hmd->EyeRenderOrder[eyeIndex];
        const ovrPosef eyePose = ovrHmd_GetEyePose(hmd, eye);
        m_eyeOri = eyePose.Orientation; // cache this for movement direction
        _StoreHmdPose(eyePose);

        const ovrGLTexture& otex = l_EyeTexture[eye];
        const ovrRecti& rvp = otex.OGL.Header.RenderViewport;
        const ovrRecti rsc = {
            static_cast<int>(m_fboScale * rvp.Pos.x),
            static_cast<int>(m_fboScale * rvp.Pos.y),
            static_cast<int>(m_fboScale * rvp.Size.w),
            static_cast<int>(m_fboScale * rvp.Size.h)
        };
        glViewport(rsc.Pos.x, rsc.Pos.y, rsc.Size.w, rsc.Size.h);

        const OVR::Matrix4f proj = ovrMatrix4f_Projection(
            m_EyeRenderDesc[eye].Fov,
            0.01f, 10000.0f, true);

        ///@todo Should we be using this variable?
        //m_EyeRenderDesc[eye].DistortedViewport;

        const OVR::Matrix4f view = _MakeModelviewMatrix(
            eyePose,
            m_EyeRenderDesc[eye].ViewAdjust,
            m_chassisYaw,
            m_chassisPos);

        const OVR::Matrix4f scaledView = _MakeModelviewMatrix(
            eyePose,
            m_EyeRenderDesc[eye].ViewAdjust,
            m_chassisYaw,
            m_chassisPos,
            m_headSize);

        _resetGLState();

        _DrawScenes(
            &view.Transposed().M[0][0],
            &proj.Transposed().M[0][0],
            rsc,
            &scaledView.Transposed().M[0][0]);
    }
    unbindFBO();

    // Set full viewport...?
    const int w = m_Cfg.OGL.Header.RTSize.w;
    const int h = m_Cfg.OGL.Header.RTSize.h;
    glViewport(0, 0, w, h);

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);

    // Now draw the distortion mesh...
    for (int eyeNum = 0; eyeNum < 2; eyeNum++)
    {
        const ShaderWithVariables& eyeShader = eyeNum == 0 ?
            m_presentDistMeshL :
            m_presentDistMeshR;
        const GLuint prog = eyeShader.prog();
        glUseProgram(prog);
        //glBindVertexArray(eyeShader.m_vao);
        {
            const ovrDistortionMesh& mesh = m_DistMeshes[eyeNum];
            glBindBuffer(GL_ARRAY_BUFFER, 0);

            const int a_pos = glGetAttribLocation(prog, "vPosition");
            glVertexAttribPointer(a_pos, 4, GL_FLOAT, GL_FALSE,
                sizeof(ovrDistortionVertex), &mesh.pVertexData[0].ScreenPosNDC.x);
            glEnableVertexAttribArray(a_pos);

            const int a_texR = glGetAttribLocation(prog, "vTexR");
            if (a_texR > -1)
            {
                glVertexAttribPointer(a_texR, 2, GL_FLOAT, GL_FALSE,
                    sizeof(ovrDistortionVertex), &mesh.pVertexData[0].TanEyeAnglesR);
                glEnableVertexAttribArray(a_texR);
            }

            const int a_texG = glGetAttribLocation(prog, "vTexG");
            if (a_texG > -1)
            {
                glVertexAttribPointer(a_texG, 2, GL_FLOAT, GL_FALSE,
                    sizeof(ovrDistortionVertex), &mesh.pVertexData[0].TanEyeAnglesG);
                glEnableVertexAttribArray(a_texG);
            }

            const int a_texB = glGetAttribLocation(prog, "vTexB");
            if (a_texB > -1)
            {
                glVertexAttribPointer(a_texB, 2, GL_FLOAT, GL_FALSE,
                    sizeof(ovrDistortionVertex), &mesh.pVertexData[0].TanEyeAnglesB);
                glEnableVertexAttribArray(a_texB);
            }

            const ovrVector2f uvoff =
                m_uvScaleOffsetOut[2*eyeNum + 1]; //DistortionData.UVScaleOffset[eyeNum][0];
            const ovrVector2f uvscale =
                m_uvScaleOffsetOut[2*eyeNum + 0]; //DistortionData.UVScaleOffset[eyeNum][1];

            glUniform2f(eyeShader.GetUniLoc("EyeToSourceUVOffset"), uvoff.x, uvoff.y);
            glUniform2f(eyeShader.GetUniLoc("EyeToSourceUVScale"), uvscale.x, uvscale.y);

#if 0
            // Setup shader constants
            DistortionData.Shaders->SetUniform2f(
                "EyeToSourceUVScale",
                DistortionData.UVScaleOffset[eyeNum][0].x,
                DistortionData.UVScaleOffset[eyeNum][0].y);
            DistortionData.Shaders->SetUniform2f(
                "EyeToSourceUVOffset",
                DistortionData.UVScaleOffset[eyeNum][1].x,
                DistortionData.UVScaleOffset[eyeNum][1].y);

            if (distortionCaps & ovrDistortionCap_TimeWarp)
            {
                // TIMEWARP - Additional shader constants required
                ovrMatrix4f timeWarpMatrices[2];
                ovrHmd_GetEyeTimewarpMatrices(HMD, (ovrEyeType)eyeNum, eyeRenderPoses[eyeNum], timeWarpMatrices);
                //WARNING!!! These matrices are transposed in SetUniform4x4f, before being used by the shader.
                DistortionData.Shaders->SetUniform4x4f("EyeRotationStart", Matrix4f(timeWarpMatrices[0]));
                DistortionData.Shaders->SetUniform4x4f("EyeRotationEnd", Matrix4f(timeWarpMatrices[1]));
            }

            // Perform distortion
            pRender->Render(
                &distortionShaderFill,
                DistortionData.MeshVBs[eyeNum],
                DistortionData.MeshIBs[eyeNum]);
#endif

            glActiveTexture(GL_TEXTURE0);
            glBindTexture(GL_TEXTURE_2D, m_renderBuffer.tex);
            glUniform1i(eyeShader.GetUniLoc("fboTex"), 0);

            // This is the only uniform that changes per-frame
            glUniform1f(eyeShader.GetUniLoc("fboScale"), m_fboScale);

            glDrawElements(
                GL_TRIANGLES,
                mesh.IndexCount,
                GL_UNSIGNED_SHORT,
                &mesh.pIndexData[0]);
        }
        glBindVertexArray(0);
        glUseProgram(0);
    }

    ovrHmd_EndFrameTiming(hmd);
}