DLL_EXPORT_API npBool xnOvrCommitFrame(xnOvrSession* session, int numberOfExtraLayers, xnOvrQuadLayer** extraLayers)
{
    ovrLayerHeader* layers[1 + numberOfExtraLayers];

    //add the default layer first
    layers[0] = &session->Layer.Header;
    //commit the default fov layer
    ovr_CommitTextureSwapChain(session->Session, session->SwapChain);

    for (auto i = 0; i < numberOfExtraLayers; i++)
    {
        //add further quad layers
        layers[i + 1] = &extraLayers[i]->Layer.Header;
        //also commit the quad layer
        ovr_CommitTextureSwapChain(session->Session, extraLayers[i]->SwapChain);
    }

    if (!OVR_SUCCESS(ovr_SubmitFrame(session->Session, 0, NULL, layers, 1 + numberOfExtraLayers)))
    {
        return false;
    }

    ovrSessionStatus status;
    if (!OVR_SUCCESS(ovr_GetSessionStatus(session->Session, &status)))
    {
        return false;
    }

    if (status.ShouldRecenter)
    {
        ovr_RecenterTrackingOrigin(session->Session);
    }

    return true;
}
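// Note: the layer array above is a variable-length array, which GCC/Clang accept as an
// extension but standard C++ does not. A minimal, portable sketch of the same gathering
// step (the helper name is ours; it assumes the same xnOvrSession / xnOvrQuadLayer layout
// used by the original function):
#include <vector>
static npBool xnOvrCommitFramePortable(xnOvrSession* session, int numberOfExtraLayers, xnOvrQuadLayer** extraLayers)
{
    std::vector<ovrLayerHeader*> layers(1 + numberOfExtraLayers);
    layers[0] = &session->Layer.Header;
    ovr_CommitTextureSwapChain(session->Session, session->SwapChain);
    for (int i = 0; i < numberOfExtraLayers; i++)
    {
        layers[i + 1] = &extraLayers[i]->Layer.Header;
        ovr_CommitTextureSwapChain(session->Session, extraLayers[i]->SwapChain);
    }
    return OVR_SUCCESS(ovr_SubmitFrame(session->Session, 0, NULL, layers.data(), 1 + numberOfExtraLayers));
}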
void OculusVR::SubmitFrame()
{
    // set up positional data
    ovrViewScaleDesc viewScaleDesc;
    viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
    viewScaleDesc.HmdToEyeOffset[0] = m_hmdToEyeOffset[0];
    viewScaleDesc.HmdToEyeOffset[1] = m_hmdToEyeOffset[1];

    // create the main eye layer
    m_eyeLayer.Header.Type = ovrLayerType_EyeFov;
    m_eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;   // Because OpenGL.

    m_eyeLayer.ColorTexture[0] = m_renderBuffer->m_swapTextureChain;
    m_eyeLayer.ColorTexture[1] = m_renderBuffer->m_swapTextureChain;
    m_eyeLayer.Fov[0] = m_hmdDesc.DefaultEyeFov[0];
    m_eyeLayer.Fov[1] = m_hmdDesc.DefaultEyeFov[1];
    m_eyeLayer.Viewport[0] = OVR::Recti(0, 0, m_renderBuffer->m_bufferSize.w / 2, m_renderBuffer->m_bufferSize.h);
    m_eyeLayer.Viewport[1] = OVR::Recti(m_renderBuffer->m_bufferSize.w / 2, 0, m_renderBuffer->m_bufferSize.w / 2, m_renderBuffer->m_bufferSize.h);
    m_eyeLayer.RenderPose[0] = m_eyeRenderPose[0];
    m_eyeLayer.RenderPose[1] = m_eyeRenderPose[1];
    m_eyeLayer.SensorSampleTime = m_sensorSampleTime;

    // append all the layers to the global list
    ovrLayerHeader* layerList = &m_eyeLayer.Header;
    ovrResult result = ovr_SubmitFrame(m_hmdSession, m_frameIndex, nullptr, &layerList, 1);
}
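// Side note (sketch, assuming the member names above): viewScaleDesc is filled in but never
// handed to the compositor. Other samples in this collection pass it as the third argument,
// which is how the eye offsets and HmdSpaceToWorldScaleInMeters actually reach the compositor:
//
//     ovrResult result = ovr_SubmitFrame(m_hmdSession, m_frameIndex, &viewScaleDesc, &layerList, 1);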
void OculusWindow::finish_frame()
{
    ovr_CommitTextureSwapChain(hmd_session_, texture_swap_chain_);

    ovrLayerHeader* layers = &color_layer_.Header;
    ovrResult result = ovr_SubmitFrame(hmd_session_, framecount_, nullptr, &layers, 1);
    if (!OVR_SUCCESS(result))
    {
        gua::Logger::LOG_WARNING << "Failed to submit frame to the oculus rift.\n";
    }

    GlfwWindow::finish_frame();
}
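// finish_frame() above commits whatever was last rendered into texture_swap_chain_. A sketch
// of the acquire step that normally precedes it: fetch the chain's current buffer, attach it
// to a framebuffer, and draw the eye viewports. The helper name, the fbo handle and the draw
// call are assumptions; only the LibOVR/GL calls are real API.
static void render_into_current_swap_chain_buffer(ovrSession session, ovrTextureSwapChain chain, GLuint fbo)
{
    int index = 0;
    ovr_GetTextureSwapChainCurrentIndex(session, chain, &index);
    GLuint tex_id = 0;
    ovr_GetTextureSwapChainBufferGL(session, chain, index, &tex_id);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex_id, 0);
    // ... draw both eye viewports into tex_id here ...
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}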
void GuardianSystemDemo::Render()
{
    // Get current eye pose for rendering
    double eyePoseTime = 0;
    ovrPosef eyePose[ovrEye_Count] = {};
    ovr_GetEyePoses(mSession, mFrameIndex, ovrTrue, mHmdToEyeOffset, eyePose, &eyePoseTime);

    // Render each eye
    for (int i = 0; i < ovrEye_Count; ++i)
    {
        int renderTargetIndex = 0;
        ovr_GetTextureSwapChainCurrentIndex(mSession, mTextureChain[i], &renderTargetIndex);
        ID3D11RenderTargetView* renderTargetView = mEyeRenderTargets[i][renderTargetIndex];
        ID3D11DepthStencilView* depthTargetView = mEyeDepthTarget[i];

        // Clear and set render/depth target and viewport
        DIRECTX.SetAndClearRenderTarget(renderTargetView, depthTargetView, 0.2f, 0.2f, 0.2f, 1.0f);
        DIRECTX.SetViewport((float)mEyeRenderViewport[i].Pos.x, (float)mEyeRenderViewport[i].Pos.y,
            (float)mEyeRenderViewport[i].Size.w, (float)mEyeRenderViewport[i].Size.h);

        // Eye
        XMVECTOR eyeRot = XMVectorSet(eyePose[i].Orientation.x, eyePose[i].Orientation.y,
            eyePose[i].Orientation.z, eyePose[i].Orientation.w);
        XMVECTOR eyePos = XMVectorSet(eyePose[i].Position.x, eyePose[i].Position.y, eyePose[i].Position.z, 0);
        XMVECTOR eyeForward = XMVector3Rotate(XMVectorSet(0, 0, -1, 0), eyeRot);

        // Matrices
        XMMATRIX viewMat = XMMatrixLookAtRH(eyePos, XMVectorAdd(eyePos, eyeForward),
            XMVector3Rotate(XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f), eyeRot));
        ovrMatrix4f proj = ovrMatrix4f_Projection(mEyeRenderLayer.Fov[i], 0.001f, 1000.0f, ovrProjection_None);
        XMMATRIX projMat = XMMatrixTranspose(XMMATRIX(&proj.M[0][0]));
        XMMATRIX viewProjMat = XMMatrixMultiply(viewMat, projMat);

        // Render and commit to swap chain
        mDynamicScene.Render(&viewProjMat, 1.0f, 1.0f, 1.0f, 1.0f, true);
        ovr_CommitTextureSwapChain(mSession, mTextureChain[i]);

        // Update eye layer
        mEyeRenderLayer.ColorTexture[i] = mTextureChain[i];
        mEyeRenderLayer.RenderPose[i] = eyePose[i];
        mEyeRenderLayer.SensorSampleTime = eyePoseTime;
    }

    // Submit frame
    ovrLayerHeader* layers = &mEyeRenderLayer.Header;
    ovrResult result = ovr_SubmitFrame(mSession, mFrameIndex++, nullptr, &layers, 1);
    if (!OVR_SUCCESS(result))
    {
        printf("ovr_SubmitFrame failed");
        exit(-1);
    }
}
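// For reference: ovr_GetEyePoses() above is a convenience wrapper around the explicit
// prediction/tracking calls used elsewhere in this collection. A minimal sketch (the helper
// name is ours; session, frameIndex and hmdToEyeOffset are assumed inputs):
static void QueryEyePosesLongHand(ovrSession session, long long frameIndex,
                                  const ovrVector3f hmdToEyeOffset[2], ovrPosef outPoses[2])
{
    double predictedTime = ovr_GetPredictedDisplayTime(session, frameIndex);
    ovrTrackingState state = ovr_GetTrackingState(session, predictedTime, ovrTrue);
    ovr_CalcEyePoses(state.HeadPose.ThePose, hmdToEyeOffset, outPoses);
}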
void vx_ovr_namespace_::OVRHMDHandleWithDevice::submitFrame()
{
    window_->update();

    ovrViewScaleDesc viewScaleDesc;
    viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
    viewScaleDesc.HmdToEyeViewOffset[0] = eyeRenderDesc_[0].HmdToEyeViewOffset;
    viewScaleDesc.HmdToEyeViewOffset[1] = eyeRenderDesc_[1].HmdToEyeViewOffset;

    ovrLayerEyeFov ld;
    ld.Header.Type = ovrLayerType_EyeFov;
    ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;

    ld.ColorTexture[0] = textureSetLeft_;
    ld.Viewport[0] = OVR::Recti(texSizeLeft_);
    ld.Fov[0] = description_.DefaultEyeFov[0];
    ld.RenderPose[0] = eyeRenderPosef_[0];

    ld.ColorTexture[1] = textureSetRight_;
    ld.Viewport[1] = OVR::Recti(texSizeRight_);
    ld.Fov[1] = description_.DefaultEyeFov[1];
    ld.RenderPose[1] = eyeRenderPosef_[1];

    ld.SensorSampleTime = sampleTime_;

    ovrLayerHeader* layers = &ld.Header;
    ovr_SubmitFrame(session_, 0, &viewScaleDesc, &layers, 1);

    // Blit mirror texture to back buffer
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFramebuffer_);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    GLint w = mirrorTexture_->OGL.Header.TextureSize.w;
    GLint h = mirrorTexture_->OGL.Header.TextureSize.h;
    glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

    glfwSwapBuffers(window_->getHandle());
}
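// The mirror blit above relies on a mirror texture attached to mirrorFramebuffer_. A sketch
// of that one-time setup in the same SDK style as this snippet; the method name
// createMirrorFramebuffer and the width/height parameters are assumptions, the LibOVR/GL
// calls are the same ones used by the other samples here.
void vx_ovr_namespace_::OVRHMDHandleWithDevice::createMirrorFramebuffer(int width, int height)
{
    ovr_CreateMirrorTextureGL(session_, GL_SRGB8_ALPHA8, width, height,
                              reinterpret_cast<ovrTexture**>(&mirrorTexture_));
    glGenFramebuffers(1, &mirrorFramebuffer_);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFramebuffer_);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                           mirrorTexture_->OGL.TexId, 0);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
}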
void VR::submitFrame()
{
    int frameIndex = xapp->getCurrentBackBufferIndex();

    // Increment to use next texture, just before writing
    int currentIndex;
    ovr_GetTextureSwapChainCurrentIndex(session, textureSwapChain, &currentIndex);
    assert(currentIndex == frameIndex);
    xapp->lastPresentedFrame = frameIndex;

    //xapp->d3d11On12Device->AcquireWrappedResources(xapp->wrappedBackBuffers[frameIndex].GetAddressOf(), 1);
    //xapp->d3d11DeviceContext->CopyResource(xapp->wrappedTextures[currentIndex].Get(), xapp->wrappedBackBuffers[frameIndex].Get());
    //xapp->d3d11On12Device->ReleaseWrappedResources(xapp->wrappedBackBuffers[frameIndex].GetAddressOf(), 1);
    //xapp->d3d11DeviceContext->Flush();
    //xapp->device->
    //ovr_CommitTextureSwapChain(session, textureSwapChain);
    ovr_CommitTextureSwapChain(session, textureSwapChain);
    /* pTextureSet->CurrentIndex = (pTextureSet->CurrentIndex + 1) % pTextureSet->TextureCount; */

    // Submit frame with the one layer we have.
    ovrLayerHeader* layers = &layer.Header;
    ovrResult result = ovr_SubmitFrame(session, 0, nullptr, &layers, 1);
    bool isVisible = (result == ovrSuccess);
    //Log
}
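// The bare isVisible flag above can be expanded into the result handling that the other
// samples in this collection use. A sketch (the helper name and the recovery comments are
// placeholders; the result codes are the real LibOVR ones):
static bool HandleSubmitResult(ovrResult result)
{
    if (result == ovrSuccess_NotVisible)
    {
        // HMD not worn or app lost VR focus: keep submitting, but heavy rendering can be skipped
        return false;
    }
    if (result == ovrError_DisplayLost)
    {
        // device was removed: tear down swap chains and the session, then retry ovr_Create()
        return false;
    }
    return OVR_SUCCESS(result);
}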
int main(int argc, char **argv) { // Initialize SDL2's context SDL_Init(SDL_INIT_VIDEO); // Initialize Oculus' context ovrResult result = ovr_Initialize(nullptr); if (OVR_FAILURE(result)) { std::cout << "ERROR: Failed to initialize libOVR" << std::endl; SDL_Quit(); return -1; } ovrSession session; ovrGraphicsLuid luid; // Connect to the Oculus headset result = ovr_Create(&session, &luid); if (OVR_FAILURE(result)) { std::cout << "ERROR: Oculus Rift not detected" << std::endl; ovr_Shutdown(); SDL_Quit(); return -1; } int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED; int winWidth = 1280; int winHeight = 720; Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN; // Create SDL2 Window SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags); // Create OpenGL context SDL_GLContext glContext = SDL_GL_CreateContext(window); // Initialize GLEW glewInit(); // Turn off vsync to let the compositor do its magic SDL_GL_SetSwapInterval(0); // Initialize the ZED Camera sl::zed::Camera* zed = 0; zed = new sl::zed::Camera(sl::zed::HD720); sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0); int zedWidth = zed->getImageSize().width; int zedHeight = zed->getImageSize().height; if (zederr != sl::zed::SUCCESS) { std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl; ovr_Destroy(session); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } GLuint zedTextureID_L, zedTextureID_R; // Generate OpenGL texture for left images of the ZED camera glGenTextures(1, &zedTextureID_L); glBindTexture(GL_TEXTURE_2D, zedTextureID_L); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); // Generate OpenGL texture for right images of the ZED camera glGenTextures(1, &zedTextureID_R); glBindTexture(GL_TEXTURE_2D, zedTextureID_R); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glBindTexture(GL_TEXTURE_2D, 0); #if OPENGL_GPU_INTEROP cudaGraphicsResource* cimg_L; cudaGraphicsResource* cimg_R; cudaError_t errL, errR; errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone); errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone); if (errL != cudaSuccess || errR != cudaSuccess) { std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl; } #endif ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session); // Get the texture sizes of Oculus eyes ovrSizei textureSize0 = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f); ovrSizei textureSize1 = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f); // Compute the final size of the render buffer ovrSizei bufferSize; bufferSize.w = textureSize0.w + textureSize1.w; bufferSize.h = std::max(textureSize0.h, textureSize1.h); // Initialize OpenGL swap textures to render ovrTextureSwapChain 
textureChain = nullptr; // Description of the swap chain ovrTextureSwapChainDesc descTextureSwap = {}; descTextureSwap.Type = ovrTexture_2D; descTextureSwap.ArraySize = 1; descTextureSwap.Width = bufferSize.w; descTextureSwap.Height = bufferSize.h; descTextureSwap.MipLevels = 1; descTextureSwap.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB; descTextureSwap.SampleCount = 1; descTextureSwap.StaticImage = ovrFalse; // Create the OpenGL texture swap chain result = ovr_CreateTextureSwapChainGL(session, &descTextureSwap, &textureChain); int length = 0; ovr_GetTextureSwapChainLength(session, textureChain, &length); if (OVR_SUCCESS(result)) { for (int i = 0; i < length; ++i) { GLuint chainTexId; ovr_GetTextureSwapChainBufferGL(session, textureChain, i, &chainTexId); glBindTexture(GL_TEXTURE_2D, chainTexId); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); } } else { std::cout << "ERROR: failed creating swap texture" << std::endl; ovr_Destroy(session); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } // Generate frame buffer to render GLuint fboID; glGenFramebuffers(1, &fboID); // Generate depth buffer of the frame buffer GLuint depthBuffID; glGenTextures(1, &depthBuffID); glBindTexture(GL_TEXTURE_2D, depthBuffID); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); GLenum internalFormat = GL_DEPTH_COMPONENT24; GLenum type = GL_UNSIGNED_INT; glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL); // Create a mirror texture to display the render result in the SDL2 window ovrMirrorTextureDesc descMirrorTexture; memset(&descMirrorTexture, 0, sizeof(descMirrorTexture)); descMirrorTexture.Width = winWidth; descMirrorTexture.Height = winHeight; descMirrorTexture.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB; ovrMirrorTexture mirrorTexture = nullptr; result = ovr_CreateMirrorTextureGL(session, &descMirrorTexture, &mirrorTexture); if (!OVR_SUCCESS(result)) { std::cout << "ERROR: Failed to create mirror texture" << std::endl; } GLuint mirrorTextureId; ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTextureId); GLuint mirrorFBOID; glGenFramebuffers(1, &mirrorFBOID); glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID); glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0); glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); // Frame index used by the compositor // it needs to be updated each new frame long long frameIndex = 0; // FloorLevel will give tracking poses where the floor height is 0 ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel); // Initialize a default Pose ovrPosef eyeRenderPose[2]; // Get the render description of the left and right "eyes" of the Oculus headset ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Get the Oculus view scale description 
ovrVector3f hmdToEyeOffset[2]; double sensorSampleTime; // Create and compile the shader's sources Shader shader(OVR_ZED_VS, OVR_ZED_FS); // Compute the ZED image field of view with the ZED parameters float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx *2.f)) * 2.f; // Compute the Horizontal Oculus' field of view with its parameters float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan)); // Compute the useful part of the ZED image unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH; // Compute the size of the final image displayed in the headset with the ZED image's aspect-ratio kept unsigned int widthFinal = bufferSize.w / 2; float heightGL = 1.f; float widthGL = 1.f; if (usefulWidth > 0.f) { unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth; // Convert this size to OpenGL viewport's frame's coordinates heightGL = (heightFinal) / (float)(bufferSize.h); widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal); } else { std::cout << "WARNING: ZED parameters got wrong values." "Default vertical and horizontal FOV are used.\n" "Check your calibration file or check if your ZED is not too close to a surface or an object." << std::endl; } // Compute the Vertical Oculus' field of view with its parameters float ovrFovV = (atanf(hmdDesc.DefaultEyeFov[0].UpTan) + atanf(hmdDesc.DefaultEyeFov[0].DownTan)); // Compute the center of the optical lenses of the headset float offsetLensCenterX = ((atanf(hmdDesc.DefaultEyeFov[0].LeftTan)) / ovrFovH) * 2.f - 1.f; float offsetLensCenterY = ((atanf(hmdDesc.DefaultEyeFov[0].UpTan)) / ovrFovV) * 2.f - 1.f; // Create a rectangle with the computed coordinates and push it in GPU memory. struct GLScreenCoordinates { float left, up, right, down; } screenCoord; screenCoord.up = heightGL + offsetLensCenterY; screenCoord.down = heightGL - offsetLensCenterY; screenCoord.right = widthGL + offsetLensCenterX; screenCoord.left = widthGL - offsetLensCenterX; float rectVertices[12] = { -screenCoord.left, -screenCoord.up, 0, screenCoord.right, -screenCoord.up, 0, screenCoord.right, screenCoord.down, 0, -screenCoord.left, screenCoord.down, 0 }; GLuint rectVBO[3]; glGenBuffers(1, &rectVBO[0]); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]); glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW); float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 }; glGenBuffers(1, &rectVBO[1]); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]); glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW); unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 }; glGenBuffers(1, &rectVBO[2]); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindBuffer(GL_ARRAY_BUFFER, 0); // Initialize hit value float hit = 0.02f; // Initialize a boolean that will be used to stop the application’s loop and another one to pause/unpause rendering bool end = false; bool refresh = true; // SDL variable that will be used to store input events SDL_Event events; // Initialize time variables. They will be used to limit the number of frames rendered per second. 
// Frame counter unsigned int riftc = 0, zedc = 1; // Chronometer unsigned int rifttime = 0, zedtime = 0, zedFPS = 0; int time1 = 0, timePerFrame = 0; int frameRate = (int)(1000 / MAX_FPS); // This boolean is used to test if the application is focused bool isVisible = true; // Enable the shader glUseProgram(shader.getProgramId()); // Bind the Vertex Buffer Objects of the rectangle that displays ZED images // vertices glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]); glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); // indices glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]); // texture coordinates glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]); glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); // Main loop while (!end) { // Compute the time used to render the previous frame timePerFrame = SDL_GetTicks() - time1; // If the previous frame has been rendered too fast if (timePerFrame < frameRate) { // Pause the loop to have a max FPS equal to MAX_FPS SDL_Delay(frameRate - timePerFrame); timePerFrame = frameRate; } // Increment the ZED chronometer zedtime += timePerFrame; // If ZED chronometer reached 1 second if (zedtime > 1000) { zedFPS = zedc; zedc = 0; zedtime = 0; } // Increment the Rift chronometer and the Rift frame counter rifttime += timePerFrame; riftc++; // If Rift chronometer reached 200 milliseconds if (rifttime > 200) { // Display FPS std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS; // Reset Rift chronometer rifttime = 0; // Reset Rift frame counter riftc = 0; } // Start frame chronometer time1 = SDL_GetTicks(); // While there is an event catched and not tested while (SDL_PollEvent(&events)) { // If a key is released if (events.type == SDL_KEYUP) { // If Q quit the application if (events.key.keysym.scancode == SDL_SCANCODE_Q) end = true; // If R reset the hit value else if (events.key.keysym.scancode == SDL_SCANCODE_R) hit = 0.0f; // If C pause/unpause rendering else if (events.key.keysym.scancode == SDL_SCANCODE_C) refresh = !refresh; } // If the mouse wheel is used if (events.type == SDL_MOUSEWHEEL) { // Increase or decrease hit value float s; events.wheel.y > 0 ? s = 1.0f : s = -1.0f; hit += 0.005f * s; } } // Get texture swap index where we must draw our frame GLuint curTexId; int curIndex; ovr_GetTextureSwapChainCurrentIndex(session, textureChain, &curIndex); ovr_GetTextureSwapChainBufferGL(session, textureChain, curIndex, &curTexId); // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime. 
eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); hmdToEyeOffset[0] = eyeRenderDesc[0].HmdToEyeOffset; hmdToEyeOffset[1] = eyeRenderDesc[1].HmdToEyeOffset; // Get eye poses, feeding in correct IPD offset ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyeRenderPose, &sensorSampleTime); // If the application is focused if (isVisible) { // If successful grab a new ZED image if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false)) { // Update the ZED frame counter zedc++; if (refresh) { #if OPENGL_GPU_INTEROP sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT); cudaArray_t arrIm; cudaGraphicsMapResources(1, &cimg_L, 0); cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0); cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); cudaGraphicsUnmapResources(1, &cimg_L, 0); m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT); cudaGraphicsMapResources(1, &cimg_R, 0); cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0); cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); // *4 = 4 channels * 1 bytes (uint) cudaGraphicsUnmapResources(1, &cimg_R, 0); #endif // Bind the frame buffer glBindFramebuffer(GL_FRAMEBUFFER, fboID); // Set its color layer 0 as the current swap texture glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0); // Set its depth layer as our depth buffer glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0); // Clear the frame buffer glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glClearColor(0, 0, 0, 1); // Render for each Oculus eye the equivalent ZED image for (int eye = 0; eye < 2; eye++) { // Set the left or right vertical half of the buffer as the viewport glViewport(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h); // Bind the left or right ZED image glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R); #if !OPENGL_GPU_INTEROP glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data); #endif // Bind the hit value glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit); // Bind the isLeft value glUniform1ui(glGetUniformLocation(shader.getProgramId(), "isLeft"), eye == ovrEye_Left ? 1U : 0U); // Draw the ZED image glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); } // Avoids an error when calling SetAndClearRenderSurface during next iteration. // Without this, during the next while loop iteration SetAndClearRenderSurface // would bind a framebuffer with an invalid COLOR_ATTACHMENT0 because the texture ID // associated with COLOR_ATTACHMENT0 had been unlocked by calling wglDXUnlockObjectsNV. glBindFramebuffer(GL_FRAMEBUFFER, fboID); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0); // Commit changes to the textures so they get picked up frame ovr_CommitTextureSwapChain(session, textureChain); } // Do not forget to increment the frameIndex! 
frameIndex++; } } /* Note: Even if we don't ask to refresh the framebuffer or if the Camera::grab() doesn't catch a new frame, we have to submit an image to the Rift; it needs 75Hz refresh. Else there will be jumbs, black frames and/or glitches in the headset. */ ovrLayerEyeFov ld; ld.Header.Type = ovrLayerType_EyeFov; // Tell to the Oculus compositor that our texture origin is at the bottom left ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL | Disable head tracking // Set the Oculus layer eye field of view for each view for (int eye = 0; eye < 2; ++eye) { // Set the color texture as the current swap texture ld.ColorTexture[eye] = textureChain; // Set the viewport as the right or left vertical half part of the color texture ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h); // Set the field of view ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; // Set the pose matrix ld.RenderPose[eye] = eyeRenderPose[eye]; } ld.SensorSampleTime = sensorSampleTime; ovrLayerHeader* layers = &ld.Header; // Submit the frame to the Oculus compositor // which will display the frame in the Oculus headset result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1); if (!OVR_SUCCESS(result)) { std::cout << "ERROR: failed to submit frame" << std::endl; glDeleteBuffers(3, rectVBO); ovr_DestroyTextureSwapChain(session, textureChain); ovr_DestroyMirrorTexture(session, mirrorTexture); ovr_Destroy(session); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } if (result == ovrSuccess && !isVisible) { std::cout << "The application is now shown in the headset." << std::endl; } isVisible = (result == ovrSuccess); // This is not really needed for this application but it may be usefull for an more advanced application ovrSessionStatus sessionStatus; ovr_GetSessionStatus(session, &sessionStatus); if (sessionStatus.ShouldRecenter) { std::cout << "Recenter Tracking asked by Session" << std::endl; ovr_RecenterTrackingOrigin(session); } // Copy the frame to the mirror buffer // which will be drawn in the SDL2 image glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); GLint w = winWidth; GLint h = winHeight; glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); // Swap the SDL2 window SDL_GL_SwapWindow(window); } // Disable all OpenGL buffer glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS); glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); glUseProgram(0); glBindVertexArray(0); // Delete the Vertex Buffer Objects of the rectangle glDeleteBuffers(3, rectVBO); // Delete SDL, OpenGL, Oculus and ZED context ovr_DestroyTextureSwapChain(session, textureChain); ovr_DestroyMirrorTexture(session, mirrorTexture); ovr_Destroy(session); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; // quit return 0; }
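// The SDL/OVR/ZED teardown sequence above appears twice in this sample (once in the
// submit-failure path, once at normal exit). A small helper like the following keeps the two
// paths in sync; it is a sketch, not part of the original sample, and only regroups the calls
// already shown.
static void cleanupAll(ovrSession session, ovrTextureSwapChain chain, ovrMirrorTexture mirror,
                       SDL_GLContext glContext, SDL_Window* window, sl::zed::Camera* zed)
{
    if (chain)  ovr_DestroyTextureSwapChain(session, chain);
    if (mirror) ovr_DestroyMirrorTexture(session, mirror);
    ovr_Destroy(session);
    ovr_Shutdown();
    SDL_GL_DeleteContext(glContext);
    SDL_DestroyWindow(window);
    SDL_Quit();
    delete zed;
}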
// return true to retry later (e.g. after display lost) static bool MainLoop(bool retryCreate) { // Initialize these to nullptr here to handle device lost failures cleanly ovrMirrorTexture mirrorTexture = nullptr; OculusEyeTexture* pEyeRenderTexture[2] = { nullptr, nullptr }; Scene* roomScene = nullptr; Camera* mainCam = nullptr; ovrMirrorTextureDesc mirrorDesc = {}; ovrSession session; ovrGraphicsLuid luid; ovrResult result = ovr_Create(&session, &luid); if (!OVR_SUCCESS(result)) return retryCreate; ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session); // Setup Device and Graphics // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid))) goto Done; // Make the eye render buffers (caution if actual size < requested due to HW limits). ovrRecti eyeRenderViewport[2]; for (int eye = 0; eye < 2; ++eye) { ovrSizei idealSize = ovr_GetFovTextureSize(session, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f); pEyeRenderTexture[eye] = new OculusEyeTexture(); if (!pEyeRenderTexture[eye]->Init(session, idealSize.w, idealSize.h, true)) { if (retryCreate) goto Done; FATALERROR("Failed to create eye texture."); } eyeRenderViewport[eye].Pos.x = 0; eyeRenderViewport[eye].Pos.y = 0; eyeRenderViewport[eye].Size = idealSize; if (!pEyeRenderTexture[eye]->TextureChain) { if (retryCreate) goto Done; FATALERROR("Failed to create texture."); } } // Create a mirror to see on the monitor. mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB; mirrorDesc.Width = DIRECTX.WinSizeW; mirrorDesc.Height = DIRECTX.WinSizeH; result = ovr_CreateMirrorTextureDX(session, DIRECTX.CommandQueue, &mirrorDesc, &mirrorTexture); if (!OVR_SUCCESS(result)) { if (retryCreate) goto Done; FATALERROR("Failed to create mirror texture."); } // Create the room model roomScene = new Scene(false); // Create camera mainCam = new Camera(XMVectorSet(0.0f, 1.6f, 5.0f, 0), XMQuaternionIdentity()); // Setup VR components, filling out description ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); long long frameIndex = 0; bool drawMirror = true; DIRECTX.InitFrame(drawMirror); // Main loop while (DIRECTX.HandleMessages()) { ovrSessionStatus sessionStatus; ovr_GetSessionStatus(session, &sessionStatus); if (sessionStatus.ShouldQuit) { // Because the application is requested to quit, should not request retry retryCreate = false; break; } if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session); if (sessionStatus.IsVisible) { XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->GetRotVec()); XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->GetRotVec()); XMVECTOR mainCamPos = mainCam->GetPosVec(); XMVECTOR mainCamRot = mainCam->GetRotVec(); if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP]) mainCamPos = XMVectorAdd( mainCamPos, forward); if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCamPos = XMVectorSubtract(mainCamPos, forward); if (DIRECTX.Key['D']) mainCamPos = XMVectorAdd( mainCamPos, right); if (DIRECTX.Key['A']) mainCamPos = XMVectorSubtract(mainCamPos, right); static float Yaw = 0; if (DIRECTX.Key[VK_LEFT]) mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0); if (DIRECTX.Key[VK_RIGHT]) mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0); mainCam->SetPosVec(mainCamPos); 
mainCam->SetRotVec(mainCamRot); // Animate the cube static float cubeClock = 0; roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f)); // Get both eye poses simultaneously, with IPD offset already included. ovrPosef EyeRenderPose[2]; ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset }; double sensorSampleTime; // sensorSampleTime is fed into the layer later ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime); // Render Scene to Eye Buffers for (int eye = 0; eye < 2; ++eye) { DIRECTX.SetActiveContext(eye == 0 ? DrawContext_EyeRenderLeft : DrawContext_EyeRenderRight); DIRECTX.SetActiveEye(eye); CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(pEyeRenderTexture[eye]->GetD3DResource(), D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE, D3D12_RESOURCE_STATE_RENDER_TARGET); DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar); DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->GetRtv(), pEyeRenderTexture[eye]->GetDsv()); DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y, (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h); //Get the pose information in XM format XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y, EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w); XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0); // Get view and projection matrices for the Rift camera Camera finalCam(XMVectorAdd(mainCamPos, XMVector3Rotate(eyePos, mainCamRot)), XMQuaternionMultiply(eyeQuat, mainCamRot)); XMMATRIX view = finalCam.GetViewMatrix(); ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None); XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0], p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1], p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2], p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]); XMMATRIX prod = XMMatrixMultiply(view, proj); roomScene->Render(&prod, 1, 1, 1, 1, true); resBar = CD3DX12_RESOURCE_BARRIER::Transition(pEyeRenderTexture[eye]->GetD3DResource(), D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE); DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar); // Commit rendering to the swap chain pEyeRenderTexture[eye]->Commit(); // kick off eye render command lists before ovr_SubmitFrame() DIRECTX.SubmitCommandList(DIRECTX.ActiveContext); } // Initialize our single full screen Fov layer. 
ovrLayerEyeFov ld = {}; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = 0; for (int eye = 0; eye < 2; ++eye) { ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureChain; ld.Viewport[eye] = eyeRenderViewport[eye]; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = EyeRenderPose[eye]; ld.SensorSampleTime = sensorSampleTime; } ovrLayerHeader* layers = &ld.Header; result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1); // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost if (!OVR_SUCCESS(result)) goto Done; frameIndex++; } if (drawMirror) { DIRECTX.SetActiveContext(DrawContext_Final); DIRECTX.SetViewport(0.0f, 0.0f, (float)hmdDesc.Resolution.w / 2, (float)hmdDesc.Resolution.h / 2); // Render mirror ID3D12Resource* mirrorTexRes = nullptr; ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&mirrorTexRes)); //DIRECTX.SetAndClearRenderTarget(DIRECTX.CurrentFrameResources().SwapChainRtvHandle, nullptr, 1.0f, 0.5f, 0.0f, 1.0f); CD3DX12_RESOURCE_BARRIER preMirrorBlitBar[] = { CD3DX12_RESOURCE_BARRIER::Transition(DIRECTX.CurrentFrameResources().SwapChainBuffer, D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_DEST), CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes, D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE) }; // Indicate that the back buffer will now be copied into DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(ARRAYSIZE(preMirrorBlitBar), preMirrorBlitBar); DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->CopyResource(DIRECTX.CurrentFrameResources().SwapChainBuffer, mirrorTexRes); CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes, D3D12_RESOURCE_STATE_COPY_SOURCE, D3D12_RESOURCE_STATE_RENDER_TARGET); DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar); } DIRECTX.SubmitCommandListAndPresent(drawMirror); } // Release resources Done: delete mainCam; delete roomScene; if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture); for (int eye = 0; eye < 2; ++eye) { delete pEyeRenderTexture[eye]; } DIRECTX.ReleaseDevice(); ovr_Destroy(session); // Retry on ovrError_DisplayLost return retryCreate || (result == ovrError_DisplayLost); }
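// MainLoop() above returns true when a retry is worthwhile (e.g. after ovrError_DisplayLost).
// A sketch of the outer loop that drives it, modelled on the standard SDK samples; the
// 10 ms sleep and the surrounding function are assumptions, not part of the excerpt above.
static void RunUntilDone()
{
    ovrResult result = ovr_Initialize(nullptr);
    if (!OVR_SUCCESS(result))
        return;
    while (MainLoop(true))
    {
        Sleep(10);   // give the HMD/compositor a moment to come back before re-creating
    }
    ovr_Shutdown();
}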
int main(int argc, char **argv) { // Initialize SDL2's context SDL_Init(SDL_INIT_VIDEO); // Initialize Oculus' context ovrResult result = ovr_Initialize(nullptr); if (OVR_FAILURE(result)) { std::cout << "ERROR: Failed to initialize libOVR" << std::endl; SDL_Quit(); return -1; } ovrSession hmd; ovrGraphicsLuid luid; // Connect to the Oculus headset result = ovr_Create(&hmd, &luid); if (OVR_FAILURE(result)) { std::cout << "ERROR: Oculus Rift not detected" << std::endl; ovr_Shutdown(); SDL_Quit(); return -1; } int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED; int winWidth = 1280; int winHeight = 720; Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN; // Create SDL2 Window SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags); // Create OpenGL context SDL_GLContext glContext = SDL_GL_CreateContext(window); // Initialize GLEW glewInit(); // Turn off vsync to let the compositor do its magic SDL_GL_SetSwapInterval(0); // Initialize the ZED Camera sl::zed::Camera* zed = 0; zed = new sl::zed::Camera(sl::zed::HD720); sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0); int zedWidth = zed->getImageSize().width; int zedHeight = zed->getImageSize().height; if (zederr != sl::zed::SUCCESS) { std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl; ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } GLuint zedTextureID_L, zedTextureID_R; // Generate OpenGL texture for left images of the ZED camera glGenTextures(1, &zedTextureID_L); glBindTexture(GL_TEXTURE_2D, zedTextureID_L); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); // Generate OpenGL texture for right images of the ZED camera glGenTextures(1, &zedTextureID_R); glBindTexture(GL_TEXTURE_2D, zedTextureID_R); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glBindTexture(GL_TEXTURE_2D, 0); #if OPENGL_GPU_INTEROP cudaGraphicsResource* cimg_L; cudaGraphicsResource* cimg_R; cudaError_t errL, errR; errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone); errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone); if (errL != cudaSuccess || errR != cudaSuccess) { std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl; } #endif ovrHmdDesc hmdDesc = ovr_GetHmdDesc(hmd); // Get the texture sizes of Oculus eyes ovrSizei textureSize0 = ovr_GetFovTextureSize(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f); ovrSizei textureSize1 = ovr_GetFovTextureSize(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f); // Compute the final size of the render buffer ovrSizei bufferSize; bufferSize.w = textureSize0.w + textureSize1.w; bufferSize.h = std::max(textureSize0.h, textureSize1.h); // Initialize OpenGL swap textures to render ovrSwapTextureSet* ptextureSet = 0; if 
(OVR_SUCCESS(ovr_CreateSwapTextureSetGL(hmd, GL_SRGB8_ALPHA8, bufferSize.w, bufferSize.h, &ptextureSet))) { for (int i = 0; i < ptextureSet->TextureCount; ++i) { ovrGLTexture* tex = (ovrGLTexture*)&ptextureSet->Textures[i]; glBindTexture(GL_TEXTURE_2D, tex->OGL.TexId); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); } } else { std::cout << "ERROR: failed creating swap texture" << std::endl; ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } // Generate frame buffer to render GLuint fboID; glGenFramebuffers(1, &fboID); // Generate depth buffer of the frame buffer GLuint depthBuffID; glGenTextures(1, &depthBuffID); glBindTexture(GL_TEXTURE_2D, depthBuffID); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); GLenum internalFormat = GL_DEPTH_COMPONENT24; GLenum type = GL_UNSIGNED_INT; glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL); // Create a mirror texture to display the render result in the SDL2 window ovrGLTexture* mirrorTexture = nullptr; result = ovr_CreateMirrorTextureGL(hmd, GL_SRGB8_ALPHA8, winWidth, winHeight, reinterpret_cast<ovrTexture**>(&mirrorTexture)); if (!OVR_SUCCESS(result)) { std::cout << "ERROR: Failed to create mirror texture" << std::endl; } GLuint mirrorFBOID; glGenFramebuffers(1, &mirrorFBOID); glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID); glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexture->OGL.TexId, 0); glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); // Initialize a default Pose ovrPosef eyeRenderPose; // Set Identity quaternion eyeRenderPose.Orientation.x = 0; eyeRenderPose.Orientation.y = 0; eyeRenderPose.Orientation.z = 0; eyeRenderPose.Orientation.w = 1; // Set World's origin position eyeRenderPose.Position.x = 0.f; eyeRenderPose.Position.y = 0.f; eyeRenderPose.Position.z = 0; ovrLayerEyeFov ld; ld.Header.Type = ovrLayerType_EyeFov; // Tell to the Oculus compositor that our texture origin is at the bottom left ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft | ovrLayerFlag_HeadLocked; // Because OpenGL | Disable head tracking // Set the Oculus layer eye field of view for each view for (int eye = 0; eye < 2; ++eye) { // Set the color texture as the current swap texture ld.ColorTexture[eye] = ptextureSet; // Set the viewport as the right or left vertical half part of the color texture ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 
0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h); // Set the field of view ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; // Set the pose matrix ld.RenderPose[eye] = eyeRenderPose; } double sensorSampleTime = ovr_GetTimeInSeconds(); ld.SensorSampleTime = sensorSampleTime; // Get the render description of the left and right "eyes" of the Oculus headset ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Get the Oculus view scale description ovrVector3f viewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset, eyeRenderDesc[1].HmdToEyeViewOffset }; ovrViewScaleDesc viewScaleDesc; viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f; viewScaleDesc.HmdToEyeViewOffset[0] = viewOffset[0]; viewScaleDesc.HmdToEyeViewOffset[1] = viewOffset[1]; // Create and compile the shader's sources Shader shader(OVR_ZED_VS, OVR_ZED_FS); // Compute the ZED image field of view with the ZED parameters float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx *2.f)) * 2.f; // Compute the Oculus' field of view with its parameters float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan)); // Compute the useful part of the ZED image unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH; // Compute the size of the final image displayed in the headset with the ZED image's aspect-ratio kept unsigned int widthFinal = bufferSize.w / 2; unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth; // Convert this size to OpenGL viewport's frame's coordinates float heightGL = (heightFinal) / (float)(bufferSize.h); float widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal); // Create a rectangle with the coordonates computed and push it in GPU memory. float rectVertices[12] = { -widthGL, -heightGL, 0, widthGL, -heightGL, 0, widthGL, heightGL, 0, -widthGL, heightGL, 0 }; GLuint rectVBO[3]; glGenBuffers(1, &rectVBO[0]); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]); glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW); float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 }; glGenBuffers(1, &rectVBO[1]); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]); glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW); unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 }; glGenBuffers(1, &rectVBO[2]); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindBuffer(GL_ARRAY_BUFFER, 0); // Initialize hit value float hit = 0.02f; // Initialize a boolean that will be used to stop the application’s loop and another one to pause/unpause rendering bool end = false; bool refresh = true; // SDL variable that will be used to store input events SDL_Event events; // Initialize time variables. They will be used to limit the number of frames rendered per second. 
// Frame counter unsigned int riftc = 0, zedc = 1; // Chronometer unsigned int rifttime = 0, zedtime = 0, zedFPS = 0; int time1 = 0, timePerFrame = 0; int frameRate = (int)(1000 / MAX_FPS); // Enable the shader glUseProgram(shader.getProgramId()); // Bind the Vertex Buffer Objects of the rectangle that displays ZED images // vertices glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]); glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); // indices glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]); // texture coordinates glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]); glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); // Main loop while (!end) { // Compute the time used to render the previous frame timePerFrame = SDL_GetTicks() - time1; // If the previous frame has been rendered too fast if (timePerFrame < frameRate) { // Pause the loop to have a max FPS equal to MAX_FPS SDL_Delay(frameRate - timePerFrame); timePerFrame = frameRate; } // Increment the ZED chronometer zedtime += timePerFrame; // If ZED chronometer reached 1 second if (zedtime > 1000) { zedFPS = zedc; zedc = 0; zedtime = 0; } // Increment the Rift chronometer and the Rift frame counter rifttime += timePerFrame; riftc++; // If Rift chronometer reached 200 milliseconds if (rifttime > 200) { // Display FPS std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS; // Reset Rift chronometer rifttime = 0; // Reset Rift frame counter riftc = 0; } // Start frame chronometer time1 = SDL_GetTicks(); // While there is an event catched and not tested while (SDL_PollEvent(&events)) { // If a key is released if (events.type == SDL_KEYUP) { // If Q quit the application if (events.key.keysym.scancode == SDL_SCANCODE_Q) end = true; // If R reset the hit value else if (events.key.keysym.scancode == SDL_SCANCODE_R) hit = 0.0f; // If C pause/unpause rendering else if (events.key.keysym.scancode == SDL_SCANCODE_C) refresh = !refresh; } // If the mouse wheel is used if (events.type == SDL_MOUSEWHEEL) { // Increase or decrease hit value float s; events.wheel.y > 0 ? s = 1.0f : s = -1.0f; hit += 0.005f * s; } } // If rendering is unpaused and // successful grab ZED image if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false)) { // Update the ZED frame counter zedc++; if (refresh) { #if OPENGL_GPU_INTEROP sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT); cudaArray_t arrIm; cudaGraphicsMapResources(1, &cimg_L, 0); cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0); cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); cudaGraphicsUnmapResources(1, &cimg_L, 0); m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT); cudaGraphicsMapResources(1, &cimg_R, 0); cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0); cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); // *4 = 4 channels * 1 bytes (uint) cudaGraphicsUnmapResources(1, &cimg_R, 0); #endif // Increment the CurrentIndex to point to the next texture within the output swap texture set. 
// CurrentIndex must be advanced round-robin fashion every time we draw a new frame ptextureSet->CurrentIndex = (ptextureSet->CurrentIndex + 1) % ptextureSet->TextureCount; // Get the current swap texture pointer auto tex = reinterpret_cast<ovrGLTexture*>(&ptextureSet->Textures[ptextureSet->CurrentIndex]); // Bind the frame buffer glBindFramebuffer(GL_FRAMEBUFFER, fboID); // Set its color layer 0 as the current swap texture glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex->OGL.TexId, 0); // Set its depth layer as our depth buffer glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0); // Clear the frame buffer glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glClearColor(0, 0, 0, 1); // Render for each Oculus eye the equivalent ZED image for (int eye = 0; eye < 2; eye++) { // Set the left or right vertical half of the buffer as the viewport glViewport(ld.Viewport[eye].Pos.x, ld.Viewport[eye].Pos.y, ld.Viewport[eye].Size.w, ld.Viewport[eye].Size.h); // Bind the left or right ZED image glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R); #if !OPENGL_GPU_INTEROP glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data); #endif // Bind the hit value glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit); // Draw the ZED image glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); } } } /* Note: Even if we don't ask to refresh the framebuffer or if the Camera::grab() doesn't catch a new frame, we have to submit an image to the Rift; it needs 75Hz refresh. Else there will be jumbs, black frames and/or glitches in the headset. */ ovrLayerHeader* layers = &ld.Header; // Submit the frame to the Oculus compositor // which will display the frame in the Oculus headset result = ovr_SubmitFrame(hmd, 0, &viewScaleDesc, &layers, 1); if (!OVR_SUCCESS(result)) { std::cout << "ERROR: failed to submit frame" << std::endl; glDeleteBuffers(3, rectVBO); ovr_DestroySwapTextureSet(hmd, ptextureSet); ovr_DestroyMirrorTexture(hmd, &mirrorTexture->Texture); ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } // Copy the frame to the mirror buffer // which will be drawn in the SDL2 image glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); GLint w = mirrorTexture->OGL.Header.TextureSize.w; GLint h = mirrorTexture->OGL.Header.TextureSize.h; glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); // Swap the SDL2 window SDL_GL_SwapWindow(window); } // Disable all OpenGL buffer glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS); glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); glUseProgram(0); glBindVertexArray(0); // Delete the Vertex Buffer Objects of the rectangle glDeleteBuffers(3, rectVBO); // Delete SDL, OpenGL, Oculus and ZED context ovr_DestroySwapTextureSet(hmd, ptextureSet); ovr_DestroyMirrorTexture(hmd, &mirrorTexture->Texture); ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; // quit return 0; }
// Display to an HMD with OVR SDK backend. void displayHMD() { ovrSessionStatus sessionStatus; ovr_GetSessionStatus(g_session, &sessionStatus); if (sessionStatus.HmdPresent == false) { displayMonitor(); return; } const ovrHmdDesc& hmdDesc = m_Hmd; double sensorSampleTime; // sensorSampleTime is fed into the layer later if (g_hmdVisible) { // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime. ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(g_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(g_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Get eye poses, feeding in correct IPD offset ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset }; #if 0 // Get both eye poses simultaneously, with IPD offset already included. double displayMidpointSeconds = ovr_GetPredictedDisplayTime(g_session, 0); ovrTrackingState hmdState = ovr_GetTrackingState(g_session, displayMidpointSeconds, ovrTrue); ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeOffset, m_eyePoses); #else ovr_GetEyePoses(g_session, g_frameIndex, ovrTrue, HmdToEyeOffset, m_eyePoses, &sensorSampleTime); #endif storeHmdPose(m_eyePoses[0]); for (int eye = 0; eye < 2; ++eye) { const FBO& swapfbo = m_swapFBO[eye]; const ovrTextureSwapChain& chain = g_textureSwapChain[eye]; int curIndex; ovr_GetTextureSwapChainCurrentIndex(g_session, chain, &curIndex); GLuint curTexId; ovr_GetTextureSwapChainBufferGL(g_session, chain, curIndex, &curTexId); glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0); glViewport(0, 0, swapfbo.w, swapfbo.h); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glEnable(GL_FRAMEBUFFER_SRGB); { glClearColor(0.3f, 0.3f, 0.3f, 0.f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale); ovrRecti vp = { 0, 0, downSize.w, downSize.h }; const int texh = swapfbo.h; vp.Pos.y = (texh - vp.Size.h) / 2; glViewport(vp.Pos.x, vp.Pos.y, vp.Size.w, vp.Size.h); // Cinemascope - letterbox bars scissoring off pixels above and below vp center const float hc = .5f * m_cinemaScope; const int scisPx = static_cast<int>(hc * static_cast<float>(vp.Size.h)); ovrRecti sp = vp; sp.Pos.y += scisPx; sp.Size.h -= 2 * scisPx; glScissor(sp.Pos.x, sp.Pos.y, sp.Size.w, sp.Size.h); glEnable(GL_SCISSOR_TEST); glEnable(GL_DEPTH_TEST); // Render the scene for the current eye const ovrPosef& eyePose = m_eyePoses[eye]; const glm::mat4 mview = makeWorldToChassisMatrix() * makeMatrixFromPose(eyePose, m_headSize); const ovrMatrix4f ovrproj = ovrMatrix4f_Projection(hmdDesc.DefaultEyeFov[eye], 0.2f, 1000.0f, ovrProjection_None); const glm::mat4 proj = makeGlmMatrixFromOvrMatrix(ovrproj); g_pScene->RenderForOneEye(glm::value_ptr(glm::inverse(mview)), glm::value_ptr(proj)); const ovrTextureSwapChain& chain = g_textureSwapChain[eye]; const ovrResult commitres = ovr_CommitTextureSwapChain(g_session, chain); if (!OVR_SUCCESS(commitres)) { LOG_ERROR("ovr_CommitTextureSwapChain returned %d", commitres); return; } } glDisable(GL_SCISSOR_TEST); // Grab a copy of the left eye's undistorted render output for presentation // to the desktop window instead of the barrel distorted mirror texture. // This blit, while cheap, could cost some framerate to the HMD. 
// An over-the-shoulder view is another option, at a greater performance cost. if (0) { if (eye == ovrEyeType::ovrEye_Left) { BlitLeftEyeRenderToUndistortedMirrorTexture(); } } glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_FRAMEBUFFER, 0); } } std::vector<const ovrLayerHeader*> layerHeaders; { // Do distortion rendering, Present and flush/sync ovrLayerEyeFov ld; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL. for (int eye = 0; eye < 2; ++eye) { const FBO& swapfbo = m_swapFBO[eye]; const ovrTextureSwapChain& chain = g_textureSwapChain[eye]; ld.ColorTexture[eye] = chain; const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale); ovrRecti vp = { 0, 0, downSize.w, downSize.h }; const int texh = swapfbo.h; vp.Pos.y = (texh - vp.Size.h) / 2; ld.Viewport[eye] = vp; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = m_eyePoses[eye]; ld.SensorSampleTime = sensorSampleTime; } layerHeaders.push_back(&ld.Header); // Submit layers to HMD for display ovrLayerQuad ql; if (g_tweakbarQuad.m_showQuadInWorld) { ql.Header.Type = ovrLayerType_Quad; ql.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL. ql.ColorTexture = g_tweakbarQuad.m_swapChain; ovrRecti vp; vp.Pos.x = 0; vp.Pos.y = 0; vp.Size.w = 600; ///@todo vp.Size.h = 600; ///@todo ql.Viewport = vp; ql.QuadPoseCenter = g_tweakbarQuad.m_QuadPoseCenter; ql.QuadSize = { 1.f, 1.f }; ///@todo Pass in g_tweakbarQuad.SetHmdEyeRay(m_eyePoses[ovrEyeType::ovrEye_Left]); // Writes to m_layerQuad.QuadPoseCenter g_tweakbarQuad.DrawToQuad(); layerHeaders.push_back(&ql.Header); } } #if 0 ovrViewScaleDesc viewScaleDesc; viewScaleDesc.HmdToEyeOffset[0] = m_eyeOffsets[0]; viewScaleDesc.HmdToEyeOffset[1] = m_eyeOffsets[1]; viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.f; #endif const ovrResult result = ovr_SubmitFrame(g_session, g_frameIndex, nullptr, &layerHeaders[0], layerHeaders.size()); if (result == ovrSuccess) { g_hmdVisible = true; } else if (result == ovrSuccess_NotVisible) { g_hmdVisible = false; ///@todo Enter a lower-power, polling "no focus/HMD not worn" mode } else if (result == ovrError_DisplayLost) { LOG_INFO("ovr_SubmitFrame returned ovrError_DisplayLost"); g_hmdVisible = false; ///@todo Tear down textures and session and re-create } else { LOG_INFO("ovr_SubmitFrame returned %d", result); //g_hmdVisible = false; } // Handle OVR session events ovr_GetSessionStatus(g_session, &sessionStatus); if (sessionStatus.ShouldQuit) { glfwSetWindowShouldClose(g_pMirrorWindow, 1); } if (sessionStatus.ShouldRecenter) { ovr_RecenterTrackingOrigin(g_session); } // Blit mirror texture to monitor window if (g_hmdVisible) { glViewport(0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y); const FBO& srcFBO = m_mirrorFBO; glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO.id); glBlitFramebuffer( 0, srcFBO.h, srcFBO.w, 0, 0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y, GL_COLOR_BUFFER_BIT, GL_NEAREST); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); } else { displayMonitor(); } ++g_frameIndex; #ifdef USE_ANTTWEAKBAR if (g_tweakbarQuad.m_showQuadInWorld) { TwDraw(); } #endif }
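// One caveat in the snippet above: ld and ql are declared inside the braced block, so the
// headers pushed into layerHeaders point at objects whose lifetime ends before
// ovr_SubmitFrame() reads them. A sketch of the safer arrangement, keeping the same names
// and only moving the declarations out of the block:
//
//     ovrLayerEyeFov ld = {};
//     ovrLayerQuad ql = {};
//     std::vector<const ovrLayerHeader*> layerHeaders;
//     // ... fill ld (and ql when the tweakbar quad is shown) exactly as above ...
//     layerHeaders.push_back(&ld.Header);
//     const ovrResult result = ovr_SubmitFrame(g_session, g_frameIndex, nullptr,
//                                              layerHeaders.data(), (unsigned int)layerHeaders.size());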
// return true to retry later (e.g. after display lost) static bool MainLoop(bool retryCreate) { // Initialize these to nullptr here to handle device lost failures cleanly ovrTexture * mirrorTexture = nullptr; OculusTexture * pEyeRenderTexture[2] = { nullptr, nullptr }; DepthBuffer * pEyeDepthBuffer[2] = { nullptr, nullptr }; Scene * roomScene = nullptr; Camera * mainCam = nullptr; D3D11_TEXTURE2D_DESC td = {}; ovrHmd HMD; ovrGraphicsLuid luid; ovrResult result = ovr_Create(&HMD, &luid); if (!OVR_SUCCESS(result)) return retryCreate; ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD); // ------------------------------------------------------------------- // Add: Create instances of the CL Eye camera capture class CLEyeCameraCapture* cam[2] = { NULL }; // Query the number of connected cameras int numCams = CLEyeGetCameraCount(); if (numCams == 0) { printf_s("No PS3Eye Camera detected\n"); goto Done; } printf_s("Found %d cameras\n", numCams); for (int iCam = 0; iCam < numCams; iCam++) { char windowName[64]; // Query the camera's unique UUID GUID guid = CLEyeGetCameraUUID(iCam); printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n", iCam + 1, guid.Data1, guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]); sprintf_s(windowName, "Camera Window %d", iCam + 1); // Create camera capture object cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30); cam[iCam]->StartCapture(); } // ------------------------------------------------------------------- // Setup Device and Graphics // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid))) goto Done; // Make the eye render buffers (caution if actual size < requested due to HW limits). ovrRecti eyeRenderViewport[2]; for (int eye = 0; eye < 2; ++eye) { ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f); pEyeRenderTexture[eye] = new OculusTexture(); if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h)) { if (retryCreate) goto Done; VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture."); } pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h); eyeRenderViewport[eye].Pos.x = 0; eyeRenderViewport[eye].Pos.y = 0; eyeRenderViewport[eye].Size = idealSize; if (!pEyeRenderTexture[eye]->TextureSet) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create texture."); } } // Create a mirror to see on the monitor. 
td.ArraySize = 1; td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB; td.Width = DIRECTX.WinSizeW; td.Height = DIRECTX.WinSizeH; td.Usage = D3D11_USAGE_DEFAULT; td.SampleDesc.Count = 1; td.MipLevels = 1; result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture); if (!OVR_SUCCESS(result)) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create mirror texture."); } // Create the room model roomScene = new Scene(false); // Create camera mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity()); // Setup VR components, filling out description ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); bool isVisible = true; DCB portConfig; portConfig.BaudRate = 115200; portConfig.Parity = EVENPARITY; g_seriPort.Start("\\\\.\\COM3", &portConfig); // Main loop while (DIRECTX.HandleMessages()) { XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot); XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot); if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP]) mainCam->Pos = XMVectorAdd(mainCam->Pos, forward); if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward); if (DIRECTX.Key['D']) mainCam->Pos = XMVectorAdd(mainCam->Pos, right); if (DIRECTX.Key['A']) mainCam->Pos = XMVectorSubtract(mainCam->Pos, right); static float Yaw = 0; if (DIRECTX.Key[VK_LEFT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0); if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0); // Animate the cube static float cubeClock = 0; roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f)); // Get both eye poses simultaneously, with IPD offset already included. 
ovrPosef EyeRenderPose[2]; ovrVector3f HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset, eyeRenderDesc[1].HmdToEyeViewOffset }; double frameTime = ovr_GetPredictedDisplayTime(HMD, 0); // Keeping sensorSampleTime as close to ovr_GetTrackingState as possible - fed into the layer double sensorSampleTime = ovr_GetTimeInSeconds(); ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue); ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose); // -------------------------------------------------------------------------- // Add: Get Head Yaw Roll Pitch float hmdPitch = 0.0f; float hmdRoll = 0.0f; float hmdYaw = 0.0f; OVR::Posef HeadPose = hmdState.HeadPose.ThePose; HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll); SetPos(2, ServoRoll(hmdYaw)); SetPos(3, ServoRoll(hmdPitch)); // -------------------------------------------------------------------------- // Render Scene to Eye Buffers if (isVisible) { for (int eye = 0; eye < 2; ++eye) { // Increment to use next texture, just before writing pEyeRenderTexture[eye]->AdvanceToNextTexture(); // Clear and set up rendertarget int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex; DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]); DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y, (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h); //Get the pose information in XM format XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y, EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w); XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0); // Get view and projection matrices for the Rift camera XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot)); Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat,mainCam->Rot))); XMMATRIX view = finalCam.GetViewMatrix(); ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded); XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0], p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1], p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2], p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]); XMMATRIX prod = XMMatrixMultiply(view, proj); roomScene->Render(&prod, 1, 1, 1, 1, true); } } // Initialize our single full screen Fov layer. 
ovrLayerEyeFov ld = {}; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = 0; for (int eye = 0; eye < 2; ++eye) { ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet; ld.Viewport[eye] = eyeRenderViewport[eye]; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = EyeRenderPose[eye]; ld.SensorSampleTime = sensorSampleTime; } ovrLayerHeader* layers = &ld.Header; result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1); // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost if (!OVR_SUCCESS(result)) goto Done; isVisible = (result == ovrSuccess); // Render mirror ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture; DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture); DIRECTX.SwapChain->Present(0, 0); } // Release resources Done: delete mainCam; delete roomScene; if (mirrorTexture) ovr_DestroyMirrorTexture(HMD, mirrorTexture); for (int eye = 0; eye < 2; ++eye) { delete pEyeRenderTexture[eye]; delete pEyeDepthBuffer[eye]; } DIRECTX.ReleaseDevice(); ovr_Destroy(HMD); g_seriPort.End(); for (int iCam = 0; iCam < numCams; iCam++) { cam[iCam]->StopCapture(); delete cam[iCam]; } // Retry on ovrError_DisplayLost return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost); }
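In the eye loop above, XMMatrixSet rebuilds the projection element by element from the row-major ovrMatrix4f, which amounts to a transpose. A small sketch of the equivalent conversion, assuming DirectXMath and the LibOVR utility header are available; the helper name is illustrative and the exact header path varies by SDK version.

#include <DirectXMath.h>
#include <OVR_CAPI.h>
#include <OVR_CAPI_Util.h> // ovrMatrix4f_Projection; path may differ between SDK versions

// Sketch only: convert the OVR projection for one eye into an XMMATRIX.
DirectX::XMMATRIX ToXmProjection(const ovrFovPort& fov, float zNear, float zFar)
{
    ovrMatrix4f p = ovrMatrix4f_Projection(fov, zNear, zFar, ovrProjection_RightHanded);
    // ovrMatrix4f stores its elements row-major in M[row][col]; loading the 16
    // floats directly and transposing produces the same matrix as the manual
    // XMMatrixSet(p.M[0][0], p.M[1][0], ...) used in the sample above.
    return DirectX::XMMatrixTranspose(DirectX::XMMATRIX(&p.M[0][0]));
}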
// return true to retry later (e.g. after display lost) static bool MainLoop(bool retryCreate) { // Initialize these to nullptr here to handle device lost failures cleanly ovrMirrorTexture mirrorTexture = nullptr; OculusTexture * pEyeRenderTexture = nullptr; DepthBuffer * pEyeDepthBuffer = nullptr; Scene * roomScene = nullptr; Camera * mainCam = nullptr; ovrMirrorTextureDesc desc = {}; bool isVisible = true; long long frameIndex = 0; bool useInstancing = false; const int repeatDrawing = 1; ovrSession session; ovrGraphicsLuid luid; ovrResult result = ovr_Create(&session, &luid); if (!OVR_SUCCESS(result)) return retryCreate; ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session); // Setup Device and Graphics // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid))) goto Done; ovrRecti eyeRenderViewport[2]; // Make a single eye texture { ovrSizei eyeTexSizeL = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f); ovrSizei eyeTexSizeR = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f); ovrSizei textureSize; textureSize.w = eyeTexSizeL.w + eyeTexSizeR.w; textureSize.h = max(eyeTexSizeL.h, eyeTexSizeR.h); pEyeRenderTexture = new OculusTexture(); if (!pEyeRenderTexture->Init(session, textureSize.w, textureSize.h)) { if (retryCreate) goto Done; VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture."); } pEyeDepthBuffer = new DepthBuffer(DIRECTX.Device, textureSize.w, textureSize.h); // set viewports eyeRenderViewport[0].Pos.x = 0; eyeRenderViewport[0].Pos.y = 0; eyeRenderViewport[0].Size = eyeTexSizeL; eyeRenderViewport[1].Pos.x = eyeTexSizeL.w; eyeRenderViewport[1].Pos.y = 0; eyeRenderViewport[1].Size = eyeTexSizeR; } if (!pEyeRenderTexture->TextureChain) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create texture."); } // Create a mirror to see on the monitor. 
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB; desc.Width = DIRECTX.WinSizeW; desc.Height = DIRECTX.WinSizeH; result = ovr_CreateMirrorTextureDX(session, DIRECTX.Device, &desc, &mirrorTexture); if (!OVR_SUCCESS(result)) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create mirror texture."); } // Create the room model roomScene = new Scene(false); // Create camera mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity()); // Setup VR components, filling out description ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Main loop while (DIRECTX.HandleMessages()) { XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot); XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot); XMVECTOR up = XMVector3Rotate(XMVectorSet(0, 0.05f, 0, 0), mainCam->Rot); if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP]) mainCam->Pos = XMVectorAdd(mainCam->Pos, forward); if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward); if (DIRECTX.Key['D']) mainCam->Pos = XMVectorAdd(mainCam->Pos, right); if (DIRECTX.Key['A']) mainCam->Pos = XMVectorSubtract(mainCam->Pos, right); if (DIRECTX.Key['Q']) mainCam->Pos = XMVectorAdd(mainCam->Pos, up); if (DIRECTX.Key['E']) mainCam->Pos = XMVectorSubtract(mainCam->Pos, up); static float Yaw = 0; if (DIRECTX.Key[VK_LEFT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0); if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0); if (DIRECTX.Key['P']) ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_AppRenderTiming)); else ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_Off)); useInstancing = DIRECTX.Key['I']; // Animate the cube static float cubeClock = 0; roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f)); // Get both eye poses simultaneously, with IPD offset already included. 
ovrPosef EyeRenderPose[2]; ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset }; double sensorSampleTime; // sensorSampleTime is fed into the layer later ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime); // Render scene to eye texture if (isVisible) { DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture->GetRTV(), pEyeDepthBuffer); // calculate eye transforms XMMATRIX viewProjMatrix[2]; for (int eye = 0; eye < 2; ++eye) { //Get the pose information in XM format XMVECTOR eyeQuat = XMLoadFloat4((XMFLOAT4 *)&EyeRenderPose[eye].Orientation.x); XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0); // Get view and projection matrices for the Rift camera XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot)); Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat, mainCam->Rot))); XMMATRIX view = finalCam.GetViewMatrix(); ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.1f, 100.0f, ovrProjection_None); XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0], p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1], p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2], p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]); if (useInstancing) { // scale and offset projection matrix to shift image to correct part of texture for each eye XMMATRIX scale = XMMatrixScaling(0.5f, 1.0f, 1.0f); XMMATRIX translate = XMMatrixTranslation((eye==0) ? -0.5f : 0.5f, 0.0f, 0.0f); proj = XMMatrixMultiply(proj, scale); proj = XMMatrixMultiply(proj, translate); } viewProjMatrix[eye] = XMMatrixMultiply(view, proj); } if (useInstancing) { // use instancing for stereo DIRECTX.SetViewport(0.0f, 0.0f, (float)eyeRenderViewport[0].Size.w + eyeRenderViewport[1].Size.w, (float)eyeRenderViewport[0].Size.h); // render scene for (int i = 0; i < repeatDrawing; i++) roomScene->RenderInstanced(&viewProjMatrix[0], 1, 1, 1, 1, true); } else { // non-instanced path for (int eye = 0; eye < 2; ++eye) { // set viewport DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y, (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h); // render scene for (int i = 0; i < repeatDrawing; i++) roomScene->Render(&viewProjMatrix[eye], 1, 1, 1, 1, true); } } // Commit rendering to the swap chain pEyeRenderTexture->Commit(); } // Initialize our single full screen Fov layer. 
ovrLayerEyeFov ld = {}; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = 0; ld.SensorSampleTime = sensorSampleTime; for (int eye = 0; eye < 2; ++eye) { ld.ColorTexture[eye] = pEyeRenderTexture->TextureChain; ld.Viewport[eye] = eyeRenderViewport[eye]; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = EyeRenderPose[eye]; } ovrLayerHeader* layers = &ld.Header; result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1); // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost if (!OVR_SUCCESS(result)) goto Done; isVisible = (result == ovrSuccess); // Render mirror ID3D11Texture2D* tex = nullptr; ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&tex)); DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex); tex->Release(); DIRECTX.SwapChain->Present(0, 0); frameIndex++; } // Release resources Done: delete mainCam; delete roomScene; if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture); delete pEyeRenderTexture; delete pEyeDepthBuffer; DIRECTX.ReleaseDevice(); ovr_Destroy(session); // Retry on ovrError_DisplayLost return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost); }
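This version renders both eyes into a single shared swap chain, sized from ovr_GetFovTextureSize and split into side-by-side viewports. A compact sketch of that layout computation follows; the struct and function names (SharedEyeLayout, ComputeSharedEyeLayout) are illustrative and do not appear in the sample.

#include <OVR_CAPI.h>

struct SharedEyeLayout
{
    ovrSizei textureSize;  // size of the shared eye texture
    ovrRecti viewport[2];  // per-eye viewports within it
};

// Sketch only: compute a side-by-side layout for both eyes in one texture.
SharedEyeLayout ComputeSharedEyeLayout(ovrSession session, const ovrHmdDesc& hmdDesc, float pixelDensity)
{
    SharedEyeLayout layout = {};
    ovrSizei left  = ovr_GetFovTextureSize(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[0], pixelDensity);
    ovrSizei right = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], pixelDensity);

    layout.textureSize.w = left.w + right.w;                      // eyes side by side
    layout.textureSize.h = (left.h > right.h) ? left.h : right.h; // tallest eye wins

    layout.viewport[0] = { { 0, 0 },      left  };                // left eye: left half
    layout.viewport[1] = { { left.w, 0 }, right };                // right eye: right half
    return layout;
}

A texture created with these dimensions can then back both ld.ColorTexture entries, with ld.Viewport[eye] taken from viewport[eye], as the loop above does.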
int OgreOculus::go(void) { // Create Root object root = new Ogre::Root("plugin.cfg", "ogre.cfg"); // OpenGL root->loadPlugin("RenderSystem_GL_d"); root->setRenderSystem(root->getRenderSystemByName("OpenGL Rendering Subsystem")); // Initialize Root root->initialise(false); // Initialize Oculus ovrHmd hmd; ovrHmdDesc hmdDesc; ovrGraphicsLuid luid; ovr_Initialize(nullptr); if(ovr_Create(&hmd, &luid) != ovrSuccess) exit(-1); hmdDesc = ovr_GetHmdDesc(hmd); if(ovr_ConfigureTracking(hmd, ovrTrackingCap_Orientation |ovrTrackingCap_MagYawCorrection |ovrTrackingCap_Position, 0) != ovrSuccess) exit(-2); // Turn off HUD ovr_SetInt(hmd, "PerfHudMode", ovrPerfHud_Off); // Create a window window = root->createRenderWindow("Ogre + Oculus = <3", hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2, false); // Create scene manager and cameras smgr = root->createSceneManager(Ogre::ST_GENERIC); // Load Ogre resource paths from config file Ogre::ConfigFile cf; cf.load("resources_d.cfg"); // Go through all sections & settings in the file and add resources Ogre::ConfigFile::SectionIterator seci = cf.getSectionIterator(); Ogre::String secName, typeName, archName; while (seci.hasMoreElements()) { secName = seci.peekNextKey(); Ogre::ConfigFile::SettingsMultiMap *settings = seci.getNext(); Ogre::ConfigFile::SettingsMultiMap::iterator i; for (i = settings->begin(); i != settings->end(); ++i) { typeName = i->first; archName = i->second; Ogre::ResourceGroupManager::getSingleton().addResourceLocation( archName, typeName, secName); } } // Set resources Ogre::TextureManager::getSingleton().setDefaultNumMipmaps(5); Ogre::ResourceGroupManager::getSingleton().initialiseAllResourceGroups(); // Create the model itself via OgreModel.cpp createOgreModel(smgr); // Create camera createCamera(); // Set viewport and background color Ogre::Viewport* vp = window->addViewport(mCamera); vp->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); // Yellow // Set aspect ratio mCamera->setAspectRatio( Ogre::Real(vp->getActualWidth()) / Ogre::Real(vp->getActualHeight())); // Initialize glew if(glewInit() != GLEW_OK) exit(-3); // Get texture sizes ovrSizei texSizeL, texSizeR; texSizeL = ovr_GetFovTextureSize(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[left], 1); texSizeR = ovr_GetFovTextureSize(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[right], 1); // Calculate render buffer size ovrSizei bufferSize; bufferSize.w = texSizeL.w + texSizeR.w; bufferSize.h = max(texSizeL.h, texSizeR.h); // Create render texture set ovrSwapTextureSet* textureSet; if(ovr_CreateSwapTextureSetGL(hmd, GL_RGB, bufferSize.w, bufferSize.h, &textureSet) != ovrSuccess) exit(-4); // Create Ogre render texture Ogre::GLTextureManager* textureManager = static_cast<Ogre::GLTextureManager*>(Ogre::GLTextureManager::getSingletonPtr()); Ogre::TexturePtr rtt_texture(textureManager->createManual("RttTex", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, Ogre::TEX_TYPE_2D, bufferSize.w, bufferSize.h, 0, Ogre::PF_R8G8B8, Ogre::TU_RENDERTARGET)); Ogre::RenderTexture* rttEyes = rtt_texture->getBuffer(0, 0)->getRenderTarget(); Ogre::GLTexture* gltex = static_cast<Ogre::GLTexture*>(Ogre::GLTextureManager::getSingleton().getByName("RttTex").getPointer()); GLuint renderTextureID = gltex->getGLID(); // Put camera viewport on the ogre render texture Ogre::Viewport* vpts[nbEyes]; vpts[left]=rttEyes->addViewport(cams[left], 0, 0, 0, 0.5f); vpts[right]=rttEyes->addViewport(cams[right], 1, 0.5f, 0, 0.5f); vpts[left]->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); // Black background 
vpts[right]->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); ovrTexture* mirrorTexture; if(ovr_CreateMirrorTextureGL(hmd, GL_RGB, hmdDesc.Resolution.w, hmdDesc.Resolution.h, &mirrorTexture) != ovrSuccess) exit(-5); Ogre::TexturePtr mirror_texture(textureManager->createManual("MirrorTex", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, Ogre::TEX_TYPE_2D, hmdDesc.Resolution.w, hmdDesc.Resolution.h, 0, Ogre::PF_R8G8B8, Ogre::TU_RENDERTARGET)); // Get GLIDs GLuint ogreMirrorTextureID = static_cast<Ogre::GLTexture*>(Ogre::GLTextureManager::getSingleton().getByName("MirrorTex").getPointer())->getGLID(); GLuint oculusMirrorTextureID = ((ovrGLTexture*)mirrorTexture)->OGL.TexId; // Create EyeRenderDesc ovrEyeRenderDesc EyeRenderDesc[nbEyes]; EyeRenderDesc[left] = ovr_GetRenderDesc(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[left]); EyeRenderDesc[right] = ovr_GetRenderDesc(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[right]); // Get offsets ovrVector3f offset[nbEyes]; offset[left]=EyeRenderDesc[left].HmdToEyeViewOffset; offset[right]=EyeRenderDesc[right].HmdToEyeViewOffset; // Compositor layer ovrLayerEyeFov layer; layer.Header.Type = ovrLayerType_EyeFov; layer.Header.Flags = 0; layer.ColorTexture[left] = textureSet; layer.ColorTexture[right] = textureSet; layer.Fov[left] = EyeRenderDesc[left].Fov; layer.Fov[right] = EyeRenderDesc[right].Fov; layer.Viewport[left] = OVR::Recti(0, 0, bufferSize.w/2, bufferSize.h); layer.Viewport[right] = OVR::Recti(bufferSize.w/2, 0, bufferSize.w/2, bufferSize.h); // Get projection matrices for(size_t eyeIndex(0); eyeIndex < ovrEye_Count; eyeIndex++) { // Get the projection matrix OVR::Matrix4f proj = ovrMatrix4f_Projection(EyeRenderDesc[eyeIndex].Fov, static_cast<float>(0.01f), 4000, true); // Convert it to Ogre matrix Ogre::Matrix4 OgreProj; for(size_t x(0); x < 4; x++) for(size_t y(0); y < 4; y++) OgreProj[x][y] = proj.M[x][y]; // Set the matrix cams[eyeIndex]->setCustomProjectionMatrix(true, OgreProj); } // Variables for render loop bool render(true); ovrFrameTiming hmdFrameTiming; ovrTrackingState ts; OVR::Posef pose; ovrLayerHeader* layers; // Create event listener for handling user input createEventListener(); //Run physics loop in a new thread std::map<Ogre::Entity*, Ogre::Vector3> positionRequests; std::map<Ogre::Entity*, std::string> animationRequests; std::map<Ogre::Entity*, std::vector<int>> rotationRequests; std::map<std::string, std::string> message; std::thread physicsThread(physicsLoop, smgr, &message, &positionRequests, &animationRequests, &rotationRequests); // Render loop while(render) { // Suspend physics loop and perform requested movement/rotations/animations if(positionRequests.size() > 0 || animationRequests.size() > 0 || rotationRequests.size() > 0){ message.insert(std::pair<std::string, std::string>("", "")); for(auto const &request : positionRequests) { Ogre::Vector3 pos = request.second; Ogre::SceneNode* sceneNode = request.first->getParentSceneNode(); sceneNode->setPosition(pos); } for(auto const &request : animationRequests) { request.first->getAnimationState(request.second)->addTime(0.1); } for(auto const &request : rotationRequests) { Ogre::SceneNode* sceneNode = request.first->getParentSceneNode(); sceneNode->roll(Ogre::Degree(request.second[0])); sceneNode->pitch(Ogre::Degree(request.second[1])); sceneNode->yaw(Ogre::Degree(request.second[2])); } positionRequests.clear(); animationRequests.clear(); rotationRequests.clear(); // Resume physics loop message.clear(); } // Update Ogre window Ogre::WindowEventUtilities::messagePump(); 
// Advance the texture set index textureSet->CurrentIndex = (textureSet->CurrentIndex + 1) % textureSet->TextureCount; // Capture user input mKeyboard->capture(); mMouse->capture(); // Movement calculations mPlayerNode->translate(mDirection, Ogre::Node::TS_LOCAL); hmdFrameTiming = ovr_GetFrameTiming(hmd, 0); ts = ovr_GetTrackingState(hmd, hmdFrameTiming.DisplayMidpointSeconds); pose = ts.HeadPose.ThePose; ovr_CalcEyePoses(pose, offset, layer.RenderPose); oculusOrient = pose.Rotation; oculusPos = pose.Translation; mHeadNode->setOrientation(Ogre::Quaternion(oculusOrient.w, oculusOrient.x, oculusOrient.y, oculusOrient.z) * initialOculusOrientation.Inverse()); // Apply head tracking mHeadNode->setPosition(headPositionTrackingSensitivity * Ogre::Vector3(oculusPos.x, oculusPos.y, oculusPos.z)); // Update Ogre viewports root->_fireFrameRenderingQueued(); vpts[left]->update(); vpts[right]->update(); // Copy the rendered image to the Oculus Swap Texture glCopyImageSubData(renderTextureID, GL_TEXTURE_2D, 0, 0, 0, 0, ((ovrGLTexture*)(&textureSet->Textures[textureSet->CurrentIndex]))->OGL.TexId, GL_TEXTURE_2D, 0, 0, 0, 0, bufferSize.w, bufferSize.h, 1); layers = &layer.Header; // Submit the new frame to the Oculus compositor and update the window ovr_SubmitFrame(hmd, 0, nullptr, &layers, 1); window->update(); // Exit loop when window is closed if(window->isClosed()) render = false; } // Shut down Oculus ovr_Destroy(hmd); ovr_Shutdown(); // Delete Ogre root and return delete root; return EXIT_SUCCESS; }
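The render loop above feeds the tracked head pose into an Ogre scene node, removing the initial HMD orientation and scaling positional tracking by a sensitivity factor. A minimal sketch of that conversion follows, taking the C-API ovrPosef rather than the OVR::Posef used above; the parameter names initialOrientation and sensitivity are illustrative stand-ins for initialOculusOrientation and headPositionTrackingSensitivity.

#include <Ogre.h>
#include <OVR_CAPI.h>

// Sketch only: apply an OVR head pose to an Ogre scene node.
void ApplyHeadPose(Ogre::SceneNode* headNode, const ovrPosef& pose,
                   const Ogre::Quaternion& initialOrientation, float sensitivity)
{
    const ovrQuatf& q    = pose.Orientation;
    const ovrVector3f& t = pose.Position;
    // Ogre::Quaternion takes (w, x, y, z); undo the reference orientation captured at startup.
    headNode->setOrientation(Ogre::Quaternion(q.w, q.x, q.y, q.z) * initialOrientation.Inverse());
    // Scale positional tracking before applying it to the node.
    headNode->setPosition(sensitivity * Ogre::Vector3(t.x, t.y, t.z));
}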