void OculusWindow::init_context()
{
    WindowBase::init_context();
    auto const& glapi = ctx_.render_context->opengl_api();

    glapi.glGenFramebuffers(1, &blit_fbo_read_);
    glapi.glGenFramebuffers(1, &blit_fbo_write_);

    // Query the recommended texture size per eye. (The original code had the
    // left/right variable names swapped against the eye being queried.)
    ovrSizei recommended_size_l = ovr_GetFovTextureSize(hmd_session_, ovrEye_Left, hmd_desc_.DefaultEyeFov[ovrEye_Left], 1.0f);
    ovrSizei recommended_size_r = ovr_GetFovTextureSize(hmd_session_, ovrEye_Right, hmd_desc_.DefaultEyeFov[ovrEye_Right], 1.0f);

    texture_swap_chain_desc_.Type = ovrTexture_2D;
    texture_swap_chain_desc_.ArraySize = 1;
    texture_swap_chain_desc_.Width = recommended_size_l.w + recommended_size_r.w;
    texture_swap_chain_desc_.Height = recommended_size_l.h;
    texture_swap_chain_desc_.MipLevels = 1;
    texture_swap_chain_desc_.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    texture_swap_chain_desc_.SampleCount = 1;
    texture_swap_chain_desc_.StaticImage = ovrFalse;

    auto result = ovr_CreateTextureSwapChainGL(hmd_session_, &texture_swap_chain_desc_, &texture_swap_chain_);
    if (result != ovrSuccess) {
        ovrErrorInfo info;
        ovr_GetLastErrorInfo(&info);
        gua::Logger::LOG_WARNING << "Failed to create swap textures for Oculus Rift.\n"
                                 << "Error Code: " << info.ErrorString << std::endl;
    }

    // Both eyes render into (different viewports of) the same swap chain.
    color_layer_.ColorTexture[0] = texture_swap_chain_;
    color_layer_.ColorTexture[1] = texture_swap_chain_;
}
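// A minimal per-frame sketch (not part of guacamole; function and parameter
// names are illustrative) of how a swap chain created as above is consumed
// with the standard LibOVR 1.x calls: acquire the chain's current texture,
// render into it, then commit before submitting the layer.
void example_render_into_swap_chain(ovrSession hmd_session, ovrTextureSwapChain texture_swap_chain)
{
    int current_index = 0;
    ovr_GetTextureSwapChainCurrentIndex(hmd_session, texture_swap_chain, &current_index);

    GLuint current_texture = 0;
    ovr_GetTextureSwapChainBufferGL(hmd_session, texture_swap_chain, current_index, &current_texture);

    // ... attach current_texture to an FBO and draw both eye viewports ...

    // Make the rendered contents available to the compositor.
    ovr_CommitTextureSwapChain(hmd_session, texture_swap_chain);
}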
void OculusBaseDisplayPlugin::activate()
{
    _session = acquireOculusSession();
    _hmdDesc = ovr_GetHmdDesc(_session);
    _ipd = ovr_GetFloat(_session, OVR_KEY_IPD, _ipd);

    glm::uvec2 eyeSizes[2];
    _viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;

    ovr_for_each_eye([&](ovrEyeType eye) {
        _eyeFovs[eye] = _hmdDesc.DefaultEyeFov[eye];
        ovrEyeRenderDesc& erd = _eyeRenderDescs[eye] = ovr_GetRenderDesc(_session, eye, _eyeFovs[eye]);
        ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(erd.Fov, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_RightHanded);
        _eyeProjections[eye] = toGlm(ovrPerspectiveProjection);
        _eyeOffsets[eye] = glm::translate(mat4(), toGlm(erd.HmdToEyeViewOffset));
        eyeSizes[eye] = toGlm(ovr_GetFovTextureSize(_session, eye, erd.Fov, 1.0f));
        _viewScaleDesc.HmdToEyeViewOffset[eye] = erd.HmdToEyeViewOffset;
    });

    // Use a single culling projection wide enough to cover both eyes.
    auto combinedFov = _eyeFovs[0];
    combinedFov.LeftTan = combinedFov.RightTan = std::max(combinedFov.LeftTan, combinedFov.RightTan);
    _cullingProjection = toGlm(ovrMatrix4f_Projection(combinedFov, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_RightHanded));

    _renderTargetSize = uvec2(eyeSizes[0].x + eyeSizes[1].x, std::max(eyeSizes[0].y, eyeSizes[1].y));

    if (!OVR_SUCCESS(ovr_ConfigureTracking(_session,
            ovrTrackingCap_Orientation | ovrTrackingCap_Position | ovrTrackingCap_MagYawCorrection, 0))) {
        qFatal("Could not attach to sensor device");
    }

    memset(&_sceneLayer, 0, sizeof(ovrLayerEyeFov));
    _sceneLayer.Header.Type = ovrLayerType_EyeFov;
    _sceneLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;

    ovr_for_each_eye([&](ovrEyeType eye) {
        ovrFovPort& fov = _sceneLayer.Fov[eye] = _eyeRenderDescs[eye].Fov;
        ovrSizei& size = _sceneLayer.Viewport[eye].Size = ovr_GetFovTextureSize(_session, eye, fov, 1.0f);
        _sceneLayer.Viewport[eye].Pos = { eye == ovrEye_Left ? 0 : size.w, 0 };
    });

    // The parent class relies on our _session initialization, so its activation
    // must come last: the values calculated above must be available during the
    // customizeContext call (when not running in threaded present mode).
    HmdDisplayPlugin::activate();
}
void VR::init()
{
#if defined(_OVR_)
    ovrInitParams initParams = { ovrInit_RequestVersion, OVR_MINOR_VERSION, NULL, 0, 0 };
    ovrResult result = ovr_Initialize(&initParams);
    if (OVR_FAILURE(result)) {
        ovrErrorInfo errorInfo;
        ovr_GetLastErrorInfo(&errorInfo);
        Log(L"ovr_Initialize failed: " << errorInfo.ErrorString << endl);
        Error(L"ovr_Initialize failed");
        return;
    }
    Log("LibOVR initialized ok!" << endl);

    result = ovr_Create(&session, &luid);
    if (OVR_FAILURE(result)) {
        ovr_Shutdown();
        Error(L"No Oculus Rift detected. Cannot run in OVR mode without Oculus Rift device.");
        return;
    }

    desc = ovr_GetHmdDesc(session);
    resolution = desc.Resolution;

    // Start the sensor which provides the Rift's pose and motion.
    /*
    result = ovr_ConfigureTracking(session,
        ovrTrackingCap_Orientation | ovrTrackingCap_MagYawCorrection | ovrTrackingCap_Position, 0);
    if (OVR_FAILURE(result))
        Error(L"Could not enable Oculus Rift Tracking. Cannot run in OVR mode without Oculus Rift tracking.");
    */

    // Setup VR components, filling out description
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, desc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, desc.DefaultEyeFov[1]);

    nextTracking();

    // Tracking setup complete, now init rendering: configure stereo settings.
    Sizei recommendedTex0Size = ovr_GetFovTextureSize(session, ovrEye_Left, desc.DefaultEyeFov[0], 1.0f);
    Sizei recommendedTex1Size = ovr_GetFovTextureSize(session, ovrEye_Right, desc.DefaultEyeFov[1], 1.0f);
    buffersize_width = recommendedTex0Size.w + recommendedTex1Size.w;
    buffersize_height = max(recommendedTex0Size.h, recommendedTex1Size.h);

    result = ovr_RequestBoundaryVisible(session, ovrTrue);
    if (OVR_FAILURE(result)) {
        Log(L"Oculus Boundary system inactive.");
    } else {
        Log(L"Oculus Boundary system activated!");
    }
#endif
}
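// The body of nextTracking() is not shown above. A minimal sketch (assumed,
// not the author's code; the method name is hypothetical) of what such a
// tracking poll typically looks like with LibOVR 1.x:
void VR::nextTrackingSketch(long long frameIndex)
{
    // Predict where the head will be when this frame is displayed.
    double displayTime = ovr_GetPredictedDisplayTime(session, frameIndex);
    ovrTrackingState state = ovr_GetTrackingState(session, displayTime, ovrTrue);

    ovrVector3f hmdToEyeOffset[2] = {
        eyeRenderDesc[0].HmdToEyeOffset,
        eyeRenderDesc[1].HmdToEyeOffset
    };
    ovrPosef eyePoses[2];
    ovr_CalcEyePoses(state.HeadPose.ThePose, hmdToEyeOffset, eyePoses);
    // ... build per-eye view matrices from eyePoses ...
}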
OculusVR::RenderBuffer::RenderBuffer(const ovrSession& session)
{
    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
    ovrSizei eyeTextureSize0 = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
    ovrSizei eyeTextureSize1 = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);

    m_bufferSize.w = eyeTextureSize0.w + eyeTextureSize1.w;
    m_bufferSize.h = max(eyeTextureSize0.h, eyeTextureSize1.h);

    ovrTextureSwapChainDesc desc = {};
    desc.Type = ovrTexture_2D;
    desc.ArraySize = 1;
    desc.Width = m_bufferSize.w;
    desc.Height = m_bufferSize.h;
    desc.MipLevels = 1;
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    desc.SampleCount = 1;
    desc.StaticImage = ovrFalse;

    // ovrSuccess is 0, so the result must be tested with OVR_SUCCESS();
    // the original `if (result)` skipped this block exactly on success.
    ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &m_swapTextureChain);
    if (OVR_SUCCESS(result)) {
        GLuint chainTexId;
        ovr_GetTextureSwapChainBufferGL(session, m_swapTextureChain, 0, &chainTexId);
        glBindTexture(GL_TEXTURE_2D, chainTexId);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }

    glGenFramebuffers(1, &m_eyeFbo);

    // Create depth buffer
    glGenTextures(1, &m_depthBuffer);
    glBindTexture(GL_TEXTURE_2D, m_depthBuffer);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, m_bufferSize.w, m_bufferSize.h, 0,
                 GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);

    // MSAA color texture and FBO setup;
    // simply comment this line out to skip MSAA altogether.
    SetupMSAA();
}
bool OculusBaseDisplayPlugin::internalActivate()
{
    _session = acquireOculusSession();
    if (!_session) {
        return false;
    }

    _hmdDesc = ovr_GetHmdDesc(_session);

    glm::uvec2 eyeSizes[2];
    _viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
    _ipd = 0;

    ovr_for_each_eye([&](ovrEyeType eye) {
        _eyeFovs[eye] = _hmdDesc.DefaultEyeFov[eye];
        ovrEyeRenderDesc& erd = _eyeRenderDescs[eye] = ovr_GetRenderDesc(_session, eye, _eyeFovs[eye]);
        ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(erd.Fov, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_ClipRangeOpenGL);
        _eyeProjections[eye] = toGlm(ovrPerspectiveProjection);
        _eyeOffsets[eye] = glm::translate(mat4(), toGlm(erd.HmdToEyeOffset));
        eyeSizes[eye] = toGlm(ovr_GetFovTextureSize(_session, eye, erd.Fov, 1.0f));
        _viewScaleDesc.HmdToEyeOffset[eye] = erd.HmdToEyeOffset;
        _ipd += glm::abs(glm::length(toGlm(erd.HmdToEyeOffset)));
    });

    // Use a single culling projection wide enough to cover both eyes.
    auto combinedFov = _eyeFovs[0];
    combinedFov.LeftTan = combinedFov.RightTan = std::max(combinedFov.LeftTan, combinedFov.RightTan);
    _cullingProjection = toGlm(ovrMatrix4f_Projection(combinedFov, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_ClipRangeOpenGL));

    _renderTargetSize = uvec2(eyeSizes[0].x + eyeSizes[1].x, std::max(eyeSizes[0].y, eyeSizes[1].y));

    memset(&_sceneLayer, 0, sizeof(ovrLayerEyeFov));
    _sceneLayer.Header.Type = ovrLayerType_EyeFov;
    _sceneLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;

    ovr_for_each_eye([&](ovrEyeType eye) {
        ovrFovPort& fov = _sceneLayer.Fov[eye] = _eyeRenderDescs[eye].Fov;
        ovrSizei& size = _sceneLayer.Viewport[eye].Size = ovr_GetFovTextureSize(_session, eye, fov, 1.0f);
        _sceneLayer.Viewport[eye].Pos = { eye == ovrEye_Left ? 0 : size.w, 0 };
    });

    // This must come after the initialization, so that the values calculated
    // above are available during the customizeContext call (when not running
    // in threaded present mode)
    return Parent::internalActivate();
}
void GuardianSystemDemo::InitRenderTargets(const ovrHmdDesc& hmdDesc)
{
    // For each eye
    for (int i = 0; i < ovrEye_Count; ++i) {
        // Viewport
        const float kPixelsPerDisplayPixel = 1.0f;
        ovrSizei idealSize = ovr_GetFovTextureSize(mSession, (ovrEyeType)i, hmdDesc.DefaultEyeFov[i], kPixelsPerDisplayPixel);
        mEyeRenderViewport[i] = { 0, 0, idealSize.w, idealSize.h };

        // Create swap chain (field order: Type, Format, ArraySize, Width, Height,
        // MipLevels, SampleCount, StaticImage, MiscFlags, BindFlags)
        ovrTextureSwapChainDesc desc = {
            ovrTexture_2D, OVR_FORMAT_R8G8B8A8_UNORM_SRGB, 1, idealSize.w, idealSize.h, 1, 1,
            ovrFalse, ovrTextureMisc_DX_Typeless, ovrTextureBind_DX_RenderTarget
        };

        // Configure eye render layers
        mEyeRenderLayer.Header.Type = ovrLayerType_EyeFov;
        mEyeRenderLayer.Viewport[i] = mEyeRenderViewport[i];
        mEyeRenderLayer.Fov[i] = hmdDesc.DefaultEyeFov[i];
        mHmdToEyeOffset[i] = ovr_GetRenderDesc(mSession, (ovrEyeType)i, hmdDesc.DefaultEyeFov[i]).HmdToEyeOffset;

        // DirectX 11 - Generate RenderTargetView from textures in swap chain
        // ----------------------------------------------------------------------
        ovrResult result = ovr_CreateTextureSwapChainDX(mSession, DIRECTX.Device, &desc, &mTextureChain[i]);
        if (!OVR_SUCCESS(result)) {
            printf("ovr_CreateTextureSwapChainDX failed");
            exit(-1);
        }

        // Render target, normally triple-buffered
        int textureCount = 0;
        ovr_GetTextureSwapChainLength(mSession, mTextureChain[i], &textureCount);
        for (int j = 0; j < textureCount; ++j) {
            ID3D11Texture2D* renderTexture = nullptr;
            ovr_GetTextureSwapChainBufferDX(mSession, mTextureChain[i], j, IID_PPV_ARGS(&renderTexture));
            D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc = { DXGI_FORMAT_R8G8B8A8_UNORM, D3D11_RTV_DIMENSION_TEXTURE2D };
            ID3D11RenderTargetView* renderTargetView = nullptr;
            DIRECTX.Device->CreateRenderTargetView(renderTexture, &renderTargetViewDesc, &renderTargetView);
            mEyeRenderTargets[i].push_back(renderTargetView);
            renderTexture->Release();
        }

        // DirectX 11 - Generate depth
        // ----------------------------------------------------------------------
        D3D11_TEXTURE2D_DESC depthTextureDesc = {
            (UINT)idealSize.w, (UINT)idealSize.h, 1, 1, DXGI_FORMAT_D32_FLOAT, {1, 0},
            D3D11_USAGE_DEFAULT, D3D11_BIND_DEPTH_STENCIL, 0, 0
        };
        ID3D11Texture2D* depthTexture = nullptr;
        DIRECTX.Device->CreateTexture2D(&depthTextureDesc, NULL, &depthTexture);
        DIRECTX.Device->CreateDepthStencilView(depthTexture, NULL, &mEyeDepthTarget[i]);
        depthTexture->Release();
    }
}
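// A minimal per-frame sketch (assumed, not part of the sample above; the
// method name and DIRECTX.Context member are illustrative) showing how the
// render-target views created in InitRenderTargets are typically consumed:
// pick the RTV matching the chain's current index, draw, then commit.
void GuardianSystemDemo::RenderEyeSketch(int eye)
{
    int currentIndex = 0;
    ovr_GetTextureSwapChainCurrentIndex(mSession, mTextureChain[eye], &currentIndex);

    ID3D11RenderTargetView* rtv = mEyeRenderTargets[eye][currentIndex];
    DIRECTX.Context->OMSetRenderTargets(1, &rtv, mEyeDepthTarget[eye]);
    // ... draw the scene for this eye into rtv ...

    // Hand the rendered texture over to the compositor.
    ovr_CommitTextureSwapChain(mSession, mTextureChain[eye]);
}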
void vx_ovr_namespace_::OVRHMDHandleWithDevice::createTextureSet()
{
    int i;
    ovrResult result;

    texSizeLeft_ = ovr_GetFovTextureSize(session_, ovrEye_Left, description_.DefaultEyeFov[0], 1);
    result = ovr_CreateSwapTextureSetGL(session_, GL_SRGB8_ALPHA8, texSizeLeft_.w, texSizeLeft_.h, &textureSetLeft_);
    if (!OVR_SUCCESS(result)) {
        throw new VX_OVR_RunTimeError("Failed to create Texture Set for Left Eye");
    }
    for (i = 0; i < textureSetLeft_->TextureCount; ++i) {
        ovrGLTexture* tex = (ovrGLTexture*)&textureSetLeft_->Textures[i];
        glBindTexture(GL_TEXTURE_2D, tex->OGL.TexId);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }
    fboLeft_ = vxWnd::GLEWWrapper::generateFramebufferObject(texSizeLeft_.w, texSizeLeft_.h);

    // Right eye: use the right eye's FOV port and iterate the right texture set
    // (the original code queried DefaultEyeFov[0] and iterated the left set here).
    texSizeRight_ = ovr_GetFovTextureSize(session_, ovrEye_Right, description_.DefaultEyeFov[1], 1);
    result = ovr_CreateSwapTextureSetGL(session_, GL_SRGB8_ALPHA8, texSizeRight_.w, texSizeRight_.h, &textureSetRight_);
    if (!OVR_SUCCESS(result)) {
        throw new VX_OVR_RunTimeError("Failed to create Texture Set for Right Eye");
    }
    for (i = 0; i < textureSetRight_->TextureCount; ++i) {
        ovrGLTexture* tex = (ovrGLTexture*)&textureSetRight_->Textures[i];
        glBindTexture(GL_TEXTURE_2D, tex->OGL.TexId);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }
    fboRight_ = vxWnd::GLEWWrapper::generateFramebufferObject(texSizeRight_.w, texSizeRight_.h);

    glBindTexture(GL_TEXTURE_2D, 0);
}
OVR_PUBLIC_FUNCTION(ovrEyeRenderDesc) ovr_GetRenderDesc(ovrSession session, ovrEyeType eyeType, ovrFovPort fov)
{
    ovrEyeRenderDesc desc;
    desc.Eye = eyeType;
    desc.Fov = fov;

    // Map the OpenVR eye-to-head transform onto the LibOVR render description.
    OVR::Matrix4f HmdToEyeMatrix = REV_HmdMatrixToOVRMatrix(g_VRSystem->GetEyeToHeadTransform((vr::EVREye)eyeType));

    float WidthTan = fov.LeftTan + fov.RightTan;
    float HeightTan = fov.UpTan + fov.DownTan;
    ovrSizei size = ovr_GetFovTextureSize(session, eyeType, fov, 1.0f);

    desc.DistortedViewport = OVR::Recti(eyeType == ovrEye_Right ? size.w : 0, 0, size.w, size.h);
    desc.PixelsPerTanAngleAtCenter = OVR::Vector2f(size.w / WidthTan, size.h / HeightTan);
    desc.HmdToEyeOffset = HmdToEyeMatrix.GetTranslation();

    return desc;
}
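// A brief caller-side sketch (assumed, for illustration only) of how the
// fields filled in by ovr_GetRenderDesc are typically consumed: the FOV port
// feeds the projection matrix, the offset feeds the eye pose calculation.
void example_use_render_desc(ovrSession session)
{
    ovrHmdDesc hmd = ovr_GetHmdDesc(session);
    ovrEyeRenderDesc desc = ovr_GetRenderDesc(session, ovrEye_Left, hmd.DefaultEyeFov[0]);

    // Projection matrix from the FOV port (near/far planes are arbitrary here).
    ovrMatrix4f proj = ovrMatrix4f_Projection(desc.Fov, 0.1f, 1000.0f, ovrProjection_None);
    (void)proj; // ... upload to the renderer, use desc.HmdToEyeOffset for eye poses ...
}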
///@brief Called once a GL context has been set up.
void initVR()
{
    const ovrHmdDesc& hmd = m_Hmd;

    for (int eye = 0; eye < 2; ++eye) {
        const ovrSizei& bufferSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmd.DefaultEyeFov[eye], 1.f);
        LOG_INFO("Eye %d tex : %dx%d", eye, bufferSize.w, bufferSize.h);

        ovrTextureSwapChainDesc desc = {};
        desc.Type = ovrTexture_2D;
        desc.ArraySize = 1;
        desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        desc.Width = bufferSize.w;
        desc.Height = bufferSize.h;
        desc.MipLevels = 1;
        desc.SampleCount = 1;
        desc.StaticImage = ovrFalse;

        // Allocate the framebuffer that will hold the scene, and then be
        // re-rendered to the screen with distortion
        ovrTextureSwapChain& chain = g_textureSwapChain[eye];
        if (ovr_CreateTextureSwapChainGL(g_session, &desc, &chain) == ovrSuccess) {
            int length = 0;
            ovr_GetTextureSwapChainLength(g_session, chain, &length);
            for (int i = 0; i < length; ++i) {
                GLuint chainTexId;
                ovr_GetTextureSwapChainBufferGL(g_session, chain, i, &chainTexId);
                glBindTexture(GL_TEXTURE_2D, chainTexId);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            }
        }
        else {
            LOG_ERROR("Unable to create swap textures");
            return;
        }

        // Manually assemble swap FBO
        FBO& swapfbo = m_swapFBO[eye];
        swapfbo.w = bufferSize.w;
        swapfbo.h = bufferSize.h;
        glGenFramebuffers(1, &swapfbo.id);
        glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
        // Attach the chain's first texture so the FBO is complete; the current
        // chain texture must be re-attached each frame before rendering.
        // (The original code attached swapfbo.tex without ever initializing it.)
        ovr_GetTextureSwapChainBufferGL(g_session, chain, 0, &swapfbo.tex);
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, swapfbo.tex, 0);

        swapfbo.depth = 0;
        glGenRenderbuffers(1, &swapfbo.depth);
        glBindRenderbuffer(GL_RENDERBUFFER, swapfbo.depth);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, bufferSize.w, bufferSize.h);
        glBindRenderbuffer(GL_RENDERBUFFER, 0);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, swapfbo.depth);

        // Check status
        const GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
        if (status != GL_FRAMEBUFFER_COMPLETE) {
            LOG_ERROR("Framebuffer status incomplete: %d %x", status, status);
        }
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
    }

    // Initialize mirror texture
    ovrMirrorTextureDesc desc;
    memset(&desc, 0, sizeof(desc));
    desc.Width = g_mirrorWindowSz.x;
    desc.Height = g_mirrorWindowSz.y;
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    const ovrResult result = ovr_CreateMirrorTextureGL(g_session, &desc, &g_mirrorTexture);
    if (!OVR_SUCCESS(result)) {
        LOG_ERROR("Unable to create mirror texture");
        return;
    }

    // Manually assemble mirror FBO
    m_mirrorFBO.w = g_mirrorWindowSz.x;
    m_mirrorFBO.h = g_mirrorWindowSz.y;
    glGenFramebuffers(1, &m_mirrorFBO.id);
    glBindFramebuffer(GL_FRAMEBUFFER, m_mirrorFBO.id);
    GLuint texId;
    ovr_GetMirrorTextureBufferGL(g_session, g_mirrorTexture, &texId);
    m_mirrorFBO.tex = texId;
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_mirrorFBO.tex, 0);

    const ovrSizei sz = { 600, 600 };
    g_tweakbarQuad.initGL(g_session, sz);

    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);

    g_hmdVisible = true;
}
DLL_EXPORT_API npBool xnOvrCreateTexturesDx(xnOvrSession* session, void* dxDevice, int* outTextureCount, float pixelPerDisplayPixel, int mirrorBufferWidth, int mirrorBufferHeight)
{
    session->HmdDesc = ovr_GetHmdDesc(session->Session);
    ovrSizei sizel = ovr_GetFovTextureSize(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0], pixelPerDisplayPixel);
    ovrSizei sizer = ovr_GetFovTextureSize(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1], pixelPerDisplayPixel);
    ovrSizei bufferSize;
    bufferSize.w = sizel.w + sizer.w;
    bufferSize.h = fmax(sizel.h, sizer.h);

    ovrTextureSwapChainDesc texDesc = {};
    texDesc.Type = ovrTexture_2D;
    texDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    texDesc.ArraySize = 1;
    texDesc.Width = bufferSize.w;
    texDesc.Height = bufferSize.h;
    texDesc.MipLevels = 1;
    texDesc.SampleCount = 1;
    texDesc.StaticImage = ovrFalse;
    texDesc.MiscFlags = ovrTextureMisc_None;
    texDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

    if (!OVR_SUCCESS(ovr_CreateTextureSwapChainDX(session->Session, dxDevice, &texDesc, &session->SwapChain))) {
        return false;
    }

    auto count = 0;
    ovr_GetTextureSwapChainLength(session->Session, session->SwapChain, &count);
    *outTextureCount = count;

    // Init structures
    session->EyeRenderDesc[0] = ovr_GetRenderDesc(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0]);
    session->EyeRenderDesc[1] = ovr_GetRenderDesc(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1]);
    session->HmdToEyeViewOffset[0] = session->EyeRenderDesc[0].HmdToEyeOffset;
    session->HmdToEyeViewOffset[1] = session->EyeRenderDesc[1].HmdToEyeOffset;

    // Both eyes share one swap chain, split into left/right half viewports.
    session->Layer.Header.Type = ovrLayerType_EyeFov;
    session->Layer.Header.Flags = 0;
    session->Layer.ColorTexture[0] = session->SwapChain;
    session->Layer.ColorTexture[1] = session->SwapChain;
    session->Layer.Fov[0] = session->EyeRenderDesc[0].Fov;
    session->Layer.Fov[1] = session->EyeRenderDesc[1].Fov;
    session->Layer.Viewport[0].Pos.x = 0;
    session->Layer.Viewport[0].Pos.y = 0;
    session->Layer.Viewport[0].Size.w = bufferSize.w / 2;
    session->Layer.Viewport[0].Size.h = bufferSize.h;
    session->Layer.Viewport[1].Pos.x = bufferSize.w / 2;
    session->Layer.Viewport[1].Pos.y = 0;
    session->Layer.Viewport[1].Size.w = bufferSize.w / 2;
    session->Layer.Viewport[1].Size.h = bufferSize.h;

    // Create mirror as well
    if (mirrorBufferHeight != 0 && mirrorBufferWidth != 0) {
        ovrMirrorTextureDesc mirrorDesc = {};
        mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        mirrorDesc.Width = mirrorBufferWidth;
        mirrorDesc.Height = mirrorBufferHeight;
        if (!OVR_SUCCESS(ovr_CreateMirrorTextureDX(session->Session, dxDevice, &mirrorDesc, &session->Mirror))) {
            return false;
        }
    }

    return true;
}
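// A hypothetical companion sketch (xnOvrSubmitFrameSketch is not part of the
// wrapper above) showing how the layer prepared by xnOvrCreateTexturesDx
// would typically be submitted each frame with the standard LibOVR calls:
DLL_EXPORT_API npBool xnOvrSubmitFrameSketch(xnOvrSession* session, long long frameIndex)
{
    // The caller renders into the chain's current texture first, then commits it.
    ovr_CommitTextureSwapChain(session->Session, session->SwapChain);

    ovrLayerHeader* layers = &session->Layer.Header;
    return OVR_SUCCESS(ovr_SubmitFrame(session->Session, frameIndex, nullptr, &layers, 1));
}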
int main(int argc, char **argv)
{
    // Initialize SDL2's context
    SDL_Init(SDL_INIT_VIDEO);

    // Initialize Oculus' context
    ovrResult result = ovr_Initialize(nullptr);
    if (OVR_FAILURE(result)) {
        std::cout << "ERROR: Failed to initialize libOVR" << std::endl;
        SDL_Quit();
        return -1;
    }

    ovrSession session;
    ovrGraphicsLuid luid;
    // Connect to the Oculus headset
    result = ovr_Create(&session, &luid);
    if (OVR_FAILURE(result)) {
        std::cout << "ERROR: Oculus Rift not detected" << std::endl;
        ovr_Shutdown();
        SDL_Quit();
        return -1;
    }

    int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED;
    int winWidth = 1280;
    int winHeight = 720;
    Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN;
    // Create SDL2 window
    SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags);
    // Create OpenGL context
    SDL_GLContext glContext = SDL_GL_CreateContext(window);
    // Initialize GLEW
    glewInit();
    // Turn off vsync to let the compositor do its magic
    SDL_GL_SetSwapInterval(0);

    // Initialize the ZED camera
    sl::zed::Camera* zed = 0;
    zed = new sl::zed::Camera(sl::zed::HD720);
    sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0);
    int zedWidth = zed->getImageSize().width;
    int zedHeight = zed->getImageSize().height;
    if (zederr != sl::zed::SUCCESS) {
        std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl;
        ovr_Destroy(session);
        ovr_Shutdown();
        SDL_GL_DeleteContext(glContext);
        SDL_DestroyWindow(window);
        SDL_Quit();
        delete zed;
        return -1;
    }

    GLuint zedTextureID_L, zedTextureID_R;
    // Generate OpenGL texture for left images of the ZED camera
    glGenTextures(1, &zedTextureID_L);
    glBindTexture(GL_TEXTURE_2D, zedTextureID_L);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // Generate OpenGL texture for right images of the ZED camera
    glGenTextures(1, &zedTextureID_R);
    glBindTexture(GL_TEXTURE_2D, zedTextureID_R);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glBindTexture(GL_TEXTURE_2D, 0);

#if OPENGL_GPU_INTEROP
    cudaGraphicsResource* cimg_L;
    cudaGraphicsResource* cimg_R;
    cudaError_t errL, errR;
    errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
    errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
    if (errL != cudaSuccess || errR != cudaSuccess) {
        std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl;
    }
#endif

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
    // Get the texture sizes of the Oculus eyes
    ovrSizei textureSize0 = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
    ovrSizei textureSize1 = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
    // Compute the final size of the render buffer
    ovrSizei bufferSize;
    bufferSize.w = textureSize0.w + textureSize1.w;
    bufferSize.h = std::max(textureSize0.h, textureSize1.h);

    // Initialize OpenGL swap textures to render
    ovrTextureSwapChain textureChain = nullptr;
    // Description of the swap chain
    ovrTextureSwapChainDesc descTextureSwap = {};
    descTextureSwap.Type = ovrTexture_2D;
    descTextureSwap.ArraySize = 1;
    descTextureSwap.Width = bufferSize.w;
    descTextureSwap.Height = bufferSize.h;
    descTextureSwap.MipLevels = 1;
    descTextureSwap.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    descTextureSwap.SampleCount = 1;
    descTextureSwap.StaticImage = ovrFalse;

    // Create the OpenGL texture swap chain
    result = ovr_CreateTextureSwapChainGL(session, &descTextureSwap, &textureChain);
    if (OVR_SUCCESS(result)) {
        // Query the chain length only after the chain was created successfully.
        int length = 0;
        ovr_GetTextureSwapChainLength(session, textureChain, &length);
        for (int i = 0; i < length; ++i) {
            GLuint chainTexId;
            ovr_GetTextureSwapChainBufferGL(session, textureChain, i, &chainTexId);
            glBindTexture(GL_TEXTURE_2D, chainTexId);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        }
    }
    else {
        std::cout << "ERROR: failed creating swap texture" << std::endl;
        ovr_Destroy(session);
        ovr_Shutdown();
        SDL_GL_DeleteContext(glContext);
        SDL_DestroyWindow(window);
        SDL_Quit();
        delete zed;
        return -1;
    }

    // Generate frame buffer to render
    GLuint fboID;
    glGenFramebuffers(1, &fboID);
    // Generate depth buffer of the frame buffer
    GLuint depthBuffID;
    glGenTextures(1, &depthBuffID);
    glBindTexture(GL_TEXTURE_2D, depthBuffID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    GLenum internalFormat = GL_DEPTH_COMPONENT24;
    GLenum type = GL_UNSIGNED_INT;
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL);

    // Create a mirror texture to display the render result in the SDL2 window
    ovrMirrorTextureDesc descMirrorTexture;
    memset(&descMirrorTexture, 0, sizeof(descMirrorTexture));
    descMirrorTexture.Width = winWidth;
    descMirrorTexture.Height = winHeight;
    descMirrorTexture.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;

    ovrMirrorTexture mirrorTexture = nullptr;
    result = ovr_CreateMirrorTextureGL(session, &descMirrorTexture, &mirrorTexture);
    if (!OVR_SUCCESS(result)) {
        std::cout << "ERROR: Failed to create mirror texture" << std::endl;
    }
    GLuint mirrorTextureId;
    ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTextureId);

    GLuint mirrorFBOID;
    glGenFramebuffers(1, &mirrorFBOID);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
    glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

    // Frame index used by the compositor; it needs to be updated each new frame
    long long frameIndex = 0;

    // FloorLevel will give tracking poses where the floor height is 0
    ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel);

    // Initialize a default pose
    ovrPosef eyeRenderPose[2];

    // Get the render description of the left and right "eyes" of the Oculus headset
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
    // Get the Oculus view scale description
    ovrVector3f hmdToEyeOffset[2];
    double sensorSampleTime;

    // Create and compile the shader's sources
    Shader shader(OVR_ZED_VS, OVR_ZED_FS);

    // Compute the ZED image field of view with the ZED parameters
    float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx * 2.f)) * 2.f;
    // Compute the horizontal Oculus field of view with its parameters
    float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan));
    // Compute the useful part of the ZED image
    unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH;
    // Compute the size of the final image displayed in the headset, keeping the ZED image's aspect ratio
    unsigned int widthFinal = bufferSize.w / 2;
    float heightGL = 1.f;
    float widthGL = 1.f;
    if (usefulWidth > 0.f) {
        unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth;
        // Convert this size to OpenGL viewport coordinates
        heightGL = (heightFinal) / (float)(bufferSize.h);
        widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal);
    }
    else {
        std::cout << "WARNING: ZED parameters got wrong values."
                     "Default vertical and horizontal FOV are used.\n"
                     "Check your calibration file or check if your ZED is not too close to a surface or an object."
                  << std::endl;
    }

    // Compute the vertical Oculus field of view with its parameters
    float ovrFovV = (atanf(hmdDesc.DefaultEyeFov[0].UpTan) + atanf(hmdDesc.DefaultEyeFov[0].DownTan));

    // Compute the center of the optical lenses of the headset
    float offsetLensCenterX = ((atanf(hmdDesc.DefaultEyeFov[0].LeftTan)) / ovrFovH) * 2.f - 1.f;
    float offsetLensCenterY = ((atanf(hmdDesc.DefaultEyeFov[0].UpTan)) / ovrFovV) * 2.f - 1.f;

    // Create a rectangle with the computed coordinates and push it into GPU memory
    struct GLScreenCoordinates {
        float left, up, right, down;
    } screenCoord;

    screenCoord.up = heightGL + offsetLensCenterY;
    screenCoord.down = heightGL - offsetLensCenterY;
    screenCoord.right = widthGL + offsetLensCenterX;
    screenCoord.left = widthGL - offsetLensCenterX;

    float rectVertices[12] = { -screenCoord.left, -screenCoord.up, 0,
                                screenCoord.right, -screenCoord.up, 0,
                                screenCoord.right, screenCoord.down, 0,
                               -screenCoord.left, screenCoord.down, 0 };
    GLuint rectVBO[3];
    glGenBuffers(1, &rectVBO[0]);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW);

    float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 };
    glGenBuffers(1, &rectVBO[1]);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW);

    unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 };
    glGenBuffers(1, &rectVBO[2]);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    // Initialize hit value
    float hit = 0.02f;
    // Initialize a boolean that will be used to stop the application's loop
    // and another one to pause/unpause rendering
    bool end = false;
    bool refresh = true;
    // SDL variable that will be used to store input events
    SDL_Event events;
    // Initialize time variables. They will be used to limit the number of frames rendered per second.
    // Frame counters
    unsigned int riftc = 0, zedc = 1;
    // Chronometers
    unsigned int rifttime = 0, zedtime = 0, zedFPS = 0;
    int time1 = 0, timePerFrame = 0;
    int frameRate = (int)(1000 / MAX_FPS);

    // This boolean is used to test if the application is focused
    bool isVisible = true;

    // Enable the shader
    glUseProgram(shader.getProgramId());
    // Bind the Vertex Buffer Objects of the rectangle that displays ZED images
    // vertices
    glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
    glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0);
    // indices
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
    // texture coordinates
    glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
    glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0);

    // Main loop
    while (!end) {
        // Compute the time used to render the previous frame
        timePerFrame = SDL_GetTicks() - time1;
        // If the previous frame was rendered too fast, pause the loop to cap the frame rate at MAX_FPS
        if (timePerFrame < frameRate) {
            SDL_Delay(frameRate - timePerFrame);
            timePerFrame = frameRate;
        }
        // Increment the ZED chronometer
        zedtime += timePerFrame;
        // If the ZED chronometer reached 1 second
        if (zedtime > 1000) {
            zedFPS = zedc;
            zedc = 0;
            zedtime = 0;
        }
        // Increment the Rift chronometer and the Rift frame counter
        rifttime += timePerFrame;
        riftc++;
        // If the Rift chronometer reached 200 milliseconds
        if (rifttime > 200) {
            // Display FPS
            std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS;
            // Reset the Rift chronometer and frame counter
            rifttime = 0;
            riftc = 0;
        }
        // Start frame chronometer
        time1 = SDL_GetTicks();

        // While there is an event caught and not yet handled
        while (SDL_PollEvent(&events)) {
            // If a key is released
            if (events.type == SDL_KEYUP) {
                // Quit the application on Q
                if (events.key.keysym.scancode == SDL_SCANCODE_Q)
                    end = true;
                // Reset the hit value on R
                else if (events.key.keysym.scancode == SDL_SCANCODE_R)
                    hit = 0.0f;
                // Pause/unpause rendering on C
                else if (events.key.keysym.scancode == SDL_SCANCODE_C)
                    refresh = !refresh;
            }
            // If the mouse wheel is used, increase or decrease the hit value
            if (events.type == SDL_MOUSEWHEEL) {
                float s;
                events.wheel.y > 0 ? s = 1.0f : s = -1.0f;
                hit += 0.005f * s;
            }
        }

        // Get the texture swap index where we must draw our frame
        GLuint curTexId;
        int curIndex;
        ovr_GetTextureSwapChainCurrentIndex(session, textureChain, &curIndex);
        ovr_GetTextureSwapChainBufferGL(session, textureChain, curIndex, &curTexId);

        // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the
        // returned values (e.g. HmdToEyeOffset) may change at runtime.
        eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
        eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
        hmdToEyeOffset[0] = eyeRenderDesc[0].HmdToEyeOffset;
        hmdToEyeOffset[1] = eyeRenderDesc[1].HmdToEyeOffset;
        // Get eye poses, feeding in the correct IPD offset
        ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyeRenderPose, &sensorSampleTime);

        // If the application is focused
        if (isVisible) {
            // If a new ZED image was grabbed successfully
            if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false)) {
                // Update the ZED frame counter
                zedc++;
                if (refresh) {
#if OPENGL_GPU_INTEROP
                    sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT);
                    cudaArray_t arrIm;
                    cudaGraphicsMapResources(1, &cimg_L, 0);
                    cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0);
                    // *4 = 4 channels * 1 byte (uchar)
                    cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice);
                    cudaGraphicsUnmapResources(1, &cimg_L, 0);

                    m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT);
                    cudaGraphicsMapResources(1, &cimg_R, 0);
                    cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0);
                    cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice);
                    cudaGraphicsUnmapResources(1, &cimg_R, 0);
#endif
                    // Bind the frame buffer
                    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
                    // Set its color layer 0 as the current swap texture
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);
                    // Set its depth layer as our depth buffer
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0);
                    // Set the clear color, then clear the frame buffer (the original
                    // code cleared first, so the color only took effect a frame late)
                    glClearColor(0, 0, 0, 1);
                    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

                    // Render the matching ZED image for each Oculus eye
                    for (int eye = 0; eye < 2; eye++) {
                        // Set the left or right vertical half of the buffer as the viewport
                        glViewport(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
                        // Bind the left or right ZED image
                        glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R);
#if !OPENGL_GPU_INTEROP
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE,
                                     zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data);
#endif
                        // Bind the hit value
                        glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit);
                        // Bind the isLeft value
                        glUniform1ui(glGetUniformLocation(shader.getProgramId(), "isLeft"), eye == ovrEye_Left ? 1U : 0U);
                        // Draw the ZED image
                        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
                    }

                    // Avoids an error when calling SetAndClearRenderSurface during the next iteration.
                    // Without this, during the next while-loop iteration SetAndClearRenderSurface
                    // would bind a framebuffer with an invalid COLOR_ATTACHMENT0 because the texture ID
                    // associated with COLOR_ATTACHMENT0 had been unlocked by calling wglDXUnlockObjectsNV.
                    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);

                    // Commit changes to the textures so the compositor picks them up this frame
                    ovr_CommitTextureSwapChain(session, textureChain);
                }
                // Do not forget to increment the frameIndex!
                frameIndex++;
            }
        }

        /*
          Note: Even if we don't ask to refresh the framebuffer, or if Camera::grab()
          doesn't catch a new frame, we still have to submit an image to the Rift:
          it needs a 75 Hz refresh, otherwise there will be judder, black frames
          and/or glitches in the headset.
        */
        ovrLayerEyeFov ld;
        ld.Header.Type = ovrLayerType_EyeFov;
        // Tell the Oculus compositor that our texture origin is at the bottom left (because OpenGL)
        ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;

        // Set the Oculus layer eye field of view for each view
        for (int eye = 0; eye < 2; ++eye) {
            // Set the color texture as the current swap texture
            ld.ColorTexture[eye] = textureChain;
            // Set the viewport as the left or right vertical half of the color texture
            ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
            // Set the field of view
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            // Set the pose
            ld.RenderPose[eye] = eyeRenderPose[eye];
        }
        ld.SensorSampleTime = sensorSampleTime;

        ovrLayerHeader* layers = &ld.Header;
        // Submit the frame to the Oculus compositor, which will display it in the headset
        result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
        if (!OVR_SUCCESS(result)) {
            std::cout << "ERROR: failed to submit frame" << std::endl;
            glDeleteBuffers(3, rectVBO);
            ovr_DestroyTextureSwapChain(session, textureChain);
            ovr_DestroyMirrorTexture(session, mirrorTexture);
            ovr_Destroy(session);
            ovr_Shutdown();
            SDL_GL_DeleteContext(glContext);
            SDL_DestroyWindow(window);
            SDL_Quit();
            delete zed;
            return -1;
        }

        if (result == ovrSuccess && !isVisible) {
            std::cout << "The application is now shown in the headset." << std::endl;
        }
        isVisible = (result == ovrSuccess);

        // This is not strictly needed for this application, but it may be useful for a more advanced one
        ovrSessionStatus sessionStatus;
        ovr_GetSessionStatus(session, &sessionStatus);
        if (sessionStatus.ShouldRecenter) {
            std::cout << "Recenter Tracking asked by Session" << std::endl;
            ovr_RecenterTrackingOrigin(session);
        }

        // Copy the frame to the mirror buffer, which will be drawn in the SDL2 window
        glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
        GLint w = winWidth;
        GLint h = winHeight;
        glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        // Swap the SDL2 window
        SDL_GL_SwapWindow(window);
    }

    // Unbind all OpenGL state
    glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
    glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
    glUseProgram(0);
    glBindVertexArray(0);

    // Delete the Vertex Buffer Objects of the rectangle
    glDeleteBuffers(3, rectVBO);

    // Delete the SDL, OpenGL, Oculus and ZED contexts
    ovr_DestroyTextureSwapChain(session, textureChain);
    ovr_DestroyMirrorTexture(session, mirrorTexture);
    ovr_Destroy(session);
    ovr_Shutdown();
    SDL_GL_DeleteContext(glContext);
    SDL_DestroyWindow(window);
    SDL_Quit();
    delete zed;

    // Quit
    return 0;
}
// Return true to retry later (e.g. after display lost)
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device-lost failures cleanly
    ovrMirrorTexture mirrorTexture = nullptr;
    OculusEyeTexture* pEyeRenderTexture[2] = { nullptr, nullptr };
    Scene* roomScene = nullptr;
    Camera* mainCam = nullptr;
    ovrMirrorTextureDesc mirrorDesc = {};

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size; for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    // Make the eye render buffers (caution if actual size < requested due to HW limits).
    ovrRecti eyeRenderViewport[2];
    for (int eye = 0; eye < 2; ++eye) {
        ovrSizei idealSize = ovr_GetFovTextureSize(session, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
        pEyeRenderTexture[eye] = new OculusEyeTexture();
        if (!pEyeRenderTexture[eye]->Init(session, idealSize.w, idealSize.h, true)) {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create eye texture.");
        }
        eyeRenderViewport[eye].Pos.x = 0;
        eyeRenderViewport[eye].Pos.y = 0;
        eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureChain) {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create texture.");
        }
    }

    // Create a mirror to see on the monitor.
    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    mirrorDesc.Width = DIRECTX.WinSizeW;
    mirrorDesc.Height = DIRECTX.WinSizeH;
    result = ovr_CreateMirrorTextureDX(session, DIRECTX.CommandQueue, &mirrorDesc, &mirrorTexture);
    if (!OVR_SUCCESS(result)) {
        if (retryCreate) goto Done;
        FATALERROR("Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);
    // Create camera
    mainCam = new Camera(XMVectorSet(0.0f, 1.6f, 5.0f, 0), XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    long long frameIndex = 0;
    bool drawMirror = true;

    DIRECTX.InitFrame(drawMirror);

    // Main loop
    while (DIRECTX.HandleMessages()) {
        ovrSessionStatus sessionStatus;
        ovr_GetSessionStatus(session, &sessionStatus);
        if (sessionStatus.ShouldQuit) {
            // Because the application is requested to quit, it should not request retry
            retryCreate = false;
            break;
        }
        if (sessionStatus.ShouldRecenter)
            ovr_RecenterTrackingOrigin(session);

        if (sessionStatus.IsVisible) {
            XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->GetRotVec());
            XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->GetRotVec());
            XMVECTOR mainCamPos = mainCam->GetPosVec();
            XMVECTOR mainCamRot = mainCam->GetRotVec();
            if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])   mainCamPos = XMVectorAdd(mainCamPos, forward);
            if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCamPos = XMVectorSubtract(mainCamPos, forward);
            if (DIRECTX.Key['D'])                         mainCamPos = XMVectorAdd(mainCamPos, right);
            if (DIRECTX.Key['A'])                         mainCamPos = XMVectorSubtract(mainCamPos, right);
            static float Yaw = 0;
            if (DIRECTX.Key[VK_LEFT])  mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
            if (DIRECTX.Key[VK_RIGHT]) mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);
            mainCam->SetPosVec(mainCamPos);
            mainCam->SetRotVec(mainCamRot);

            // Animate the cube
            static float cubeClock = 0;
            roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

            // Get both eye poses simultaneously, with IPD offset already included.
            ovrPosef EyeRenderPose[2];
            ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset };

            double sensorSampleTime; // sensorSampleTime is fed into the layer later
            ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime);

            // Render scene to eye buffers
            for (int eye = 0; eye < 2; ++eye) {
                DIRECTX.SetActiveContext(eye == 0 ? DrawContext_EyeRenderLeft : DrawContext_EyeRenderRight);
                DIRECTX.SetActiveEye(eye);

                CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(
                    pEyeRenderTexture[eye]->GetD3DResource(),
                    D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
                    D3D12_RESOURCE_STATE_RENDER_TARGET);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->GetRtv(), pEyeRenderTexture[eye]->GetDsv());
                DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

                // Get the pose information in XM format
                XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
                                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y,
                                              EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                Camera finalCam(XMVectorAdd(mainCamPos, XMVector3Rotate(eyePos, mainCamRot)),
                                XMQuaternionMultiply(eyeQuat, mainCamRot));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None);
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
                XMMATRIX prod = XMMatrixMultiply(view, proj);

                roomScene->Render(&prod, 1, 1, 1, 1, true);

                resBar = CD3DX12_RESOURCE_BARRIER::Transition(
                    pEyeRenderTexture[eye]->GetD3DResource(),
                    D3D12_RESOURCE_STATE_RENDER_TARGET,
                    D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                // Commit rendering to the swap chain
                pEyeRenderTexture[eye]->Commit();

                // Kick off eye render command lists before ovr_SubmitFrame()
                DIRECTX.SubmitCommandList(DIRECTX.ActiveContext);
            }

            // Initialize our single full screen Fov layer.
            ovrLayerEyeFov ld = {};
            ld.Header.Type = ovrLayerType_EyeFov;
            ld.Header.Flags = 0;
            for (int eye = 0; eye < 2; ++eye) {
                ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureChain;
                ld.Viewport[eye] = eyeRenderViewport[eye];
                ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
                ld.RenderPose[eye] = EyeRenderPose[eye];
                ld.SensorSampleTime = sensorSampleTime;
            }

            ovrLayerHeader* layers = &ld.Header;
            result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
            // Exit the rendering loop if submit returns an error; will retry on ovrError_DisplayLost
            if (!OVR_SUCCESS(result))
                goto Done;

            frameIndex++;
        }

        if (drawMirror) {
            DIRECTX.SetActiveContext(DrawContext_Final);
            DIRECTX.SetViewport(0.0f, 0.0f, (float)hmdDesc.Resolution.w / 2, (float)hmdDesc.Resolution.h / 2);

            // Render mirror
            ID3D12Resource* mirrorTexRes = nullptr;
            ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&mirrorTexRes));

            //DIRECTX.SetAndClearRenderTarget(DIRECTX.CurrentFrameResources().SwapChainRtvHandle, nullptr, 1.0f, 0.5f, 0.0f, 1.0f);

            // Indicate that the back buffer will now be copied into
            CD3DX12_RESOURCE_BARRIER preMirrorBlitBar[] = {
                CD3DX12_RESOURCE_BARRIER::Transition(DIRECTX.CurrentFrameResources().SwapChainBuffer,
                                                     D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_DEST),
                CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes,
                                                     D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE)
            };
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(ARRAYSIZE(preMirrorBlitBar), preMirrorBlitBar);

            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->CopyResource(
                DIRECTX.CurrentFrameResources().SwapChainBuffer, mirrorTexRes);

            CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes,
                D3D12_RESOURCE_STATE_COPY_SOURCE, D3D12_RESOURCE_STATE_RENDER_TARGET);
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);
        }

        DIRECTX.SubmitCommandListAndPresent(drawMirror);
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(session, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye) {
        delete pEyeRenderTexture[eye];
    }
    DIRECTX.ReleaseDevice();
    ovr_Destroy(session);

    // Retry on ovrError_DisplayLost
    return retryCreate || (result == ovrError_DisplayLost);
}
int OgreOculus::go(void)
{
    // Create Root object
    root = new Ogre::Root("plugin.cfg", "ogre.cfg");

    // OpenGL
    root->loadPlugin("RenderSystem_GL_d");
    root->setRenderSystem(root->getRenderSystemByName("OpenGL Rendering Subsystem"));

    // Initialize Root
    root->initialise(false);

    // Initialize Oculus
    ovrHmd hmd;
    ovrHmdDesc hmdDesc;
    ovrGraphicsLuid luid;
    ovr_Initialize(nullptr);
    if (ovr_Create(&hmd, &luid) != ovrSuccess)
        exit(-1);
    hmdDesc = ovr_GetHmdDesc(hmd);
    if (ovr_ConfigureTracking(hmd,
            ovrTrackingCap_Orientation | ovrTrackingCap_MagYawCorrection | ovrTrackingCap_Position, 0) != ovrSuccess)
        exit(-2);

    // Turn off HUD
    ovr_SetInt(hmd, "PerfHudMode", ovrPerfHud_Off);

    // Create a window
    window = root->createRenderWindow("Ogre + Oculus = <3", hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, false);

    // Create scene manager and cameras
    smgr = root->createSceneManager(Ogre::ST_GENERIC);

    // Load Ogre resource paths from config file
    Ogre::ConfigFile cf;
    cf.load("resources_d.cfg");

    // Go through all sections & settings in the file and add resources
    Ogre::ConfigFile::SectionIterator seci = cf.getSectionIterator();
    Ogre::String secName, typeName, archName;
    while (seci.hasMoreElements()) {
        secName = seci.peekNextKey();
        Ogre::ConfigFile::SettingsMultiMap* settings = seci.getNext();
        Ogre::ConfigFile::SettingsMultiMap::iterator i;
        for (i = settings->begin(); i != settings->end(); ++i) {
            typeName = i->first;
            archName = i->second;
            Ogre::ResourceGroupManager::getSingleton().addResourceLocation(archName, typeName, secName);
        }
    }

    // Set resources
    Ogre::TextureManager::getSingleton().setDefaultNumMipmaps(5);
    Ogre::ResourceGroupManager::getSingleton().initialiseAllResourceGroups();

    // Create the model itself via OgreModel.cpp
    createOgreModel(smgr);

    // Create camera
    createCamera();

    // Set viewport and background color
    Ogre::Viewport* vp = window->addViewport(mCamera);
    vp->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); // Dark green (values given as if on a 0-255 scale)

    // Set aspect ratio
    mCamera->setAspectRatio(Ogre::Real(vp->getActualWidth()) / Ogre::Real(vp->getActualHeight()));

    // Initialize GLEW
    if (glewInit() != GLEW_OK)
        exit(-3);

    // Get texture sizes
    ovrSizei texSizeL, texSizeR;
    texSizeL = ovr_GetFovTextureSize(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[left], 1);
    texSizeR = ovr_GetFovTextureSize(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[right], 1);

    // Calculate render buffer size
    ovrSizei bufferSize;
    bufferSize.w = texSizeL.w + texSizeR.w;
    bufferSize.h = max(texSizeL.h, texSizeR.h);

    // Create render texture set
    ovrSwapTextureSet* textureSet;
    if (ovr_CreateSwapTextureSetGL(hmd, GL_RGB, bufferSize.w, bufferSize.h, &textureSet) != ovrSuccess)
        exit(-4);

    // Create Ogre render texture
    Ogre::GLTextureManager* textureManager = static_cast<Ogre::GLTextureManager*>(Ogre::GLTextureManager::getSingletonPtr());
    Ogre::TexturePtr rtt_texture(textureManager->createManual("RttTex",
        Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, Ogre::TEX_TYPE_2D,
        bufferSize.w, bufferSize.h, 0, Ogre::PF_R8G8B8, Ogre::TU_RENDERTARGET));
    Ogre::RenderTexture* rttEyes = rtt_texture->getBuffer(0, 0)->getRenderTarget();
    Ogre::GLTexture* gltex = static_cast<Ogre::GLTexture*>(Ogre::GLTextureManager::getSingleton().getByName("RttTex").getPointer());
    GLuint renderTextureID = gltex->getGLID();

    // Put camera viewports on the Ogre render texture (left and right halves)
    Ogre::Viewport* vpts[nbEyes];
    vpts[left] = rttEyes->addViewport(cams[left], 0, 0, 0, 0.5f);
    vpts[right] = rttEyes->addViewport(cams[right], 1, 0.5f, 0, 0.5f);
    vpts[left]->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); // Same dark green background
    vpts[right]->setBackgroundColour(Ogre::ColourValue(34, 89, 0));

    // Create the mirror texture shown in the desktop window
    ovrTexture* mirrorTexture;
    if (ovr_CreateMirrorTextureGL(hmd, GL_RGB, hmdDesc.Resolution.w, hmdDesc.Resolution.h, &mirrorTexture) != ovrSuccess)
        exit(-5);
    Ogre::TexturePtr mirror_texture(textureManager->createManual("MirrorTex",
        Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME, Ogre::TEX_TYPE_2D,
        hmdDesc.Resolution.w, hmdDesc.Resolution.h, 0, Ogre::PF_R8G8B8, Ogre::TU_RENDERTARGET));

    // Get GL IDs
    GLuint ogreMirrorTextureID = static_cast<Ogre::GLTexture*>(Ogre::GLTextureManager::getSingleton().getByName("MirrorTex").getPointer())->getGLID();
    GLuint oculusMirrorTextureID = ((ovrGLTexture*)mirrorTexture)->OGL.TexId;

    // Create EyeRenderDesc
    ovrEyeRenderDesc EyeRenderDesc[nbEyes];
    EyeRenderDesc[left] = ovr_GetRenderDesc(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[left]);
    EyeRenderDesc[right] = ovr_GetRenderDesc(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[right]);

    // Get offsets
    ovrVector3f offset[nbEyes];
    offset[left] = EyeRenderDesc[left].HmdToEyeViewOffset;
    offset[right] = EyeRenderDesc[right].HmdToEyeViewOffset;

    // Compositor layer
    ovrLayerEyeFov layer;
    layer.Header.Type = ovrLayerType_EyeFov;
    layer.Header.Flags = 0;
    layer.ColorTexture[left] = textureSet;
    layer.ColorTexture[right] = textureSet;
    layer.Fov[left] = EyeRenderDesc[left].Fov;
    layer.Fov[right] = EyeRenderDesc[right].Fov;
    layer.Viewport[left] = OVR::Recti(0, 0, bufferSize.w / 2, bufferSize.h);
    layer.Viewport[right] = OVR::Recti(bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);

    // Get projection matrices
    for (size_t eyeIndex(0); eyeIndex < ovrEye_Count; eyeIndex++) {
        // Get the projection matrix (the last argument is a projection-flag bitfield;
        // the original passed `true`, which converts to ovrProjection_RightHanded)
        OVR::Matrix4f proj = ovrMatrix4f_Projection(EyeRenderDesc[eyeIndex].Fov, 0.01f, 4000, ovrProjection_RightHanded);
        // Convert it to an Ogre matrix
        Ogre::Matrix4 OgreProj;
        for (size_t x(0); x < 4; x++)
            for (size_t y(0); y < 4; y++)
                OgreProj[x][y] = proj.M[x][y];
        // Set the matrix
        cams[eyeIndex]->setCustomProjectionMatrix(true, OgreProj);
    }

    // Variables for render loop
    bool render(true);
    ovrFrameTiming hmdFrameTiming;
    ovrTrackingState ts;
    OVR::Posef pose;
    ovrLayerHeader* layers;

    // Create event listener for handling user input
    createEventListener();

    // Run physics loop in a new thread
    std::map<Ogre::Entity*, Ogre::Vector3> positionRequests;
    std::map<Ogre::Entity*, std::string> animationRequests;
    std::map<Ogre::Entity*, std::vector<int>> rotationRequests;
    std::map<std::string, std::string> message;
    std::thread physicsThread(physicsLoop, smgr, &message, &positionRequests, &animationRequests, &rotationRequests);

    // Render loop
    while (render) {
        // Suspend physics loop and perform requested movements/rotations/animations
        if (positionRequests.size() > 0 || animationRequests.size() > 0 || rotationRequests.size() > 0) {
            message.insert(std::pair<std::string, std::string>("", ""));
            for (auto const& request : positionRequests) {
                Ogre::Vector3 pos = request.second;
                Ogre::SceneNode* sceneNode = request.first->getParentSceneNode();
                sceneNode->setPosition(pos);
            }
            for (auto const& request : animationRequests) {
                request.first->getAnimationState(request.second)->addTime(0.1);
            }
            for (auto const& request : rotationRequests) {
                Ogre::SceneNode* sceneNode = request.first->getParentSceneNode();
                sceneNode->roll(Ogre::Degree(request.second[0]));
                sceneNode->pitch(Ogre::Degree(request.second[1]));
                sceneNode->yaw(Ogre::Degree(request.second[2]));
            }
            positionRequests.clear();
            animationRequests.clear();
            rotationRequests.clear();
            // Resume physics loop
            message.clear();
        }

        // Update Ogre window
        Ogre::WindowEventUtilities::messagePump();
        // Advance texture set index
        textureSet->CurrentIndex = (textureSet->CurrentIndex + 1) % textureSet->TextureCount;

        // Capture user input
        mKeyboard->capture();
        mMouse->capture();

        // Movement calculations
        mPlayerNode->translate(mDirection, Ogre::Node::TS_LOCAL);
        hmdFrameTiming = ovr_GetFrameTiming(hmd, 0);
        ts = ovr_GetTrackingState(hmd, hmdFrameTiming.DisplayMidpointSeconds);
        pose = ts.HeadPose.ThePose;
        ovr_CalcEyePoses(pose, offset, layer.RenderPose);
        oculusOrient = pose.Rotation;
        oculusPos = pose.Translation;

        // Apply head tracking
        mHeadNode->setOrientation(Ogre::Quaternion(oculusOrient.w, oculusOrient.x, oculusOrient.y, oculusOrient.z) * initialOculusOrientation.Inverse());
        mHeadNode->setPosition(headPositionTrackingSensitivity * Ogre::Vector3(oculusPos.x, oculusPos.y, oculusPos.z));

        // Update Ogre viewports
        root->_fireFrameRenderingQueued();
        vpts[left]->update();
        vpts[right]->update();

        // Copy the rendered image to the Oculus swap texture
        glCopyImageSubData(renderTextureID, GL_TEXTURE_2D, 0, 0, 0, 0,
                           ((ovrGLTexture*)(&textureSet->Textures[textureSet->CurrentIndex]))->OGL.TexId,
                           GL_TEXTURE_2D, 0, 0, 0, 0,
                           bufferSize.w, bufferSize.h, 1);
        layers = &layer.Header;

        // Submit the new frame to the Oculus compositor and update the window
        ovr_SubmitFrame(hmd, 0, nullptr, &layers, 1);
        window->update();

        // Exit loop when window is closed
        if (window->isClosed())
            render = false;
    }

    // Shut down Oculus
    ovr_Destroy(hmd);
    ovr_Shutdown();

    // Delete Ogre root and return
    delete root;
    return EXIT_SUCCESS;
}
// Return true to retry later (e.g. after display lost)
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device-lost failures cleanly
    ovrMirrorTexture mirrorTexture = nullptr;
    OculusTexture* pEyeRenderTexture = nullptr;
    DepthBuffer* pEyeDepthBuffer = nullptr;
    Scene* roomScene = nullptr;
    Camera* mainCam = nullptr;
    ovrMirrorTextureDesc desc = {};

    bool isVisible = true;
    long long frameIndex = 0;
    bool useInstancing = false;
    const int repeatDrawing = 1;

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size; for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    ovrRecti eyeRenderViewport[2];

    // Make a single eye texture
    {
        ovrSizei eyeTexSizeL = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
        ovrSizei eyeTexSizeR = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
        ovrSizei textureSize;
        textureSize.w = eyeTexSizeL.w + eyeTexSizeR.w;
        textureSize.h = max(eyeTexSizeL.h, eyeTexSizeR.h);

        pEyeRenderTexture = new OculusTexture();
        if (!pEyeRenderTexture->Init(session, textureSize.w, textureSize.h)) {
            if (retryCreate) goto Done;
            // Init() failed. (The original validated OVR_SUCCESS(result), but at
            // this point `result` still held the successful result of ovr_Create,
            // so the validation could never fire.)
            VALIDATE(false, "Failed to create eye texture.");
        }
        pEyeDepthBuffer = new DepthBuffer(DIRECTX.Device, textureSize.w, textureSize.h);

        // Set viewports
        eyeRenderViewport[0].Pos.x = 0;
        eyeRenderViewport[0].Pos.y = 0;
        eyeRenderViewport[0].Size = eyeTexSizeL;
        eyeRenderViewport[1].Pos.x = eyeTexSizeL.w;
        eyeRenderViewport[1].Pos.y = 0;
        eyeRenderViewport[1].Size = eyeTexSizeR;
    }

    if (!pEyeRenderTexture->TextureChain) {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create texture.");
    }

    // Create a mirror to see on the monitor.
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB; desc.Width = DIRECTX.WinSizeW; desc.Height = DIRECTX.WinSizeH; result = ovr_CreateMirrorTextureDX(session, DIRECTX.Device, &desc, &mirrorTexture); if (!OVR_SUCCESS(result)) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create mirror texture."); } // Create the room model roomScene = new Scene(false); // Create camera mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity()); // Setup VR components, filling out description ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Main loop while (DIRECTX.HandleMessages()) { XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot); XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot); XMVECTOR up = XMVector3Rotate(XMVectorSet(0, 0.05f, 0, 0), mainCam->Rot); if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP]) mainCam->Pos = XMVectorAdd(mainCam->Pos, forward); if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward); if (DIRECTX.Key['D']) mainCam->Pos = XMVectorAdd(mainCam->Pos, right); if (DIRECTX.Key['A']) mainCam->Pos = XMVectorSubtract(mainCam->Pos, right); if (DIRECTX.Key['Q']) mainCam->Pos = XMVectorAdd(mainCam->Pos, up); if (DIRECTX.Key['E']) mainCam->Pos = XMVectorSubtract(mainCam->Pos, up); static float Yaw = 0; if (DIRECTX.Key[VK_LEFT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0); if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0); if (DIRECTX.Key['P']) ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_AppRenderTiming)); else ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_Off)); useInstancing = DIRECTX.Key['I']; // Animate the cube static float cubeClock = 0; roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f)); // Get both eye poses simultaneously, with IPD offset already included. 
ovrPosef EyeRenderPose[2]; ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset }; double sensorSampleTime; // sensorSampleTime is fed into the layer later ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime); // Render scene to eye texture if (isVisible) { DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture->GetRTV(), pEyeDepthBuffer); // calculate eye transforms XMMATRIX viewProjMatrix[2]; for (int eye = 0; eye < 2; ++eye) { //Get the pose information in XM format XMVECTOR eyeQuat = XMLoadFloat4((XMFLOAT4 *)&EyeRenderPose[eye].Orientation.x); XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0); // Get view and projection matrices for the Rift camera XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot)); Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat, mainCam->Rot))); XMMATRIX view = finalCam.GetViewMatrix(); ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.1f, 100.0f, ovrProjection_None); XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0], p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1], p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2], p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]); if (useInstancing) { // scale and offset projection matrix to shift image to correct part of texture for each eye XMMATRIX scale = XMMatrixScaling(0.5f, 1.0f, 1.0f); XMMATRIX translate = XMMatrixTranslation((eye==0) ? -0.5f : 0.5f, 0.0f, 0.0f); proj = XMMatrixMultiply(proj, scale); proj = XMMatrixMultiply(proj, translate); } viewProjMatrix[eye] = XMMatrixMultiply(view, proj); } if (useInstancing) { // use instancing for stereo DIRECTX.SetViewport(0.0f, 0.0f, (float)eyeRenderViewport[0].Size.w + eyeRenderViewport[1].Size.w, (float)eyeRenderViewport[0].Size.h); // render scene for (int i = 0; i < repeatDrawing; i++) roomScene->RenderInstanced(&viewProjMatrix[0], 1, 1, 1, 1, true); } else { // non-instanced path for (int eye = 0; eye < 2; ++eye) { // set viewport DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y, (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h); // render scene for (int i = 0; i < repeatDrawing; i++) roomScene->Render(&viewProjMatrix[eye], 1, 1, 1, 1, true); } } // Commit rendering to the swap chain pEyeRenderTexture->Commit(); } // Initialize our single full screen Fov layer. 
ovrLayerEyeFov ld = {}; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = 0; ld.SensorSampleTime = sensorSampleTime; for (int eye = 0; eye < 2; ++eye) { ld.ColorTexture[eye] = pEyeRenderTexture->TextureChain; ld.Viewport[eye] = eyeRenderViewport[eye]; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = EyeRenderPose[eye]; } ovrLayerHeader* layers = &ld.Header; result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1); // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost if (!OVR_SUCCESS(result)) goto Done; isVisible = (result == ovrSuccess); // Render mirror ID3D11Texture2D* tex = nullptr; ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&tex)); DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex); tex->Release(); DIRECTX.SwapChain->Present(0, 0); frameIndex++; } // Release resources Done: delete mainCam; delete roomScene; if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture); delete pEyeRenderTexture; delete pEyeDepthBuffer; DIRECTX.ReleaseDevice(); ovr_Destroy(session); // Retry on ovrError_DisplayLost return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost); }
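// A hypothetical helper (not from the sample above) factoring out the
// instanced-stereo projection shift used in the loop: scaling clip-space x by
// 0.5 halves each eye's horizontal NDC footprint, and translating by -0.5 or
// +0.5 moves it into the left or right half of the shared render target.
// Assumes DirectXMath row-vector conventions, as in the sample.
XMMATRIX EyeHalfProjection(const XMMATRIX& proj, int eye)
{
    XMMATRIX scale = XMMatrixScaling(0.5f, 1.0f, 1.0f);
    XMMATRIX translate = XMMatrixTranslation((eye == 0) ? -0.5f : 0.5f, 0.0f, 0.0f);
    return XMMatrixMultiply(XMMatrixMultiply(proj, scale), translate);
}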
int main(int argc, char **argv) { // Initialize SDL2's context SDL_Init(SDL_INIT_VIDEO); // Initialize Oculus' context ovrResult result = ovr_Initialize(nullptr); if (OVR_FAILURE(result)) { std::cout << "ERROR: Failed to initialize libOVR" << std::endl; SDL_Quit(); return -1; } ovrSession hmd; ovrGraphicsLuid luid; // Connect to the Oculus headset result = ovr_Create(&hmd, &luid); if (OVR_FAILURE(result)) { std::cout << "ERROR: Oculus Rift not detected" << std::endl; ovr_Shutdown(); SDL_Quit(); return -1; } int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED; int winWidth = 1280; int winHeight = 720; Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN; // Create SDL2 Window SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags); // Create OpenGL context SDL_GLContext glContext = SDL_GL_CreateContext(window); // Initialize GLEW glewInit(); // Turn off vsync to let the compositor do its magic SDL_GL_SetSwapInterval(0); // Initialize the ZED Camera sl::zed::Camera* zed = 0; zed = new sl::zed::Camera(sl::zed::HD720); sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0); int zedWidth = zed->getImageSize().width; int zedHeight = zed->getImageSize().height; if (zederr != sl::zed::SUCCESS) { std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl; ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } GLuint zedTextureID_L, zedTextureID_R; // Generate OpenGL texture for left images of the ZED camera glGenTextures(1, &zedTextureID_L); glBindTexture(GL_TEXTURE_2D, zedTextureID_L); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); // Generate OpenGL texture for right images of the ZED camera glGenTextures(1, &zedTextureID_R); glBindTexture(GL_TEXTURE_2D, zedTextureID_R); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glBindTexture(GL_TEXTURE_2D, 0); #if OPENGL_GPU_INTEROP cudaGraphicsResource* cimg_L; cudaGraphicsResource* cimg_R; cudaError_t errL, errR; errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone); errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone); if (errL != cudaSuccess || errR != cudaSuccess) { std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl; } #endif ovrHmdDesc hmdDesc = ovr_GetHmdDesc(hmd); // Get the texture sizes of Oculus eyes ovrSizei textureSize0 = ovr_GetFovTextureSize(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f); ovrSizei textureSize1 = ovr_GetFovTextureSize(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f); // Compute the final size of the render buffer ovrSizei bufferSize; bufferSize.w = textureSize0.w + textureSize1.w; bufferSize.h = std::max(textureSize0.h, textureSize1.h); // Initialize OpenGL swap textures to render ovrSwapTextureSet* ptextureSet = 0; if 
(OVR_SUCCESS(ovr_CreateSwapTextureSetGL(hmd, GL_SRGB8_ALPHA8, bufferSize.w, bufferSize.h, &ptextureSet))) { for (int i = 0; i < ptextureSet->TextureCount; ++i) { ovrGLTexture* tex = (ovrGLTexture*)&ptextureSet->Textures[i]; glBindTexture(GL_TEXTURE_2D, tex->OGL.TexId); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); } } else { std::cout << "ERROR: failed creating swap texture" << std::endl; ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } // Generate frame buffer to render GLuint fboID; glGenFramebuffers(1, &fboID); // Generate depth buffer of the frame buffer GLuint depthBuffID; glGenTextures(1, &depthBuffID); glBindTexture(GL_TEXTURE_2D, depthBuffID); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); GLenum internalFormat = GL_DEPTH_COMPONENT24; GLenum type = GL_UNSIGNED_INT; glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL); // Create a mirror texture to display the render result in the SDL2 window ovrGLTexture* mirrorTexture = nullptr; result = ovr_CreateMirrorTextureGL(hmd, GL_SRGB8_ALPHA8, winWidth, winHeight, reinterpret_cast<ovrTexture**>(&mirrorTexture)); if (!OVR_SUCCESS(result)) { std::cout << "ERROR: Failed to create mirror texture" << std::endl; } GLuint mirrorFBOID; glGenFramebuffers(1, &mirrorFBOID); glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID); glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexture->OGL.TexId, 0); glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); // Initialize a default Pose ovrPosef eyeRenderPose; // Set Identity quaternion eyeRenderPose.Orientation.x = 0; eyeRenderPose.Orientation.y = 0; eyeRenderPose.Orientation.z = 0; eyeRenderPose.Orientation.w = 1; // Set World's origin position eyeRenderPose.Position.x = 0.f; eyeRenderPose.Position.y = 0.f; eyeRenderPose.Position.z = 0; ovrLayerEyeFov ld; ld.Header.Type = ovrLayerType_EyeFov; // Tell to the Oculus compositor that our texture origin is at the bottom left ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft | ovrLayerFlag_HeadLocked; // Because OpenGL | Disable head tracking // Set the Oculus layer eye field of view for each view for (int eye = 0; eye < 2; ++eye) { // Set the color texture as the current swap texture ld.ColorTexture[eye] = ptextureSet; // Set the viewport as the right or left vertical half part of the color texture ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 
0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h); // Set the field of view ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; // Set the pose matrix ld.RenderPose[eye] = eyeRenderPose; } double sensorSampleTime = ovr_GetTimeInSeconds(); ld.SensorSampleTime = sensorSampleTime; // Get the render description of the left and right "eyes" of the Oculus headset ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Get the Oculus view scale description ovrVector3f viewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset, eyeRenderDesc[1].HmdToEyeViewOffset }; ovrViewScaleDesc viewScaleDesc; viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f; viewScaleDesc.HmdToEyeViewOffset[0] = viewOffset[0]; viewScaleDesc.HmdToEyeViewOffset[1] = viewOffset[1]; // Create and compile the shader's sources Shader shader(OVR_ZED_VS, OVR_ZED_FS); // Compute the ZED image field of view with the ZED parameters float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx * 2.f)) * 2.f; // Compute the Oculus' field of view with its parameters float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan)); // Compute the useful part of the ZED image unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH; // Compute the size of the final image displayed in the headset, keeping the ZED image's aspect ratio unsigned int widthFinal = bufferSize.w / 2; unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth; // Convert this size to OpenGL viewport coordinates float heightGL = (heightFinal) / (float)(bufferSize.h); float widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal); // Create a rectangle with the coordinates computed and push it into GPU memory. float rectVertices[12] = { -widthGL, -heightGL, 0, widthGL, -heightGL, 0, widthGL, heightGL, 0, -widthGL, heightGL, 0 }; GLuint rectVBO[3]; glGenBuffers(1, &rectVBO[0]); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]); glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW); float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 }; glGenBuffers(1, &rectVBO[1]); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]); glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW); unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 }; glGenBuffers(1, &rectVBO[2]); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]); glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindBuffer(GL_ARRAY_BUFFER, 0); // Initialize hit value float hit = 0.02f; // Initialize a boolean that will be used to stop the application's loop and another one to pause/unpause rendering bool end = false; bool refresh = true; // SDL variable that will be used to store input events SDL_Event events; // Initialize time variables. They will be used to limit the number of frames rendered per second.
// Frame counter unsigned int riftc = 0, zedc = 1; // Chronometer unsigned int rifttime = 0, zedtime = 0, zedFPS = 0; int time1 = 0, timePerFrame = 0; int frameRate = (int)(1000 / MAX_FPS); // Enable the shader glUseProgram(shader.getProgramId()); // Bind the Vertex Buffer Objects of the rectangle that displays ZED images // vertices glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]); glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0); // indices glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]); // texture coordinates glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS); glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]); glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0); // Main loop while (!end) { // Compute the time used to render the previous frame timePerFrame = SDL_GetTicks() - time1; // If the previous frame was rendered too fast if (timePerFrame < frameRate) { // Pause the loop to cap the frame rate at MAX_FPS SDL_Delay(frameRate - timePerFrame); timePerFrame = frameRate; } // Increment the ZED chronometer zedtime += timePerFrame; // If the ZED chronometer reached 1 second if (zedtime > 1000) { zedFPS = zedc; zedc = 0; zedtime = 0; } // Increment the Rift chronometer and the Rift frame counter rifttime += timePerFrame; riftc++; // If the Rift chronometer reached 200 milliseconds if (rifttime > 200) { // Display FPS std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS; // Reset Rift chronometer rifttime = 0; // Reset Rift frame counter riftc = 0; } // Start frame chronometer time1 = SDL_GetTicks(); // Process every pending SDL event while (SDL_PollEvent(&events)) { // If a key is released if (events.type == SDL_KEYUP) { // If Q, quit the application if (events.key.keysym.scancode == SDL_SCANCODE_Q) end = true; // If R, reset the hit value else if (events.key.keysym.scancode == SDL_SCANCODE_R) hit = 0.0f; // If C, pause/unpause rendering else if (events.key.keysym.scancode == SDL_SCANCODE_C) refresh = !refresh; } // If the mouse wheel is used if (events.type == SDL_MOUSEWHEEL) { // Increase or decrease the hit value float s = (events.wheel.y > 0) ? 1.0f : -1.0f; hit += 0.005f * s; } } // If rendering is unpaused and a new ZED image was successfully grabbed if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false)) { // Update the ZED frame counter zedc++; if (refresh) { #if OPENGL_GPU_INTEROP sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT); cudaArray_t arrIm; cudaGraphicsMapResources(1, &cimg_L, 0); cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0); cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); cudaGraphicsUnmapResources(1, &cimg_L, 0); m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT); cudaGraphicsMapResources(1, &cimg_R, 0); cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0); cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); // *4 = 4 channels * 1 byte (uchar) cudaGraphicsUnmapResources(1, &cimg_R, 0); #endif // Increment the CurrentIndex to point to the next texture within the output swap texture set.
// CurrentIndex must be advanced in round-robin fashion every time we draw a new frame ptextureSet->CurrentIndex = (ptextureSet->CurrentIndex + 1) % ptextureSet->TextureCount; // Get the current swap texture pointer auto tex = reinterpret_cast<ovrGLTexture*>(&ptextureSet->Textures[ptextureSet->CurrentIndex]); // Bind the frame buffer glBindFramebuffer(GL_FRAMEBUFFER, fboID); // Set its color layer 0 as the current swap texture glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex->OGL.TexId, 0); // Set its depth layer as our depth buffer glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0); // Set the clear color, then clear the frame buffer glClearColor(0, 0, 0, 1); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Render for each Oculus eye the equivalent ZED image for (int eye = 0; eye < 2; eye++) { // Set the left or right vertical half of the buffer as the viewport glViewport(ld.Viewport[eye].Pos.x, ld.Viewport[eye].Pos.y, ld.Viewport[eye].Size.w, ld.Viewport[eye].Size.h); // Bind the left or right ZED image glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R); #if !OPENGL_GPU_INTEROP glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data); #endif // Bind the hit value glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit); // Draw the ZED image glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); } } } /* Note: Even if we don't ask to refresh the framebuffer, or if Camera::grab() doesn't catch a new frame, we still have to submit an image to the Rift; the compositor needs a steady 75 Hz feed. Otherwise there will be jumps, black frames and/or glitches in the headset. */ ovrLayerHeader* layers = &ld.Header; // Submit the frame to the Oculus compositor, which will display the frame in the Oculus headset result = ovr_SubmitFrame(hmd, 0, &viewScaleDesc, &layers, 1); if (!OVR_SUCCESS(result)) { std::cout << "ERROR: failed to submit frame" << std::endl; glDeleteBuffers(3, rectVBO); ovr_DestroySwapTextureSet(hmd, ptextureSet); ovr_DestroyMirrorTexture(hmd, &mirrorTexture->Texture); ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; return -1; } // Copy the frame to the mirror buffer, which will be drawn in the SDL2 window glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); GLint w = mirrorTexture->OGL.Header.TextureSize.w; GLint h = mirrorTexture->OGL.Header.TextureSize.h; glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); // Swap the SDL2 window SDL_GL_SwapWindow(window); } // Unbind all OpenGL buffers glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS); glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); glBindBuffer(GL_ARRAY_BUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); glUseProgram(0); glBindVertexArray(0); // Delete the Vertex Buffer Objects of the rectangle glDeleteBuffers(3, rectVBO); // Delete the SDL, OpenGL, Oculus and ZED contexts ovr_DestroySwapTextureSet(hmd, ptextureSet); ovr_DestroyMirrorTexture(hmd, &mirrorTexture->Texture); ovr_Destroy(hmd); ovr_Shutdown(); SDL_GL_DeleteContext(glContext); SDL_DestroyWindow(window); SDL_Quit(); delete zed; // quit return 0; }
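// The FOV matching above uses the pinhole-camera relation
// fovH = 2 * atan(width / (2 * fx)). A self-contained sketch (the helper name
// is illustrative, not from the sample):
static float horizontalFovRad(float imageWidthPx, float focalLengthPx)
{
    return 2.0f * atanf(imageWidthPx / (2.0f * focalLengthPx));
}
// e.g. a 1280 px wide image with fx = 700 px gives about 1.48 rad (~85 degrees).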
void OculusWindow::initialize_hmd_environment() { try { auto result = ovr_Initialize(nullptr); if (!OVR_SUCCESS(result)) { Logger::LOG_WARNING << "Failed to initialize Oculus environment!" << " Error code: " << (int)result << std::endl; } ovrGraphicsLuid luid; result = ovr_Create(&hmd_session_, &luid); if (!OVR_SUCCESS(result)) { throw std::runtime_error("Unable to create HMD."); } hmd_desc_ = ovr_GetHmdDesc(hmd_session_); // get optimal texture size for rendering ovrSizei ideal_texture_size_left = ovr_GetFovTextureSize(hmd_session_, ovrEyeType(0), hmd_desc_.DefaultEyeFov[0], 1); ovrSizei ideal_texture_size_right = ovr_GetFovTextureSize(hmd_session_, ovrEyeType(1), hmd_desc_.DefaultEyeFov[1], 1); math::vec2ui window_size(ideal_texture_size_left.w + ideal_texture_size_right.w, std::max(ideal_texture_size_left.h, ideal_texture_size_right.h)); // initialize window => resolution is independent of rendering resolution! config.set_size(window_size); config.set_left_resolution(math::vec2ui(ideal_texture_size_left.w, ideal_texture_size_left.h)); config.set_left_position(math::vec2ui(0, 0)); config.set_right_resolution(math::vec2ui(ideal_texture_size_right.w, ideal_texture_size_right.h)); config.set_right_position(math::vec2ui(ideal_texture_size_left.w, 0)); // Initialize VR structures, filling out description. ovrEyeRenderDesc eyeRenderDesc[2]; ovrVector3f hmdToEyeViewOffset[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(hmd_session_, ovrEye_Left, hmd_desc_.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(hmd_session_, ovrEye_Right, hmd_desc_.DefaultEyeFov[1]); hmdToEyeViewOffset[0] = eyeRenderDesc[0].HmdToEyeOffset; hmdToEyeViewOffset[1] = eyeRenderDesc[1].HmdToEyeOffset; // Initialize our single full screen Fov layer. color_layer_.Header.Type = ovrLayerType_EyeFov; color_layer_.Header.Flags = 0; color_layer_.Fov[0] = eyeRenderDesc[0].Fov; color_layer_.Fov[1] = eyeRenderDesc[1].Fov; ovrRecti left_viewport; left_viewport.Size = { int(config.left_resolution().x), int(config.left_resolution().y) }; left_viewport.Pos = { int(config.left_position().x), int(config.left_position().y) }; ovrRecti right_viewport; right_viewport.Size = { int(config.right_resolution().x), int(config.right_resolution().y) }; right_viewport.Pos = { int(config.right_position().x), int(config.right_position().y) }; color_layer_.Viewport[0] = left_viewport; color_layer_.Viewport[1] = right_viewport; } catch (std::exception& e) { gua::Logger::LOG_WARNING << "Failed to initialize Oculus Rift.\n" << e.what() << std::endl; } }
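// A minimal sketch (SDK 1.x API; error handling elided) of walking a GL
// texture swap chain once it exists, e.g. to set sampler state on each
// buffer. Assumes a texture_swap_chain_ member created elsewhere alongside
// the hmd_session_ used above.
int chain_length = 0;
ovr_GetTextureSwapChainLength(hmd_session_, texture_swap_chain_, &chain_length);
for (int i = 0; i < chain_length; ++i) {
    GLuint tex_id = 0;
    ovr_GetTextureSwapChainBufferGL(hmd_session_, texture_swap_chain_, i, &tex_id);
    glBindTexture(GL_TEXTURE_2D, tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
}
glBindTexture(GL_TEXTURE_2D, 0);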
void VRImplOVR::connect(VRDesc* _desc) { ovrGraphicsLuid luid; ovrResult result = ovr_Create(&m_session, &luid); if (!OVR_SUCCESS(result)) { BX_TRACE("Failed to create OVR device."); return; } BX_STATIC_ASSERT(sizeof(_desc->m_adapterLuid) >= sizeof(luid)); memcpy(&_desc->m_adapterLuid, &luid, sizeof(luid)); ovrHmdDesc hmdDesc = ovr_GetHmdDesc(m_session); _desc->m_deviceType = hmdDesc.Type; _desc->m_refreshRate = hmdDesc.DisplayRefreshRate; _desc->m_deviceSize.m_w = hmdDesc.Resolution.w; _desc->m_deviceSize.m_h = hmdDesc.Resolution.h; BX_TRACE("OVR HMD: %s, %s, firmware: %d.%d" , hmdDesc.ProductName , hmdDesc.Manufacturer , hmdDesc.FirmwareMajor , hmdDesc.FirmwareMinor ); ovrSizei eyeSize[2] = { ovr_GetFovTextureSize(m_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f), ovr_GetFovTextureSize(m_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f), // use the right eye's fov, not DefaultEyeFov[0] }; for (int eye = 0; eye < 2; ++eye) { BX_STATIC_ASSERT(sizeof(_desc->m_eyeFov[eye]) == sizeof(hmdDesc.DefaultEyeFov[eye])); memcpy(&_desc->m_eyeFov[eye], &hmdDesc.DefaultEyeFov[eye], sizeof(_desc->m_eyeFov[eye])); _desc->m_eyeSize[eye].m_w = eyeSize[eye].w; _desc->m_eyeSize[eye].m_h = eyeSize[eye].h; } float neckOffset[2] = {OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL, OVR_DEFAULT_NECK_TO_EYE_VERTICAL}; ovr_GetFloatArray(m_session, OVR_KEY_NECK_TO_EYE_DISTANCE, neckOffset, 2); _desc->m_neckOffset[0] = neckOffset[0]; _desc->m_neckOffset[1] = neckOffset[1]; // build constant layer settings m_renderLayer.Header.Type = ovrLayerType_EyeFov; m_renderLayer.Header.Flags = 0; for (int eye = 0; eye < 2; ++eye) { m_renderLayer.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; m_renderLayer.Viewport[eye].Pos.x = 0; m_renderLayer.Viewport[eye].Pos.y = 0; m_renderLayer.Viewport[eye].Size = eyeSize[eye]; } m_viewScale.HmdSpaceToWorldScaleInMeters = 1.0f; for (int eye = 0; eye < 2; ++eye) { ovrEyeRenderDesc erd = ovr_GetRenderDesc(m_session, static_cast<ovrEyeType>(eye), hmdDesc.DefaultEyeFov[eye]); m_viewScale.HmdToEyeOffset[eye] = erd.HmdToEyeOffset; m_eyeFov[eye] = erd.Fov; m_pixelsPerTanAngleAtCenter[eye] = erd.PixelsPerTanAngleAtCenter; } }
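// A sketch of the per-frame pose query that would pair with the constant
// layer state built in connect() above (1.x API; member names from the
// snippet; frame index 0 lets the SDK pick the current frame):
double predicted = ovr_GetPredictedDisplayTime(m_session, 0);
ovrTrackingState state = ovr_GetTrackingState(m_session, predicted, ovrTrue);
ovr_CalcEyePoses(state.HeadPose.ThePose, m_viewScale.HmdToEyeOffset, m_renderLayer.RenderPose);
m_renderLayer.SensorSampleTime = ovr_GetTimeInSeconds();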
// return true to retry later (e.g. after display lost) static bool MainLoop(bool retryCreate) { // Initialize these to nullptr here to handle device lost failures cleanly ovrTexture * mirrorTexture = nullptr; OculusTexture * pEyeRenderTexture[2] = { nullptr, nullptr }; DepthBuffer * pEyeDepthBuffer[2] = { nullptr, nullptr }; Scene * roomScene = nullptr; Camera * mainCam = nullptr; D3D11_TEXTURE2D_DESC td = {}; ovrHmd HMD; ovrGraphicsLuid luid; ovrResult result = ovr_Create(&HMD, &luid); if (!OVR_SUCCESS(result)) return retryCreate; ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD); // ------------------------------------------------------------------- // Add: create CLEyeCameraCapture instances CLEyeCameraCapture* cam[2] = { NULL }; // Query the number of connected cameras int numCams = CLEyeGetCameraCount(); if (numCams == 0) { printf_s("No PS3Eye Camera detected\n"); goto Done; } printf_s("Found %d cameras\n", numCams); for (int iCam = 0; iCam < numCams; iCam++) { char windowName[64]; // Query the camera's unique uuid GUID guid = CLEyeGetCameraUUID(iCam); printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n", iCam + 1, guid.Data1, guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]); sprintf_s(windowName, "Camera Window %d", iCam + 1); // Create camera capture object cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30); cam[iCam]->StartCapture(); } // ------------------------------------------------------------------- // Setup Device and Graphics // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid))) goto Done; // Make the eye render buffers (caution if actual size < requested due to HW limits). ovrRecti eyeRenderViewport[2]; for (int eye = 0; eye < 2; ++eye) { ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f); pEyeRenderTexture[eye] = new OculusTexture(); if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h)) { if (retryCreate) goto Done; VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture."); } pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h); eyeRenderViewport[eye].Pos.x = 0; eyeRenderViewport[eye].Pos.y = 0; eyeRenderViewport[eye].Size = idealSize; if (!pEyeRenderTexture[eye]->TextureSet) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create texture."); } } // Create a mirror to see on the monitor.
td.ArraySize = 1; td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB; td.Width = DIRECTX.WinSizeW; td.Height = DIRECTX.WinSizeH; td.Usage = D3D11_USAGE_DEFAULT; td.SampleDesc.Count = 1; td.MipLevels = 1; result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture); if (!OVR_SUCCESS(result)) { if (retryCreate) goto Done; VALIDATE(false, "Failed to create mirror texture."); } // Create the room model roomScene = new Scene(false); // Create camera mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity()); // Setup VR components, filling out description ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); bool isVisible = true; DCB portConfig; portConfig.BaudRate = 115200; portConfig.Parity = EVENPARITY; g_seriPort.Start("\\\\.\\COM3", &portConfig); // Main loop while (DIRECTX.HandleMessages()) { XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot); XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot); if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP]) mainCam->Pos = XMVectorAdd(mainCam->Pos, forward); if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward); if (DIRECTX.Key['D']) mainCam->Pos = XMVectorAdd(mainCam->Pos, right); if (DIRECTX.Key['A']) mainCam->Pos = XMVectorSubtract(mainCam->Pos, right); static float Yaw = 0; if (DIRECTX.Key[VK_LEFT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0); if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0); // Animate the cube static float cubeClock = 0; roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f)); // Get both eye poses simultaneously, with IPD offset already included. 
ovrPosef EyeRenderPose[2]; ovrVector3f HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset, eyeRenderDesc[1].HmdToEyeViewOffset }; double frameTime = ovr_GetPredictedDisplayTime(HMD, 0); // Keeping sensorSampleTime as close to ovr_GetTrackingState as possible - fed into the layer double sensorSampleTime = ovr_GetTimeInSeconds(); ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue); ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose); // -------------------------------------------------------------------------- // Add: Get Head Yaw Roll Pitch float hmdPitch = 0.0f; float hmdRoll = 0.0f; float hmdYaw = 0.0f; OVR::Posef HeadPose = hmdState.HeadPose.ThePose; HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll); SetPos(2, ServoRoll(hmdYaw)); SetPos(3, ServoRoll(hmdPitch)); // -------------------------------------------------------------------------- // Render Scene to Eye Buffers if (isVisible) { for (int eye = 0; eye < 2; ++eye) { // Increment to use next texture, just before writing pEyeRenderTexture[eye]->AdvanceToNextTexture(); // Clear and set up rendertarget int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex; DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]); DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y, (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h); //Get the pose information in XM format XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y, EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w); XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0); // Get view and projection matrices for the Rift camera XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot)); Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat,mainCam->Rot))); XMMATRIX view = finalCam.GetViewMatrix(); ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded); XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0], p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1], p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2], p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]); XMMATRIX prod = XMMatrixMultiply(view, proj); roomScene->Render(&prod, 1, 1, 1, 1, true); } } // Initialize our single full screen Fov layer. 
ovrLayerEyeFov ld = {}; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = 0; for (int eye = 0; eye < 2; ++eye) { ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet; ld.Viewport[eye] = eyeRenderViewport[eye]; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = EyeRenderPose[eye]; ld.SensorSampleTime = sensorSampleTime; } ovrLayerHeader* layers = &ld.Header; result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1); // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost if (!OVR_SUCCESS(result)) goto Done; isVisible = (result == ovrSuccess); // Render mirror ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture; DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture); DIRECTX.SwapChain->Present(0, 0); } // Release resources Done: delete mainCam; delete roomScene; if (mirrorTexture) ovr_DestroyMirrorTexture(HMD, mirrorTexture); for (int eye = 0; eye < 2; ++eye) { delete pEyeRenderTexture[eye]; delete pEyeDepthBuffer[eye]; } DIRECTX.ReleaseDevice(); ovr_Destroy(HMD); g_seriPort.End(); for (int iCam = 0; iCam < numCams; iCam++) { cam[iCam]->StopCapture(); delete cam[iCam]; } // Retry on ovrError_DisplayLost return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost); }
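// Both DX11 loops above transpose ovrMatrix4f into XMMATRIX element by
// element with XMMatrixSet. An equivalent sketch using DirectXMath loads
// (hypothetical helper; assumes the row-major float M[4][4] layout of
// ovrMatrix4f):
XMMATRIX ToXMMatrix(const ovrMatrix4f& m)
{
    // Load the OVR matrix as-is, then transpose; identical to the
    // element-wise XMMatrixSet(p.M[0][0], p.M[1][0], ...) pattern above.
    return XMMatrixTranspose(XMLoadFloat4x4(reinterpret_cast<const XMFLOAT4X4*>(&m)));
}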
void OculusBaseDisplayPlugin::activate() { if (!OVR_SUCCESS(ovr_Initialize(nullptr))) { qFatal("Could not init OVR"); } if (!OVR_SUCCESS(ovr_Create(&_session, &_luid))) { qFatal("Failed to acquire HMD"); } WindowOpenGLDisplayPlugin::activate(); _hmdDesc = ovr_GetHmdDesc(_session); _ipd = ovr_GetFloat(_session, OVR_KEY_IPD, _ipd); glm::uvec2 eyeSizes[2]; ovr_for_each_eye([&](ovrEyeType eye) { _eyeFovs[eye] = _hmdDesc.DefaultEyeFov[eye]; ovrEyeRenderDesc& erd = _eyeRenderDescs[eye] = ovr_GetRenderDesc(_session, eye, _eyeFovs[eye]); ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(erd.Fov, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_RightHanded); _eyeProjections[eye] = toGlm(ovrPerspectiveProjection); ovrPerspectiveProjection = ovrMatrix4f_Projection(erd.Fov, 0.001f, 10.0f, ovrProjection_RightHanded); _compositeEyeProjections[eye] = toGlm(ovrPerspectiveProjection); _eyeOffsets[eye] = erd.HmdToEyeViewOffset; eyeSizes[eye] = toGlm(ovr_GetFovTextureSize(_session, eye, erd.Fov, 1.0f)); }); ovrFovPort combined = _eyeFovs[Left]; combined.LeftTan = std::max(_eyeFovs[Left].LeftTan, _eyeFovs[Right].LeftTan); combined.RightTan = std::max(_eyeFovs[Left].RightTan, _eyeFovs[Right].RightTan); ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(combined, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_RightHanded); _eyeProjections[Mono] = toGlm(ovrPerspectiveProjection); _desiredFramebufferSize = uvec2( eyeSizes[0].x + eyeSizes[1].x, std::max(eyeSizes[0].y, eyeSizes[1].y)); if (!OVR_SUCCESS(ovr_ConfigureTracking(_session, ovrTrackingCap_Orientation | ovrTrackingCap_Position | ovrTrackingCap_MagYawCorrection, 0))) { qFatal("Could not attach to sensor device"); } // Parent class relies on our _session initialization, so it must come after that. memset(&_sceneLayer, 0, sizeof(ovrLayerEyeFov)); _sceneLayer.Header.Type = ovrLayerType_EyeFov; _sceneLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; ovr_for_each_eye([&](ovrEyeType eye) { ovrFovPort & fov = _sceneLayer.Fov[eye] = _eyeRenderDescs[eye].Fov; ovrSizei & size = _sceneLayer.Viewport[eye].Size = ovr_GetFovTextureSize(_session, eye, fov, 1.0f); _sceneLayer.Viewport[eye].Pos = { eye == ovrEye_Left ? 0 : size.w, 0 }; }); }
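// A sketch of the combined-FOV construction used above for the mono
// projection, extended to all four tangents (hypothetical helper; taking the
// max of each tangent yields a superset frustum enclosing both eyes, handy
// for culling):
ovrFovPort combineFov(const ovrFovPort& l, const ovrFovPort& r)
{
    ovrFovPort c;
    c.LeftTan  = std::max(l.LeftTan,  r.LeftTan);
    c.RightTan = std::max(l.RightTan, r.RightTan);
    c.UpTan    = std::max(l.UpTan,    r.UpTan);
    c.DownTan  = std::max(l.DownTan,  r.DownTan);
    return c;
}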
/** * Render the Virtual Cinema Theatre. ***/ void* OculusTracker::Provoke(void* pThis, int eD3D, int eD3DInterface, int eD3DMethod, DWORD dwNumberConnected, int& nProvokerIndex) { // update game timer m_cGameTimer.Tick(); static UINT unFrameSkip = 200; if (unFrameSkip > 0) { unFrameSkip--; return nullptr; } // #define _DEBUG_OTR #ifdef _DEBUG_OTR { wchar_t buf[128]; wsprintf(buf, L"[OTR] ifc %u mtd %u", eD3DInterface, eD3DMethod); OutputDebugString(buf); } #endif // save ini file ? if (m_nIniFrameCount) { if (m_nIniFrameCount == 1) SaveIniSettings(); m_nIniFrameCount--; } // main menu update ? if (m_sMenu.bOnChanged) { // set back event bool, set ini file frame count m_sMenu.bOnChanged = false; m_nIniFrameCount = 300; // loop through entries for (size_t nIx = 0; nIx < m_sMenu.asEntries.size(); nIx++) { // entry index changed ? if (m_sMenu.asEntries[nIx].bOnChanged) { m_sMenu.asEntries[nIx].bOnChanged = false; // touch entries ? if (nIx < 25) { // set new vk code by string m_aaunKeys[1][nIx] = GetVkCodeByString(m_sMenu.asEntries[nIx].astrValueEnumeration[m_sMenu.asEntries[nIx].unValue]); } } } } if (m_hSession) { #pragma region controller // controller indices static const uint32_t s_unIndexRemote = 0; static const uint32_t s_unIndexTouch = 1; static const uint32_t s_unIndexXBox = 2; // get all connected input states ovrInputState sInputState[3] = {}; unsigned int unControllersConnected = ovr_GetConnectedControllerTypes(m_hSession); #pragma region Remote if (unControllersConnected & ovrControllerType_Remote) { ovr_GetInputState(m_hSession, ovrControllerType_Remote, &sInputState[s_unIndexRemote]); // handle all remote buttons except Oculus private ones if (sInputState[s_unIndexRemote].Buttons & ovrButton_Up) m_sMenu.bOnUp = true; if (sInputState[s_unIndexRemote].Buttons & ovrButton_Down) m_sMenu.bOnDown = true; if (sInputState[s_unIndexRemote].Buttons & ovrButton_Left) m_sMenu.bOnLeft = true; if (sInputState[s_unIndexRemote].Buttons & ovrButton_Right) m_sMenu.bOnRight = true; if (sInputState[s_unIndexRemote].Buttons & ovrButton_Enter) m_sMenu.bOnAccept = true; if (sInputState[s_unIndexRemote].Buttons & ovrButton_Back) m_sMenu.bOnBack = true; } #pragma endregion #pragma region touch if (unControllersConnected & ovrControllerType_Touch) { // get input state ovr_GetInputState(m_hSession, ovrControllerType_Touch, &sInputState[s_unIndexTouch]); // loop through controller buttons for (UINT unButtonIx = 0; unButtonIx < unButtonNo; unButtonIx++) { // cast keyboard event if (sInputState[s_unIndexTouch].Buttons & aunButtonIds[unButtonIx]) { if (!m_aabKeys[s_unIndexTouch][unButtonIx]) MapButtonDown(s_unIndexTouch, unButtonIx); } else if (m_aabKeys[s_unIndexTouch][unButtonIx]) MapButtonUp(s_unIndexTouch, unButtonIx); } } #pragma endregion if (unControllersConnected & ovrControllerType_XBox) ovr_GetInputState(m_hSession, ovrControllerType_XBox, &sInputState[s_unIndexXBox]); #pragma endregion #pragma region hmd /*// Start the sensor which informs of the Rift's pose and motion .... obsolete for SDK 1.3.x ?? 
ovr_ConfigureTracking(m_hSession, ovrTrackingCap_Orientation | ovrTrackingCap_MagYawCorrection | ovrTrackingCap_Position, 0);*/ // get the current tracking state ovrTrackingState sTrackingState = ovr_GetTrackingState(m_hSession, ovr_GetTimeInSeconds(), false); if (TRUE)//(sTrackingState.StatusFlags & (ovrStatus_OrientationTracked | ovrStatus_PositionTracked)) { // get pose ovrPoseStatef sPoseState = sTrackingState.HeadPose; m_sPose = sPoseState.ThePose; m_sOrientation.x = m_sPose.Orientation.x; m_sOrientation.y = m_sPose.Orientation.y; m_sOrientation.z = m_sPose.Orientation.z; m_sOrientation.w = m_sPose.Orientation.w; // backup old euler angles and velocity float fEulerOld[3]; float fEulerVelocityOld[3]; memcpy(&fEulerOld[0], &m_fEuler[0], sizeof(float)* 3); memcpy(&fEulerVelocityOld[0], &m_fEulerVelocity[0], sizeof(float)* 3); // predicted euler angles ? for Oculus, due to ATW, we do not predict the euler angles if (FALSE) { // get angles m_sOrientation.GetEulerAngles<Axis::Axis_Y, Axis::Axis_X, Axis::Axis_Z, RotateDirection::Rotate_CW, HandedSystem::Handed_R >(&m_fEuler[1], &m_fEuler[0], &m_fEuler[2]); // quick fix here... m_fEuler[1] *= -1.0f; m_fEuler[0] *= -1.0f; m_fEuler[2] *= -1.0f; // get euler velocity + acceleration float fEulerAcceleration[3]; for (UINT unI = 0; unI < 3; unI++) { // get the velocity m_fEulerVelocity[unI] = (m_fEuler[unI] - fEulerOld[unI]) / (float)m_cGameTimer.DeltaTime(); // get the acceleration fEulerAcceleration[unI] = (m_fEulerVelocity[unI] - fEulerVelocityOld[unI]) / (float)m_cGameTimer.DeltaTime(); } // get predicted euler for (UINT unI = 0; unI < 3; unI++) { // compute predicted euler m_fEulerPredicted[unI] = (0.5f * fEulerAcceleration[unI] * ((float)m_cGameTimer.DeltaTime() * (float)m_cGameTimer.DeltaTime())) + (m_fEulerVelocity[unI] * (float)m_cGameTimer.DeltaTime()) + m_fEuler[unI]; } } else { // get angles m_sOrientation.GetEulerAngles<Axis::Axis_Y, Axis::Axis_X, Axis::Axis_Z, RotateDirection::Rotate_CW, HandedSystem::Handed_R >(&m_fEulerPredicted[1], &m_fEulerPredicted[0], &m_fEulerPredicted[2]); // quick fix here... 
m_fEulerPredicted[1] *= -1.0f; m_fEulerPredicted[0] *= -1.0f; m_fEulerPredicted[2] *= -1.0f; } // set the drawing update to true m_bControlUpdate = true; // set position m_afPosition[0] = (float)-m_sPose.Position.x - m_afPositionOrigin[0]; m_afPosition[1] = (float)-m_sPose.Position.y - m_afPositionOrigin[1]; m_afPosition[2] = (float)m_sPose.Position.z + m_afPositionOrigin[2]; // get eye render pose and other fields ovrEyeRenderDesc asEyeRenderDesc[2]; asEyeRenderDesc[0] = ovr_GetRenderDesc(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0]); asEyeRenderDesc[1] = ovr_GetRenderDesc(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1]); ovrPosef asHmdToEyePose[2] = { asEyeRenderDesc[0].HmdToEyePose, asEyeRenderDesc[1].HmdToEyePose }; //ovrVector3f asHmdToEyeViewOffset[2] = { asEyeRenderDesc[0].HmdToEyePose, asEyeRenderDesc[1].HmdToEyePose }; ovrPosef asEyeRenderPose[2]; static long long s_frameIndex = 0; static double s_sensorSampleTime = 0.0; // sensorSampleTime is fed into the layer later ovr_GetEyePoses(m_hSession, s_frameIndex, ovrTrue, asHmdToEyePose, asEyeRenderPose, &s_sensorSampleTime); // ovr_CalcEyePoses(sTrackingState.HeadPose.ThePose, asHmdToEyePose, asEyeRenderPose); // create rotation matrix from euler angles D3DXMATRIX sRotation; D3DXMATRIX sPitch, sYaw, sRoll; D3DXMatrixRotationX(&sPitch, m_fEulerPredicted[0]); D3DXMatrixRotationY(&sYaw, m_fEulerPredicted[1]); D3DXMatrixRotationZ(&sRoll, -m_fEulerPredicted[2]); sRotation = sYaw * sPitch * sRoll; // create per eye view matrix from rotation and position D3DXMATRIX sView[2]; for (UINT unEye = 0; unEye < 2; unEye++) { D3DXMATRIX sTranslation; D3DXMatrixTranslation(&sTranslation, (float)-asEyeRenderPose[unEye].Position.x - m_afPositionOrigin[0], (float)-asEyeRenderPose[unEye].Position.y - m_afPositionOrigin[1], (float)asEyeRenderPose[unEye].Position.z + m_afPositionOrigin[2]); sView[unEye] = sTranslation * sRotation; } // create head pose view matrix D3DXMATRIX sTranslation; D3DXMatrixTranslation(&sTranslation, (float)-sTrackingState.HeadPose.ThePose.Position.x - m_afPositionOrigin[0], (float)-sTrackingState.HeadPose.ThePose.Position.y - m_afPositionOrigin[1], (float)sTrackingState.HeadPose.ThePose.Position.z + m_afPositionOrigin[2]); m_sView = sTranslation * sRotation; // create inverse view matrix D3DXMATRIX sVInv = {}; D3DXMatrixInverse(&sVInv, nullptr, &m_sView); // get projection matrices left/right D3DXMATRIX asToEye[2]; D3DXMATRIX asProjection[2]; for (UINT unEye = 0; unEye < 2; unEye++) { // get ovr projection ovrMatrix4f sProj = ovrMatrix4f_Projection(m_sHMDDesc.DefaultEyeFov[unEye], 0.01f, 30.0f, ovrProjection_LeftHanded); // create dx projection asProjection[unEye] = D3DXMATRIX(&sProj.M[0][0]); D3DXMatrixTranspose(&asProjection[unEye], &asProjection[unEye]); // create eventual projection using inverse matrix of the head pose view matrix m_asProjection[unEye] = sVInv * sView[unEye] * asProjection[unEye]; } } #pragma endregion } else { // Initialize LibOVR, and the Rift... then create hmd handle ovrResult result = ovr_Initialize(nullptr); if (!OVR_SUCCESS(result)) { OutputDebugString(L"[OVR] Failed to initialize libOVR."); return nullptr; } result = ovr_Create(&m_hSession, &m_sLuid); if (!OVR_SUCCESS(result)) { OutputDebugString(L"[OVR] Failed to retrieve HMD handle."); return nullptr; } else OutputDebugString(L"[OVR] HMD handle initialized!"); if (m_hSession) { // get the description and set pointers m_sHMDDesc = ovr_GetHmdDesc(m_hSession); // Configure Stereo settings.
ovrSizei sRecommenedTex0Size = ovr_GetFovTextureSize(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0], 1.0f); ovrSizei sRecommenedTex1Size = ovr_GetFovTextureSize(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1], 1.0f); ovrSizei sTextureSize; sTextureSize.w = max(sRecommenedTex0Size.w, sRecommenedTex1Size.w); sTextureSize.h = max(sRecommenedTex0Size.h, sRecommenedTex1Size.h); m_unRenderTextureWidth = (UINT)sTextureSize.w; m_unRenderTextureHeight = (UINT)sTextureSize.h; // get view offset ovrEyeRenderDesc asEyeRenderDesc[2]; asEyeRenderDesc[0] = ovr_GetRenderDesc(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0]); asEyeRenderDesc[1] = ovr_GetRenderDesc(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1]); ovrVector3f asViewOffset[2] = { asEyeRenderDesc[0].HmdToEyePose.Position, asEyeRenderDesc[1].HmdToEyePose.Position }; // get projection matrices left/right D3DXMATRIX asToEye[2]; D3DXMATRIX asProjection[2]; for (UINT unEye = 0; unEye < 2; unEye++) { // get ovr projection ovrMatrix4f sProj = ovrMatrix4f_Projection(m_sHMDDesc.DefaultEyeFov[unEye], 0.01f, 30.0f, ovrProjection_LeftHanded); // create dx projection asProjection[unEye] = D3DXMATRIX(&sProj.M[0][0]); D3DXMatrixTranspose(&asProjection[unEye], &asProjection[unEye]); // create view offset translation matrix D3DXMatrixTranslation(&asToEye[unEye], -asViewOffset[unEye].x, -asViewOffset[unEye].y, -asViewOffset[unEye].z); // create eventual projection m_asProjection[unEye] = asToEye[unEye] * asProjection[unEye]; } } } return nullptr; }
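// The OVR-to-D3DX projection conversion appears twice above (per-frame and
// at init). A sketch of the shared piece as a hypothetical helper: fetch the
// OVR projection, then transpose it into D3DX's row-major, row-vector
// convention, exactly as the loops above do:
D3DXMATRIX OvrProjectionToD3DX(const ovrFovPort& sFov, float fNear, float fFar)
{
    ovrMatrix4f sProj = ovrMatrix4f_Projection(sFov, fNear, fFar, ovrProjection_LeftHanded);
    D3DXMATRIX sOut(&sProj.M[0][0]);
    D3DXMatrixTranspose(&sOut, &sOut);
    return sOut;
}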
// Display to an HMD with OVR SDK backend. void displayHMD() { ovrSessionStatus sessionStatus; ovr_GetSessionStatus(g_session, &sessionStatus); if (sessionStatus.HmdPresent == false) { displayMonitor(); return; } const ovrHmdDesc& hmdDesc = m_Hmd; double sensorSampleTime; // sensorSampleTime is fed into the layer later if (g_hmdVisible) { // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime. ovrEyeRenderDesc eyeRenderDesc[2]; eyeRenderDesc[0] = ovr_GetRenderDesc(g_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]); eyeRenderDesc[1] = ovr_GetRenderDesc(g_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]); // Get eye poses, feeding in correct IPD offset ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset }; #if 0 // Get both eye poses simultaneously, with IPD offset already included. double displayMidpointSeconds = ovr_GetPredictedDisplayTime(g_session, 0); ovrTrackingState hmdState = ovr_GetTrackingState(g_session, displayMidpointSeconds, ovrTrue); ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeOffset, m_eyePoses); #else ovr_GetEyePoses(g_session, g_frameIndex, ovrTrue, HmdToEyeOffset, m_eyePoses, &sensorSampleTime); #endif storeHmdPose(m_eyePoses[0]); for (int eye = 0; eye < 2; ++eye) { const FBO& swapfbo = m_swapFBO[eye]; const ovrTextureSwapChain& chain = g_textureSwapChain[eye]; int curIndex; ovr_GetTextureSwapChainCurrentIndex(g_session, chain, &curIndex); GLuint curTexId; ovr_GetTextureSwapChainBufferGL(g_session, chain, curIndex, &curTexId); glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0); glViewport(0, 0, swapfbo.w, swapfbo.h); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glEnable(GL_FRAMEBUFFER_SRGB); { glClearColor(0.3f, 0.3f, 0.3f, 0.f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale); ovrRecti vp = { 0, 0, downSize.w, downSize.h }; const int texh = swapfbo.h; vp.Pos.y = (texh - vp.Size.h) / 2; glViewport(vp.Pos.x, vp.Pos.y, vp.Size.w, vp.Size.h); // Cinemascope - letterbox bars scissoring off pixels above and below vp center const float hc = .5f * m_cinemaScope; const int scisPx = static_cast<int>(hc * static_cast<float>(vp.Size.h)); ovrRecti sp = vp; sp.Pos.y += scisPx; sp.Size.h -= 2 * scisPx; glScissor(sp.Pos.x, sp.Pos.y, sp.Size.w, sp.Size.h); glEnable(GL_SCISSOR_TEST); glEnable(GL_DEPTH_TEST); // Render the scene for the current eye const ovrPosef& eyePose = m_eyePoses[eye]; const glm::mat4 mview = makeWorldToChassisMatrix() * makeMatrixFromPose(eyePose, m_headSize); const ovrMatrix4f ovrproj = ovrMatrix4f_Projection(hmdDesc.DefaultEyeFov[eye], 0.2f, 1000.0f, ovrProjection_None); const glm::mat4 proj = makeGlmMatrixFromOvrMatrix(ovrproj); g_pScene->RenderForOneEye(glm::value_ptr(glm::inverse(mview)), glm::value_ptr(proj)); const ovrTextureSwapChain& chain = g_textureSwapChain[eye]; const ovrResult commitres = ovr_CommitTextureSwapChain(g_session, chain); if (!OVR_SUCCESS(commitres)) { LOG_ERROR("ovr_CommitTextureSwapChain returned %d", commitres); return; } } glDisable(GL_SCISSOR_TEST); // Grab a copy of the left eye's undistorted render output for presentation // to the desktop window instead of the barrel distorted mirror texture. // This blit, while cheap, could cost some framerate to the HMD. 
// An over-the-shoulder view is another option, at a greater performance cost. if (0) { if (eye == ovrEyeType::ovrEye_Left) { BlitLeftEyeRenderToUndistortedMirrorTexture(); } } glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_FRAMEBUFFER, 0); } } std::vector<const ovrLayerHeader*> layerHeaders; { // Do distortion rendering, Present and flush/sync ovrLayerEyeFov ld; ld.Header.Type = ovrLayerType_EyeFov; ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL. for (int eye = 0; eye < 2; ++eye) { const FBO& swapfbo = m_swapFBO[eye]; const ovrTextureSwapChain& chain = g_textureSwapChain[eye]; ld.ColorTexture[eye] = chain; const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale); ovrRecti vp = { 0, 0, downSize.w, downSize.h }; const int texh = swapfbo.h; vp.Pos.y = (texh - vp.Size.h) / 2; ld.Viewport[eye] = vp; ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye]; ld.RenderPose[eye] = m_eyePoses[eye]; ld.SensorSampleTime = sensorSampleTime; } layerHeaders.push_back(&ld.Header); // Submit layers to HMD for display ovrLayerQuad ql; if (g_tweakbarQuad.m_showQuadInWorld) { ql.Header.Type = ovrLayerType_Quad; ql.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL. ql.ColorTexture = g_tweakbarQuad.m_swapChain; ovrRecti vp; vp.Pos.x = 0; vp.Pos.y = 0; vp.Size.w = 600; ///@todo vp.Size.h = 600; ///@todo ql.Viewport = vp; ql.QuadPoseCenter = g_tweakbarQuad.m_QuadPoseCenter; ql.QuadSize = { 1.f, 1.f }; ///@todo Pass in g_tweakbarQuad.SetHmdEyeRay(m_eyePoses[ovrEyeType::ovrEye_Left]); // Writes to m_layerQuad.QuadPoseCenter g_tweakbarQuad.DrawToQuad(); layerHeaders.push_back(&ql.Header); } } #if 0 ovrViewScaleDesc viewScaleDesc; viewScaleDesc.HmdToEyeOffset[0] = m_eyeOffsets[0]; viewScaleDesc.HmdToEyeOffset[1] = m_eyeOffsets[1]; viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.f; #endif const ovrResult result = ovr_SubmitFrame(g_session, g_frameIndex, nullptr, &layerHeaders[0], layerHeaders.size()); if (result == ovrSuccess) { g_hmdVisible = true; } else if (result == ovrSuccess_NotVisible) { g_hmdVisible = false; ///@todo Enter a lower-power, polling "no focus/HMD not worn" mode } else if (result == ovrError_DisplayLost) { LOG_INFO("ovr_SubmitFrame returned ovrError_DisplayLost"); g_hmdVisible = false; ///@todo Tear down textures and session and re-create } else { LOG_INFO("ovr_SubmitFrame returned %d", result); //g_hmdVisible = false; } // Handle OVR session events ovr_GetSessionStatus(g_session, &sessionStatus); if (sessionStatus.ShouldQuit) { glfwSetWindowShouldClose(g_pMirrorWindow, 1); } if (sessionStatus.ShouldRecenter) { ovr_RecenterTrackingOrigin(g_session); } // Blit mirror texture to monitor window if (g_hmdVisible) { glViewport(0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y); const FBO& srcFBO = m_mirrorFBO; glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO.id); glBlitFramebuffer( 0, srcFBO.h, srcFBO.w, 0, 0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y, GL_COLOR_BUFFER_BIT, GL_NEAREST); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); } else { displayMonitor(); } ++g_frameIndex; #ifdef USE_ANTTWEAKBAR if (g_tweakbarQuad.m_showQuadInWorld) { TwDraw(); } #endif }
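// The mirror blit above reads from m_mirrorFBO. A minimal sketch (SDK 1.x
// API; g_session and the window size are assumed available, error handling
// elided) of creating the GL mirror texture and wrapping it in a read
// framebuffer:
ovrMirrorTextureDesc mirrorDesc = {};
mirrorDesc.Width = g_mirrorWindowSz.x;
mirrorDesc.Height = g_mirrorWindowSz.y;
mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
ovrMirrorTexture mirror = nullptr;
ovr_CreateMirrorTextureGL(g_session, &mirrorDesc, &mirror);
GLuint mirrorTexId = 0;
ovr_GetMirrorTextureBufferGL(g_session, mirror, &mirrorTexId);
GLuint mirrorFboId = 0;
glGenFramebuffers(1, &mirrorFboId);
glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFboId);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexId, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);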