void OculusWindow::start_frame() {
    GlfwWindow::start_frame();

    auto ftiming = ovr_GetPredictedDisplayTime(hmd_session_, 0);
    ovrTrackingState hmdState = ovr_GetTrackingState(hmd_session_, ftiming, true);

    ovrEyeRenderDesc eyeRenderDesc[2];
    ovrVector3f hmdToEyeViewOffset[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(hmd_session_, ovrEye_Left, hmd_desc_.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(hmd_session_, ovrEye_Right, hmd_desc_.DefaultEyeFov[1]);
    hmdToEyeViewOffset[0] = eyeRenderDesc[0].HmdToEyeOffset;
    hmdToEyeViewOffset[1] = eyeRenderDesc[1].HmdToEyeOffset;

    ovr_CalcEyePoses(hmdState.HeadPose.ThePose, hmdToEyeViewOffset, color_layer_.RenderPose);

    if (hmdState.StatusFlags & (ovrStatus_OrientationTracked | ovrStatus_PositionTracked)) {
        auto pose = hmdState.HeadPose.ThePose;
        scm::math::quat<double> rot_quat(pose.Orientation.w,
                                         pose.Orientation.x,
                                         pose.Orientation.y,
                                         pose.Orientation.z);
        hmd_sensor_orientation_ = scm::math::make_translation((double)pose.Position.x,
                                                              (double)pose.Position.y,
                                                              (double)pose.Position.z)
                                  * rot_quat.to_matrix();
    }
}
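// A minimal sketch of what the scm-based composition above computes, using only
// OVR_CAPI.h types: an ovrPosef expanded into a column-major 4x4 matrix
// (translation * rotation). The column-major layout is an assumption for
// illustration; the convention in practice depends on the math library in use.
#include <OVR_CAPI.h>

static void posefToColumnMajor(const ovrPosef& pose, float m[16]) {
    const float w = pose.Orientation.w, x = pose.Orientation.x;
    const float y = pose.Orientation.y, z = pose.Orientation.z;
    // Rotation block from the unit quaternion.
    m[0] = 1 - 2 * (y * y + z * z); m[4] = 2 * (x * y - w * z);     m[8]  = 2 * (x * z + w * y);
    m[1] = 2 * (x * y + w * z);     m[5] = 1 - 2 * (x * x + z * z); m[9]  = 2 * (y * z - w * x);
    m[2] = 2 * (x * z - w * y);     m[6] = 2 * (y * z + w * x);     m[10] = 1 - 2 * (x * x + y * y);
    m[3] = m[7] = m[11] = 0.0f;
    // Translation in the last column, matching translation * rotation.
    m[12] = pose.Position.x; m[13] = pose.Position.y; m[14] = pose.Position.z; m[15] = 1.0f;
}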
bool OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
    _currentRenderFrameInfo = FrameInfo();
    _currentRenderFrameInfo.sensorSampleTime = ovr_GetTimeInSeconds();
    _currentRenderFrameInfo.predictedDisplayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);
    auto trackingState = ovr_GetTrackingState(_session, _currentRenderFrameInfo.predictedDisplayTime, ovrTrue);
    _currentRenderFrameInfo.renderPose = toGlm(trackingState.HeadPose.ThePose);
    _currentRenderFrameInfo.presentPose = _currentRenderFrameInfo.renderPose;

    std::array<glm::mat4, 2> handPoses;
    // Make controller poses available to the presentation thread
    ovr_for_each_hand([&](ovrHandType hand) {
        // Both status bits must be present, so OR them into the mask
        // (ANDing the two distinct bits would yield an empty mask).
        static const auto REQUIRED_HAND_STATUS = ovrStatus_OrientationTracked | ovrStatus_PositionTracked;
        if (REQUIRED_HAND_STATUS != (trackingState.HandStatusFlags[hand] & REQUIRED_HAND_STATUS)) {
            return;
        }
        auto correctedPose = ovrControllerPoseToHandPose(hand, trackingState.HandPoses[hand]);
        static const glm::quat HAND_TO_LASER_ROTATION = glm::rotation(Vectors::UNIT_Z, Vectors::UNIT_NEG_Y);
        handPoses[hand] = glm::translate(glm::mat4(), correctedPose.translation) *
                          glm::mat4_cast(correctedPose.rotation * HAND_TO_LASER_ROTATION);
    });

    withRenderThreadLock([&] {
        _uiModelTransform = DependencyManager::get<CompositorHelper>()->getModelTransform();
        _handPoses = handPoses;
        _frameInfos[frameIndex] = _currentRenderFrameInfo;
    });
    return Parent::beginFrameRender(frameIndex);
}
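// Why the combined-mask test above needs the bits ORed together, shown in
// isolation. This sketch assumes the LibOVR status-bit values from OVR_CAPI.h
// (ovrStatus_OrientationTracked = 0x0001, ovrStatus_PositionTracked = 0x0002);
// with those values, a hand tracked only by orientation fails the check:
static_assert((ovrStatus_OrientationTracked & ovrStatus_PositionTracked) == 0,
              "distinct status bits share no bits; an ANDed mask would accept everything");
static_assert((ovrStatus_OrientationTracked &
               (ovrStatus_OrientationTracked | ovrStatus_PositionTracked)) !=
              (ovrStatus_OrientationTracked | ovrStatus_PositionTracked),
              "orientation alone does not satisfy the combined mask");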
glm::mat4 OculusBaseDisplayPlugin::getHeadPose(uint32_t frameIndex) const {
    static uint32_t lastFrameSeen = 0;
    auto displayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);
    // Only the first query for a new frame index passes latencyMarker = true,
    // so repeated pose queries for the same frame do not skew the SDK's timing.
    auto trackingState = ovr_GetTrackingState(_session, displayTime, frameIndex > lastFrameSeen);
    if (frameIndex > lastFrameSeen) {
        lastFrameSeen = frameIndex;
    }
    return toGlm(trackingState.HeadPose.ThePose);
}
DLL_EXPORT_API void xnOvrUpdate(xnOvrSession* session) {
    session->EyeRenderDesc[0] = ovr_GetRenderDesc(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0]);
    session->EyeRenderDesc[1] = ovr_GetRenderDesc(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1]);
    session->HmdToEyeViewOffset[0] = session->EyeRenderDesc[0].HmdToEyeOffset;
    session->HmdToEyeViewOffset[1] = session->EyeRenderDesc[1].HmdToEyeOffset;

    session->Layer.SensorSampleTime = ovr_GetPredictedDisplayTime(session->Session, 0);
    session->CurrentState = ovr_GetTrackingState(session->Session, session->Layer.SensorSampleTime, ovrTrue);
    ovr_CalcEyePoses(session->CurrentState.HeadPose.ThePose, session->HmdToEyeViewOffset, session->Layer.RenderPose);
}
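// Note: xnOvrUpdate above reuses the predicted display time as the layer's
// SensorSampleTime. The convention in the SDK samples (and elsewhere in this
// file) is instead to record the wall-clock time at which tracking was
// sampled, kept as close to the ovr_GetTrackingState call as possible. A
// sketch of that variant; the helper name is illustrative:
static void xnOvrUpdateSampleTimeVariant(xnOvrSession* session) {
    double predicted = ovr_GetPredictedDisplayTime(session->Session, 0);
    // Timestamp the tracking sample itself, not the display time.
    session->Layer.SensorSampleTime = ovr_GetTimeInSeconds();
    session->CurrentState = ovr_GetTrackingState(session->Session, predicted, ovrTrue);
    ovr_CalcEyePoses(session->CurrentState.HeadPose.ThePose, session->HmdToEyeViewOffset, session->Layer.RenderPose);
}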
bool FGearVR::GetEyePoses(const FGameFrame& InFrame, ovrPosef outEyePoses[2], ovrSensorState& outSensorState)
{
    FScopeLock lock(&OvrMobileLock);
    if (!OvrMobile_RT) {
        return false;
    }

    double predictedTime = 0.0;
    const double now = ovr_GetTimeInSeconds();
    if (IsInGameThread()) {
        if (OCFlags.NeedResetOrientationAndPosition) {
            ResetOrientationAndPosition(ResetToYaw);
        }

        // Get the latest head tracking state, predicted ahead to the midpoint of the time
        // it will be displayed. It will always be corrected to the real values by
        // time warp, but the closer we get, the less black will be pulled in at the edges.
        static double prev;
        const double rawDelta = now - prev;
        prev = now;
        const double clampedPrediction = FMath::Min(0.1, rawDelta * 2);
        predictedTime = now + clampedPrediction;
        //UE_LOG(LogHMD, Log, TEXT("GT Frame %d, predicted time: %.6f, delta %.6f"), InFrame.FrameNumber, (float)(clampedPrediction), float(rawDelta));
    }
    else if (IsInRenderingThread()) {
        predictedTime = ovr_GetPredictedDisplayTime(OvrMobile_RT, 1, 1);
        //UE_LOG(LogHMD, Log, TEXT("RT Frame %d, predicted time: %.6f"), InFrame.FrameNumber, (float)(predictedTime - now));
    }
    outSensorState = ovr_GetPredictedSensorState(OvrMobile_RT, predictedTime);

    OVR::Posef hmdPose = (OVR::Posef)outSensorState.Predicted.Pose;
    // HmdToEyeViewOffset is currently only a 3D vector.
    // (Negate it because the offset is a view-matrix offset, not a camera offset.)
    OVR::Vector3f transl0 = hmdPose.Orientation.Rotate(-((OVR::Vector3f)InFrame.GetSettings()->HmdToEyeViewOffset[0])) + hmdPose.Position;
    OVR::Vector3f transl1 = hmdPose.Orientation.Rotate(-((OVR::Vector3f)InFrame.GetSettings()->HmdToEyeViewOffset[1])) + hmdPose.Position;
    outEyePoses[0].Orientation = outEyePoses[1].Orientation = outSensorState.Predicted.Pose.Orientation;
    outEyePoses[0].Position = transl0;
    outEyePoses[1].Position = transl1;
    return true;
}
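// Worked example (sketch) of the game-thread prediction above, isolated into a
// helper for clarity; the function name is illustrative. At a steady 60 Hz,
// rawDelta ~= 0.0167 s, so the pose is predicted min(0.1, 2 * 0.0167) ~= 33 ms
// ahead; after a long hitch (rawDelta = 0.2 s) the prediction is clamped to the
// 100 ms cap rather than overshooting.
static double PredictedTimeFor(double now, double& prev)
{
    const double rawDelta = now - prev;
    prev = now;
    return now + FMath::Min(0.1, rawDelta * 2);
}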
void OculusVR::OnRenderStart() {
    m_hmdToEyeOffset[0] = m_eyeRenderDesc[0].HmdToEyeOffset;
    m_hmdToEyeOffset[1] = m_eyeRenderDesc[1].HmdToEyeOffset;

    // This tracking state is fetched only for the debug display; it is not
    // needed just to get rendering working.
    m_frameTiming = ovr_GetPredictedDisplayTime(m_hmdSession, 0);
    m_trackingState = ovr_GetTrackingState(m_hmdSession, m_frameTiming, ovrTrue);

    // Get both eye poses simultaneously, with IPD offset already included.
    ovr_GetEyePoses(m_hmdSession, m_frameIndex, ovrTrue, m_hmdToEyeOffset, m_eyeRenderPose, &m_sensorSampleTime);

    // Set the render texture in the swap chain.
    int curIndex;
    ovr_GetTextureSwapChainCurrentIndex(m_hmdSession, m_renderBuffer->m_swapTextureChain, &curIndex);
    ovr_GetTextureSwapChainBufferGL(m_hmdSession, m_renderBuffer->m_swapTextureChain, curIndex, &m_renderBuffer->m_eyeTexId);

    if (m_msaaEnabled)
        m_renderBuffer->OnRenderMSAAStart();
    else
        m_renderBuffer->OnRenderStart();
}
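// For reference, ovr_GetEyePoses above is a convenience helper that is roughly
// equivalent to the manual sequence used elsewhere in this file, with the
// sensor-sample timestamp taken near the tracking query. A sketch of that
// expansion under that assumption; the helper name is illustrative:
static void GetEyePosesExpanded(ovrSession session, long long frameIndex,
                                const ovrVector3f hmdToEyeOffset[2],
                                ovrPosef outEyePoses[2], double* outSampleTime)
{
    double displayTime = ovr_GetPredictedDisplayTime(session, frameIndex);
    *outSampleTime = ovr_GetTimeInSeconds();
    ovrTrackingState state = ovr_GetTrackingState(session, displayTime, ovrTrue);
    ovr_CalcEyePoses(state.HeadPose.ThePose, hmdToEyeOffset, outEyePoses);
}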
void VR::nextTracking() {
#if defined(_DEBUG)
    // Make sure we are only called once per frame:
    static vector<bool> called;
    if (xapp->getFramenum() < 50000) {
        size_t framenum = (size_t)xapp->getFramenum();
        assert(called.size() <= framenum);
        called.push_back(true);
        assert(called.size() == framenum + 1);
    }
#endif
    // Get both eye poses simultaneously, with IPD offset already included.
    ovrVector3f useHmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset };
    //ovrPosef temp_EyeRenderPose[2];
    double displayMidpointSeconds = ovr_GetPredictedDisplayTime(session, 0);
    ovrTrackingState ts = ovr_GetTrackingState(session, displayMidpointSeconds, false);
    ovr_CalcEyePoses(ts.HeadPose.ThePose, useHmdToEyeViewOffset, layer.RenderPose);

    ovrResult result;
    ovrBoundaryTestResult btest;
    ovrBool visible;
    result = ovr_GetBoundaryVisible(session, &visible);
    if (0) { // disabled boundary debug logging
        Log("visible = " << (visible == ovrTrue) << endl);
        result = ovr_TestBoundary(session, ovrTrackedDevice_HMD, ovrBoundary_Outer, &btest);
        if (OVR_SUCCESS(result)) {
            //Log("boundary success");
            if (result == ovrSuccess) Log("success" << endl);
            if (result == ovrSuccess_BoundaryInvalid) Log("success boundary invalid" << endl);
            if (result == ovrSuccess_DeviceUnavailable) Log("success device unavailable" << endl);
        }
    }

    layer.Fov[0] = eyeRenderDesc[0].Fov;
    layer.Fov[1] = eyeRenderDesc[1].Fov;

    // Render the two undistorted eye views into their render buffers.
    for (int eye = 0; eye < 2; eye++) {
        ovrPosef* useEyePose = &EyeRenderPose[eye];
        float* useYaw = &YawAtRender[eye];
        float Yaw = XM_PI;
        *useEyePose = layer.RenderPose[eye];
        *useYaw = Yaw;

        // Get view and projection matrices (note near Z to reduce eye strain)
        Matrix4f rollPitchYaw = Matrix4f::RotationY(Yaw);
        Matrix4f finalRollPitchYaw = rollPitchYaw * Matrix4f(useEyePose->Orientation);
        // Fix finalRollPitchYaw for the LH coordinate system by conjugating
        // with a mirror along Y and Z:
        Matrix4f s = Matrix4f::Scaling(1.0f, -1.0f, -1.0f);
        finalRollPitchYaw = s * finalRollPitchYaw * s;
        Vector3f finalUp = finalRollPitchYaw.Transform(Vector3f(0, 1, 0));
        Vector3f finalForward = finalRollPitchYaw.Transform(Vector3f(0, 0, -1));

        Vector3f Posf;
        Posf.x = xapp->camera.pos.x;
        Posf.y = xapp->camera.pos.y;
        Posf.z = xapp->camera.pos.z;
        Vector3f diff = rollPitchYaw.Transform(useEyePose->Position);
        Vector3f shiftedEyePos;
        shiftedEyePos.x = Posf.x - diff.x;
        shiftedEyePos.y = Posf.y + diff.y;
        shiftedEyePos.z = Posf.z + diff.z;
        xapp->camera.look.x = finalForward.x;
        xapp->camera.look.y = finalForward.y;
        xapp->camera.look.z = finalForward.z;
        Matrix4f view = Matrix4f::LookAtLH(shiftedEyePos, shiftedEyePos + finalForward, finalUp);
        Matrix4f projO = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 2000.0f, ovrProjection_LeftHanded);
        Matrix4fToXM(this->viewOVR[eye], view.Transposed());
        Matrix4fToXM(this->projOVR[eye], projO.Transposed());
    }
}
// Display to an HMD with OVR SDK backend.
void displayHMD()
{
    ovrSessionStatus sessionStatus;
    ovr_GetSessionStatus(g_session, &sessionStatus);
    if (sessionStatus.HmdPresent == false) {
        displayMonitor();
        return;
    }

    const ovrHmdDesc& hmdDesc = m_Hmd;
    // sensorSampleTime is fed into the layer below; initialize it here so a
    // defined value is submitted even when the eye-pose query is skipped.
    double sensorSampleTime = ovr_GetTimeInSeconds();

    if (g_hmdVisible) {
        // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the
        // returned values (e.g. HmdToEyeOffset) may change at runtime.
        ovrEyeRenderDesc eyeRenderDesc[2];
        eyeRenderDesc[0] = ovr_GetRenderDesc(g_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
        eyeRenderDesc[1] = ovr_GetRenderDesc(g_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

        // Get eye poses, feeding in the correct IPD offset
        ovrVector3f HmdToEyeOffset[2] = {
            eyeRenderDesc[0].HmdToEyeOffset,
            eyeRenderDesc[1].HmdToEyeOffset
        };
#if 0
        // Get both eye poses simultaneously, with IPD offset already included.
        double displayMidpointSeconds = ovr_GetPredictedDisplayTime(g_session, 0);
        ovrTrackingState hmdState = ovr_GetTrackingState(g_session, displayMidpointSeconds, ovrTrue);
        ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeOffset, m_eyePoses);
#else
        ovr_GetEyePoses(g_session, g_frameIndex, ovrTrue, HmdToEyeOffset, m_eyePoses, &sensorSampleTime);
#endif
        storeHmdPose(m_eyePoses[0]);

        for (int eye = 0; eye < 2; ++eye) {
            const FBO& swapfbo = m_swapFBO[eye];
            const ovrTextureSwapChain& chain = g_textureSwapChain[eye];
            int curIndex;
            ovr_GetTextureSwapChainCurrentIndex(g_session, chain, &curIndex);
            GLuint curTexId;
            ovr_GetTextureSwapChainBufferGL(g_session, chain, curIndex, &curTexId);

            glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);
            glViewport(0, 0, swapfbo.w, swapfbo.h);
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            glEnable(GL_FRAMEBUFFER_SRGB);
            {
                glClearColor(0.3f, 0.3f, 0.3f, 0.f);
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

                const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale);
                ovrRecti vp = { 0, 0, downSize.w, downSize.h };
                const int texh = swapfbo.h;
                vp.Pos.y = (texh - vp.Size.h) / 2;
                glViewport(vp.Pos.x, vp.Pos.y, vp.Size.w, vp.Size.h);

                // Cinemascope - letterbox bars scissoring off pixels above and below vp center
                const float hc = .5f * m_cinemaScope;
                const int scisPx = static_cast<int>(hc * static_cast<float>(vp.Size.h));
                ovrRecti sp = vp;
                sp.Pos.y += scisPx;
                sp.Size.h -= 2 * scisPx;
                glScissor(sp.Pos.x, sp.Pos.y, sp.Size.w, sp.Size.h);
                glEnable(GL_SCISSOR_TEST);
                glEnable(GL_DEPTH_TEST);

                // Render the scene for the current eye
                const ovrPosef& eyePose = m_eyePoses[eye];
                const glm::mat4 mview = makeWorldToChassisMatrix() * makeMatrixFromPose(eyePose, m_headSize);
                const ovrMatrix4f ovrproj = ovrMatrix4f_Projection(hmdDesc.DefaultEyeFov[eye], 0.2f, 1000.0f, ovrProjection_None);
                const glm::mat4 proj = makeGlmMatrixFromOvrMatrix(ovrproj);
                g_pScene->RenderForOneEye(glm::value_ptr(glm::inverse(mview)), glm::value_ptr(proj));

                const ovrResult commitres = ovr_CommitTextureSwapChain(g_session, chain);
                if (!OVR_SUCCESS(commitres)) {
                    LOG_ERROR("ovr_CommitTextureSwapChain returned %d", commitres);
                    return;
                }
            }
            glDisable(GL_SCISSOR_TEST);

            // Grab a copy of the left eye's undistorted render output for presentation
            // to the desktop window instead of the barrel distorted mirror texture.
            // This blit, while cheap, could cost some framerate to the HMD.
            // An over-the-shoulder view is another option, at a greater performance cost.
            if (0) {
                if (eye == ovrEyeType::ovrEye_Left) {
                    BlitLeftEyeRenderToUndistortedMirrorTexture();
                }
            }

            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
            glBindFramebuffer(GL_FRAMEBUFFER, 0);
        }
    }

    std::vector<const ovrLayerHeader*> layerHeaders;
    {
        // Do distortion rendering, Present and flush/sync
        ovrLayerEyeFov ld;
        ld.Header.Type = ovrLayerType_EyeFov;
        ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL.

        for (int eye = 0; eye < 2; ++eye) {
            const FBO& swapfbo = m_swapFBO[eye];
            const ovrTextureSwapChain& chain = g_textureSwapChain[eye];
            ld.ColorTexture[eye] = chain;

            const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale);
            ovrRecti vp = { 0, 0, downSize.w, downSize.h };
            const int texh = swapfbo.h;
            vp.Pos.y = (texh - vp.Size.h) / 2;

            ld.Viewport[eye] = vp;
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye] = m_eyePoses[eye];
            ld.SensorSampleTime = sensorSampleTime;
        }
        layerHeaders.push_back(&ld.Header);

        // Submit layers to HMD for display
        ovrLayerQuad ql;
        if (g_tweakbarQuad.m_showQuadInWorld) {
            ql.Header.Type = ovrLayerType_Quad;
            ql.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL.
            ql.ColorTexture = g_tweakbarQuad.m_swapChain;
            ovrRecti vp;
            vp.Pos.x = 0;
            vp.Pos.y = 0;
            vp.Size.w = 600; ///@todo
            vp.Size.h = 600; ///@todo
            ql.Viewport = vp;
            ql.QuadPoseCenter = g_tweakbarQuad.m_QuadPoseCenter;
            ql.QuadSize = { 1.f, 1.f }; ///@todo Pass in

            g_tweakbarQuad.SetHmdEyeRay(m_eyePoses[ovrEyeType::ovrEye_Left]); // Writes to m_layerQuad.QuadPoseCenter
            g_tweakbarQuad.DrawToQuad();
            layerHeaders.push_back(&ql.Header);
        }
    }

#if 0
    ovrViewScaleDesc viewScaleDesc;
    viewScaleDesc.HmdToEyeOffset[0] = m_eyeOffsets[0];
    viewScaleDesc.HmdToEyeOffset[1] = m_eyeOffsets[1];
    viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.f;
#endif

    const ovrResult result = ovr_SubmitFrame(g_session, g_frameIndex, nullptr, &layerHeaders[0], layerHeaders.size());
    if (result == ovrSuccess) {
        g_hmdVisible = true;
    }
    else if (result == ovrSuccess_NotVisible) {
        g_hmdVisible = false;
        ///@todo Enter a lower-power, polling "no focus/HMD not worn" mode
    }
    else if (result == ovrError_DisplayLost) {
        LOG_INFO("ovr_SubmitFrame returned ovrError_DisplayLost");
        g_hmdVisible = false;
        ///@todo Tear down textures and session and re-create
    }
    else {
        LOG_INFO("ovr_SubmitFrame returned %d", result);
        //g_hmdVisible = false;
    }

    // Handle OVR session events
    ovr_GetSessionStatus(g_session, &sessionStatus);
    if (sessionStatus.ShouldQuit) {
        glfwSetWindowShouldClose(g_pMirrorWindow, 1);
    }
    if (sessionStatus.ShouldRecenter) {
        ovr_RecenterTrackingOrigin(g_session);
    }

    // Blit mirror texture to monitor window
    if (g_hmdVisible) {
        glViewport(0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y);
        const FBO& srcFBO = m_mirrorFBO;
        glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO.id);
        glBlitFramebuffer(
            0, srcFBO.h, srcFBO.w, 0,
            0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y,
            GL_COLOR_BUFFER_BIT, GL_NEAREST);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    }
    else {
        displayMonitor();
    }
    ++g_frameIndex;

#ifdef USE_ANTTWEAKBAR
    if (g_tweakbarQuad.m_showQuadInWorld) {
        TwDraw();
    }
#endif
}
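// The disabled ovrViewScaleDesc block above can be wired up by passing the
// struct as ovr_SubmitFrame's third argument instead of nullptr, which lets
// the compositor apply a world-scale factor. A sketch; the helper name and
// the eyeOffsets parameter are illustrative, standing in for the per-eye
// HmdToEyeOffset vectors captured when the render descs were fetched:
static ovrResult SubmitWithViewScale(ovrSession session, long long frameIndex,
                                     const ovrVector3f eyeOffsets[2],
                                     const ovrLayerHeader* const* layers, unsigned int layerCount)
{
    ovrViewScaleDesc viewScaleDesc;
    viewScaleDesc.HmdToEyeOffset[0] = eyeOffsets[0];
    viewScaleDesc.HmdToEyeOffset[1] = eyeOffsets[1];
    viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.f; // 1:1 world scale
    return ovr_SubmitFrame(session, frameIndex, &viewScaleDesc, layers, layerCount);
}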
// Return true to retry later (e.g. after display lost).
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device-lost failures cleanly
    ovrTexture*    mirrorTexture = nullptr;
    OculusTexture* pEyeRenderTexture[2] = { nullptr, nullptr };
    DepthBuffer*   pEyeDepthBuffer[2] = { nullptr, nullptr };
    Scene*         roomScene = nullptr;
    Camera*        mainCam = nullptr;
    D3D11_TEXTURE2D_DESC td = {};

    ovrHmd HMD;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&HMD, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;
    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD);

    // -------------------------------------------------------------------
    // Add: create instances of the CL Eye camera capture class
    CLEyeCameraCapture* cam[2] = { NULL };
    // Query the number of connected cameras
    int numCams = CLEyeGetCameraCount();
    if (numCams == 0) {
        printf_s("No PS3Eye Camera detected\n");
        goto Done;
    }
    printf_s("Found %d cameras\n", numCams);
    for (int iCam = 0; iCam < numCams; iCam++) {
        char windowName[64];
        // Query the unique camera UUID
        GUID guid = CLEyeGetCameraUUID(iCam);
        printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n",
               iCam + 1, guid.Data1, guid.Data2, guid.Data3,
               guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3],
               guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]);
        sprintf_s(windowName, "Camera Window %d", iCam + 1);
        // Create the camera capture object
        cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30);
        cam[iCam]->StartCapture();
    }
    // -------------------------------------------------------------------

    // Setup Device and Graphics
    // Note: the mirror window can be any size; this sample uses 1/2 the HMD resolution.
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    // Make the eye render buffers (caution if actual size < requested due to HW limits).
    ovrRecti eyeRenderViewport[2];
    for (int eye = 0; eye < 2; ++eye) {
        ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
        pEyeRenderTexture[eye] = new OculusTexture();
        if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h)) {
            if (retryCreate) goto Done;
            VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture.");
        }
        pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h);
        eyeRenderViewport[eye].Pos.x = 0;
        eyeRenderViewport[eye].Pos.y = 0;
        eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureSet) {
            if (retryCreate) goto Done;
            VALIDATE(false, "Failed to create texture.");
        }
    }

    // Create a mirror to see on the monitor.
    td.ArraySize = 1;
    td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
    td.Width = DIRECTX.WinSizeW;
    td.Height = DIRECTX.WinSizeH;
    td.Usage = D3D11_USAGE_DEFAULT;
    td.SampleDesc.Count = 1;
    td.MipLevels = 1;
    result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture);
    if (!OVR_SUCCESS(result)) {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);
    // Create the camera
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

    // Setup VR components, filling out the render descriptions
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    bool isVisible = true;

    DCB portConfig;
    portConfig.BaudRate = 115200;
    portConfig.Parity = EVENPARITY;
    g_seriPort.Start("\\\\.\\COM3", &portConfig);

    // Main loop
    while (DIRECTX.HandleMessages()) {
        XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
        XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot);
        if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])   mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
        if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
        if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
        if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);
        static float Yaw = 0;
        if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
        if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

        // Animate the cube
        static float cubeClock = 0;
        roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

        // Get both eye poses simultaneously, with IPD offset already included.
        ovrPosef EyeRenderPose[2];
        ovrVector3f HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset,
                                              eyeRenderDesc[1].HmdToEyeViewOffset };
        double frameTime = ovr_GetPredictedDisplayTime(HMD, 0);
        // Keep sensorSampleTime as close to ovr_GetTrackingState as possible - it is fed into the layer
        double sensorSampleTime = ovr_GetTimeInSeconds();
        ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue);
        ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose);

        // --------------------------------------------------------------------------
        // Add: get the head yaw, pitch and roll
        float hmdPitch = 0.0f;
        float hmdRoll = 0.0f;
        float hmdYaw = 0.0f;
        OVR::Posef HeadPose = hmdState.HeadPose.ThePose;
        HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll);
        SetPos(2, ServoRoll(hmdYaw));
        SetPos(3, ServoRoll(hmdPitch));
        // --------------------------------------------------------------------------

        // Render the scene to the eye buffers
        if (isVisible) {
            for (int eye = 0; eye < 2; ++eye) {
                // Increment to use the next texture, just before writing
                pEyeRenderTexture[eye]->AdvanceToNextTexture();

                // Clear and set up the render target
                int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex;
                DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]);
                DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

                // Get the pose information in XM format
                XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
                                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y,
                                              EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
                Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat, mainCam->Rot)));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded);
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
                XMMATRIX prod = XMMatrixMultiply(view, proj);
                roomScene->Render(&prod, 1, 1, 1, 1, true);
            }
        }
        // Initialize our single full-screen FOV layer.
        ovrLayerEyeFov ld = {};
        ld.Header.Type = ovrLayerType_EyeFov;
        ld.Header.Flags = 0;
        for (int eye = 0; eye < 2; ++eye) {
            ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet;
            ld.Viewport[eye] = eyeRenderViewport[eye];
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye] = EyeRenderPose[eye];
            ld.SensorSampleTime = sensorSampleTime;
        }

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1);
        // Exit the rendering loop if submit returns an error; will retry on ovrError_DisplayLost.
        if (!OVR_SUCCESS(result))
            goto Done;
        isVisible = (result == ovrSuccess);

        // Render mirror
        ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture;
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture);
        DIRECTX.SwapChain->Present(0, 0);
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(HMD, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye) {
        delete pEyeRenderTexture[eye];
        delete pEyeDepthBuffer[eye];
    }
    DIRECTX.ReleaseDevice();
    ovr_Destroy(HMD);
    g_seriPort.End();
    for (int iCam = 0; iCam < numCams; iCam++) {
        cam[iCam]->StopCapture();
        delete cam[iCam];
    }

    // Retry on ovrError_DisplayLost
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
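// The element shuffle passed to XMMatrixSet in the eye loop above is a
// transpose: ovrMatrix4f_Projection returns a matrix written for column
// vectors, while this renderer multiplies row vectors. A sketch of the same
// conversion using DirectXMath directly; the helper name is illustrative:
static XMMATRIX OvrProjectionToXM(const ovrMatrix4f& p)
{
    // XMMATRIX(const float*) loads 16 consecutive floats row by row,
    // so transposing afterwards reproduces the manual shuffle.
    return XMMatrixTranspose(XMMATRIX(&p.M[0][0]));
}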
//----------------------------------------------------------------------
void Tracker::Draw(ovrSession Session, RenderDevice* pRender, Player ThePlayer,
                   ovrTrackingOrigin TrackingOriginType, bool Sitting, float ExtraSittingAltitude,
                   Matrix4f* /*ViewFromWorld*/, int eye, ovrPosef* EyeRenderPose)
{
    OVR_UNUSED2(ExtraSittingAltitude, Sitting);

    // Don't render if not ready
    if (!TrackerHeadModel)
        return;

    // Initial rendering setup
    pRender->SetDepthMode(true, true);
    pRender->SetCullMode(OVR::Render::D3D11::RenderDevice::Cull_Off);

    // Draw in the local frame of reference, so get the view matrix
    Quatf eyeRot = EyeRenderPose[eye].Orientation;
    Vector3f up = eyeRot.Rotate(UpVector);
    Vector3f forward = eyeRot.Rotate(ForwardVector);
    Vector3f viewPos = EyeRenderPose[eye].Position;
    Matrix4f localViewMat = Matrix4f::LookAtRH(viewPos, viewPos + forward, up);

    // Get some useful values about the situation
    Vector3f headWorldPos = ThePlayer.GetHeadPosition(TrackingOriginType);
    ovrTrackerPose trackerPose = ovr_GetTrackerPose(Session, 0);
    Vector3f centreEyePos = ((Vector3f)(EyeRenderPose[0].Position) + (Vector3f)(EyeRenderPose[1].Position)) * 0.5f;

    double ftiming = ovr_GetPredictedDisplayTime(Session, 0);
    ovrTrackingState trackingState = ovr_GetTrackingState(Session, ftiming, ovrTrue);
    bool tracked = (trackingState.StatusFlags & ovrStatus_PositionTracked) != 0;

    // Find the altitude of the stand.
    // If we are at floor level, display the tracker stand on the physical floor.
    // If we are using the eye-level coordinate system, just render the standard height of the stalk.
    float altitudeOfFloorInLocalSpace;
    if (TrackingOriginType == ovrTrackingOrigin_FloorLevel)
        altitudeOfFloorInLocalSpace = 0.01f;
    else
        altitudeOfFloorInLocalSpace = trackerPose.Pose.Position.y - 0.22f; //0.18f;

    Vector3f localStandPos = Vector3f(trackerPose.Pose.Position.x, altitudeOfFloorInLocalSpace, trackerPose.Pose.Position.z);

    // Set the position of the tracker models according to the pose.
    TrackerHeadModel->SetPosition(trackerPose.Pose.Position);
    TrackerHeadModel->SetOrientation(trackerPose.Pose.Orientation);
    // Scale the stalk so that it has the correct physical height.
    Matrix4f stalkScale = Matrix4f::Scaling(1.0f, trackerPose.Pose.Position.y - altitudeOfFloorInLocalSpace - 0.0135f, 1.0f);
    TrackerStalkModel->SetMatrix(Matrix4f::Translation(Vector3f(trackerPose.Pose.Position) - Vector3f(0, 0.0135f, 0))
                                 * stalkScale
                                 * Matrix4f(TrackerStalkModel->GetOrientation()));
    TrackerStandModel->SetPosition(localStandPos);
    TrackerConeModel->SetPosition(trackerPose.Pose.Position);
    TrackerConeModel->SetOrientation(trackerPose.Pose.Orientation);
    TrackerLinesModel->SetPosition(trackerPose.Pose.Position);
    TrackerLinesModel->SetOrientation(trackerPose.Pose.Orientation);

    if (trackerLinesAlwaysVisible)
        pRender->SetDepthMode(false, true);

    // Set the rendering tint proportional to proximity, and red if not tracked.
    float dist = DistToBoundary(centreEyePos, trackerPose.Pose, true);
    //OVR_DEBUG_LOG(("Dist = %0.3f\n", dist));

    // This defines a color ramp at specified distances from the edge.
    // Display starting at 0.4 - 0.2 meter [alpha 0->1]
    // Turn to yellow after [0.2]
    float distThreshods[4] = { 0.0f, 0.1f, 0.2f, 0.35f };
    Vector4f thresholdColors[4] = {
        Vector4f(1.0f, 0.3f, 0.0f, 1.0f),  // Yellow-red
        Vector4f(1.0f, 1.0f, 0.0f, 0.8f),  // Yellow
        Vector4f(1.0f, 1.0f, 1.0f, 0.6f),  // White
        Vector4f(1.0f, 1.0f, 1.0f, 0.0f)   // White-transparent
    };

    // Assign the tint based on the lookup table
    Vector4f globalTint = Vector4f(1, 1, 1, 0);
    int distSearch = 0;
    if (dist <= 0.0f)
        dist = 0.001f;
    for (; distSearch < sizeof(distThreshods) / sizeof(distThreshods[0]) - 1; distSearch++) {
        if (dist < distThreshods[distSearch + 1]) {
            float startT = distThreshods[distSearch];
            float endT = distThreshods[distSearch + 1];
            float factor = (dist - startT) / (endT - startT);
            globalTint = thresholdColors[distSearch] * (1.0f - factor) + thresholdColors[distSearch + 1] * factor;
            break;
        }
    }

    if (!tracked)
        globalTint = Vector4f(1, 0, 0, 1);

    pRender->SetGlobalTint(globalTint);
    if (minimumAlphaOfTracker > globalTint.w)
        globalTint.w = minimumAlphaOfTracker;

    // We draw twice here: once with Z testing on to give a bright image,
    // and once with Z testing off to give a dim outline where it is occluded.

    // Solid background
    if (globalTint.w > 0.01) {
        pRender->SetDepthMode(true, true);
        // Draw the tracker representation
        LOCAL_RenderModelWithAlpha(pRender, TrackerStandModel, localViewMat);
        LOCAL_RenderModelWithAlpha(pRender, TrackerStalkModel, localViewMat);
        LOCAL_RenderModelWithAlpha(pRender, TrackerHeadModel, localViewMat);
        LOCAL_RenderModelWithAlpha(pRender, TrackerLinesModel, localViewMat);
        if (drawWalls)
            LOCAL_RenderModelWithAlpha(pRender, TrackerConeModel, localViewMat);
    }

    // Dim outline
    if (globalTint.w > 0.01f)
        globalTint.w = 0.01f;
    pRender->SetGlobalTint(globalTint);
    pRender->SetDepthMode(false, true);
    LOCAL_RenderModelWithAlpha(pRender, TrackerStandModel, localViewMat);
    LOCAL_RenderModelWithAlpha(pRender, TrackerStalkModel, localViewMat);
    LOCAL_RenderModelWithAlpha(pRender, TrackerHeadModel, localViewMat);
    LOCAL_RenderModelWithAlpha(pRender, TrackerLinesModel, localViewMat);
    if (drawWalls)
        LOCAL_RenderModelWithAlpha(pRender, TrackerConeModel, localViewMat);

    // Revert to rendering defaults
    pRender->SetGlobalTint(Vector4f(1, 1, 1, 1));
    pRender->SetCullMode(RenderDevice::Cull_Back);
    pRender->SetDepthMode(true, true);
}
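// Worked example (sketch) of the color ramp above: dist = 0.15 m falls in the
// [0.1, 0.2] bucket, so factor = (0.15 - 0.1) / (0.2 - 0.1) = 0.5 and the tint
// is the midpoint of yellow (1, 1, 0, 0.8) and white (1, 1, 1, 0.6), i.e.
// globalTint = (1.0, 1.0, 0.5, 0.7). Distances below 0.1 m fade toward
// yellow-red, and anything past 0.35 m keeps the fully transparent default.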