Example #1
void OculusWindow::start_frame() {
    GlfwWindow::start_frame();

    auto ftiming = ovr_GetPredictedDisplayTime(hmd_session_, 0);

    ovrTrackingState hmdState = ovr_GetTrackingState(hmd_session_, ftiming, true);

    ovrEyeRenderDesc eyeRenderDesc[2];
    ovrVector3f      hmdToEyeViewOffset[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(hmd_session_, ovrEye_Left, hmd_desc_.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(hmd_session_, ovrEye_Right, hmd_desc_.DefaultEyeFov[1]);


    hmdToEyeViewOffset[0] = eyeRenderDesc[0].HmdToEyeOffset;
    hmdToEyeViewOffset[1] = eyeRenderDesc[1].HmdToEyeOffset;

    ovr_CalcEyePoses(hmdState.HeadPose.ThePose, hmdToEyeViewOffset, color_layer_.RenderPose);


    if (hmdState.StatusFlags & (ovrStatus_OrientationTracked | ovrStatus_PositionTracked)) {

        auto pose = hmdState.HeadPose.ThePose;

        scm::math::quat<double> rot_quat(pose.Orientation.w,
                                         pose.Orientation.x,
                                         pose.Orientation.y,
                                         pose.Orientation.z);

        hmd_sensor_orientation_ = scm::math::make_translation((double)pose.Position.x, (double)pose.Position.y, (double)pose.Position.z) * rot_quat.to_matrix();
    }
}
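
For reference, a minimal sketch (reusing the scm math calls from the snippet above; the local names are illustrative and not part of OculusWindow) of how the per-eye poses that ovr_CalcEyePoses writes into color_layer_.RenderPose could be expanded into eye transforms, mirroring the translation * rotation construction used for hmd_sensor_orientation_:

// Hedged sketch: per-eye transforms from the render poses computed in start_frame().
for (int eye = 0; eye < 2; ++eye) {
    auto const& eye_pose = color_layer_.RenderPose[eye];
    scm::math::quat<double> eye_rot(eye_pose.Orientation.w,
                                    eye_pose.Orientation.x,
                                    eye_pose.Orientation.y,
                                    eye_pose.Orientation.z);
    auto eye_transform = scm::math::make_translation((double)eye_pose.Position.x,
                                                      (double)eye_pose.Position.y,
                                                      (double)eye_pose.Position.z) * eye_rot.to_matrix();
    // eye_transform would then be combined with the scene's camera transform for rendering.
}
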
bool OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
    _currentRenderFrameInfo = FrameInfo();
    _currentRenderFrameInfo.sensorSampleTime = ovr_GetTimeInSeconds();
    _currentRenderFrameInfo.predictedDisplayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);
    auto trackingState = ovr_GetTrackingState(_session, _currentRenderFrameInfo.predictedDisplayTime, ovrTrue);
    _currentRenderFrameInfo.renderPose = toGlm(trackingState.HeadPose.ThePose);
    _currentRenderFrameInfo.presentPose = _currentRenderFrameInfo.renderPose;

    std::array<glm::mat4, 2> handPoses;
    // Make controller poses available to the presentation thread
    ovr_for_each_hand([&](ovrHandType hand) {
        static const auto REQUIRED_HAND_STATUS = ovrStatus_OrientationTracked | ovrStatus_PositionTracked;
        if (REQUIRED_HAND_STATUS != (trackingState.HandStatusFlags[hand] & REQUIRED_HAND_STATUS)) {
            return;
        }

        auto correctedPose = ovrControllerPoseToHandPose(hand, trackingState.HandPoses[hand]);
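        // glm::rotation(a, b) returns the quaternion rotating direction a onto b; here it
        // re-orients the hand frame so the laser axis runs along the controller (assumed intent).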
        static const glm::quat HAND_TO_LASER_ROTATION = glm::rotation(Vectors::UNIT_Z, Vectors::UNIT_NEG_Y);
        handPoses[hand] = glm::translate(glm::mat4(), correctedPose.translation) * glm::mat4_cast(correctedPose.rotation * HAND_TO_LASER_ROTATION);
    });

    withRenderThreadLock([&] {
        _uiModelTransform = DependencyManager::get<CompositorHelper>()->getModelTransform();
        _handPoses = handPoses;
        _frameInfos[frameIndex] = _currentRenderFrameInfo;
    });
    return Parent::beginFrameRender(frameIndex);
}
  void update() {
    ovrTrackingState trackingState = ovr_GetTrackingState(hmd, 0);
    ovrPoseStatef & poseState = trackingState.HeadPose;

    orientation = ovr::toGlm(poseState.ThePose.Orientation);
    linearA = ovr::toGlm(poseState.LinearAcceleration);
    angularV = ovr::toGlm(poseState.AngularVelocity);
  }
glm::mat4 OculusBaseDisplayPlugin::getHeadPose(uint32_t frameIndex) const {
    static uint32_t lastFrameSeen = 0;
    auto displayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);
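    // latencyMarker is true only the first time a given frame index is queried, presumably so
    // repeated getHeadPose() calls for the same frame do not skew the SDK's latency timing.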
    auto trackingState = ovr_GetTrackingState(_session, displayTime, frameIndex > lastFrameSeen);
    if (frameIndex > lastFrameSeen) {
        lastFrameSeen = frameIndex;
    }
    return toGlm(trackingState.HeadPose.ThePose);
}
Point3F OculusVRSensorDevice::getPosition()
{
   if(!mIsValid)
      return Point3F();
   
   ovrTrackingState ts = ovr_GetTrackingState(mDevice, ovr_GetTimeInSeconds(), ovrTrue);
   OVR::Vector3f v = ts.HeadPose.ThePose.Position;
   return Point3F(-v.x, v.z, -v.y);
}
Example #6
	DLL_EXPORT_API void xnOvrUpdate(xnOvrSession* session)
	{
		session->EyeRenderDesc[0] = ovr_GetRenderDesc(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0]);
		session->EyeRenderDesc[1] = ovr_GetRenderDesc(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1]);
		session->HmdToEyeViewOffset[0] = session->EyeRenderDesc[0].HmdToEyeOffset;
		session->HmdToEyeViewOffset[1] = session->EyeRenderDesc[1].HmdToEyeOffset;

		session->Layer.SensorSampleTime = ovr_GetPredictedDisplayTime(session->Session, 0);
		session->CurrentState = ovr_GetTrackingState(session->Session, session->Layer.SensorSampleTime, ovrTrue);
		ovr_CalcEyePoses(session->CurrentState.HeadPose.ThePose, session->HmdToEyeViewOffset, session->Layer.RenderPose);
	}
EulerF OculusVRSensorDevice::getRawEulerRotation()
{
   if(!mIsValid)
      return EulerF::Zero;

   ovrTrackingState ts = ovr_GetTrackingState(mDevice, ovr_GetTimeInSeconds(), ovrTrue);
   OVR::Quatf orientation = ts.HeadPose.ThePose.Orientation;

   // Sensor rotation in Euler format
   EulerF rot;
   OculusVRUtil::convertRotation(orientation, rot);
   return rot;
}
EulerF OculusVRSensorDevice::getAngularVelocity()
{
   if(!mIsValid)
      return EulerF::Zero;
   
   ovrTrackingState ts = ovr_GetTrackingState(mDevice, ovr_GetTimeInSeconds(), ovrTrue);
   OVR::Vector3f v = ts.HeadPose.AngularVelocity;
   
   // Sensor angular velocity in EulerF format
   EulerF vel;
   OculusVRUtil::convertAngularVelocity(v, vel);

   return vel;
}
VectorF OculusVRSensorDevice::getAcceleration()
{
   if(!mIsValid)
      return VectorF::Zero;
   
   ovrTrackingState ts = ovr_GetTrackingState(mDevice, ovr_GetTimeInSeconds(), ovrTrue);
   OVR::Vector3f a = ts.HeadPose.LinearAcceleration;

   // Sensor acceleration in VectorF format
   VectorF acceleration;
   OculusVRUtil::convertAcceleration(a, acceleration);

   return acceleration;
}
void OculusVR::OnRenderStart()
{
    m_hmdToEyeOffset[0] = m_eyeRenderDesc[0].HmdToEyeOffset;
    m_hmdToEyeOffset[1] = m_eyeRenderDesc[1].HmdToEyeOffset;

    // this data is fetched only for the debug display; it is not needed just to get rendering working
    m_frameTiming   = ovr_GetPredictedDisplayTime(m_hmdSession, 0);
    m_trackingState = ovr_GetTrackingState(m_hmdSession, m_frameTiming, ovrTrue);

    // Get both eye poses simultaneously, with IPD offset already included.
    ovr_GetEyePoses(m_hmdSession, m_frameIndex, ovrTrue, m_hmdToEyeOffset, m_eyeRenderPose, &m_sensorSampleTime);    

    // set the render texture in swap chain
    int curIndex;
    ovr_GetTextureSwapChainCurrentIndex(m_hmdSession, m_renderBuffer->m_swapTextureChain, &curIndex);
    ovr_GetTextureSwapChainBufferGL(m_hmdSession, m_renderBuffer->m_swapTextureChain, curIndex, &m_renderBuffer->m_eyeTexId);

    if (m_msaaEnabled)
        m_renderBuffer->OnRenderMSAAStart();
    else
        m_renderBuffer->OnRenderStart();
}
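
A hedged sketch (not this project's actual code; member names follow the snippet above, and eyeViewport stands in for however the shared buffer is split between the eyes) of the end-of-frame steps that usually follow OnRenderStart: commit the swap-chain image, then describe it in a layer and submit it.

// Hedged sketch of the usual counterpart to OnRenderStart(), using the members shown above.
ovr_CommitTextureSwapChain(m_hmdSession, m_renderBuffer->m_swapTextureChain);

ovrLayerEyeFov layer = {};
layer.Header.Type      = ovrLayerType_EyeFov;
layer.Header.Flags     = ovrLayerFlag_TextureOriginAtBottomLeft;   // OpenGL convention
layer.SensorSampleTime = m_sensorSampleTime;
for (int eye = 0; eye < 2; ++eye)
{
    layer.ColorTexture[eye] = m_renderBuffer->m_swapTextureChain;  // both eyes share one chain here
    layer.Viewport[eye]     = eyeViewport[eye];                    // assumed per-eye viewport
    layer.Fov[eye]          = m_eyeRenderDesc[eye].Fov;
    layer.RenderPose[eye]   = m_eyeRenderPose[eye];
}
ovrLayerHeader* layerHeader = &layer.Header;
ovr_SubmitFrame(m_hmdSession, m_frameIndex, nullptr, &layerHeader, 1);
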
// Display to an HMD with OVR SDK backend.
void displayHMD()
{
    ovrSessionStatus sessionStatus;
    ovr_GetSessionStatus(g_session, &sessionStatus);

    if (sessionStatus.HmdPresent == false)
    {
        displayMonitor();
        return;
    }

    const ovrHmdDesc& hmdDesc = m_Hmd;
    double sensorSampleTime = 0.0; // sensorSampleTime is fed into the layer later (initialized in case the HMD is not visible this frame)
    if (g_hmdVisible)
    {
        // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime.
        ovrEyeRenderDesc eyeRenderDesc[2];
        eyeRenderDesc[0] = ovr_GetRenderDesc(g_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
        eyeRenderDesc[1] = ovr_GetRenderDesc(g_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

        // Get eye poses, feeding in correct IPD offset
        ovrVector3f HmdToEyeOffset[2] = {
            eyeRenderDesc[0].HmdToEyeOffset,
            eyeRenderDesc[1].HmdToEyeOffset };
#if 0
        // Get both eye poses simultaneously, with IPD offset already included.
        double displayMidpointSeconds = ovr_GetPredictedDisplayTime(g_session, 0);
        ovrTrackingState hmdState = ovr_GetTrackingState(g_session, displayMidpointSeconds, ovrTrue);
        ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeOffset, m_eyePoses);
#else
        ovr_GetEyePoses(g_session, g_frameIndex, ovrTrue, HmdToEyeOffset, m_eyePoses, &sensorSampleTime);
#endif
        storeHmdPose(m_eyePoses[0]);

        for (int eye = 0; eye < 2; ++eye)
        {
            const FBO& swapfbo = m_swapFBO[eye];
            const ovrTextureSwapChain& chain = g_textureSwapChain[eye];

            int curIndex;
            ovr_GetTextureSwapChainCurrentIndex(g_session, chain, &curIndex);
            GLuint curTexId;
            ovr_GetTextureSwapChainBufferGL(g_session, chain, curIndex, &curTexId);

            glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);

            glViewport(0, 0, swapfbo.w, swapfbo.h);
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            glEnable(GL_FRAMEBUFFER_SRGB);

            {
                glClearColor(0.3f, 0.3f, 0.3f, 0.f);
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

                const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale);
                ovrRecti vp = { 0, 0, downSize.w, downSize.h };
                const int texh = swapfbo.h;
                vp.Pos.y = (texh - vp.Size.h) / 2;
                glViewport(vp.Pos.x, vp.Pos.y, vp.Size.w, vp.Size.h);

                // Cinemascope - letterbox bars scissoring off pixels above and below vp center
                const float hc = .5f * m_cinemaScope;
                const int scisPx = static_cast<int>(hc * static_cast<float>(vp.Size.h));
                ovrRecti sp = vp;
                sp.Pos.y += scisPx;
                sp.Size.h -= 2 * scisPx;
                glScissor(sp.Pos.x, sp.Pos.y, sp.Size.w, sp.Size.h);
                glEnable(GL_SCISSOR_TEST);
                glEnable(GL_DEPTH_TEST);

                // Render the scene for the current eye
                const ovrPosef& eyePose = m_eyePoses[eye];
                const glm::mat4 mview =
                    makeWorldToChassisMatrix() *
                    makeMatrixFromPose(eyePose, m_headSize);
                const ovrMatrix4f ovrproj = ovrMatrix4f_Projection(hmdDesc.DefaultEyeFov[eye], 0.2f, 1000.0f, ovrProjection_None);
                const glm::mat4 proj = makeGlmMatrixFromOvrMatrix(ovrproj);
                g_pScene->RenderForOneEye(glm::value_ptr(glm::inverse(mview)), glm::value_ptr(proj));

                const ovrTextureSwapChain& chain = g_textureSwapChain[eye];
                const ovrResult commitres = ovr_CommitTextureSwapChain(g_session, chain);
                if (!OVR_SUCCESS(commitres))
                {
                    LOG_ERROR("ovr_CommitTextureSwapChain returned %d", commitres);
                    return;
                }
            }
            glDisable(GL_SCISSOR_TEST);

            // Grab a copy of the left eye's undistorted render output for presentation
            // to the desktop window instead of the barrel distorted mirror texture.
            // This blit, while cheap, could cost some framerate to the HMD.
            // An over-the-shoulder view is another option, at a greater performance cost.
            if (0)
            {
                if (eye == ovrEyeType::ovrEye_Left)
                {
                    BlitLeftEyeRenderToUndistortedMirrorTexture();
                }
            }

            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
            glBindFramebuffer(GL_FRAMEBUFFER, 0);
        }
    }

    std::vector<const ovrLayerHeader*> layerHeaders;
    // Declared at function scope so the header pointers pushed into layerHeaders stay
    // valid when ovr_SubmitFrame is called after this block.
    ovrLayerEyeFov ld;
    ovrLayerQuad ql;
    {
        // Do distortion rendering, Present and flush/sync
        ld.Header.Type = ovrLayerType_EyeFov;
        ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL.

        for (int eye = 0; eye < 2; ++eye)
        {
            const FBO& swapfbo = m_swapFBO[eye];
            const ovrTextureSwapChain& chain = g_textureSwapChain[eye];

            ld.ColorTexture[eye] = chain;

            const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale);
            ovrRecti vp = { 0, 0, downSize.w, downSize.h };
            const int texh = swapfbo.h;
            vp.Pos.y = (texh - vp.Size.h) / 2;

            ld.Viewport[eye] = vp;
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye] = m_eyePoses[eye];
            ld.SensorSampleTime = sensorSampleTime;
        }
        layerHeaders.push_back(&ld.Header);

        // Submit layers to HMD for display
        if (g_tweakbarQuad.m_showQuadInWorld)
        {
            ql.Header.Type = ovrLayerType_Quad;
            ql.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL.

            ql.ColorTexture = g_tweakbarQuad.m_swapChain;
            ovrRecti vp;
            vp.Pos.x = 0;
            vp.Pos.y = 0;
            vp.Size.w = 600; ///@todo
            vp.Size.h = 600; ///@todo
            ql.Viewport = vp;
            ql.QuadPoseCenter = g_tweakbarQuad.m_QuadPoseCenter;
            ql.QuadSize = { 1.f, 1.f }; ///@todo Pass in

            g_tweakbarQuad.SetHmdEyeRay(m_eyePoses[ovrEyeType::ovrEye_Left]); // Writes to m_layerQuad.QuadPoseCenter
            g_tweakbarQuad.DrawToQuad();
            layerHeaders.push_back(&ql.Header);
        }
    }

#if 0
    ovrViewScaleDesc viewScaleDesc;
    viewScaleDesc.HmdToEyeOffset[0] = m_eyeOffsets[0];
    viewScaleDesc.HmdToEyeOffset[1] = m_eyeOffsets[1];
    viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.f;
#endif

    const ovrResult result = ovr_SubmitFrame(g_session, g_frameIndex, nullptr, &layerHeaders[0], layerHeaders.size());
    if (result == ovrSuccess)
    {
        g_hmdVisible = true;
    }
    else if (result == ovrSuccess_NotVisible)
    {
        g_hmdVisible = false;
        ///@todo Enter a lower-power, polling "no focus/HMD not worn" mode
    }
    else if (result == ovrError_DisplayLost)
    {
        LOG_INFO("ovr_SubmitFrame returned ovrError_DisplayLost");
        g_hmdVisible = false;
        ///@todo Tear down textures and session and re-create
    }
    else
    {
        LOG_INFO("ovr_SubmitFrame returned %d", result);
        //g_hmdVisible = false;
    }

    // Handle OVR session events
    ovr_GetSessionStatus(g_session, &sessionStatus);
    if (sessionStatus.ShouldQuit)
    {
        glfwSetWindowShouldClose(g_pMirrorWindow, 1);
    }
    if (sessionStatus.ShouldRecenter)
    {
        ovr_RecenterTrackingOrigin(g_session);
    }

    // Blit mirror texture to monitor window
    if (g_hmdVisible)
    {
        glViewport(0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y);
        const FBO& srcFBO = m_mirrorFBO;
        glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO.id);
        glBlitFramebuffer(
            0, srcFBO.h, srcFBO.w, 0,
            0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y,
            GL_COLOR_BUFFER_BIT, GL_NEAREST);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    }
    else
    {
        displayMonitor();
    }
    ++g_frameIndex;

#ifdef USE_ANTTWEAKBAR
    if (g_tweakbarQuad.m_showQuadInWorld)
    {
        TwDraw();
    }
#endif
}
Example #12
// return true to retry later (e.g. after display lost)
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device lost failures cleanly
	ovrTexture     * mirrorTexture = nullptr;
	OculusTexture  * pEyeRenderTexture[2] = { nullptr, nullptr };
	DepthBuffer    * pEyeDepthBuffer[2] = { nullptr, nullptr };
    Scene          * roomScene = nullptr; 
    Camera         * mainCam = nullptr;
	D3D11_TEXTURE2D_DESC td = {};

	ovrHmd HMD;
	ovrGraphicsLuid luid;
	ovrResult result = ovr_Create(&HMD, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD);

	// -------------------------------------------------------------------
	// Add: create instances of the CL Eye camera capture class
	CLEyeCameraCapture* cam[2] = { NULL };

	// Query the number of connected cameras
	int numCams = CLEyeGetCameraCount();
	if (numCams == 0)
	{
		printf_s("No PS3Eye Camera detected\n");
		goto Done;
	}
	printf_s("Found %d cameras\n", numCams);

	for (int iCam = 0; iCam < numCams; iCam++)
	{
		char windowName[64];

		// Query unique camera uuid
		GUID guid = CLEyeGetCameraUUID(iCam);
		printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n",
			iCam + 1, guid.Data1, guid.Data2, guid.Data3,
			guid.Data4[0], guid.Data4[1], guid.Data4[2],
			guid.Data4[3], guid.Data4[4], guid.Data4[5],
			guid.Data4[6], guid.Data4[7]);
		sprintf_s(windowName, "Camera Window %d", iCam + 1);

		// Create camera capture object
		cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30);
		cam[iCam]->StartCapture();
	}
	// -------------------------------------------------------------------

	// Setup Device and Graphics
	// Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

	// Make the eye render buffers (caution if actual size < requested due to HW limits). 
	ovrRecti         eyeRenderViewport[2];

	for (int eye = 0; eye < 2; ++eye)
	{
		ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
		pEyeRenderTexture[eye] = new OculusTexture();
        if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h))
        {
            if (retryCreate) goto Done;
	        VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture.");
        }
		pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h);
		eyeRenderViewport[eye].Pos.x = 0;
		eyeRenderViewport[eye].Pos.y = 0;
		eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureSet)
        {
            if (retryCreate) goto Done;
            VALIDATE(false, "Failed to create texture.");
        }
	}

	// Create a mirror to see on the monitor.
	td.ArraySize = 1;
    td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
	td.Width = DIRECTX.WinSizeW;
	td.Height = DIRECTX.WinSizeH;
	td.Usage = D3D11_USAGE_DEFAULT;
	td.SampleDesc.Count = 1;
	td.MipLevels = 1;
    result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

	// Create the room model
    roomScene = new Scene(false);

	// Create camera
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

	// Setup VR components, filling out description
	ovrEyeRenderDesc eyeRenderDesc[2];
	eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
	eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    bool isVisible = true;

	DCB portConfig;
	portConfig.BaudRate = 115200;
	portConfig.Parity = EVENPARITY;

	g_seriPort.Start("\\\\.\\COM3", &portConfig);


	// Main loop
	while (DIRECTX.HandleMessages())
	{
		XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
		XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0),  mainCam->Rot);
		if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])	  mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
		if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
		if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
		if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);
		static float Yaw = 0;
		if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
		if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

		// Animate the cube
		static float cubeClock = 0;
		roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

		// Get both eye poses simultaneously, with IPD offset already included. 
		ovrPosef         EyeRenderPose[2];
		ovrVector3f      HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset,
			                                       eyeRenderDesc[1].HmdToEyeViewOffset };
        double frameTime = ovr_GetPredictedDisplayTime(HMD, 0);
        // Keeping sensorSampleTime as close to ovr_GetTrackingState as possible - fed into the layer
        double           sensorSampleTime = ovr_GetTimeInSeconds();
		ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue);
		ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose);

		// --------------------------------------------------------------------------
		// Add: Get Head Yaw Roll Pitch
		float hmdPitch = 0.0f;
		float hmdRoll = 0.0f;
		float hmdYaw = 0.0f;

		OVR::Posef HeadPose = hmdState.HeadPose.ThePose;
		HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll);

		SetPos(2, ServoRoll(hmdYaw));
		SetPos(3, ServoRoll(hmdPitch));

		// --------------------------------------------------------------------------


		// Render Scene to Eye Buffers
        if (isVisible)
        {
            for (int eye = 0; eye < 2; ++eye)
		    {
			    // Increment to use next texture, just before writing
			    pEyeRenderTexture[eye]->AdvanceToNextTexture();

			    // Clear and set up rendertarget
			    int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex;
			    DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]);
			    DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
				    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

			    //Get the pose information in XM format
			    XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
				                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
			    XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0);

			    // Get view and projection matrices for the Rift camera
			    XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
			    Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat,mainCam->Rot)));
			    XMMATRIX view = finalCam.GetViewMatrix();
			    ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded);
			    XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
				                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
				                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
				                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
			    XMMATRIX prod = XMMatrixMultiply(view, proj);
			    roomScene->Render(&prod, 1, 1, 1, 1, true);
		    }
        }

		// Initialize our single full screen Fov layer.
        ovrLayerEyeFov ld = {};
		ld.Header.Type = ovrLayerType_EyeFov;
		ld.Header.Flags = 0;

		for (int eye = 0; eye < 2; ++eye)
		{
			ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet;
			ld.Viewport[eye] = eyeRenderViewport[eye];
			ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
			ld.RenderPose[eye] = EyeRenderPose[eye];
            ld.SensorSampleTime = sensorSampleTime;
		}

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1);
        // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
        if (!OVR_SUCCESS(result))
            goto Done;

        isVisible = (result == ovrSuccess);

        // Render mirror
        ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture;
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture);
        DIRECTX.SwapChain->Present(0, 0);
	}

	// Release resources
Done:
    delete mainCam;
    delete roomScene;
	if (mirrorTexture) ovr_DestroyMirrorTexture(HMD, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye)
    {
	    delete pEyeRenderTexture[eye];
        delete pEyeDepthBuffer[eye];
    }
	DIRECTX.ReleaseDevice();
	ovr_Destroy(HMD);

	g_seriPort.End();

	for (int iCam = 0; iCam < numCams; iCam++)
	{
		cam[iCam]->StopCapture();
		delete cam[iCam];
	}

    // Retry on ovrError_DisplayLost
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
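
A hedged sketch of a caller that honours MainLoop's retry convention ("return true to retry later"); LibOVR initialization is shown, but window and D3D setup are assumed to happen elsewhere and this is not the sample's verbatim entry point.

    // Hedged driver sketch: keep re-running MainLoop while it asks to be retried
    // (e.g. after ovrError_DisplayLost). Requires <windows.h> for Sleep().
    ovrResult initResult = ovr_Initialize(nullptr);
    if (OVR_SUCCESS(initResult))
    {
        while (MainLoop(true))   // retryCreate = true: tolerate a lost display
            Sleep(10);           // brief pause before re-creating the session
        ovr_Shutdown();
    }
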
int OgreOculus::go(void)
{
	// Create Root object
	root = new Ogre::Root("plugin.cfg", "ogre.cfg");

	// OpenGL
    root->loadPlugin("RenderSystem_GL_d");
    root->setRenderSystem(root->getRenderSystemByName("OpenGL Rendering Subsystem"));

	// Initialize Root
	root->initialise(false);

	// Initialize Oculus
	ovrHmd hmd;
	ovrHmdDesc hmdDesc;
	ovrGraphicsLuid luid;
	ovr_Initialize(nullptr);
	if(ovr_Create(&hmd, &luid) != ovrSuccess)
		exit(-1);
	hmdDesc = ovr_GetHmdDesc(hmd);
	if(ovr_ConfigureTracking(hmd,
		ovrTrackingCap_Orientation |ovrTrackingCap_MagYawCorrection |ovrTrackingCap_Position,
		0) != ovrSuccess)
		exit(-2);

	// Turn off HUD
	ovr_SetInt(hmd, "PerfHudMode", ovrPerfHud_Off);

	// Create a window
	window = root->createRenderWindow("Ogre + Oculus = <3", hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2, false);

	// Create scene manager and cameras
	smgr = root->createSceneManager(Ogre::ST_GENERIC);

	// Load Ogre resource paths from config file
    Ogre::ConfigFile cf;
    cf.load("resources_d.cfg");

    // Go through all sections & settings in the file and add resources
    Ogre::ConfigFile::SectionIterator seci = cf.getSectionIterator();

    Ogre::String secName, typeName, archName;
    while (seci.hasMoreElements())
    {
        secName = seci.peekNextKey();
        Ogre::ConfigFile::SettingsMultiMap *settings = seci.getNext();
        Ogre::ConfigFile::SettingsMultiMap::iterator i;
        for (i = settings->begin(); i != settings->end(); ++i)
        {
            typeName = i->first;
            archName = i->second;

            Ogre::ResourceGroupManager::getSingleton().addResourceLocation(
                archName, typeName, secName);
        }
    }

	// Set resources
	Ogre::TextureManager::getSingleton().setDefaultNumMipmaps(5);
	Ogre::ResourceGroupManager::getSingleton().initialiseAllResourceGroups();

	// Create the model itself via OgreModel.cpp
	createOgreModel(smgr);

	// Create camera
	createCamera();

	// Set viewport and background color
	Ogre::Viewport* vp = window->addViewport(mCamera);
	vp->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); // Yellow

	// Set aspect ratio
	mCamera->setAspectRatio(
    Ogre::Real(vp->getActualWidth()) /
    Ogre::Real(vp->getActualHeight()));

	// Initialize glew
	if(glewInit() != GLEW_OK)
		exit(-3);

	// Get texture sizes
	ovrSizei texSizeL, texSizeR;
	texSizeL = ovr_GetFovTextureSize(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[left], 1);
	texSizeR = ovr_GetFovTextureSize(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[right], 1);

	// Calculate render buffer size
	ovrSizei bufferSize;
	bufferSize.w = texSizeL.w + texSizeR.w;
	bufferSize.h = max(texSizeL.h, texSizeR.h);

	// Create render texture set
	ovrSwapTextureSet* textureSet;
	if(ovr_CreateSwapTextureSetGL(hmd, GL_RGB, bufferSize.w, bufferSize.h, &textureSet) != ovrSuccess)
		exit(-4);

	// Create Ogre render texture
	Ogre::GLTextureManager* textureManager = static_cast<Ogre::GLTextureManager*>(Ogre::GLTextureManager::getSingletonPtr());
	Ogre::TexturePtr rtt_texture(textureManager->createManual("RttTex", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
		Ogre::TEX_TYPE_2D, bufferSize.w, bufferSize.h, 0, Ogre::PF_R8G8B8, Ogre::TU_RENDERTARGET));
	Ogre::RenderTexture* rttEyes = rtt_texture->getBuffer(0, 0)->getRenderTarget();
	Ogre::GLTexture* gltex = static_cast<Ogre::GLTexture*>(Ogre::GLTextureManager::getSingleton().getByName("RttTex").getPointer());
	GLuint renderTextureID = gltex->getGLID();

	// Put camera viewport on the ogre render texture
	Ogre::Viewport* vpts[nbEyes];
	vpts[left]=rttEyes->addViewport(cams[left], 0, 0, 0, 0.5f);
	vpts[right]=rttEyes->addViewport(cams[right], 1, 0.5f, 0, 0.5f);
	vpts[left]->setBackgroundColour(Ogre::ColourValue(34, 89, 0)); // Same background colour as the window viewport
	vpts[right]->setBackgroundColour(Ogre::ColourValue(34, 89, 0));

	ovrTexture* mirrorTexture;
	if(ovr_CreateMirrorTextureGL(hmd, GL_RGB, hmdDesc.Resolution.w, hmdDesc.Resolution.h, &mirrorTexture) != ovrSuccess)
		exit(-5);
	Ogre::TexturePtr mirror_texture(textureManager->createManual("MirrorTex", Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
		Ogre::TEX_TYPE_2D, hmdDesc.Resolution.w, hmdDesc.Resolution.h, 0, Ogre::PF_R8G8B8, Ogre::TU_RENDERTARGET));

	// Get GLIDs
	GLuint ogreMirrorTextureID = static_cast<Ogre::GLTexture*>(Ogre::GLTextureManager::getSingleton().getByName("MirrorTex").getPointer())->getGLID();
	GLuint oculusMirrorTextureID = ((ovrGLTexture*)mirrorTexture)->OGL.TexId;

	// Create EyeRenderDesc
	ovrEyeRenderDesc EyeRenderDesc[nbEyes];
	EyeRenderDesc[left] = ovr_GetRenderDesc(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[left]);
	EyeRenderDesc[right] = ovr_GetRenderDesc(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[right]);

	// Get offsets
	ovrVector3f offset[nbEyes];
	offset[left]=EyeRenderDesc[left].HmdToEyeViewOffset;
	offset[right]=EyeRenderDesc[right].HmdToEyeViewOffset;

	// Compositor layer
	ovrLayerEyeFov layer;
	layer.Header.Type = ovrLayerType_EyeFov;
	layer.Header.Flags = 0;
	layer.ColorTexture[left] = textureSet;
	layer.ColorTexture[right] = textureSet;
	layer.Fov[left] = EyeRenderDesc[left].Fov;
	layer.Fov[right] = EyeRenderDesc[right].Fov;
	layer.Viewport[left] = OVR::Recti(0, 0, bufferSize.w/2, bufferSize.h);
	layer.Viewport[right] = OVR::Recti(bufferSize.w/2, 0, bufferSize.w/2, bufferSize.h);

	// Get projection matrices
	for(size_t eyeIndex(0); eyeIndex < ovrEye_Count; eyeIndex++)
	{
		// Get the projection matrix
		OVR::Matrix4f proj = ovrMatrix4f_Projection(EyeRenderDesc[eyeIndex].Fov,
			static_cast<float>(0.01f),
			4000,
			true);

		// Convert it to Ogre matrix
		Ogre::Matrix4 OgreProj;
		for(size_t x(0); x < 4; x++)
			for(size_t y(0); y < 4; y++)
				OgreProj[x][y] = proj.M[x][y];

		// Set the matrix
		cams[eyeIndex]->setCustomProjectionMatrix(true, OgreProj);
	}

	// Variables for render loop
	bool render(true);
	ovrFrameTiming hmdFrameTiming;
	ovrTrackingState ts;
	OVR::Posef pose;
	ovrLayerHeader* layers;

	// Create event listener for handling user input
	createEventListener();

	//Run physics loop in a new thread
	std::map<Ogre::Entity*, Ogre::Vector3> positionRequests;
	std::map<Ogre::Entity*, std::string> animationRequests;
	std::map<Ogre::Entity*, std::vector<int>> rotationRequests;
	std::map<std::string, std::string> message;
	std::thread physicsThread(physicsLoop, smgr, &message, &positionRequests, &animationRequests, &rotationRequests);

	// Render loop
	while(render)
	{
		// Suspend physics loop and perform requested movement/rotations/animations
		if(positionRequests.size() > 0 || animationRequests.size() > 0 || rotationRequests.size() > 0){
			message.insert(std::pair<std::string, std::string>("", ""));
		
			for(auto const &request : positionRequests) {
				Ogre::Vector3 pos = request.second;
				Ogre::SceneNode* sceneNode = request.first->getParentSceneNode();
				sceneNode->setPosition(pos);
			}

			for(auto const &request : animationRequests) {
				request.first->getAnimationState(request.second)->addTime(0.1);
			}

			for(auto const &request : rotationRequests) {
				Ogre::SceneNode* sceneNode = request.first->getParentSceneNode();
				sceneNode->roll(Ogre::Degree(request.second[0]));
				sceneNode->pitch(Ogre::Degree(request.second[1]));
				sceneNode->yaw(Ogre::Degree(request.second[2]));
			}

			positionRequests.clear();
			animationRequests.clear();
			rotationRequests.clear();

			// Resume physics loop
			message.clear();
		}

		// Update Ogre window
		Ogre::WindowEventUtilities::messagePump();

		// Advance textureset index
		textureSet->CurrentIndex = (textureSet->CurrentIndex + 1) % textureSet->TextureCount;
		
		// Capture user input
		mKeyboard->capture();
		mMouse->capture();

		// Movement calculations
		mPlayerNode->translate(mDirection, Ogre::Node::TS_LOCAL);
		hmdFrameTiming = ovr_GetFrameTiming(hmd, 0);
		ts = ovr_GetTrackingState(hmd, hmdFrameTiming.DisplayMidpointSeconds);
		pose = ts.HeadPose.ThePose;
		ovr_CalcEyePoses(pose, offset, layer.RenderPose);
		oculusOrient = pose.Rotation;
		oculusPos = pose.Translation;
		mHeadNode->setOrientation(Ogre::Quaternion(oculusOrient.w, oculusOrient.x, oculusOrient.y, oculusOrient.z) * initialOculusOrientation.Inverse());
		
		// Apply head tracking
		mHeadNode->setPosition(headPositionTrackingSensitivity * Ogre::Vector3(oculusPos.x, oculusPos.y,oculusPos.z));
		
		// Update Ogre viewports
		root->_fireFrameRenderingQueued();
		vpts[left]->update();
		vpts[right]->update();

		// Copy the rendered image to the Oculus Swap Texture
		glCopyImageSubData(renderTextureID, GL_TEXTURE_2D, 0, 0, 0, 0,
		((ovrGLTexture*)(&textureSet->Textures[textureSet->CurrentIndex]))->OGL.TexId, GL_TEXTURE_2D, 0, 0, 0, 0,
		bufferSize.w,bufferSize.h, 1);
		layers = &layer.Header;

		// Submit new frame to the Oculus and update window
		ovr_SubmitFrame(hmd, 0, nullptr, &layers, 1);
		window->update();

		// Exit loop when window is closed
		if(window->isClosed()) render = false;
	}

	// Shut down Oculus
	ovr_Destroy(hmd);
	ovr_Shutdown();

	// Delete Ogre root and return
	delete root;
	return EXIT_SUCCESS;
}
Example #14
void OVRCameraFrustum::Recalculate(ovrHmd hmd)
{
    ovrTrackingState tState = ovr_GetTrackingState(hmd, 0.0f);
    ovrHmdDesc hmdDesc      = ovr_GetHmdDesc(hmd);
    ovrVector3f trackerPose = tState.CameraPose.Position;

    float trackerFar  = hmdDesc.CameraFrustumFarZInMeters;
    float trackerNear = hmdDesc.CameraFrustumNearZInMeters;
    float trackerHFov = hmdDesc.CameraFrustumHFovInRadians;
    float trackerVFov = hmdDesc.CameraFrustumVFovInRadians;

    float hScale = tanf(trackerHFov / 2.f);
    float vScale = tanf(trackerVFov / 2.f);

    // camera orientation quaternion
    OVR::Quatf trackerOrientationQuat(tState.CameraPose.Orientation.x,
                                      tState.CameraPose.Orientation.y,
                                      tState.CameraPose.Orientation.z,
                                      tState.CameraPose.Orientation.w);

    // orientation indicator vector running from camera pose to near plane
    OVR::Vector3f trackerOrientationVec(0.f, 0.f, trackerNear);

    // near plane vertex positions
    OVR::Vector3f nearV1(-hScale * trackerNear, vScale * trackerNear, trackerNear);
    OVR::Vector3f nearV2(-hScale * trackerNear, -vScale * trackerNear, trackerNear);
    OVR::Vector3f nearV3(hScale * trackerNear, -vScale * trackerNear, trackerNear);
    OVR::Vector3f nearV4(hScale * trackerNear, vScale * trackerNear, trackerNear);

    // far plane vertex positions
    OVR::Vector3f farV1(-hScale * trackerFar, vScale * trackerFar, trackerFar);
    OVR::Vector3f farV2(-hScale * trackerFar, -vScale * trackerFar, trackerFar);
    OVR::Vector3f farV3(hScale * trackerFar, -vScale * trackerFar, trackerFar);
    OVR::Vector3f farV4(hScale * trackerFar, vScale * trackerFar, trackerFar);

    // reorient all vectors by current tracker camera orientation
    trackerOrientationVec = trackerOrientationQuat.Rotate(trackerOrientationVec);

    nearV1 = trackerOrientationQuat.Rotate(nearV1);
    nearV2 = trackerOrientationQuat.Rotate(nearV2);
    nearV3 = trackerOrientationQuat.Rotate(nearV3);
    nearV4 = trackerOrientationQuat.Rotate(nearV4);

    farV1 = trackerOrientationQuat.Rotate(farV1);
    farV2 = trackerOrientationQuat.Rotate(farV2);
    farV3 = trackerOrientationQuat.Rotate(farV3);
    farV4 = trackerOrientationQuat.Rotate(farV4);

    OVR::Vector3f orientationVector(trackerPose.x + trackerOrientationVec.x,
        trackerPose.y + trackerOrientationVec.y,
        trackerPose.z + trackerOrientationVec.z);
    // tracker camera frustum
    const GLfloat frustumVertexData[] = {
        trackerPose.x, trackerPose.y, trackerPose.z,
        trackerPose.x + farV1.x, trackerPose.y + farV1.y, trackerPose.z + farV1.z,
        trackerPose.x, trackerPose.y, trackerPose.z,
        trackerPose.x + farV2.x, trackerPose.y + farV2.y, trackerPose.z + farV2.z,
        trackerPose.x, trackerPose.y, trackerPose.z,
        trackerPose.x + farV3.x, trackerPose.y + farV3.y, trackerPose.z + farV3.z,
        trackerPose.x, trackerPose.y, trackerPose.z,
        trackerPose.x + farV4.x, trackerPose.y + farV4.y, trackerPose.z + farV4.z,
        // orientation vector (trackerPose to near plane)
        trackerPose.x, trackerPose.y, trackerPose.z,
        orientationVector.x, orientationVector.y, orientationVector.z
    };

    // near plane of the tracking camera
    const GLfloat nearPlaneVertexData[] = {
        trackerPose.x + nearV1.x, trackerPose.y + nearV1.y, trackerPose.z + nearV1.z,
        trackerPose.x + nearV2.x, trackerPose.y + nearV2.y, trackerPose.z + nearV2.z,
        trackerPose.x + nearV3.x, trackerPose.y + nearV3.y, trackerPose.z + nearV3.z,
        trackerPose.x + nearV4.x, trackerPose.y + nearV4.y, trackerPose.z + nearV4.z
    };

    // far plane of the tracking camera
    const GLfloat farPlaneVertexData[] = {
        trackerPose.x + farV1.x, trackerPose.y + farV1.y, trackerPose.z + farV1.z,
        trackerPose.x + farV2.x, trackerPose.y + farV2.y, trackerPose.z + farV2.z,
        trackerPose.x + farV3.x, trackerPose.y + farV3.y, trackerPose.z + farV3.z,
        trackerPose.x + farV4.x, trackerPose.y + farV4.y, trackerPose.z + farV4.z
    };

    if (glIsBuffer(m_vertexBuffers[0]))
        glDeleteBuffers(3, m_vertexBuffers);

    if (glIsVertexArray(m_vertexArray))
        glDeleteVertexArrays(1, &m_vertexArray);

    // create line VAO
    glGenVertexArrays(1, &m_vertexArray);
    glGenBuffers(3, m_vertexBuffers);

    glBindVertexArray(m_vertexArray);

    // Get a handle for our buffers (VBO)
    glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffers[0]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(frustumVertexData), frustumVertexData, GL_STATIC_DRAW);

    glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffers[1]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(nearPlaneVertexData), nearPlaneVertexData, GL_STATIC_DRAW);

    glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffers[2]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(farPlaneVertexData), farPlaneVertexData, GL_STATIC_DRAW);
}
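
A hedged sketch of how the three buffers built above could be drawn (generic attribute location 0 and the line primitive counts are assumptions; this is not the class's actual render routine):

    // Hedged sketch: draw the frustum edges and the near/far plane outlines built in Recalculate().
    glBindVertexArray(m_vertexArray);
    glEnableVertexAttribArray(0);

    glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffers[0]);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
    glDrawArrays(GL_LINES, 0, 10);        // 4 frustum edges + orientation vector, 2 vertices each

    glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffers[1]);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
    glDrawArrays(GL_LINE_LOOP, 0, 4);     // near plane outline

    glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffers[2]);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
    glDrawArrays(GL_LINE_LOOP, 0, 4);     // far plane outline

    glDisableVertexAttribArray(0);
    glBindVertexArray(0);
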
bool OculusVRSensorDevice::process(U32 deviceType, bool generateRotAsAngAxis, bool generateRotAsEuler, bool generateRotationAsAxisEvents, bool generatePositionEvents, F32 maxAxisRadius, bool generateRawSensor)
{
   if(!mIsValid)
      return false;

   // Grab current state
   ovrTrackingState ts = ovr_GetTrackingState(mDevice, ovr_GetTimeInSeconds(), ovrTrue);
   mLastStatus = ts.StatusFlags;

   // Store the current data from the sensor and compare with previous data
   U32 diff;
   OculusVRSensorData* currentBuffer = (mPrevData == mDataBuffer[0]) ? mDataBuffer[1] : mDataBuffer[0];
   currentBuffer->setData(ts, maxAxisRadius);
   diff = mPrevData->compare(currentBuffer, generateRawSensor);

   // Update the previous data pointer.  We do this here in case someone calls our
   // console functions during one of the input events below.
   mPrevData = currentBuffer;

   // Rotation event
   if(diff & OculusVRSensorData::DIFF_ROT)
   {
      if(generateRotAsAngAxis)
      {
         AngAxisF axisAA(currentBuffer->mRotQuat);
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_ROT, OVR_SENSORROT[mActionCodeIndex], SI_MOVE, axisAA);
      }

      if(generateRotAsEuler)
      {
         // Convert angles to degrees
         VectorF angles;
         for(U32 i=0; i<3; ++i)  
         {
            angles[i] = mRadToDeg(currentBuffer->mRotEuler[i]);
         }
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_POS, OVR_SENSORROTANG[mActionCodeIndex], SI_MOVE, angles);
      }
   }

   // Rotation as axis event
   if(generateRotationAsAxisEvents && diff & OculusVRSensorData::DIFF_ROTAXIS)
   {
      if(diff & OculusVRSensorData::DIFF_ROTAXISX)
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_AXIS, OVR_SENSORROTAXISX[mActionCodeIndex], SI_MOVE, currentBuffer->mRotAxis.x);
      if(diff & OculusVRSensorData::DIFF_ROTAXISY)
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_AXIS, OVR_SENSORROTAXISY[mActionCodeIndex], SI_MOVE, currentBuffer->mRotAxis.y);
   }

   if (generatePositionEvents && diff & OculusVRSensorData::DIFF_POS)
   {
      INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_AXIS, OVR_SENSORROTAXISX[mActionCodeIndex], SI_MOVE, currentBuffer->mPosition);
   }

   // Raw sensor event
   if(generateRawSensor && diff & OculusVRSensorData::DIFF_RAW)
   {
      if(diff & OculusVRSensorData::DIFF_ACCEL)
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_POS, OVR_SENSORACCELERATION[mActionCodeIndex], SI_MOVE, currentBuffer->mAcceleration);

      if(diff & OculusVRSensorData::DIFF_ANGVEL)
      {
         // Convert angles to degrees
         VectorF angles;
         for(U32 i=0; i<3; ++i)
         {
            angles[i] = mRadToDeg(currentBuffer->mAngVelocity[i]);
         }
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_POS, OVR_SENSORANGVEL[mActionCodeIndex], SI_MOVE, angles);
      }

      if(diff & OculusVRSensorData::DIFF_MAG)
         INPUTMGR->buildInputEvent(deviceType, OculusVRConstants::DefaultOVRBase, SI_POS, OVR_SENSORMAGNETOMETER[mActionCodeIndex], SI_MOVE, currentBuffer->mMagnetometer);
   }

   if (diff & OculusVRSensorData::DIFF_STATUS)
   {
      if (Con::isFunction("onOculusStatusUpdate"))
      {
         Con::executef("onOculusStatusUpdate", ts.StatusFlags);
      }
   }

   return true;
}
Example #16
void VR::nextTracking()
{
#if defined(_DEBUG)
	// make sure we are only called once per frame:
	static vector<bool> called;
	if (xapp->getFramenum() < 50000) {
		size_t framenum = (size_t) xapp->getFramenum();
		assert(called.size() <= framenum);
		called.push_back(true);
		assert(called.size() == framenum+1);
	}
#endif

	// Get both eye poses simultaneously, with IPD offset already included. 
	ovrVector3f useHmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset };
	//ovrPosef temp_EyeRenderPose[2];
	double displayMidpointSeconds = ovr_GetPredictedDisplayTime(session, 0);
	ovrTrackingState ts = ovr_GetTrackingState(session, displayMidpointSeconds, false);
	ovr_CalcEyePoses(ts.HeadPose.ThePose, useHmdToEyeViewOffset, layer.RenderPose);
	ovrResult result;
	ovrBoundaryTestResult btest;
	ovrBool visible;
	result = ovr_GetBoundaryVisible(session, &visible);
	if (0) {
		Log("visible = " << (visible == ovrTrue) << endl);

		result = ovr_TestBoundary(session, ovrTrackedDevice_HMD, ovrBoundary_Outer, &btest);
		if (OVR_SUCCESS(result)) {
			//Log("boundary success");
			if (result == ovrSuccess) Log("success" << endl);
			if (result == ovrSuccess_BoundaryInvalid) Log("success boundary invalid" << endl);
			if (result == ovrSuccess_DeviceUnavailable) Log("success device unavailable" << endl);
		}
	}
	layer.Fov[0] = eyeRenderDesc[0].Fov;
	layer.Fov[1] = eyeRenderDesc[1].Fov;

	// Render the two undistorted eye views into their render buffers.  
	for (int eye = 0; eye < 2; eye++)
	{
		ovrPosef    * useEyePose = &EyeRenderPose[eye];
		float       * useYaw = &YawAtRender[eye];
		float Yaw = XM_PI;
		*useEyePose = layer.RenderPose[eye];
		*useYaw = Yaw;

		// Get view and projection matrices (note near Z to reduce eye strain)
		Matrix4f rollPitchYaw = Matrix4f::RotationY(Yaw);
		Matrix4f finalRollPitchYaw = rollPitchYaw * Matrix4f(useEyePose->Orientation);
		// fix finalRollPitchYaw for LH coordinate system:
		Matrix4f s = Matrix4f::Scaling(1.0f, -1.0f, -1.0f);  // 1 1 -1
		finalRollPitchYaw = s * finalRollPitchYaw * s;

		Vector3f finalUp = finalRollPitchYaw.Transform(Vector3f(0, 1, 0));
		Vector3f finalForward = finalRollPitchYaw.Transform(Vector3f(0, 0, -1));//0 0 1
		Vector3f Posf;
		Posf.x = xapp->camera.pos.x;
		Posf.y = xapp->camera.pos.y;
		Posf.z = xapp->camera.pos.z;
		Vector3f diff = rollPitchYaw.Transform(useEyePose->Position);
		//diff /= 10.0f;
		//diff.x = 0.0f;
		//diff.y = 0.0f;
		//diff.z = 0.0f;
		Vector3f shiftedEyePos;
		shiftedEyePos.x = Posf.x - diff.x;
		shiftedEyePos.y = Posf.y + diff.y;
		shiftedEyePos.z = Posf.z + diff.z;
		xapp->camera.look.x = finalForward.x;
		xapp->camera.look.y = finalForward.y;
		xapp->camera.look.z = finalForward.z;

		Matrix4f view = Matrix4f::LookAtLH(shiftedEyePos, shiftedEyePos + finalForward, finalUp);
		Matrix4f projO = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 2000.0f,  ovrProjection_LeftHanded);
		Matrix4fToXM(this->viewOVR[eye], view.Transposed());
		Matrix4fToXM(this->projOVR[eye], projO.Transposed());
	}
}
Example #17
void vx_ovr_namespace_::OVRHMDHandleWithDevice::getTrackingState()
{
	ovrTrackingState ts = ovr_GetTrackingState(session_, ovr_GetTimeInSeconds(), ovrTrue);
	ovr_CalcEyePoses(ts.HeadPose.ThePose, viewOffset_, eyeRenderPosef_);
}
/**
* Render the Virtual Cinema Theatre.
***/
void* OculusTracker::Provoke(void* pThis, int eD3D, int eD3DInterface, int eD3DMethod, DWORD dwNumberConnected, int& nProvokerIndex)
{
	// update game timer
	m_cGameTimer.Tick();

	static UINT unFrameSkip = 200;
	if (unFrameSkip > 0)
	{
		unFrameSkip--;
		return nullptr;
	}

	// #define _DEBUG_OTR
#ifdef _DEBUG_OTR
	{ wchar_t buf[128]; wsprintf(buf, L"[OTR] ifc %u mtd %u", eD3DInterface, eD3DMethod); OutputDebugString(buf); }
#endif

	// save ini file ?
	if (m_nIniFrameCount)
	{
		if (m_nIniFrameCount == 1)
			SaveIniSettings();
		m_nIniFrameCount--;
	}

	// main menu update ?
	if (m_sMenu.bOnChanged)
	{
		// set back event bool, set ini file frame count
		m_sMenu.bOnChanged = false;
		m_nIniFrameCount = 300;

		// loop through entries
		for (size_t nIx = 0; nIx < m_sMenu.asEntries.size(); nIx++)
		{
			// entry index changed ?
			if (m_sMenu.asEntries[nIx].bOnChanged)
			{
				m_sMenu.asEntries[nIx].bOnChanged = false;

				// touch entries ?
				if (nIx < 25)
				{
					// set new vk code by string
					m_aaunKeys[1][nIx] = GetVkCodeByString(m_sMenu.asEntries[nIx].astrValueEnumeration[m_sMenu.asEntries[nIx].unValue]);
				}
			}
		}
	}

	if (m_hSession)
	{
#pragma region controller
		// controller indices
		static const uint32_t s_unIndexRemote = 0;
		static const uint32_t s_unIndexTouch = 1;
		static const uint32_t s_unIndexXBox = 2;

		// get all connected input states
		ovrInputState sInputState[3] = {};
		unsigned int unControllersConnected = ovr_GetConnectedControllerTypes(m_hSession);
#pragma region Remote
		if (unControllersConnected & ovrControllerType_Remote)
		{
			ovr_GetInputState(m_hSession, ovrControllerType_Remote, &sInputState[s_unIndexRemote]);

			// handle all remote buttons except Oculus private ones
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Up)
				m_sMenu.bOnUp = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Down)
				m_sMenu.bOnDown = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Left)
				m_sMenu.bOnLeft = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Right)
				m_sMenu.bOnRight = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Enter)
				m_sMenu.bOnAccept = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Back)
				m_sMenu.bOnBack = true;
		}
#pragma endregion
#pragma region touch
		if (unControllersConnected & ovrControllerType_Touch)
		{
			// get input state
			ovr_GetInputState(m_hSession, ovrControllerType_Touch, &sInputState[s_unIndexTouch]);

			// loop through controller buttons
			for (UINT unButtonIx = 0; unButtonIx < unButtonNo; unButtonIx++)
			{
				// cast keyboard event
				if (sInputState[s_unIndexTouch].Buttons & aunButtonIds[unButtonIx])
				{
					if (!m_aabKeys[s_unIndexTouch][unButtonIx])
						MapButtonDown(s_unIndexTouch, unButtonIx);
				}
				else
				if (m_aabKeys[s_unIndexTouch][unButtonIx])
					MapButtonUp(s_unIndexTouch, unButtonIx);
			}
		}
#pragma endregion
		if (unControllersConnected & ovrControllerType_XBox)
			ovr_GetInputState(m_hSession, ovrControllerType_XBox, &sInputState[s_unIndexXBox]);



#pragma endregion
#pragma region hmd
		/*// Start the sensor which informs of the Rift's pose and motion   .... obsolete for SDK 1.3.x ??
		ovr_ConfigureTracking(m_hSession, ovrTrackingCap_Orientation | ovrTrackingCap_MagYawCorrection |
		ovrTrackingCap_Position, 0);*/

		// get the current tracking state
		ovrTrackingState sTrackingState = ovr_GetTrackingState(m_hSession, ovr_GetTimeInSeconds(), false);

		if (TRUE)//(sTrackingState.StatusFlags & (ovrStatus_OrientationTracked | ovrStatus_PositionTracked))
		{
			// get pose
			ovrPoseStatef sPoseState = sTrackingState.HeadPose;
			m_sPose = sPoseState.ThePose;
			m_sOrientation.x = m_sPose.Orientation.x;
			m_sOrientation.y = m_sPose.Orientation.y;
			m_sOrientation.z = m_sPose.Orientation.z;
			m_sOrientation.w = m_sPose.Orientation.w;

			// backup old euler angles and velocity
			float fEulerOld[3];
			float fEulerVelocityOld[3];
			memcpy(&fEulerOld[0], &m_fEuler[0], sizeof(float)* 3);
			memcpy(&fEulerVelocityOld[0], &m_fEulerVelocity[0], sizeof(float)* 3);

			// predicted euler angles ? for Oculus, due to ATW, we do not predict the euler angles
			if (FALSE)
			{
				// get angles
				m_sOrientation.GetEulerAngles<Axis::Axis_Y, Axis::Axis_X, Axis::Axis_Z, RotateDirection::Rotate_CW, HandedSystem::Handed_R >(&m_fEuler[1], &m_fEuler[0], &m_fEuler[2]);

				// quick fix here...
				m_fEuler[1] *= -1.0f;
				m_fEuler[0] *= -1.0f;
				m_fEuler[2] *= -1.0f;

				// get euler velocity + acceleration
				float fEulerAcceleration[3];
				for (UINT unI = 0; unI < 3; unI++)
				{
					// get the velocity
					m_fEulerVelocity[unI] = (m_fEuler[unI] - fEulerOld[unI]) / (float)m_cGameTimer.DeltaTime();

					// get the acceleration
					fEulerAcceleration[unI] = (m_fEulerVelocity[unI] - fEulerVelocityOld[unI]) / (float)m_cGameTimer.DeltaTime();
				}

				// get predicted euler
				for (UINT unI = 0; unI < 3; unI++)
				{
					// compute predicted euler
					m_fEulerPredicted[unI] = (0.5f * fEulerAcceleration[unI] * ((float)m_cGameTimer.DeltaTime() * (float)m_cGameTimer.DeltaTime())) + (m_fEulerVelocity[unI] * (float)m_cGameTimer.DeltaTime()) + m_fEuler[unI];
				}
			}
			else
			{
				// get angles
				m_sOrientation.GetEulerAngles<Axis::Axis_Y, Axis::Axis_X, Axis::Axis_Z, RotateDirection::Rotate_CW, HandedSystem::Handed_R >(&m_fEulerPredicted[1], &m_fEulerPredicted[0], &m_fEulerPredicted[2]);

				// quick fix here...
				m_fEulerPredicted[1] *= -1.0f;
				m_fEulerPredicted[0] *= -1.0f;
				m_fEulerPredicted[2] *= -1.0f;
			}

			// set the drawing update to true
			m_bControlUpdate = true;

			// set position
			m_afPosition[0] = (float)-m_sPose.Position.x - m_afPositionOrigin[0];
			m_afPosition[1] = (float)-m_sPose.Position.y - m_afPositionOrigin[1];
			m_afPosition[2] = (float)m_sPose.Position.z + m_afPositionOrigin[2];

			// get eye render pose and other fields
			ovrEyeRenderDesc asEyeRenderDesc[2];
			asEyeRenderDesc[0] = ovr_GetRenderDesc(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0]);
			asEyeRenderDesc[1] = ovr_GetRenderDesc(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1]);
			ovrPosef asHmdToEyePose[2] = { asEyeRenderDesc[0].HmdToEyePose,asEyeRenderDesc[1].HmdToEyePose };
			//ovrVector3f      asHmdToEyeViewOffset[2] = { asEyeRenderDesc[0].HmdToEyePose, asEyeRenderDesc[1].HmdToEyePose };
			ovrPosef         asEyeRenderPose[2];
			static long long s_frameIndex = 0;
			static double s_sensorSampleTime = 0.0;    // sensorSampleTime is fed into the layer later
			ovr_GetEyePoses(m_hSession, s_frameIndex, ovrTrue, asHmdToEyePose, asEyeRenderPose, &s_sensorSampleTime);
			// ovr_CalcEyePoses(sTrackingState.HeadPose.ThePose, asHmdToEyePose, asEyeRenderPose);

			// create rotation matrix from euler angles
			D3DXMATRIX sRotation;
			D3DXMATRIX sPitch, sYaw, sRoll;
			D3DXMatrixRotationX(&sPitch, m_fEulerPredicted[0]);
			D3DXMatrixRotationY(&sYaw, m_fEulerPredicted[1]);
			D3DXMatrixRotationZ(&sRoll, -m_fEulerPredicted[2]);
			sRotation = sYaw * sPitch * sRoll;

			// create per eye view matrix from rotation and position
			D3DXMATRIX sView[2];
			for (UINT unEye = 0; unEye < 2; unEye++)
			{
				D3DXMATRIX sTranslation;
				D3DXMatrixTranslation(&sTranslation, (float)-asEyeRenderPose[unEye].Position.x - m_afPositionOrigin[0], (float)-asEyeRenderPose[unEye].Position.y - m_afPositionOrigin[1], (float)asEyeRenderPose[unEye].Position.z + m_afPositionOrigin[2]);
				sView[unEye] = sTranslation * sRotation;
			}

			// create head pose view matrix
			D3DXMATRIX sTranslation;
			D3DXMatrixTranslation(&sTranslation, (float)-sTrackingState.HeadPose.ThePose.Position.x - m_afPositionOrigin[0], (float)-sTrackingState.HeadPose.ThePose.Position.y - m_afPositionOrigin[1], (float)sTrackingState.HeadPose.ThePose.Position.z + m_afPositionOrigin[2]);
			m_sView = sTranslation * sRotation;

			// create inverse view matrix
			D3DXMATRIX sVInv = {};
			D3DXMatrixInverse(&sVInv, nullptr, &m_sView);

			// get projection matrices left/right
			D3DXMATRIX asToEye[2];
			D3DXMATRIX asProjection[2];
			for (UINT unEye = 0; unEye < 2; unEye++)
			{
				// get ovr projection
				ovrMatrix4f sProj = ovrMatrix4f_Projection(m_sHMDDesc.DefaultEyeFov[unEye], 0.01f, 30.0f, ovrProjection_LeftHanded);

				// create dx projection
				asProjection[unEye] = D3DXMATRIX(&sProj.M[0][0]);
				D3DXMatrixTranspose(&asProjection[unEye], &asProjection[unEye]);

				// create eventual projection using inverse matrix of the head pose view matrix
				m_asProjection[unEye] = sVInv * sView[unEye] * asProjection[unEye];
			}
		}
#pragma endregion
	}
	else
	{
		// Initialize LibOVR, and the Rift... then create hmd handle
		ovrResult result = ovr_Initialize(nullptr);
		if (!OVR_SUCCESS(result))
		{
			OutputDebugString(L"[OVR] Failed to initialize libOVR.");
			return nullptr;
		}

		result = ovr_Create(&m_hSession, &m_sLuid);
		if (!OVR_SUCCESS(result))
		{
			OutputDebugString(L"[OVR] Failed to retrieve HMD handle.");
			return nullptr;
		}
		else
			OutputDebugString(L"[OVR] HMD handle initialized !");

		if (m_hSession)
		{
			// get the description and set pointers
			m_sHMDDesc = ovr_GetHmdDesc(m_hSession);

			// Configure Stereo settings.
			ovrSizei sRecommenedTex0Size = ovr_GetFovTextureSize(m_hSession, ovrEye_Left,
				m_sHMDDesc.DefaultEyeFov[0], 1.0f);
			ovrSizei sRecommenedTex1Size = ovr_GetFovTextureSize(m_hSession, ovrEye_Right,
				m_sHMDDesc.DefaultEyeFov[1], 1.0f);

			ovrSizei sTextureSize;
			sTextureSize.w = max(sRecommenedTex0Size.w, sRecommenedTex1Size.w);
			sTextureSize.h = max(sRecommenedTex0Size.h, sRecommenedTex1Size.h);
			m_unRenderTextureWidth = (UINT)sTextureSize.w;
			m_unRenderTextureHeight = (UINT)sTextureSize.h;

			// get view offset
			ovrEyeRenderDesc asEyeRenderDesc[2];
			asEyeRenderDesc[0] = ovr_GetRenderDesc(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0]);
			asEyeRenderDesc[1] = ovr_GetRenderDesc(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1]);
			ovrVector3f asViewOffset[2] = { asEyeRenderDesc[0].HmdToEyePose.Position, asEyeRenderDesc[1].HmdToEyePose.Position };

			// get projection matrices left/right
			D3DXMATRIX asToEye[2];
			D3DXMATRIX asProjection[2];
			for (UINT unEye = 0; unEye < 2; unEye++)
			{
				// get ovr projection
				ovrMatrix4f sProj = ovrMatrix4f_Projection(m_sHMDDesc.DefaultEyeFov[unEye], 0.01f, 30.0f, ovrProjection_LeftHanded);

				// create dx projection
				asProjection[unEye] = D3DXMATRIX(&sProj.M[0][0]);
				D3DXMatrixTranspose(&asProjection[unEye], &asProjection[unEye]);

				// create view offset translation matrix
				D3DXMatrixTranslation(&asToEye[unEye], -asViewOffset[unEye].x, -asViewOffset[unEye].y, -asViewOffset[unEye].z);

				// create eventual projection
				m_asProjection[unEye] = asToEye[unEye] * asProjection[unEye];
			}
		}
	}

	return nullptr;
}
Example #19
//----------------------------------------------------------------------
void Tracker::Draw(ovrSession Session, RenderDevice* pRender, Player ThePlayer, ovrTrackingOrigin TrackingOriginType,
	               bool Sitting, float ExtraSittingAltitude, Matrix4f * /*ViewFromWorld*/, int eye, ovrPosef * EyeRenderPose)
{
    OVR_UNUSED2(ExtraSittingAltitude, Sitting);

	// Don't render if not ready
	if (!TrackerHeadModel) return;

	// Initial rendering setup
	pRender->SetDepthMode(true, true);
	pRender->SetCullMode(OVR::Render::D3D11::RenderDevice::Cull_Off);

	// Draw in local frame of reference, so get view matrix
	Quatf eyeRot = EyeRenderPose[eye].Orientation;
	Vector3f up = eyeRot.Rotate(UpVector);
	Vector3f forward = eyeRot.Rotate(ForwardVector);
	Vector3f viewPos = EyeRenderPose[eye].Position;
	Matrix4f localViewMat = Matrix4f::LookAtRH(viewPos, viewPos + forward, up);

	// Get some useful values about the situation
	Vector3f          headWorldPos  = ThePlayer.GetHeadPosition(TrackingOriginType);
	ovrTrackerPose    trackerPose   = ovr_GetTrackerPose(Session, 0);
	Vector3f          centreEyePos  = ((Vector3f)(EyeRenderPose[0].Position) + (Vector3f)(EyeRenderPose[1].Position))*0.5f;
	double            ftiming       = ovr_GetPredictedDisplayTime(Session, 0);
	ovrTrackingState  trackingState = ovr_GetTrackingState(Session, ftiming, ovrTrue);
	bool              tracked       = trackingState.StatusFlags & ovrStatus_PositionTracked ? true : false;

	// Find altitude of stand.
    // If we are at floor level, display the tracker stand on the physical floor.
    // If we are using the eye-level coordinate system, just render the standard height of the stalk.
    float altitudeOfFloorInLocalSpace;
    if (TrackingOriginType == ovrTrackingOrigin_FloorLevel)
        altitudeOfFloorInLocalSpace = 0.01f;
    else
        altitudeOfFloorInLocalSpace = trackerPose.Pose.Position.y - 0.22f;  //0.18f;

	Vector3f localStandPos = Vector3f(trackerPose.Pose.Position.x, altitudeOfFloorInLocalSpace,
                                      trackerPose.Pose.Position.z);

	// Set position of tracker models according to pose.
	TrackerHeadModel->SetPosition(trackerPose.Pose.Position);
	TrackerHeadModel->SetOrientation(trackerPose.Pose.Orientation);
	
    // We scale the stalk so that it has correct physical height.
    Matrix4f stalkScale = Matrix4f::Scaling(1.0f, trackerPose.Pose.Position.y - altitudeOfFloorInLocalSpace - 0.0135f, 1.0f);
    TrackerStalkModel->SetMatrix(Matrix4f::Translation(Vector3f(trackerPose.Pose.Position) - Vector3f(0,0.0135f,0)) * stalkScale *
                                 Matrix4f(TrackerStalkModel->GetOrientation()));
	
    TrackerStandModel->SetPosition(localStandPos);
	TrackerConeModel->SetPosition(trackerPose.Pose.Position);
	TrackerConeModel->SetOrientation(trackerPose.Pose.Orientation);
	TrackerLinesModel->SetPosition(trackerPose.Pose.Position);
	TrackerLinesModel->SetOrientation(trackerPose.Pose.Orientation);


    if (trackerLinesAlwaysVisible)
        pRender->SetDepthMode(false, true);

	// Set rendering tint proportional to proximity, and red if not tracked. 
	float dist = DistToBoundary(centreEyePos, trackerPose.Pose, true);    
	 //OVR_DEBUG_LOG(("Dist = %0.3f\n", dist));
    
    // This defines a color ramp at specified distances from the edge.
    // Display starting at 0.4 - 0.2 meter [alpha 0->1]
    // Turn to yellow after [0.2]
    float       distThreshods[4]   = { 0.0f, 0.1f, 0.2f, 0.35f };
    Vector4f    thresholdColors[4] = {
        Vector4f(1.0f, 0.3f, 0.0f, 1.0f),   // Yellow-red
        Vector4f(1.0f, 1.0f, 0.0f, 0.8f),   // Yellow
        Vector4f(1.0f, 1.0f, 1.0f, 0.6f),   // White
        Vector4f(1.0f, 1.0f, 1.0f, 0.0f)    // White-transparent
    };

    // Assign tint based on the lookup table
    Vector4f globalTint = Vector4f(1, 1, 1, 0);

    int distSearch = 0;
    if (dist <= 0.0f)
        dist = 0.001f;
    for (; distSearch < sizeof(distThreshods) / sizeof(distThreshods[0]) - 1; distSearch++)
    {
        if (dist < distThreshods[distSearch+1])
        {
            float startT = distThreshods[distSearch];
            float endT   = distThreshods[distSearch+1];
            float factor = (dist - startT) / (endT - startT);

            globalTint = thresholdColors[distSearch] * (1.0f - factor) +
                         thresholdColors[distSearch + 1] * factor;
            break;
        }
    }
    
    if (!tracked)
        globalTint = Vector4f(1, 0, 0, 1);
    
    pRender->SetGlobalTint(globalTint);

    if (minimumAlphaOfTracker > globalTint.w)
        globalTint.w = minimumAlphaOfTracker;

    // We draw twice here: once with Z testing on to give a bright image,
    // and once with Z testing off to give a dim outline where the models are occluded.

    // Solid background
    if (globalTint.w > 0.01)
    {
        pRender->SetDepthMode(true, true);

        // Draw the tracker representation
        LOCAL_RenderModelWithAlpha(pRender, TrackerStandModel, localViewMat);
        LOCAL_RenderModelWithAlpha(pRender, TrackerStalkModel, localViewMat);
        LOCAL_RenderModelWithAlpha(pRender, TrackerHeadModel, localViewMat);
        LOCAL_RenderModelWithAlpha(pRender, TrackerLinesModel, localViewMat);
        if (drawWalls)
            LOCAL_RenderModelWithAlpha(pRender, TrackerConeModel, localViewMat);
    }

    
    if (globalTint.w > 0.01f)
        globalTint.w = 0.01f;    
    pRender->SetGlobalTint(globalTint);
    pRender->SetDepthMode(false, true);
    LOCAL_RenderModelWithAlpha(pRender, TrackerStandModel, localViewMat);
    LOCAL_RenderModelWithAlpha(pRender, TrackerStalkModel, localViewMat);
    LOCAL_RenderModelWithAlpha(pRender, TrackerHeadModel, localViewMat);
    LOCAL_RenderModelWithAlpha(pRender, TrackerLinesModel, localViewMat);
    if (drawWalls)
        LOCAL_RenderModelWithAlpha(pRender, TrackerConeModel, localViewMat);

	// Revert to rendering defaults
	pRender->SetGlobalTint(Vector4f(1, 1, 1, 1));
	pRender->SetCullMode(RenderDevice::Cull_Back);
	pRender->SetDepthMode(true, true);
}
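
A worked example of the distance-to-tint ramp in Tracker::Draw above (using the distThreshods and thresholdColors tables as defined; this is just the arithmetic, not code from the sample): for dist = 0.15 m the loop stops at the [0.1, 0.2] segment, so factor = (0.15 - 0.1) / (0.2 - 0.1) = 0.5 and globalTint = 0.5 * (1, 1, 0, 0.8) + 0.5 * (1, 1, 1, 0.6) = (1, 1, 0.5, 0.7), i.e. a half-faded pale yellow that shifts toward the opaque yellow-red entry as the eye position approaches the boundary.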