Example #1
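Samples the sensor state at one eye's predicted render time, records the IMU sample time once for latency reporting, and returns the predicted pose.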
Transformf FrameTimeManager::GetEyePredictionPose(ovrHmd hmd, ovrEyeType eye)
{
    double         eyeRenderTime = GetEyePredictionTime(eye);
    ovrSensorState eyeState      = ovrHmd_GetSensorState(hmd, eyeRenderTime);

//    EyeRenderPoses[eye] = eyeState.Predicted.Pose;

    // Record view pose sampling time for Latency reporting.
    if (RenderIMUTimeSeconds == 0.0)
        RenderIMUTimeSeconds = eyeState.Recorded.TimeInSeconds;

    return eyeState.Predicted.Pose;
}
Example #2
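An Android/Gear VR idle loop (GVRInterface): while the headset is docked it samples the sensor state with a prediction interval derived from the frame delta and clamped to 100 ms, applies the predicted orientation to the client, and then processes back-key events.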
void GVRInterface::idle() {
#if defined(ANDROID) && defined(HAVE_LIBOVR)
    if (!_inVRMode && ovr_IsHeadsetDocked()) {
        qDebug() << "The headset just got docked - enter VR mode.";
        enterVRMode();
    } else if (_inVRMode) {
        
        if (ovr_IsHeadsetDocked()) {
            static int counter = 0;
            
            // Get the latest head tracking state, predicted ahead to the midpoint of the time
            // it will be displayed.  It will always be corrected to the real values by
            // time warp, but the closer we get, the less black will be pulled in at the edges.
            const double now = ovr_GetTimeInSeconds();
            static double prev;
            const double rawDelta = now - prev;
            prev = now;
            const double clampedPrediction = std::min( 0.1, rawDelta * 2);
            ovrSensorState sensor = ovrHmd_GetSensorState(OvrHmd, now + clampedPrediction, true );   
            
            auto ovrOrientation = sensor.Predicted.Pose.Orientation;
            glm::quat newOrientation(ovrOrientation.w, ovrOrientation.x, ovrOrientation.y, ovrOrientation.z);
            _client->setOrientation(newOrientation);
            
            if (counter++ % 100000 == 0) {
                qDebug() << "GetSensorState in frame" << counter << "-" 
                    << ovrOrientation.x <<  ovrOrientation.y <<  ovrOrientation.z <<  ovrOrientation.w;
            }
        } else {
            qDebug() << "The headset was undocked - leaving VR mode.";
            
            leaveVRMode();
        }
    } 
    
    OVR::KeyState& backKeyState = _mainWindow->getBackKeyState();
    auto backEvent = backKeyState.Update(ovr_GetTimeInSeconds());

    if (backEvent == OVR::KeyState::KEY_EVENT_LONG_PRESS) {
        qDebug() << "Attemping to start the Platform UI Activity.";
        ovr_StartPackageActivity(_ovr, PUI_CLASS_NAME, PUI_GLOBAL_MENU);
    } else if (backEvent == OVR::KeyState::KEY_EVENT_DOUBLE_TAP || backEvent == OVR::KeyState::KEY_EVENT_SHORT_PRESS) {
        qDebug() << "Got an event we should cancel for!";
    } else if (backEvent == OVR::KeyState::KEY_EVENT_DOWN) {
        qDebug() << "The button is down!";
    }
#endif
}
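Example #3
A per-eye render loop: ovrHmd_BeginFrameTiming/ovrHmd_EndFrameTiming bracket the frame, each eye's sensor state is sampled at that eye's scanout time to build the modelview matrix, and a distortion-mesh pass then composites both eye textures to the window.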
  void draw() {
    static int frameIndex = 0;
    ovrFrameTiming timing = ovrHmd_BeginFrameTiming(hmd, frameIndex++);
    for (int i = 0; i < 2; ++i) {
      const ovrEyeType eye = hmdDesc.EyeRenderOrder[i];
      const EyeArg & eyeArg = eyeArgs[eye];
      // Set up the per-eye projection matrix
      gl::Stacks::projection().top() = eyeArg.projection;

      eyeArg.frameBuffer.activate();
      gl::MatrixStack & mv = gl::Stacks::modelview();
      gl::Stacks::with_push([&]{
        ovrSensorState ss = ovrHmd_GetSensorState(hmd, timing.EyeScanoutSeconds[eye]);
        // Set up the per-eye modelview matrix
        // Apply the head pose
        mv.preMultiply(glm::inverse(Rift::fromOvr(ss.Predicted.Pose)));
        // Apply the per-eye offset
        mv.preMultiply(eyeArg.viewOffset);
        renderScene();
      });
      eyeArg.frameBuffer.deactivate();
    }

    glClearColor(0, 0, 1, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    glDisable(GL_BLEND);
    glDisable(GL_CULL_FACE);
    glDisable(GL_DEPTH_TEST);

    distortionProgram->use();

    glViewport(0, 0, windowSize.x, windowSize.y);
    for_each_eye([&](ovrEyeType eye) {
      const EyeArg & eyeArg = eyeArgs[eye];
      distortionProgram->setUniform(0, eyeArg.scale);
      distortionProgram->setUniform(1, eyeArg.offset);
      eyeArg.frameBuffer.color->bind();
      eyeArg.meshVao->bind();
      glDrawElements(GL_TRIANGLES, eyeArg.mesh.IndexCount,
        GL_UNSIGNED_SHORT, nullptr);
    });
    gl::Texture2d::unbind();
    gl::Program::clear();
    ovrHmd_EndFrameTiming(hmd);
    glEnable(GL_CULL_FACE);
    glEnable(GL_DEPTH_TEST);
  }
Example #4
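A Direct3D 11 render function: it samples the predicted pose at the scanout midpoint, builds the per-eye view matrices from ovrHmd_GetEyePose and the EyeRenderDesc view adjustment, computes the projection from the per-eye FOV, and finishes with a distortion-mesh pass and Present.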
void Render()
{
	ovrFrameTiming frameTiming = ovrHmd_BeginFrameTiming(HMD, 0);

	// Update the box rotation value
	rotationBoxValue += 2.0f*frameTiming.DeltaSeconds;

	// Eye position to use when driving the camera from the keyboard, etc.
	static OVR::Vector3f EyePos;
	EyePos.x = 0.0f, EyePos.y = 0.0f, EyePos.z = 0.0f;

	// Yaw to use when rotating with the mouse, etc.
	static float eyeYaw = 0;

	// Read the predicted pose from the sensor
	ovrPosef movePose = ovrHmd_GetSensorState(HMD, frameTiming.ScanoutMidpointSeconds).Predicted.Pose;
	static ovrPosef eyeRenderPose[2];

	// Calculation used when accounting for the player's height
	//EyePos.y = ovrHmd_GetFloat(HMD, OVR_KEY_EYE_HEIGHT, EyePos.y);	

	// Only a TriangleList is used here.
	g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);

	// Set the render target
	g_pImmediateContext->OMSetRenderTargets(1, &g_pRenderTargetViewOculus, g_pDepthStencilViewOculus);

	// Clear the screen and the depth buffer
	float ClearColor[4] = { 0.0f, 0.125f, 0.3f, 1.0f }; // in R,G,B,A order
	g_pImmediateContext->ClearRenderTargetView(g_pRenderTargetViewOculus, ClearColor);
	g_pImmediateContext->ClearDepthStencilView(g_pDepthStencilViewOculus, D3D11_CLEAR_DEPTH, 1.0f, 0);

	// Render the scene for each eye.
	for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
	{
		ConstantBuffer cb;
		ovrEyeType eye = HMDDesc.EyeRenderOrder[eyeIndex];
		eyeRenderPose[eye] = ovrHmd_GetEyePose(HMD, eye);

		// Compute the view matrix.
		OVR::Matrix4f rotation = OVR::Matrix4f::RotationY(eyeYaw);											// Apply the rotation computed in advance (e.g. by the mouse)
		OVR::Matrix4f resultRotation = rotation * OVR::Matrix4f(eyeRenderPose[eye].Orientation) *			// Apply the eye pose (orientation)
										OVR::Matrix4f(1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1);	// Flip the axes to match the expected coordinate convention
		OVR::Vector3f resultUp = resultRotation.Transform(OVR::Vector3f(0, 1, 0));							// Compute the up vector
		OVR::Vector3f forward = resultRotation.Transform(OVR::Vector3f(0, 0, -1));							// Compute the forward vector
		OVR::Vector3f resultEyePos = EyePos + rotation.Transform(eyeRenderPose[eye].Position);				// Compute the final eye position
		OVR::Vector3f resultEyeAt = EyePos + rotation.Transform(eyeRenderPose[eye].Position) + forward;		// Compute the final look-at point

		// Build the view matrix with xnamath from the values computed above.
		XMVECTOR Eye = XMVectorSet(resultEyePos.x, resultEyePos.y, resultEyePos.z, 0.0f);		// Camera position
		XMVECTOR At = XMVectorSet(resultEyeAt.x, resultEyeAt.y, resultEyeAt.z, 0.0f);			// Camera look-at target
		XMVECTOR Up = XMVectorSet(resultUp.x, resultUp.y, resultUp.z, 0.0f);					// Camera up vector
		g_View = XMMatrixLookAtLH(Eye, At, Up) * XMMatrixTranslation(EyeRenderDesc[eye].ViewAdjust.x, EyeRenderDesc[eye].ViewAdjust.y, EyeRenderDesc[eye].ViewAdjust.z);

		// Compute the projection matrix from EyeRenderDesc.
		// It stores the tangents of the FOV above/below/left/right of the eye center, so use the dedicated libovr helper.
		// Unlike xnamath, OVR::Matrix4f has rows and columns swapped, so transpose it.
		OVR::Matrix4f proj = OVR::CreateProjection(false, EyeRenderDesc[eye].Fov, 0.01f, 100.0f);
		proj.Transpose();
		memcpy_s(&g_Projection, 64, &proj, 64);		

		// Set the viewport (one eye at a time)
		D3D11_VIEWPORT vp;
		vp.TopLeftX = EyeRenderViewport[eye].Pos.x;
		vp.TopLeftY = EyeRenderViewport[eye].Pos.y;
		vp.Width = EyeRenderViewport[eye].Size.w;
		vp.Height = EyeRenderViewport[eye].Size.h;
		vp.MinDepth = 0.0f;
		vp.MaxDepth = 1.0f;
		g_pImmediateContext->RSSetViewports(1, &vp);

		// Set up the matrices to be sent to the constant buffer.
		// The shader expects transposed matrices, so transpose them here.
		cb.mView = XMMatrixTranspose(g_View);
		cb.mProjection = XMMatrixTranspose(g_Projection);

		// Render the scene
		Scene(cb);
	}


	// Distort the scene that was rendered to the render target and draw it into the back buffer.
	DistortionMeshRender(3, HMD, frameTiming.TimewarpPointSeconds,eyeRenderPose);

	g_pSwapChain->Present(0, 0);
	//pRender->WaitUntilGpuIdle();  // the GPU query is not implemented in this sample
	ovrHmd_EndFrameTiming(HMD);

}
Example #5
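FrameTimeManager::GetTimewarpMatrices from the SDK: it samples the predicted poses at the timewarp start and end times, forms delta rotations relative to the eye's render pose, and converts both matrices into the distortion mesh's coordinate convention.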
void FrameTimeManager::GetTimewarpMatrices(ovrHmd hmd, ovrEyeType eyeId,
                                           ovrPosef renderPose, ovrMatrix4f twmOut[2])
{
    if (!hmd)
    {
        return;
    }

    double timewarpStartEnd[2] = { 0.0, 0.0 };    
    GetTimewarpPredictions(eyeId, timewarpStartEnd);
      
    ovrSensorState startState = ovrHmd_GetSensorState(hmd, timewarpStartEnd[0]);
    ovrSensorState endState   = ovrHmd_GetSensorState(hmd, timewarpStartEnd[1]);

    if (TimewarpIMUTimeSeconds == 0.0)
        TimewarpIMUTimeSeconds = startState.Recorded.TimeInSeconds;

    Quatf quatFromStart = startState.Predicted.Pose.Orientation;
    Quatf quatFromEnd   = endState.Predicted.Pose.Orientation;
    Quatf quatFromEye   = renderPose.Orientation; //EyeRenderPoses[eyeId].Orientation;
    quatFromEye.Invert();
    
    Quatf timewarpStartQuat = quatFromEye * quatFromStart;
    Quatf timewarpEndQuat   = quatFromEye * quatFromEnd;

    Matrix4f timewarpStart(timewarpStartQuat);
    Matrix4f timewarpEnd(timewarpEndQuat);
    

    // The real-world orientations have:                                  X=right, Y=up,   Z=backwards.
    // The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards.
    // So we need to perform a similarity transform on this delta matrix.
    // The verbose code would look like this:
    /*
    Matrix4f matBasisChange;
    matBasisChange.SetIdentity();
    matBasisChange.M[0][0] =  1.0f;
    matBasisChange.M[1][1] = -1.0f;
    matBasisChange.M[2][2] = -1.0f;
    Matrix4f matBasisChangeInv = matBasisChange.Inverted();
    matRenderFromNow = matBasisChangeInv * matRenderFromNow * matBasisChange;
    */
    // ...but of course all the above is a constant transform and much more easily done.
    // We flip the signs of the Y&Z row, then flip the signs of the Y&Z column,
    // and of course most of the flips cancel:
    // +++                        +--                     +--
    // +++ -> flip Y&Z columns -> +-- -> flip Y&Z rows -> -++
    // +++                        +--                     -++
    timewarpStart.M[0][1] = -timewarpStart.M[0][1];
    timewarpStart.M[0][2] = -timewarpStart.M[0][2];
    timewarpStart.M[1][0] = -timewarpStart.M[1][0];
    timewarpStart.M[2][0] = -timewarpStart.M[2][0];

    timewarpEnd  .M[0][1] = -timewarpEnd  .M[0][1];
    timewarpEnd  .M[0][2] = -timewarpEnd  .M[0][2];
    timewarpEnd  .M[1][0] = -timewarpEnd  .M[1][0];
    timewarpEnd  .M[2][0] = -timewarpEnd  .M[2][0];

    twmOut[0] = timewarpStart;
    twmOut[1] = timewarpEnd;
}
Example #6
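The OculusWorldDemo per-frame update: the sensor state sampled at the scanout midpoint supplies both the predicted head pose used for player movement and the status flags used to report when positional (vision) tracking is gained or lost.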
void OculusWorldDemoApp::OnIdle()
{
    double curtime = ovr_GetTimeInSeconds();
    // If running slower than 10fps, clamp. Helps when debugging, because then dt can be minutes!
    float  dt      = Alg::Min<float>(float(curtime - LastUpdate), 0.1f);
    LastUpdate     = curtime;    


    Profiler.RecordSample(RenderProfiler::Sample_FrameStart);

    if (LoadingState == LoadingState_DoLoad)
    {
        PopulateScene(MainFilePath.ToCStr());
        LoadingState = LoadingState_Finished;
        return;
    }    

    if (HmdSettingsChanged)
    {
        CalculateHmdValues();        
        HmdSettingsChanged = false;
    }

    HmdFrameTiming = ovrHmd_BeginFrame(Hmd, 0);


    // Update gamepad.
    GamepadState gamepadState;
    if (GetPlatformCore()->GetGamepadManager()->GetGamepadState(0, &gamepadState))
    {
        GamepadStateChanged(gamepadState);
    }

    SensorState ss = ovrHmd_GetSensorState(Hmd, HmdFrameTiming.ScanoutMidpointSeconds);
    HmdStatus = ss.StatusFlags;

    // Change message status around positional tracking.
    bool hadVisionTracking = HaveVisionTracking;
    HaveVisionTracking = (ss.StatusFlags & Status_PositionTracked) != 0;
    if (HaveVisionTracking && !hadVisionTracking)
        Menu.SetPopupMessage("Vision Tracking Acquired");
    if (!HaveVisionTracking && hadVisionTracking)
        Menu.SetPopupMessage("Lost Vision Tracking");
    
    // Check if any new devices were connected.
    ProcessDeviceNotificationQueue();
    // FPS count and timing.
    UpdateFrameRateCounter(curtime);

    
    // Update pose based on frame!
    ThePlayer.HeadPose = ss.Predicted.Pose;
    // Movement/rotation with the gamepad.
    ThePlayer.BodyYaw -= ThePlayer.GamepadRotate.x * dt;
    ThePlayer.HandleMovement(dt, &CollisionModels, &GroundCollisionModels, ShiftDown);


    // Record after processing time.
    Profiler.RecordSample(RenderProfiler::Sample_AfterGameProcessing);    


    // Determine if we are rendering this frame. Frame rendering may be
    // skipped based on FreezeEyeUpdate and Time-warp timing state.
    bool bupdateRenderedView = FrameNeedsRendering(curtime);
    
    if (bupdateRenderedView)
    {
        // If render texture size is changing, apply dynamic changes to viewport.
        ApplyDynamicResolutionScaling();

        pRender->BeginScene(PostProcess_None);

        if (ForceZeroIpd)
        {             
            // Zero IPD eye rendering: draw into left eye only,
            // re-use  texture for right eye.
            pRender->SetRenderTarget(RenderTargets[Rendertarget_Left].pTex);
            pRender->Clear();
        
            ovrPosef eyeRenderPose = ovrHmd_BeginEyeRender(Hmd, ovrEye_Left);
        
            View = CalculateViewFromPose(eyeRenderPose);
            RenderEyeView(ovrEye_Left);
            ovrHmd_EndEyeRender(Hmd, ovrEye_Left, eyeRenderPose, &EyeTexture[ovrEye_Left]);

            // Second eye gets the same texture (initialized to same value above).
            ovrHmd_BeginEyeRender(Hmd, ovrEye_Right); 
            ovrHmd_EndEyeRender(Hmd, ovrEye_Right, eyeRenderPose, &EyeTexture[ovrEye_Right]);
        }

        else if (RendertargetIsSharedByBothEyes)
        {
            // Shared render target eye rendering; set up RT once for both eyes.
            pRender->SetRenderTarget(RenderTargets[Rendertarget_BothEyes].pTex);
            pRender->Clear();

            for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
            {      
                ovrEyeType eye = HmdDesc.EyeRenderOrder[eyeIndex];
                ovrPosef eyeRenderPose = ovrHmd_BeginEyeRender(Hmd, eye);

                View = CalculateViewFromPose(eyeRenderPose);
                RenderEyeView(eye); 
                ovrHmd_EndEyeRender(Hmd, eye, eyeRenderPose, &EyeTexture[eye]);
            }
        }

        else
        {
            // Separate eye rendering - each eye gets its own render target.
            for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
            {      
                ovrEyeType eye = HmdDesc.EyeRenderOrder[eyeIndex];
                pRender->SetRenderTarget(
                    RenderTargets[(eye == 0) ? Rendertarget_Left : Rendertarget_Right].pTex);
                pRender->Clear();
            
                ovrPosef eyeRenderPose = ovrHmd_BeginEyeRender(Hmd, eye);

                View = CalculateViewFromPose(eyeRenderPose);
                RenderEyeView(eye);            
                ovrHmd_EndEyeRender(Hmd, eye, eyeRenderPose, &EyeTexture[eye]);
            }
        }   

        pRender->SetDefaultRenderTarget();
        pRender->FinishScene();        
    }
        
    /*
    double t= ovr_GetTimeInSeconds();
    while (ovr_GetTimeInSeconds() < (t + 0.017))
    {

    } */

    Profiler.RecordSample(RenderProfiler::Sample_AfterEyeRender);

    // TODO: These happen inside ovrHmd_EndFrame; need to hook into it.
    //Profiler.RecordSample(RenderProfiler::Sample_BeforeDistortion);
    ovrHmd_EndFrame(Hmd);
    Profiler.RecordSample(RenderProfiler::Sample_AfterPresent);    
}
Example #7
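OculusWorldDemo startup: after creating the HMD (or a debug HMD when none is detected), a sensor-state query with zero prediction time is used only to check whether orientation tracking is available.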
int OculusWorldDemoApp::OnStartup(int argc, const char** argv)
{

    // *** Oculus HMD & Sensor Initialization

    // Create DeviceManager and first available HMDDevice from it.
    // Sensor object is created from the HMD, to ensure that it is on the
    // correct device.

    ovr_Initialize();

    Hmd = ovrHmd_Create(0);
    
    if (!Hmd)
    {
        // If we didn't detect an Hmd, create a simulated one for debugging.
        Hmd           = ovrHmd_CreateDebug(ovrHmd_DK1);
        UsingDebugHmd = true; 
        if (!Hmd)
        {   // Failed Hmd creation.
            return 1;
        }
    }

    // Get more details about the HMD.
    ovrHmd_GetDesc(Hmd, &HmdDesc);

    WindowSize = HmdDesc.Resolution;


    // ***** Setup System Window & rendering.

    if (!SetupWindowAndRendering(argc, argv))
        return 1;

    // Initialize FovSideTanMax, which allows us to change all Fov sides at once - Fov
    // starts at default and is clamped to this value.
    FovSideTanLimit = FovPort::Max(HmdDesc.MaxEyeFov[0], HmdDesc.MaxEyeFov[1]).GetMaxSideTan();
    FovSideTanMax   = FovPort::Max(HmdDesc.DefaultEyeFov[0], HmdDesc.DefaultEyeFov[1]).GetMaxSideTan();

    PositionTrackingEnabled = (HmdDesc.SensorCaps & ovrSensorCap_Position) ? true : false;


    // *** Configure HMD Stereo settings.
    
    CalculateHmdValues();

    // Query eye height.
    ThePlayer.UserEyeHeight = ovrHmd_GetFloat(Hmd, OVR_KEY_EYE_HEIGHT, ThePlayer.UserEyeHeight);
    ThePlayer.BodyPos.y     = ThePlayer.UserEyeHeight;
    // Center pupil for customization; real game shouldn't need to adjust this.
    CenterPupilDepthMeters  = ovrHmd_GetFloat(Hmd, "CenterPupilDepth", 0.0f);


    ThePlayer.bMotionRelativeToBody = false;  // Default to head-steering for DK1
    
    if (UsingDebugHmd)
        Menu.SetPopupMessage("NO HMD DETECTED");
    else if (!(ovrHmd_GetSensorState(Hmd, 0.0f).StatusFlags & ovrStatus_OrientationTracked))
        Menu.SetPopupMessage("NO SENSOR DETECTED");    
    else
        Menu.SetPopupMessage("Press F9 for Full-Screen on Rift");
    // Give first message 10 sec timeout, add border lines.
    Menu.SetPopupTimeout(10.0f, true);

    PopulateOptionMenu();

    // *** Identify Scene File & Prepare for Loading

    InitMainFilePath();  
    PopulatePreloadScene();
    
    LastUpdate = ovr_GetTimeInSeconds();
	
    return 0;
}
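Example #8
A minimal wrapper that caches the most recent sensor state for the requested absolute time.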
void OculusInterface::update(double time)
{
    if(!initialized) return;
    firstUpdated = true;
    // Cache the latest sensor state, predicted ahead to the requested absolute time.
    ss = ovrHmd_GetSensorState(hmd, time);
}
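For reference, a state cached this way is typically consumed by converting the predicted orientation into application terms. A minimal sketch, assuming the OVR C++ math wrappers (OVR_Math.h) from the same SDK generation as the examples above and the ss member stored by update():

// Sketch only: extract yaw/pitch/roll from the cached predicted pose,
// relying on the ovrQuatf-to-Quatf conversion also shown in Example #5.
OVR::Quatf orientation = ss.Predicted.Pose.Orientation;
float yaw = 0.0f, pitch = 0.0f, roll = 0.0f;
orientation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&yaw, &pitch, &roll);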