Example #1
0
    void MainLoop()
    {
        Layer[0] = new VRLayer(Session);

        CameraCone cameraCone(this);

        while (HandleMessages())
        {
            ActionFromInput();

            // As we get eye poses, we also get the tracking state, for use later
            ovrTrackingState trackingState = Layer[0]->GetEyePoses();
            ovrTrackerPose   trackerPose  = ovr_GetTrackerPose(Session, 0);

            for (int eye = 0; eye < 2; ++eye)
            {
                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);

                // Lets clear the depth buffer, so we can see it clearly.
                // even if that means sorting over the top.
                // And also we have a different z buffer range, so would sort strangely
                DIRECTX.Context->ClearDepthStencilView(Layer[0]->pEyeDepthBuffer[eye]->TexDsv, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1, 0);

                // Note, we vary its visibility
                // and also note the constant update of the camera's
                // location and orientation from within the SDK
                cameraCone.RenderToEyeBuffer(Layer[0], eye, &trackingState, &trackerPose, 0.625f);
            }

            Layer[0]->PrepareLayerHeader();
            DistortAndPresent(1);
        }
    }
Example #2
0
//-------------------------------------------------------------------------------------
int WINAPI WinMain(HINSTANCE hinst, HINSTANCE, LPSTR, int)
{
    BasicVR basicVR(hinst);
    basicVR.Layer[0] = new VRLayer(basicVR.HMD);

    // Cone that visualizes the position tracker's tracking volume
    CameraCone cameraCone(&basicVR);

    // Main loop
    while (basicVR.HandleMessages())
    {
        basicVR.ActionFromInput();

        // Fetching the eye poses also yields the tracking state, used below
        ovrTrackingState trackingState = basicVR.Layer[0]->GetEyePoses();

        // Measure how close we are to the edge of the tracking volume.
        // We don't want the game position, only the Rift-generated head
        // position, approximated as the midpoint of the two eye positions.
        Vector3f leftEyePos  = (Vector3f) basicVR.Layer[0]->EyeRenderPose[0].Position;
        Vector3f rightEyePos = (Vector3f) basicVR.Layer[0]->EyeRenderPose[1].Position;
        Vector3f headPos     = (leftEyePos + rightEyePos) * 0.5f;
        float dist = cameraCone.DistToBoundary(headPos, trackingState.CameraPose);

        // Fully visible at a distance of 0.2 or below, dimming beyond that,
        // but never becoming completely invisible.
        const float distFullVisible = 0.2f;
        const float rateOfDimming   = 4.0f;
        const float minVisibility   = 0.1f;
        float visible = 1.0f - rateOfDimming * (dist - distFullVisible);
        visible = min(visible, 1.0f);          // clamp order is equivalent since minVisibility < 1
        visible = max(visible, minVisibility);

        for (int eye = 0; eye < 2; ++eye)
        {
            // Render the proper scene, fading it out as the cone fades in
            basicVR.Layer[0]->RenderSceneToEyeBuffer(basicVR.MainCam, basicVR.pRoomScene, eye, 0, 0, 1, 1 - visible);

            // Clear the depth buffer so the cone is always drawn on top of
            // the scene, even though that means sorting over it; the cone
            // also uses a different z range and would otherwise sort strangely.
            DIRECTX.Context->ClearDepthStencilView(basicVR.Layer[0]->pEyeDepthBuffer[eye]->TexDsv,
                                                   D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1, 0);

            // Draw the cone with the distance-derived visibility
            cameraCone.RenderToEyeBuffer(basicVR.Layer[0], eye, &trackingState, visible);
        }

        basicVR.Layer[0]->PrepareLayerHeader();
        basicVR.DistortAndPresent(1);
    }

    return (basicVR.Release(hinst));
}