// Build an XMMATRIX from a 16-element JSON array; the elements are passed to
// XMMatrixSet in order (rows m11..m14, m21..m24, m31..m34, m41..m44).
XMMATRIX GetMatrix(json::array_t accessor)
{
    return XMMatrixSet(
        accessor[0],  accessor[1],  accessor[2],  accessor[3],
        accessor[4],  accessor[5],  accessor[6],  accessor[7],
        accessor[8],  accessor[9],  accessor[10], accessor[11],
        accessor[12], accessor[13], accessor[14], accessor[15]);
}
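The parameter name suggests this helper deserializes transforms from a glTF-style document. A minimal usage sketch, assuming nlohmann::json and a glTF-like "nodes" array (the file name and layout here are illustrative, not from the original source):

#include <fstream>
#include <nlohmann/json.hpp>
using json = nlohmann::json;

// Illustrative only: pull local transforms out of a glTF-style "nodes" array.
void LoadNodeTransforms(const char* path)
{
    std::ifstream file(path);
    json doc = json::parse(file);

    for (auto& node : doc["nodes"])
    {
        if (node.contains("matrix"))
        {
            XMMATRIX local = GetMatrix(node["matrix"].get<json::array_t>());
            // ... store 'local' on the corresponding scene node ...
        }
    }
}

Note that glTF declares its matrices column-major; whether the straight element-order read performed by GetMatrix yields the transform you expect depends on your math conventions (with D3D's row-vector convention it works out), so verify against your data.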
//---------------------------------------------------------------------------------------
// Creates the crop matrix.
//---------------------------------------------------------------------------------------
XMMATRIX CascadeShadow::CreateCropMatrix(const XMVECTOR& mini, const XMVECTOR& maxi)
{
    /* Essentially the same as unit-cube clipping. */
    float scaleX  = 1.0f;
    float scaleY  = 1.0f;
    float scaleZ  = 1.0f;
    float offsetX = 0.0f;
    float offsetY = 0.0f;
    float offsetZ = 0.0f;

    // XMVECTOR components are not directly addressable; read them out first.
    float minX = XMVectorGetX(mini);
    float minY = XMVectorGetY(mini);
    float maxX = XMVectorGetX(maxi);
    float maxY = XMVectorGetY(maxi);

    scaleX  = 2.0f / (maxX - minX);
    scaleY  = 2.0f / (maxY - minY);
    offsetX = -0.5f * (maxX + minX) * scaleX;
    offsetY = -0.5f * (maxY + minY) * scaleY;

    // If the scale drops below 1.0, the region projected onto the shadow map
    // shrinks, so clamp it.
    scaleX = max(1.0f, scaleX);
    scaleY = max(1.0f, scaleY);

#if 0
    // The GPU Gems 3 implementation also performs the processing below.
    // However, maxi.z can get close to zero, which breaks the shadow map
    // (e.g. it goes completely black), so this implementation leaves the
    // Z component untouched.
    float minZ = 0.0f;
    float maxZ = XMVectorGetZ(maxi);
    scaleZ  = 1.0f / (maxZ - minZ);
    offsetZ = -minZ * scaleZ;
#endif

    return XMMatrixSet(
        scaleX,  0.0f,    0.0f,    0.0f,
        0.0f,    scaleY,  0.0f,    0.0f,
        0.0f,    0.0f,    scaleZ,  0.0f,
        offsetX, offsetY, offsetZ, 1.0f);
}
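A typical caller looks something like the following sketch (assumed, not from the original source; corners would hold the eight world-space corners of the current cascade slice, and lightViewProj the light's view-projection):

#include <cfloat>
#include <DirectXMath.h>
using namespace DirectX;

// Sketch of a caller: find the cascade's bounds in light clip space, then
// crop the light's view-projection to them.
XMMATRIX CascadeShadow::CropToCascade(const XMVECTOR corners[8], const XMMATRIX& lightViewProj)
{
    XMVECTOR mini = XMVectorReplicate(+FLT_MAX);
    XMVECTOR maxi = XMVectorReplicate(-FLT_MAX);
    for (int i = 0; i < 8; ++i)
    {
        // Project each slice corner into light clip space (w-divide included).
        XMVECTOR p = XMVector3TransformCoord(corners[i], lightViewProj);
        mini = XMVectorMin(mini, p);
        maxi = XMVectorMax(maxi, p);
    }
    // The crop matrix is applied after the light's view-projection.
    return XMMatrixMultiply(lightViewProj, CreateCropMatrix(mini, maxi));
}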
// Return true to retry later (e.g. after display lost).
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device-lost failures cleanly.
    ovrMirrorTexture     mirrorTexture = nullptr;
    OculusEyeTexture*    pEyeRenderTexture[2] = { nullptr, nullptr };
    Scene*               roomScene = nullptr;
    Camera*              mainCam = nullptr;
    ovrMirrorTextureDesc mirrorDesc = {};

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size; this sample uses 1/2 the HMD resolution.
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    // Make the eye render buffers (caution if actual size < requested due to HW limits).
    ovrRecti eyeRenderViewport[2];
    for (int eye = 0; eye < 2; ++eye)
    {
        ovrSizei idealSize = ovr_GetFovTextureSize(session, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
        pEyeRenderTexture[eye] = new OculusEyeTexture();
        if (!pEyeRenderTexture[eye]->Init(session, idealSize.w, idealSize.h, true))
        {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create eye texture.");
        }

        eyeRenderViewport[eye].Pos.x = 0;
        eyeRenderViewport[eye].Pos.y = 0;
        eyeRenderViewport[eye].Size  = idealSize;
        if (!pEyeRenderTexture[eye]->TextureChain)
        {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create texture.");
        }
    }

    // Create a mirror texture to see on the monitor.
    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    mirrorDesc.Width  = DIRECTX.WinSizeW;
    mirrorDesc.Height = DIRECTX.WinSizeH;
    result = ovr_CreateMirrorTextureDX(session, DIRECTX.CommandQueue, &mirrorDesc, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        FATALERROR("Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);

    // Create camera
    mainCam = new Camera(XMVectorSet(0.0f, 1.6f, 5.0f, 0), XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    long long frameIndex = 0;
    bool drawMirror = true;

    DIRECTX.InitFrame(drawMirror);

    // Main loop
    while (DIRECTX.HandleMessages())
    {
        ovrSessionStatus sessionStatus;
        ovr_GetSessionStatus(session, &sessionStatus);
        if (sessionStatus.ShouldQuit)
        {
            // Because the application was asked to quit, do not request a retry.
            retryCreate = false;
            break;
        }
        if (sessionStatus.ShouldRecenter)
            ovr_RecenterTrackingOrigin(session);

        if (sessionStatus.IsVisible)
        {
            XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->GetRotVec());
            XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->GetRotVec());
            XMVECTOR mainCamPos = mainCam->GetPosVec();
            XMVECTOR mainCamRot = mainCam->GetRotVec();
            if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])   mainCamPos = XMVectorAdd(mainCamPos, forward);
            if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCamPos = XMVectorSubtract(mainCamPos, forward);
            if (DIRECTX.Key['D'])                         mainCamPos = XMVectorAdd(mainCamPos, right);
            if (DIRECTX.Key['A'])                         mainCamPos = XMVectorSubtract(mainCamPos, right);

            static float Yaw = 0;
            if (DIRECTX.Key[VK_LEFT])  mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
            if (DIRECTX.Key[VK_RIGHT]) mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

            mainCam->SetPosVec(mainCamPos);
            mainCam->SetRotVec(mainCamRot);

            // Animate the cube
            static float cubeClock = 0;
            roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

            // Get both eye poses simultaneously, with IPD offset already included.
            ovrPosef EyeRenderPose[2];
            ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
                                              eyeRenderDesc[1].HmdToEyeOffset };

            double sensorSampleTime; // sensorSampleTime is fed into the layer later
            ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime);

            // Render scene to the eye buffers
            for (int eye = 0; eye < 2; ++eye)
            {
                DIRECTX.SetActiveContext(eye == 0 ? DrawContext_EyeRenderLeft : DrawContext_EyeRenderRight);
                DIRECTX.SetActiveEye(eye);

                CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(
                    pEyeRenderTexture[eye]->GetD3DResource(),
                    D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
                    D3D12_RESOURCE_STATE_RENDER_TARGET);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->GetRtv(), pEyeRenderTexture[eye]->GetDsv());
                DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

                // Get the pose information in XM format
                XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
                                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y,
                                              EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                Camera finalCam(XMVectorAdd(mainCamPos, XMVector3Rotate(eyePos, mainCamRot)),
                                XMQuaternionMultiply(eyeQuat, mainCamRot));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None);
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
                XMMATRIX prod = XMMatrixMultiply(view, proj);

                roomScene->Render(&prod, 1, 1, 1, 1, true);

                resBar = CD3DX12_RESOURCE_BARRIER::Transition(
                    pEyeRenderTexture[eye]->GetD3DResource(),
                    D3D12_RESOURCE_STATE_RENDER_TARGET,
                    D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                // Commit rendering to the swap chain
                pEyeRenderTexture[eye]->Commit();

                // Kick off the eye render command lists before ovr_SubmitFrame()
                DIRECTX.SubmitCommandList(DIRECTX.ActiveContext);
            }

            // Initialize our single full screen Fov layer.
            ovrLayerEyeFov ld = {};
            ld.Header.Type  = ovrLayerType_EyeFov;
            ld.Header.Flags = 0;
            for (int eye = 0; eye < 2; ++eye)
            {
                ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureChain;
                ld.Viewport[eye]     = eyeRenderViewport[eye];
                ld.Fov[eye]          = hmdDesc.DefaultEyeFov[eye];
                ld.RenderPose[eye]   = EyeRenderPose[eye];
                ld.SensorSampleTime  = sensorSampleTime;
            }

            ovrLayerHeader* layers = &ld.Header;
            result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
            // Exit the rendering loop if submit returns an error; will retry on ovrError_DisplayLost.
            if (!OVR_SUCCESS(result))
                goto Done;

            frameIndex++;
        }

        if (drawMirror)
        {
            DIRECTX.SetActiveContext(DrawContext_Final);
            DIRECTX.SetViewport(0.0f, 0.0f, (float)hmdDesc.Resolution.w / 2, (float)hmdDesc.Resolution.h / 2);

            // Render mirror
            ID3D12Resource* mirrorTexRes = nullptr;
            ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&mirrorTexRes));

            //DIRECTX.SetAndClearRenderTarget(DIRECTX.CurrentFrameResources().SwapChainRtvHandle, nullptr, 1.0f, 0.5f, 0.0f, 1.0f);

            // Indicate that the back buffer will now be copied into.
            CD3DX12_RESOURCE_BARRIER preMirrorBlitBar[] =
            {
                CD3DX12_RESOURCE_BARRIER::Transition(DIRECTX.CurrentFrameResources().SwapChainBuffer,
                                                     D3D12_RESOURCE_STATE_RENDER_TARGET,
                                                     D3D12_RESOURCE_STATE_COPY_DEST),
                CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes,
                                                     D3D12_RESOURCE_STATE_RENDER_TARGET,
                                                     D3D12_RESOURCE_STATE_COPY_SOURCE)
            };
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(ARRAYSIZE(preMirrorBlitBar), preMirrorBlitBar);

            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->CopyResource(
                DIRECTX.CurrentFrameResources().SwapChainBuffer, mirrorTexRes);

            CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(
                mirrorTexRes,
                D3D12_RESOURCE_STATE_COPY_SOURCE,
                D3D12_RESOURCE_STATE_RENDER_TARGET);
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

            // Release the reference taken by ovr_GetMirrorTextureBufferDX each
            // frame; the mirror texture object still owns the resource.
            mirrorTexRes->Release();
        }

        DIRECTX.SubmitCommandListAndPresent(drawMirror);
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(session, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye)
    {
        delete pEyeRenderTexture[eye];
    }
    DIRECTX.ReleaseDevice();
    ovr_Destroy(session);

    // Retry on ovrError_DisplayLost
    return retryCreate || (result == ovrError_DisplayLost);
}
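MainLoop's contract is that returning true means "worth retrying", e.g. after ovrError_DisplayLost. A minimal driver sketch (assumed, not from the original source; window creation and the sample's VALIDATE-style error handling are omitted for brevity):

// Sketch of a retry driver for MainLoop (names and structure are assumptions).
int WINAPI WinMain(HINSTANCE hinst, HINSTANCE, LPSTR, int)
{
    if (!OVR_SUCCESS(ovr_Initialize(nullptr)))
        return 1;

    bool retry = false;          // first attempt: fail hard on any error
    while (MainLoop(retry))
    {
        retry = true;            // later attempts: keep retrying after display lost
        Sleep(10);
    }

    ovr_Shutdown();
    return 0;
}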
void SuzanneDX::RenderShadowMaps()
{
    // Depth texture that can be bound both as a depth-stencil target and as a
    // shader resource (hence the TYPELESS format).
    D3D11_TEXTURE2D_DESC shadowMapDesc;
    ZeroMemory(&shadowMapDesc, sizeof(shadowMapDesc));
    shadowMapDesc.Width = SHADOW_RESOLUTION;
    shadowMapDesc.Height = SHADOW_RESOLUTION;
    shadowMapDesc.MipLevels = 1;
    shadowMapDesc.ArraySize = 1;
    shadowMapDesc.Format = DXGI_FORMAT_R24G8_TYPELESS;
    shadowMapDesc.SampleDesc.Count = 1;
    shadowMapDesc.SampleDesc.Quality = 0;
    shadowMapDesc.Usage = D3D11_USAGE_DEFAULT;
    shadowMapDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL | D3D11_BIND_SHADER_RESOURCE;
    shadowMapDesc.CPUAccessFlags = 0;
    shadowMapDesc.MiscFlags = 0;

    D3D11_DEPTH_STENCIL_VIEW_DESC shadowMapDsvDesc;
    ZeroMemory(&shadowMapDsvDesc, sizeof(shadowMapDsvDesc));
    shadowMapDsvDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
    shadowMapDsvDesc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
    shadowMapDsvDesc.Texture2D.MipSlice = 0;

    D3D11_SHADER_RESOURCE_VIEW_DESC shadowMapResourceDesc;
    ZeroMemory(&shadowMapResourceDesc, sizeof(shadowMapResourceDesc));
    shadowMapResourceDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
    shadowMapResourceDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
    shadowMapResourceDesc.Texture2D.MipLevels = 1;
    shadowMapResourceDesc.Texture2D.MostDetailedMip = 0;

    D3D11_VIEWPORT shadowMapViewport;
    shadowMapViewport.Width = (FLOAT)SHADOW_RESOLUTION;
    shadowMapViewport.Height = (FLOAT)SHADOW_RESOLUTION;
    shadowMapViewport.MinDepth = 0.0f;
    shadowMapViewport.MaxDepth = 1.0f;
    shadowMapViewport.TopLeftX = 0;
    shadowMapViewport.TopLeftY = 0;
    mDeviceContext->RSSetViewports(1, &shadowMapViewport);

    XMMATRIX shadowViewProjectionMatrices[NUMBER_OF_LIGHTS];
    for (int i = 0; i < NUMBER_OF_LIGHTS; ++i)
    {
        ID3D11Texture2D* shadowMap;
        mDevice->CreateTexture2D(&shadowMapDesc, NULL, &shadowMap);

        ID3D11DepthStencilView* shadowMapDsv;
        mDevice->CreateDepthStencilView(shadowMap, &shadowMapDsvDesc, &shadowMapDsv);
        mDevice->CreateShaderResourceView(shadowMap, &shadowMapResourceDesc, &shadowMapResources[i]);

        // Depth-only pass: no color targets are bound.
        mDeviceContext->OMSetRenderTargets(0, 0, shadowMapDsv);
        mDeviceContext->ClearDepthStencilView(shadowMapDsv, D3D11_CLEAR_DEPTH, 1.0f, 0);

        // Directional light: render from the light's direction with an orthographic projection.
        XMVECTOR lightInvDir = XMVector3Normalize(XMVectorSet(lighting.lights[i].position.x,
                                                              lighting.lights[i].position.y,
                                                              lighting.lights[i].position.z, 1.0f));
        XMMATRIX shadowProjectionMatrix = XMMatrixOrthographicRH(20, 20, -10, 20);
        XMMATRIX shadowViewMatrix = XMMatrixLookAtRH(lightInvDir, XMVectorSet(0, 0, 0, 1), XMVectorSet(0, 1, 0, 1));
        shadowViewProjectionMatrices[i] = shadowViewMatrix * shadowProjectionMatrix;

        ID3D11Buffer* shadowViewProjectionMatrixBuffer = DXUtil::CreateMatrixBuffer(mDevice, shadowViewProjectionMatrices[i]);
        mDeviceContext->VSSetConstantBuffers(shadowViewProjectionMatrixBufferSlot, 1, &shadowViewProjectionMatrixBuffer);

        UINT stride = sizeof(Vertex);
        UINT offset = 0;
        for (const ModelDX& model : models)
        {
            mDeviceContext->IASetVertexBuffers(0, 1, &model.vertexBuffer, &stride, &offset);
            mDeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
            mDeviceContext->IASetInputLayout(model.inputLayout);
            mDeviceContext->VSSetShader(shadowVertexShader, 0, 0);
            mDeviceContext->PSSetShader(shadowPixelShader, 0, 0);
            mDeviceContext->VSSetConstantBuffers(modelMatrixBufferSlot, 1, &model.modelMatrixBuffer);
            mDeviceContext->Draw(model.vertexCount, 0);
        }

        shadowMap->Release();
        shadowMapDsv->Release();
        shadowViewProjectionMatrixBuffer->Release();
    }

    // Restore the main render target and viewport.
    mDeviceContext->OMSetRenderTargets(1, &mRenderTargetView, mDepthStencilView);
    mDeviceContext->RSSetViewports(1, &mViewport);

    D3D11_BUFFER_DESC matrixDesc;
    ZeroMemory(&matrixDesc, sizeof(matrixDesc));
    matrixDesc.ByteWidth = sizeof(XMMATRIX) * NUMBER_OF_LIGHTS;
    matrixDesc.Usage = D3D11_USAGE_DEFAULT;
    matrixDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
    matrixDesc.CPUAccessFlags = 0;

    D3D11_SUBRESOURCE_DATA matrixData;
    ZeroMemory(&matrixData, sizeof(matrixData));
    matrixData.pSysMem = &shadowViewProjectionMatrices[0];

    // Upload all light view-projection matrices in a single constant buffer.
    ID3D11Buffer* matrixBuffer;
    mDevice->CreateBuffer(&matrixDesc, &matrixData, &matrixBuffer);
    mDeviceContext->VSSetConstantBuffers(shadowViewProjectionMatrixBufferSlot, 1, &matrixBuffer);
    matrixBuffer->Release();

    // Bias matrix: remaps clip-space x/y from [-1, 1] to [0, 1] texture space
    // for the shadow-map lookup.
    XMMATRIX biasMatrix = XMMatrixSet(
        0.5, 0.0, 0.0, 0.0,
        0.0, 0.5, 0.0, 0.0,
        0.0, 0.0, 1.0, 0.0,
        0.5, 0.5, 0.0, 1.0);
    ID3D11Buffer* shadowBiasMatrixBuffer = DXUtil::CreateMatrixBuffer(mDevice, biasMatrix);
    mDeviceContext->VSSetConstantBuffers(shadowBiasMatrixBufferSlot, 1, &shadowBiasMatrixBuffer);
    shadowBiasMatrixBuffer->Release();
}
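To make the bias concrete, here is a small worked example (illustrative only, not part of the renderer; biasMatrix is the matrix built above):

#include <DirectXMath.h>
using namespace DirectX;

// Illustrative only: verify the [-1,1] -> [0,1] remapping performed by the
// bias matrix, using DirectXMath's row-vector convention (v' = v * M).
void CheckBiasMatrix(const XMMATRIX& biasMatrix)
{
    XMVECTOR clip = XMVectorSet(-1.0f, 1.0f, 0.5f, 1.0f); // clip-space corner at mid depth
    XMVECTOR uv   = XMVector4Transform(clip, biasMatrix); // -> (0.0, 1.0, 0.5, 1.0)
    // x and y are remapped by 0.5 * v + 0.5 and z passes through. Note that
    // D3D texture coordinates run top-down in v, so shaders commonly flip the
    // y term (or the matrix uses -0.5 for the y scale) to compensate.
}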
// Return true to retry later (e.g. after display lost).
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device-lost failures cleanly.
    ovrTexture*    mirrorTexture = nullptr;
    OculusTexture* pEyeRenderTexture[2] = { nullptr, nullptr };
    DepthBuffer*   pEyeDepthBuffer[2] = { nullptr, nullptr };
    Scene*         roomScene = nullptr;
    Camera*        mainCam = nullptr;
    D3D11_TEXTURE2D_DESC td = {};

    ovrHmd HMD;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&HMD, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD);

    // -------------------------------------------------------------------
    // Add: create instances of the CL Eye camera capture class.
    CLEyeCameraCapture* cam[2] = { NULL };

    // Query the number of connected cameras.
    int numCams = CLEyeGetCameraCount();
    if (numCams == 0)
    {
        printf_s("No PS3Eye Camera detected\n");
        goto Done;
    }
    if (numCams > 2)
        numCams = 2; // cam[] only holds two captures
    printf_s("Found %d cameras\n", numCams);

    for (int iCam = 0; iCam < numCams; iCam++)
    {
        char windowName[64];
        // Query the camera's unique UUID.
        GUID guid = CLEyeGetCameraUUID(iCam);
        printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n",
               iCam + 1, guid.Data1, guid.Data2, guid.Data3,
               guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3],
               guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]);
        sprintf_s(windowName, "Camera Window %d", iCam + 1);

        // Create the camera capture object and start grabbing frames.
        cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30);
        cam[iCam]->StartCapture();
    }
    // -------------------------------------------------------------------

    // Setup Device and Graphics
    // Note: the mirror window can be any size; this sample uses 1/2 the HMD resolution.
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    // Make the eye render buffers (caution if actual size < requested due to HW limits).
    ovrRecti eyeRenderViewport[2];
    for (int eye = 0; eye < 2; ++eye)
    {
        ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
        pEyeRenderTexture[eye] = new OculusTexture();
        if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h))
        {
            if (retryCreate) goto Done;
            VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture.");
        }
        pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h);

        eyeRenderViewport[eye].Pos.x = 0;
        eyeRenderViewport[eye].Pos.y = 0;
        eyeRenderViewport[eye].Size  = idealSize;
        if (!pEyeRenderTexture[eye]->TextureSet)
        {
            if (retryCreate) goto Done;
            VALIDATE(false, "Failed to create texture.");
        }
    }

    // Create a mirror texture to see on the monitor.
    td.ArraySize = 1;
    td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
    td.Width  = DIRECTX.WinSizeW;
    td.Height = DIRECTX.WinSizeH;
    td.Usage  = D3D11_USAGE_DEFAULT;
    td.SampleDesc.Count = 1;
    td.MipLevels = 1;
    result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);

    // Create camera
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left,  hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    bool isVisible = true;

    // Open the serial port that drives the head-tracking servos.
    DCB portConfig;
    portConfig.BaudRate = 115200;
    portConfig.Parity = EVENPARITY;
    g_seriPort.Start("\\\\.\\COM3", &portConfig);

    // Main loop
    while (DIRECTX.HandleMessages())
    {
        XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
        XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot);
        if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])   mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
        if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
        if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
        if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);

        static float Yaw = 0;
        if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
        if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

        // Animate the cube
        static float cubeClock = 0;
        roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

        // Get both eye poses simultaneously, with IPD offset already included.
        ovrPosef EyeRenderPose[2];
        ovrVector3f HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset,
                                              eyeRenderDesc[1].HmdToEyeViewOffset };
        double frameTime = ovr_GetPredictedDisplayTime(HMD, 0);
        // Keep sensorSampleTime as close to ovr_GetTrackingState as possible; it is fed into the layer.
        double sensorSampleTime = ovr_GetTimeInSeconds();
        ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue);
        ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose);

        // --------------------------------------------------------------------------
        // Add: get the head's yaw, pitch, and roll, and forward them to the servos.
        float hmdPitch = 0.0f;
        float hmdRoll  = 0.0f;
        float hmdYaw   = 0.0f;

        OVR::Posef HeadPose = hmdState.HeadPose.ThePose;
        HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll);
        SetPos(2, ServoRoll(hmdYaw));
        SetPos(3, ServoRoll(hmdPitch));
        // --------------------------------------------------------------------------

        // Render scene to the eye buffers
        if (isVisible)
        {
            for (int eye = 0; eye < 2; ++eye)
            {
                // Increment to use the next texture, just before writing.
                pEyeRenderTexture[eye]->AdvanceToNextTexture();

                // Clear and set up the render target.
                int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex;
                DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]);
                DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

                // Get the pose information in XM format
                XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
                                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y,
                                              EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
                Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat, mainCam->Rot)));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded);
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
                XMMATRIX prod = XMMatrixMultiply(view, proj);

                roomScene->Render(&prod, 1, 1, 1, 1, true);
            }
        }

        // Initialize our single full screen Fov layer.
        ovrLayerEyeFov ld = {};
        ld.Header.Type  = ovrLayerType_EyeFov;
        ld.Header.Flags = 0;
        for (int eye = 0; eye < 2; ++eye)
        {
            ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet;
            ld.Viewport[eye]     = eyeRenderViewport[eye];
            ld.Fov[eye]          = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye]   = EyeRenderPose[eye];
            ld.SensorSampleTime  = sensorSampleTime;
        }

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1);
        // Exit the rendering loop if submit returns an error; will retry on ovrError_DisplayLost.
        if (!OVR_SUCCESS(result))
            goto Done;

        isVisible = (result == ovrSuccess);

        // Render mirror
        ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture;
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture);
        DIRECTX.SwapChain->Present(0, 0);
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(HMD, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye)
    {
        delete pEyeRenderTexture[eye];
        delete pEyeDepthBuffer[eye];
    }
    DIRECTX.ReleaseDevice();
    ovr_Destroy(HMD);

    g_seriPort.End();

    for (int iCam = 0; iCam < numCams; iCam++)
    {
        cam[iCam]->StopCapture();
        delete cam[iCam];
    }

    // Retry on ovrError_DisplayLost
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
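ServoRoll and SetPos are the author's serial-servo helpers and are not shown in this listing. A hypothetical version of the angle-to-pulse mapping might look like the sketch below; every constant, the SSC-32-style command format, and the g_seriPort.Write call are assumptions for illustration, not the original code:

// Hypothetical sketch: map a head angle in radians to a servo pulse width
// around a 1500 us center, then clamp to a safe travel range.
static int ServoRoll(float angleRad)
{
    const float deg = angleRad * 180.0f / 3.14159265f;
    int pulse = 1500 + (int)(deg * 10.0f);   // ~10 us per degree (assumed gain)
    return min(max(pulse, 900), 2100);       // assumed safe limits
}

// Hypothetical sketch: send "#<channel>P<pulse>\r" (SSC-32-style, assumed
// protocol) over the serial port opened above; Write() is an assumed API.
static void SetPos(int channel, int pulse)
{
    char cmd[32];
    sprintf_s(cmd, "#%dP%d\r", channel, pulse);
    g_seriPort.Write(cmd, (int)strlen(cmd));
}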
// Return true to retry later (e.g. after display lost).
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device-lost failures cleanly.
    ovrMirrorTexture mirrorTexture = nullptr;
    OculusTexture*   pEyeRenderTexture = nullptr;
    DepthBuffer*     pEyeDepthBuffer = nullptr;
    Scene*           roomScene = nullptr;
    Camera*          mainCam = nullptr;
    ovrMirrorTextureDesc desc = {};

    bool isVisible = true;
    long long frameIndex = 0;
    bool useInstancing = false;
    const int repeatDrawing = 1;

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size; this sample uses 1/2 the HMD resolution.
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    ovrRecti eyeRenderViewport[2];

    // Make a single eye texture shared by both eyes, rendered side by side.
    {
        ovrSizei eyeTexSizeL = ovr_GetFovTextureSize(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[0], 1.0f);
        ovrSizei eyeTexSizeR = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);

        ovrSizei textureSize;
        textureSize.w = eyeTexSizeL.w + eyeTexSizeR.w;
        textureSize.h = max(eyeTexSizeL.h, eyeTexSizeR.h);

        pEyeRenderTexture = new OculusTexture();
        if (!pEyeRenderTexture->Init(session, textureSize.w, textureSize.h))
        {
            if (retryCreate) goto Done;
            VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture.");
        }
        pEyeDepthBuffer = new DepthBuffer(DIRECTX.Device, textureSize.w, textureSize.h);

        // Set the viewports: left eye in the left half, right eye in the right half.
        eyeRenderViewport[0].Pos.x = 0;
        eyeRenderViewport[0].Pos.y = 0;
        eyeRenderViewport[0].Size  = eyeTexSizeL;

        eyeRenderViewport[1].Pos.x = eyeTexSizeL.w;
        eyeRenderViewport[1].Pos.y = 0;
        eyeRenderViewport[1].Size  = eyeTexSizeR;
    }

    if (!pEyeRenderTexture->TextureChain)
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create texture.");
    }

    // Create a mirror texture to see on the monitor.
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    desc.Width  = DIRECTX.WinSizeW;
    desc.Height = DIRECTX.WinSizeH;
    result = ovr_CreateMirrorTextureDX(session, DIRECTX.Device, &desc, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);

    // Create camera
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left,  hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    // Main loop
    while (DIRECTX.HandleMessages())
    {
        XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
        XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), mainCam->Rot);
        XMVECTOR up      = XMVector3Rotate(XMVectorSet(0, 0.05f, 0, 0), mainCam->Rot);
        if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])   mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
        if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
        if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
        if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);
        if (DIRECTX.Key['Q'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, up);
        if (DIRECTX.Key['E'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, up);

        static float Yaw = 0;
        if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
        if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

        // Hold 'P' to show the app render-timing overlay of the performance HUD.
        if (DIRECTX.Key['P'])
            ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_AppRenderTiming));
        else
            ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_Off));

        // Hold 'I' to draw both eyes in one instanced pass.
        useInstancing = DIRECTX.Key['I'];

        // Animate the cube
        static float cubeClock = 0;
        roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

        // Get both eye poses simultaneously, with IPD offset already included.
        ovrPosef EyeRenderPose[2];
        ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
                                          eyeRenderDesc[1].HmdToEyeOffset };
        double sensorSampleTime; // sensorSampleTime is fed into the layer later
        ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime);

        // Render scene to the shared eye texture.
        if (isVisible)
        {
            DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture->GetRTV(), pEyeDepthBuffer);

            // Calculate the eye transforms.
            XMMATRIX viewProjMatrix[2];
            for (int eye = 0; eye < 2; ++eye)
            {
                // Get the pose information in XM format
                XMVECTOR eyeQuat = XMLoadFloat4((XMFLOAT4*)&EyeRenderPose[eye].Orientation.x);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y,
                                              EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
                Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat, mainCam->Rot)));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.1f, 100.0f, ovrProjection_None);
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);

                if (useInstancing)
                {
                    // Scale and offset the projection matrix to shift the image
                    // into the correct half of the texture for each eye.
                    XMMATRIX scale = XMMatrixScaling(0.5f, 1.0f, 1.0f);
                    XMMATRIX translate = XMMatrixTranslation((eye == 0) ? -0.5f : 0.5f, 0.0f, 0.0f);
                    proj = XMMatrixMultiply(proj, scale);
                    proj = XMMatrixMultiply(proj, translate);
                }

                viewProjMatrix[eye] = XMMatrixMultiply(view, proj);
            }

            if (useInstancing)
            {
                // Instanced stereo: one viewport covering both halves, one draw per mesh.
                DIRECTX.SetViewport(0.0f, 0.0f,
                                    (float)(eyeRenderViewport[0].Size.w + eyeRenderViewport[1].Size.w),
                                    (float)eyeRenderViewport[0].Size.h);

                for (int i = 0; i < repeatDrawing; i++)
                    roomScene->RenderInstanced(&viewProjMatrix[0], 1, 1, 1, 1, true);
            }
            else
            {
                // Non-instanced path: draw each eye into its own viewport.
                for (int eye = 0; eye < 2; ++eye)
                {
                    DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                        (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

                    for (int i = 0; i < repeatDrawing; i++)
                        roomScene->Render(&viewProjMatrix[eye], 1, 1, 1, 1, true);
                }
            }

            // Commit rendering to the swap chain.
            pEyeRenderTexture->Commit();
        }

        // Initialize our single full screen Fov layer.
        ovrLayerEyeFov ld = {};
        ld.Header.Type  = ovrLayerType_EyeFov;
        ld.Header.Flags = 0;
        ld.SensorSampleTime = sensorSampleTime;
        for (int eye = 0; eye < 2; ++eye)
        {
            // Both eyes share one texture chain, distinguished only by viewport.
            ld.ColorTexture[eye] = pEyeRenderTexture->TextureChain;
            ld.Viewport[eye]     = eyeRenderViewport[eye];
            ld.Fov[eye]          = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye]   = EyeRenderPose[eye];
        }

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
        // Exit the rendering loop if submit returns an error; will retry on ovrError_DisplayLost.
        if (!OVR_SUCCESS(result))
            goto Done;

        isVisible = (result == ovrSuccess);

        // Render mirror
        ID3D11Texture2D* tex = nullptr;
        ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&tex));
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex);
        tex->Release();
        DIRECTX.SwapChain->Present(0, 0);

        frameIndex++;
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(session, mirrorTexture);
    delete pEyeRenderTexture;
    delete pEyeDepthBuffer;
    DIRECTX.ReleaseDevice();
    ovr_Destroy(session);

    // Retry on ovrError_DisplayLost
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
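The instanced-stereo projection offset used above is worth spelling out: after the perspective divide, scaling x by 0.5 and shifting by -0.5 (left eye) or +0.5 (right eye) packs each eye's [-1, 1] NDC range into its half of the shared texture. A small standalone check (illustrative only, not part of the sample):

#include <DirectXMath.h>
using namespace DirectX;

// Illustrative only: confirm the instancing offset maps the left eye's NDC x
// range [-1, 1] onto [-1, 0] and the right eye's onto [0, 1].
void CheckEyeOffset(int eye)
{
    XMMATRIX scale     = XMMatrixScaling(0.5f, 1.0f, 1.0f);
    XMMATRIX translate = XMMatrixTranslation((eye == 0) ? -0.5f : 0.5f, 0.0f, 0.0f);
    XMMATRIX offset    = XMMatrixMultiply(scale, translate);

    // A point that would land at NDC x = +1 without the offset...
    XMVECTOR ndc = XMVectorSet(1.0f, 0.0f, 0.5f, 1.0f);
    XMVECTOR shifted = XMVector4Transform(ndc, offset);
    // ...now lands at x = 0 for the left eye and x = +1 for the right eye.
    // Applying the offset in clip space before the divide (as the sample does)
    // gives the same result, since the translation is scaled by w.
}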