void GuardianSystemDemo::InitRenderTargets(const ovrHmdDesc& hmdDesc) { // For each eye for (int i = 0; i < ovrEye_Count; ++i) { // Viewport const float kPixelsPerDisplayPixel = 1.0f; ovrSizei idealSize = ovr_GetFovTextureSize(mSession, (ovrEyeType)i, hmdDesc.DefaultEyeFov[i], kPixelsPerDisplayPixel); mEyeRenderViewport[i] = { 0, 0, idealSize.w, idealSize.h }; // Create Swap Chain ovrTextureSwapChainDesc desc = { ovrTexture_2D, OVR_FORMAT_R8G8B8A8_UNORM_SRGB, 1, idealSize.w, idealSize.h, 1, 1, ovrFalse, ovrTextureMisc_DX_Typeless, ovrTextureBind_DX_RenderTarget }; // Configure Eye render layers mEyeRenderLayer.Header.Type = ovrLayerType_EyeFov; mEyeRenderLayer.Viewport[i] = mEyeRenderViewport[i]; mEyeRenderLayer.Fov[i] = hmdDesc.DefaultEyeFov[i]; mHmdToEyeOffset[i] = ovr_GetRenderDesc(mSession, (ovrEyeType)i, hmdDesc.DefaultEyeFov[i]).HmdToEyeOffset; // DirectX 11 - Generate RenderTargetView from textures in swap chain // ---------------------------------------------------------------------- ovrResult result = ovr_CreateTextureSwapChainDX(mSession, DIRECTX.Device, &desc, &mTextureChain[i]); if (!OVR_SUCCESS(result)) { printf("ovr_CreateTextureSwapChainDX failed"); exit(-1); } // Render Target, normally triple-buffered int textureCount = 0; ovr_GetTextureSwapChainLength(mSession, mTextureChain[i], &textureCount); for (int j = 0; j < textureCount; ++j) { ID3D11Texture2D* renderTexture = nullptr; ovr_GetTextureSwapChainBufferDX(mSession, mTextureChain[i], j, IID_PPV_ARGS(&renderTexture)); D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc = { DXGI_FORMAT_R8G8B8A8_UNORM, D3D11_RTV_DIMENSION_TEXTURE2D }; ID3D11RenderTargetView* renderTargetView = nullptr; DIRECTX.Device->CreateRenderTargetView(renderTexture, &renderTargetViewDesc, &renderTargetView); mEyeRenderTargets[i].push_back(renderTargetView); renderTexture->Release(); } // DirectX 11 - Generate Depth // ---------------------------------------------------------------------- D3D11_TEXTURE2D_DESC depthTextureDesc = 
{ (UINT)idealSize.w, (UINT)idealSize.h, 1, 1, DXGI_FORMAT_D32_FLOAT, {1, 0}, D3D11_USAGE_DEFAULT, D3D11_BIND_DEPTH_STENCIL, 0, 0 }; ID3D11Texture2D* depthTexture = nullptr; DIRECTX.Device->CreateTexture2D(&depthTextureDesc, NULL, &depthTexture); DIRECTX.Device->CreateDepthStencilView(depthTexture, NULL, &mEyeDepthTarget[i]); depthTexture->Release(); } }
// Builds the OVR texture swap chain for this target on the D3D12 command queue,
// creates one RTV per chain buffer and, when requested, a matching depth buffer
// per buffer. Returns false if any LibOVR call fails.
bool Init(ovrSession session, int sizeW, int sizeH, bool createDepth)
{
    Session = session;

    // Describe the color chain: sRGB storage, typeless so a UNORM RTV can be made.
    ovrTextureSwapChainDesc chainDesc = {};
    chainDesc.Type = ovrTexture_2D;
    chainDesc.ArraySize = 1;
    chainDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    chainDesc.Width = sizeW;
    chainDesc.Height = sizeH;
    chainDesc.MipLevels = 1;
    chainDesc.SampleCount = 1;
    chainDesc.MiscFlags = ovrTextureMisc_DX_Typeless;
    chainDesc.StaticImage = ovrFalse;
    chainDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

    ovrResult status = ovr_CreateTextureSwapChainDX(session, DIRECTX.CommandQueue, &chainDesc, &TextureChain);
    if (!OVR_SUCCESS(status))
        return false;

    int bufferCount = 0;
    ovr_GetTextureSwapChainLength(Session, TextureChain, &bufferCount);
    TexRtv.resize(bufferCount);
    TexResource.resize(bufferCount);

    for (int idx = 0; idx < bufferCount; ++idx) {
        status = ovr_GetTextureSwapChainBufferDX(Session, TextureChain, idx, IID_PPV_ARGS(&TexResource[idx]));
        if (!OVR_SUCCESS(status))
            return false;

        // View the typeless chain buffer as plain UNORM for rendering.
        D3D12_RENDER_TARGET_VIEW_DESC viewDesc = {};
        viewDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
        viewDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;

        TexRtv[idx] = DIRECTX.RtvHandleProvider.AllocCpuHandle();
        DIRECTX.Device->CreateRenderTargetView(TexResource[idx], &viewDesc, TexRtv[idx]);
    }

    if (createDepth) {
        // One depth buffer (and DSV handle) per chain buffer.
        DepthTex.resize(bufferCount);
        DepthTexDsv.resize(bufferCount);
        for (int idx = 0; idx < bufferCount; ++idx) {
            DepthTexDsv[idx] = DIRECTX.DsvHandleProvider.AllocCpuHandle();
            DepthTex[idx] = new DepthBuffer(DIRECTX.Device, DepthTexDsv[idx], sizeW, sizeH);
        }
    }
    return true;
}
// Tears down the OVR swap-chain resources and shuts LibOVR down.
// No-op when OVR rendering was never enabled for this app instance.
VR::~VR() {
    if (!this->xapp->ovrRendering) return;
#if defined(_OVR_)
    // BUG FIX: 'count' was uninitialized; if ovr_GetTextureSwapChainLength
    // failed it stayed indeterminate and the release loop read garbage.
    int count = 0;
    ovr_GetTextureSwapChainLength(session, textureSwapChain, &count);
    for (int i = 0; i < count; ++i) {
        texResource[i]->Release();
        //texRtv[i]->Release();
    }
    ovr_DestroyTextureSwapChain(session, textureSwapChain);
    ovr_Destroy(session);
    ovr_Shutdown();
#endif
}
// Allocates a quad layer backed by a DX texture swap chain and initializes it
// as a high-quality 2x2 quad, identity orientation, one meter in front of the
// viewer. Returns NULL if the swap chain cannot be created; on success the
// caller owns the returned layer.
DLL_EXPORT_API xnOvrQuadLayer* xnOvrCreateQuadLayerTexturesDx(xnOvrSession* session, void* dxDevice, int* outTextureCount, int width, int height, int mipLevels, int sampleCount)
{
    auto quadLayer = new xnOvrQuadLayer;

    // Describe the color swap chain backing the quad.
    ovrTextureSwapChainDesc chainDesc = {};
    chainDesc.Type = ovrTexture_2D;
    chainDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    chainDesc.ArraySize = 1;
    chainDesc.Width = width;
    chainDesc.Height = height;
    chainDesc.MipLevels = mipLevels;
    chainDesc.SampleCount = sampleCount;
    chainDesc.StaticImage = ovrFalse;
    chainDesc.MiscFlags = ovrTextureMisc_None;
    chainDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

    if (!OVR_SUCCESS(ovr_CreateTextureSwapChainDX(session->Session, dxDevice, &chainDesc, &quadLayer->SwapChain))) {
        delete quadLayer;
        return NULL;
    }

    auto chainLength = 0;
    ovr_GetTextureSwapChainLength(session->Session, quadLayer->SwapChain, &chainLength);
    *outTextureCount = chainLength;

    auto& q = quadLayer->Layer;
    q.Header.Type = ovrLayerType_Quad;
    q.Header.Flags = ovrLayerFlag_HighQuality;
    q.ColorTexture = quadLayer->SwapChain;
    // Viewport covers the whole chain surface.
    q.Viewport.Pos.x = 0;
    q.Viewport.Pos.y = 0;
    q.Viewport.Size.w = width;
    q.Viewport.Size.h = height;
    // Identity orientation, centered 1m in front of the viewer.
    q.QuadPoseCenter.Orientation.x = 0;
    q.QuadPoseCenter.Orientation.y = 0;
    q.QuadPoseCenter.Orientation.z = 0;
    q.QuadPoseCenter.Orientation.w = 1;
    q.QuadPoseCenter.Position.x = 0;
    q.QuadPoseCenter.Position.y = 0;
    q.QuadPoseCenter.Position.z = -1;
    // 2m x 2m quad in world units.
    q.QuadSize.x = 2;
    q.QuadSize.y = 2;

    return quadLayer;
}
// Creates a D3D11 texture swap chain of the fixed size this target expects
// (exactly TextureCount buffers) and one render-target view per buffer.
// Returns false if LibOVR refuses to create the chain.
bool Init(ovrSession session, int sizeW, int sizeH)
{
    Session = session;

    // sRGB color, typeless so the RTV below can reinterpret it as UNORM.
    ovrTextureSwapChainDesc chainDesc = {};
    chainDesc.Type = ovrTexture_2D;
    chainDesc.ArraySize = 1;
    chainDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    chainDesc.Width = sizeW;
    chainDesc.Height = sizeH;
    chainDesc.MipLevels = 1;
    chainDesc.SampleCount = 1;
    chainDesc.MiscFlags = ovrTextureMisc_DX_Typeless;
    chainDesc.StaticImage = ovrFalse;
    chainDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

    const ovrResult status = ovr_CreateTextureSwapChainDX(session, DIRECTX.Device, &chainDesc, &TextureChain);
    if (!OVR_SUCCESS(status))
        return false;

    int chainLength = 0;
    ovr_GetTextureSwapChainLength(Session, TextureChain, &chainLength);
    VALIDATE(chainLength == TextureCount, "TextureCount mismatch.");

    for (int idx = 0; idx < TextureCount; ++idx) {
        ID3D11Texture2D* chainBuffer = nullptr;
        ovr_GetTextureSwapChainBufferDX(Session, TextureChain, idx, IID_PPV_ARGS(&chainBuffer));

        // View the typeless chain buffer as plain UNORM for rendering.
        D3D11_RENDER_TARGET_VIEW_DESC viewDesc = {};
        viewDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
        viewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
        DIRECTX.Device->CreateRenderTargetView(chainBuffer, &viewDesc, &TexRtv[idx]);
        chainBuffer->Release();
    }
    return true;
}
// Creates the HUD quad's 600x600 GL texture swap chain and manually assembles
// an FBO for it (color attached below, 16-bit depth renderbuffer), then checks
// framebuffer completeness. Logs and returns early if the chain can't be made.
void HudQuad::initGL(ovrSession& session, ovrSizei sz)
{
    // NOTE(review): 'sz' is not used here — the buffer size is hard-coded
    // to 600x600 below. Verify whether sz was meant to size the chain.
    m_session = session;

    ///@todo Make this a parameter to draw func
    m_QuadPoseCenter.Orientation =
        //{ 0.f, 0.f, 0.f, 1.f };
        { 0.129206583f, 0.0310291424f, 0.000810863741f, -0.991131783f };
    m_QuadPoseCenter.Position = { 0.f, -.375f, -.75f };

    // Reference bound to a temporary (lifetime-extended) — behaves like a value.
    const ovrSizei& bufferSize = { 600, 600 };

    ovrTextureSwapChainDesc desc = {};
    desc.Type = ovrTexture_2D;
    desc.ArraySize = 1;
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    desc.Width = bufferSize.w;
    desc.Height = bufferSize.h;
    desc.MipLevels = 1;
    desc.SampleCount = 1;
    desc.StaticImage = ovrFalse;

    // Allocate the frameBuffer that will hold the scene, and then be
    // re-rendered to the screen with distortion
    if (ovr_CreateTextureSwapChainGL(session, &desc, &m_swapChain) == ovrSuccess)
    {
        int length = 0;
        ovr_GetTextureSwapChainLength(session, m_swapChain, &length);
        // Set linear filtering and edge clamping on every chain texture.
        for (int i = 0; i < length; ++i)
        {
            GLuint chainTexId;
            ovr_GetTextureSwapChainBufferGL(session, m_swapChain, i, &chainTexId);
            glBindTexture(GL_TEXTURE_2D, chainTexId);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        }
    }
    else
    {
        LOG_ERROR("HudQuad::initGL Unable to create swap textures");
        return;
    }

    // Manually assemble swap FBO
    FBO& swapfbo = m_fbo;
    swapfbo.w = bufferSize.w;
    swapfbo.h = bufferSize.h;
    glGenFramebuffers(1, &swapfbo.id);
    glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
    // NOTE(review): swapfbo.tex is attached here but is never assigned from the
    // swap-chain textures above — presumably it is set (or re-attached) per
    // frame elsewhere; confirm before relying on this attachment.
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, swapfbo.tex, 0);

    // 16-bit depth renderbuffer shared by all chain buffers.
    swapfbo.depth = 0;
    glGenRenderbuffers(1, &swapfbo.depth);
    glBindRenderbuffer(GL_RENDERBUFFER, swapfbo.depth);
    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, bufferSize.w, bufferSize.h);
    glBindRenderbuffer(GL_RENDERBUFFER, 0);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, swapfbo.depth);

    // Check status
    const GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE)
    {
        LOG_ERROR("Framebuffer status incomplete: %d %x", status, status);
    }
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
// Creates the OVR texture swap chain on the app's D3D12 command queue, builds a
// dedicated RTV descriptor heap with one render-target view per chain buffer,
// and initializes the single full-screen EyeFov layer (both eyes side by side
// in one shared buffer). RenderPose/SensorSampleTime are filled per frame.
void VR::initD3D()
{
#if defined(_OVR_)
	Sizei bufferSize;
	bufferSize.w = buffersize_width;
	bufferSize.h = buffersize_height;
	// xapp->d3d11Device.Get() will not work, we need a real D3D11 device.
	// (A D3D11on12 CreateWrappedResource experiment previously lived here,
	//  kept out of the way — see version history if it is ever needed again.)

	ovrTextureSwapChainDesc dsDesc = {};
	dsDesc.Type = ovrTexture_2D;
	dsDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
	dsDesc.ArraySize = 1;
	dsDesc.Width = bufferSize.w;
	dsDesc.Height = bufferSize.h;
	dsDesc.MipLevels = 1;
	dsDesc.SampleCount = 1;
	dsDesc.StaticImage = ovrFalse;
	dsDesc.MiscFlags = ovrTextureMisc_DX_Typeless;//ovrTextureMisc_None;
	dsDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

	if (ovr_CreateTextureSwapChainDX(session, xapp->commandQueue.Get()/*xapp->reald3d11Device.Get()*/, &dsDesc, &textureSwapChain) == ovrSuccess)
	{
		int count = 0;
		ovr_GetTextureSwapChainLength(session, textureSwapChain, &count);
		texRtv.resize(count);
		texResource.resize(count);

		// Create descriptor heaps.
		UINT rtvDescriptorSize;
		{
			// Describe and create a render target view (RTV) descriptor heap.
			D3D12_DESCRIPTOR_HEAP_DESC rtvHeapDesc = {};
			rtvHeapDesc.NumDescriptors = count;
			rtvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV;
			rtvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
			ThrowIfFailed(xapp->device->CreateDescriptorHeap(&rtvHeapDesc, IID_PPV_ARGS(&rtvVRHeap)));
			rtvVRHeap->SetName(L"rtVRHeap_xapp");
			rtvDescriptorSize = xapp->device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
		}

		for (int i = 0; i < count; ++i) {
			// BUG FIX: removed an unused leftover 'ID3D11Texture2D* tex' local
			// from the earlier D3D11 code path.
			ovr_GetTextureSwapChainBufferDX(session, textureSwapChain, i, IID_PPV_ARGS(&texResource[i]));
			// View the typeless chain buffer as plain UNORM for rendering.
			D3D12_RENDER_TARGET_VIEW_DESC rtvd = {};
			rtvd.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
			rtvd.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
			CD3DX12_CPU_DESCRIPTOR_HANDLE rtvHandle(rtvVRHeap->GetCPUDescriptorHandleForHeapStart(), i, rtvDescriptorSize);
			texRtv[i] = rtvHandle;
			xapp->device->CreateRenderTargetView(texResource[i], /*nullptr*/&rtvd, texRtv[i]);
		}
	}

	// Initialize our single full screen Fov layer.
	// NOTE(review): this runs even when swap-chain creation failed above, in
	// which case ColorTexture[0] is null — TODO confirm this is intended
	// (frame submission would fail anyway in that state).
	layer.Header.Type = ovrLayerType_EyeFov;
	layer.Header.Flags = 0;
	layer.ColorTexture[0] = textureSwapChain;
	layer.ColorTexture[1] = nullptr;//textureSwapChain;
	layer.Fov[0] = eyeRenderDesc[0].Fov;
	layer.Fov[1] = eyeRenderDesc[1].Fov;
	// Left eye renders into the left half, right eye into the right half.
	layer.Viewport[0] = Recti(0, 0, bufferSize.w / 2, bufferSize.h);
	layer.Viewport[1] = Recti(bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
	// ld.RenderPose and ld.SensorSampleTime are updated later per frame.
#endif
#if defined(_DEBUG)
	// SetStablePowerState requires Win10 to be in developer mode:
	// start settings app, then search for 'for developers settings',
	// then enable it under developer features, developer mode
	//xapp->device->SetStablePowerState(true);
#endif
}
// Creates the shared side-by-side eye texture swap chain sized from the HMD's
// default FOVs, fills in the per-eye render descriptions / eye offsets,
// configures the EyeFov layer viewports (left half / right half), and
// optionally creates a mirror texture. Returns false on any OVR failure.
DLL_EXPORT_API npBool xnOvrCreateTexturesDx(xnOvrSession* session, void* dxDevice, int* outTextureCount, float pixelPerDisplayPixel, int mirrorBufferWidth, int mirrorBufferHeight)
{
	session->HmdDesc = ovr_GetHmdDesc(session->Session);
	ovrSizei sizel = ovr_GetFovTextureSize(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0], pixelPerDisplayPixel);
	ovrSizei sizer = ovr_GetFovTextureSize(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1], pixelPerDisplayPixel);
	ovrSizei bufferSize;
	bufferSize.w = sizel.w + sizer.w;
	// BUG FIX: previously used fmax(), which round-trips the int heights
	// through double; a plain integer max is exact and intent-revealing.
	bufferSize.h = sizel.h > sizer.h ? sizel.h : sizer.h;

	ovrTextureSwapChainDesc texDesc = {};
	texDesc.Type = ovrTexture_2D;
	texDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
	texDesc.ArraySize = 1;
	texDesc.Width = bufferSize.w;
	texDesc.Height = bufferSize.h;
	texDesc.MipLevels = 1;
	texDesc.SampleCount = 1;
	texDesc.StaticImage = ovrFalse;
	texDesc.MiscFlags = ovrTextureMisc_None;
	texDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

	if (!OVR_SUCCESS(ovr_CreateTextureSwapChainDX(session->Session, dxDevice, &texDesc, &session->SwapChain)))
	{
		return false;
	}

	auto count = 0;
	ovr_GetTextureSwapChainLength(session->Session, session->SwapChain, &count);
	*outTextureCount = count;

	// init structures
	session->EyeRenderDesc[0] = ovr_GetRenderDesc(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0]);
	session->EyeRenderDesc[1] = ovr_GetRenderDesc(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1]);
	session->HmdToEyeViewOffset[0] = session->EyeRenderDesc[0].HmdToEyeOffset;
	session->HmdToEyeViewOffset[1] = session->EyeRenderDesc[1].HmdToEyeOffset;

	session->Layer.Header.Type = ovrLayerType_EyeFov;
	session->Layer.Header.Flags = 0;
	// Both eyes share one chain; the viewports split it vertically in half.
	session->Layer.ColorTexture[0] = session->SwapChain;
	session->Layer.ColorTexture[1] = session->SwapChain;
	session->Layer.Fov[0] = session->EyeRenderDesc[0].Fov;
	session->Layer.Fov[1] = session->EyeRenderDesc[1].Fov;
	session->Layer.Viewport[0].Pos.x = 0;
	session->Layer.Viewport[0].Pos.y = 0;
	session->Layer.Viewport[0].Size.w = bufferSize.w / 2;
	session->Layer.Viewport[0].Size.h = bufferSize.h;
	session->Layer.Viewport[1].Pos.x = bufferSize.w / 2;
	session->Layer.Viewport[1].Pos.y = 0;
	session->Layer.Viewport[1].Size.w = bufferSize.w / 2;
	session->Layer.Viewport[1].Size.h = bufferSize.h;

	// create mirror as well (skipped when either requested dimension is 0)
	if (mirrorBufferHeight != 0 && mirrorBufferWidth != 0)
	{
		ovrMirrorTextureDesc mirrorDesc = {};
		mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
		mirrorDesc.Width = mirrorBufferWidth;
		mirrorDesc.Height = mirrorBufferHeight;
		if (!OVR_SUCCESS(ovr_CreateMirrorTextureDX(session->Session, dxDevice, &mirrorDesc, &session->Mirror)))
		{
			return false;
		}
	}

	return true;
}
// Entry point: initializes SDL2, LibOVR, OpenGL/GLEW and the ZED stereo camera,
// then runs the render loop that draws ZED left/right images into the Oculus
// swap chain (side by side) and mirrors the result into the SDL window.
// Returns 0 on clean shutdown, -1 on any initialization or submit failure.
int main(int argc, char **argv) {
    // Initialize SDL2's context
    SDL_Init(SDL_INIT_VIDEO);
    // Initialize Oculus' context
    ovrResult result = ovr_Initialize(nullptr);
    if (OVR_FAILURE(result)) {
        std::cout << "ERROR: Failed to initialize libOVR" << std::endl;
        SDL_Quit();
        return -1;
    }

    ovrSession session;
    ovrGraphicsLuid luid;
    // Connect to the Oculus headset
    result = ovr_Create(&session, &luid);
    if (OVR_FAILURE(result)) {
        std::cout << "ERROR: Oculus Rift not detected" << std::endl;
        ovr_Shutdown();
        SDL_Quit();
        return -1;
    }

    int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED;
    int winWidth = 1280;
    int winHeight = 720;
    Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN;
    // Create SDL2 Window
    SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags);
    // Create OpenGL context
    SDL_GLContext glContext = SDL_GL_CreateContext(window);
    // Initialize GLEW
    glewInit();
    // Turn off vsync to let the compositor do its magic
    SDL_GL_SetSwapInterval(0);

    // Initialize the ZED Camera
    sl::zed::Camera* zed = 0;
    zed = new sl::zed::Camera(sl::zed::HD720);
    sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0);
    int zedWidth = zed->getImageSize().width;
    int zedHeight = zed->getImageSize().height;
    if (zederr != sl::zed::SUCCESS) {
        std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl;
        ovr_Destroy(session);
        ovr_Shutdown();
        SDL_GL_DeleteContext(glContext);
        SDL_DestroyWindow(window);
        SDL_Quit();
        delete zed;
        return -1;
    }

    GLuint zedTextureID_L, zedTextureID_R;
    // Generate OpenGL texture for left images of the ZED camera
    glGenTextures(1, &zedTextureID_L);
    glBindTexture(GL_TEXTURE_2D, zedTextureID_L);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // Generate OpenGL texture for right images of the ZED camera
    glGenTextures(1, &zedTextureID_R);
    glBindTexture(GL_TEXTURE_2D, zedTextureID_R);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glBindTexture(GL_TEXTURE_2D, 0);

#if OPENGL_GPU_INTEROP
    // Register the two GL textures with CUDA so ZED GPU buffers can be copied
    // into them without a round trip through host memory.
    cudaGraphicsResource* cimg_L;
    cudaGraphicsResource* cimg_R;
    cudaError_t errL, errR;
    errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
    errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
    if (errL != cudaSuccess || errR != cudaSuccess) {
        std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl;
    }
#endif

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
    // Get the texture sizes of Oculus eyes
    ovrSizei textureSize0 = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
    ovrSizei textureSize1 = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
    // Compute the final size of the render buffer (both eyes side by side)
    ovrSizei bufferSize;
    bufferSize.w = textureSize0.w + textureSize1.w;
    bufferSize.h = std::max(textureSize0.h, textureSize1.h);
    // Initialize OpenGL swap textures to render
    ovrTextureSwapChain textureChain = nullptr;
    // Description of the swap chain
    ovrTextureSwapChainDesc descTextureSwap = {};
    descTextureSwap.Type = ovrTexture_2D;
    descTextureSwap.ArraySize = 1;
    descTextureSwap.Width = bufferSize.w;
    descTextureSwap.Height = bufferSize.h;
    descTextureSwap.MipLevels = 1;
    descTextureSwap.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    descTextureSwap.SampleCount = 1;
    descTextureSwap.StaticImage = ovrFalse;
    // Create the OpenGL texture swap chain
    result = ovr_CreateTextureSwapChainGL(session, &descTextureSwap, &textureChain);
    int length = 0;
    ovr_GetTextureSwapChainLength(session, textureChain, &length);
    if (OVR_SUCCESS(result)) {
        // Set linear filtering and edge clamping on every chain texture.
        for (int i = 0; i < length; ++i) {
            GLuint chainTexId;
            ovr_GetTextureSwapChainBufferGL(session, textureChain, i, &chainTexId);
            glBindTexture(GL_TEXTURE_2D, chainTexId);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        }
    }
    else {
        std::cout << "ERROR: failed creating swap texture" << std::endl;
        ovr_Destroy(session);
        ovr_Shutdown();
        SDL_GL_DeleteContext(glContext);
        SDL_DestroyWindow(window);
        SDL_Quit();
        delete zed;
        return -1;
    }
    // Generate frame buffer to render
    GLuint fboID;
    glGenFramebuffers(1, &fboID);
    // Generate depth buffer of the frame buffer
    GLuint depthBuffID;
    glGenTextures(1, &depthBuffID);
    glBindTexture(GL_TEXTURE_2D, depthBuffID);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    GLenum internalFormat = GL_DEPTH_COMPONENT24;
    GLenum type = GL_UNSIGNED_INT;
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL);

    // Create a mirror texture to display the render result in the SDL2 window
    ovrMirrorTextureDesc descMirrorTexture;
    memset(&descMirrorTexture, 0, sizeof(descMirrorTexture));
    descMirrorTexture.Width = winWidth;
    descMirrorTexture.Height = winHeight;
    descMirrorTexture.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    ovrMirrorTexture mirrorTexture = nullptr;
    result = ovr_CreateMirrorTextureGL(session, &descMirrorTexture, &mirrorTexture);
    if (!OVR_SUCCESS(result)) {
        std::cout << "ERROR: Failed to create mirror texture" << std::endl;
    }
    GLuint mirrorTextureId;
    ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTextureId);

    GLuint mirrorFBOID;
    glGenFramebuffers(1, &mirrorFBOID);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
    glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
    glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    // Frame index used by the compositor
    // it needs to be updated each new frame
    long long frameIndex = 0;
    // FloorLevel will give tracking poses where the floor height is 0
    ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel);
    // Initialize a default Pose
    ovrPosef eyeRenderPose[2];
    // Get the render description of the left and right "eyes" of the Oculus headset
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
    // Get the Oculus view scale description
    ovrVector3f hmdToEyeOffset[2];
    double sensorSampleTime;

    // Create and compile the shader's sources
    Shader shader(OVR_ZED_VS, OVR_ZED_FS);

    // Compute the ZED image field of view with the ZED parameters
    float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx *2.f)) * 2.f;
    // Compute the Horizontal Oculus' field of view with its parameters
    float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan));
    // Compute the useful part of the ZED image
    unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH;
    // Compute the size of the final image displayed in the headset with the ZED image's aspect-ratio kept
    unsigned int widthFinal = bufferSize.w / 2;
    float heightGL = 1.f;
    float widthGL = 1.f;
    if (usefulWidth > 0.f) {
        unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth;
        // Convert this size to OpenGL viewport's frame's coordinates
        heightGL = (heightFinal) / (float)(bufferSize.h);
        widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal);
    }
    else {
        std::cout << "WARNING: ZED parameters got wrong values."
            "Default vertical and horizontal FOV are used.\n"
            "Check your calibration file or check if your ZED is not too close to a surface or an object."
            << std::endl;
    }

    // Compute the Vertical Oculus' field of view with its parameters
    float ovrFovV = (atanf(hmdDesc.DefaultEyeFov[0].UpTan) + atanf(hmdDesc.DefaultEyeFov[0].DownTan));

    // Compute the center of the optical lenses of the headset
    float offsetLensCenterX = ((atanf(hmdDesc.DefaultEyeFov[0].LeftTan)) / ovrFovH) * 2.f - 1.f;
    float offsetLensCenterY = ((atanf(hmdDesc.DefaultEyeFov[0].UpTan)) / ovrFovV) * 2.f - 1.f;

    // Create a rectangle with the computed coordinates and push it in GPU memory.
    struct GLScreenCoordinates {
        float left, up, right, down;
    } screenCoord;
    screenCoord.up = heightGL + offsetLensCenterY;
    screenCoord.down = heightGL - offsetLensCenterY;
    screenCoord.right = widthGL + offsetLensCenterX;
    screenCoord.left = widthGL - offsetLensCenterX;

    float rectVertices[12] = { -screenCoord.left, -screenCoord.up, 0, screenCoord.right, -screenCoord.up, 0, screenCoord.right, screenCoord.down, 0, -screenCoord.left, screenCoord.down, 0 };
    GLuint rectVBO[3];
    glGenBuffers(1, &rectVBO[0]);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW);

    float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 };
    glGenBuffers(1, &rectVBO[1]);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW);

    unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 };
    glGenBuffers(1, &rectVBO[2]);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    // Initialize hit value
    float hit = 0.02f;
    // Initialize a boolean that will be used to stop the application's loop and another one to pause/unpause rendering
    bool end = false;
    bool refresh = true;
    // SDL variable that will be used to store input events
    SDL_Event events;
    // Initialize time variables. They will be used to limit the number of frames rendered per second.
    // Frame counter
    unsigned int riftc = 0, zedc = 1;
    // Chronometer
    unsigned int rifttime = 0, zedtime = 0, zedFPS = 0;
    int time1 = 0, timePerFrame = 0;
    int frameRate = (int)(1000 / MAX_FPS);
    // This boolean is used to test if the application is focused
    bool isVisible = true;

    // Enable the shader
    glUseProgram(shader.getProgramId());
    // Bind the Vertex Buffer Objects of the rectangle that displays ZED images
    // vertices
    glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
    glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0);
    // indices
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
    // texture coordinates
    glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
    glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
    glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0);

    // Main loop
    while (!end) {
        // Compute the time used to render the previous frame
        timePerFrame = SDL_GetTicks() - time1;
        // If the previous frame has been rendered too fast
        if (timePerFrame < frameRate) {
            // Pause the loop to have a max FPS equal to MAX_FPS
            SDL_Delay(frameRate - timePerFrame);
            timePerFrame = frameRate;
        }
        // Increment the ZED chronometer
        zedtime += timePerFrame;
        // If ZED chronometer reached 1 second
        if (zedtime > 1000) {
            zedFPS = zedc;
            zedc = 0;
            zedtime = 0;
        }
        // Increment the Rift chronometer and the Rift frame counter
        rifttime += timePerFrame;
        riftc++;
        // If Rift chronometer reached 200 milliseconds
        if (rifttime > 200) {
            // Display FPS
            std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS;
            // Reset Rift chronometer
            rifttime = 0;
            // Reset Rift frame counter
            riftc = 0;
        }
        // Start frame chronometer
        time1 = SDL_GetTicks();

        // While there is an event catched and not tested
        while (SDL_PollEvent(&events)) {
            // If a key is released
            if (events.type == SDL_KEYUP) {
                // If Q quit the application
                if (events.key.keysym.scancode == SDL_SCANCODE_Q)
                    end = true;
                // If R reset the hit value
                else if (events.key.keysym.scancode == SDL_SCANCODE_R)
                    hit = 0.0f;
                // If C pause/unpause rendering
                else if (events.key.keysym.scancode == SDL_SCANCODE_C)
                    refresh = !refresh;
            }
            // If the mouse wheel is used
            if (events.type == SDL_MOUSEWHEEL) {
                // Increase or decrease hit value
                float s;
                events.wheel.y > 0 ? s = 1.0f : s = -1.0f;
                hit += 0.005f * s;
            }
        }

        // Get texture swap index where we must draw our frame
        GLuint curTexId;
        int curIndex;
        ovr_GetTextureSwapChainCurrentIndex(session, textureChain, &curIndex);
        ovr_GetTextureSwapChainBufferGL(session, textureChain, curIndex, &curTexId);
        // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime.
        eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
        eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
        hmdToEyeOffset[0] = eyeRenderDesc[0].HmdToEyeOffset;
        hmdToEyeOffset[1] = eyeRenderDesc[1].HmdToEyeOffset;
        // Get eye poses, feeding in correct IPD offset
        ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyeRenderPose, &sensorSampleTime);

        // If the application is focused
        if (isVisible) {
            // If successful grab a new ZED image
            if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false)) {
                // Update the ZED frame counter
                zedc++;
                if (refresh) {
#if OPENGL_GPU_INTEROP
                    // GPU path: copy the ZED GPU buffers straight into the GL
                    // textures through the CUDA-GL registered resources.
                    sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT);
                    cudaArray_t arrIm;
                    cudaGraphicsMapResources(1, &cimg_L, 0);
                    cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0);
                    cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice);
                    cudaGraphicsUnmapResources(1, &cimg_L, 0);

                    m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT);
                    cudaGraphicsMapResources(1, &cimg_R, 0);
                    cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0);
                    cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); // *4 = 4 channels * 1 bytes (uint)
                    cudaGraphicsUnmapResources(1, &cimg_R, 0);
#endif
                    // Bind the frame buffer
                    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
                    // Set its color layer 0 as the current swap texture
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);
                    // Set its depth layer as our depth buffer
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0);
                    // Clear the frame buffer
                    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
                    glClearColor(0, 0, 0, 1);

                    // Render for each Oculus eye the equivalent ZED image
                    for (int eye = 0; eye < 2; eye++) {
                        // Set the left or right vertical half of the buffer as the viewport
                        glViewport(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
                        // Bind the left or right ZED image
                        glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R);
#if !OPENGL_GPU_INTEROP
                        // CPU path: upload the freshly grabbed host image.
                        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data);
#endif
                        // Bind the hit value
                        glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit);
                        // Bind the isLeft value
                        glUniform1ui(glGetUniformLocation(shader.getProgramId(), "isLeft"), eye == ovrEye_Left ? 1U : 0U);
                        // Draw the ZED image
                        glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
                    }
                    // Avoids an error when calling SetAndClearRenderSurface during next iteration.
                    // Without this, during the next while loop iteration SetAndClearRenderSurface
                    // would bind a framebuffer with an invalid COLOR_ATTACHMENT0 because the texture ID
                    // associated with COLOR_ATTACHMENT0 had been unlocked by calling wglDXUnlockObjectsNV.
                    glBindFramebuffer(GL_FRAMEBUFFER, fboID);
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
                    // Commit changes to the textures so they get picked up frame
                    ovr_CommitTextureSwapChain(session, textureChain);
                }
                // Do not forget to increment the frameIndex!
                frameIndex++;
            }
        }
        /*
        Note: Even if we don't ask to refresh the framebuffer or if the Camera::grab()
        doesn't catch a new frame, we have to submit an image to the Rift; it
        needs 75Hz refresh. Else there will be jumbs, black frames and/or glitches
        in the headset.
        */
        ovrLayerEyeFov ld;
        ld.Header.Type = ovrLayerType_EyeFov;
        // Tell to the Oculus compositor that our texture origin is at the bottom left
        ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL | Disable head tracking
        // Set the Oculus layer eye field of view for each view
        for (int eye = 0; eye < 2; ++eye) {
            // Set the color texture as the current swap texture
            ld.ColorTexture[eye] = textureChain;
            // Set the viewport as the right or left vertical half part of the color texture
            ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
            // Set the field of view
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            // Set the pose matrix
            ld.RenderPose[eye] = eyeRenderPose[eye];
        }
        ld.SensorSampleTime = sensorSampleTime;

        ovrLayerHeader* layers = &ld.Header;
        // Submit the frame to the Oculus compositor
        // which will display the frame in the Oculus headset
        result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
        if (!OVR_SUCCESS(result)) {
            std::cout << "ERROR: failed to submit frame" << std::endl;
            glDeleteBuffers(3, rectVBO);
            ovr_DestroyTextureSwapChain(session, textureChain);
            ovr_DestroyMirrorTexture(session, mirrorTexture);
            ovr_Destroy(session);
            ovr_Shutdown();
            SDL_GL_DeleteContext(glContext);
            SDL_DestroyWindow(window);
            SDL_Quit();
            delete zed;
            return -1;
        }

        if (result == ovrSuccess && !isVisible) {
            std::cout << "The application is now shown in the headset." << std::endl;
        }
        isVisible = (result == ovrSuccess);

        // This is not really needed for this application but it may be usefull for an more advanced application
        ovrSessionStatus sessionStatus;
        ovr_GetSessionStatus(session, &sessionStatus);
        if (sessionStatus.ShouldRecenter) {
            std::cout << "Recenter Tracking asked by Session" << std::endl;
            ovr_RecenterTrackingOrigin(session);
        }

        // Copy the frame to the mirror buffer
        // which will be drawn in the SDL2 image
        glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
        GLint w = winWidth;
        GLint h = winHeight;
        glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
        // Swap the SDL2 window
        SDL_GL_SwapWindow(window);
    }

    // Disable all OpenGL buffer
    glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
    glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
    glUseProgram(0);
    glBindVertexArray(0);
    // Delete the Vertex Buffer Objects of the rectangle
    glDeleteBuffers(3, rectVBO);
    // Delete SDL, OpenGL, Oculus and ZED context
    ovr_DestroyTextureSwapChain(session, textureChain);
    ovr_DestroyMirrorTexture(session, mirrorTexture);
    ovr_Destroy(session);
    ovr_Shutdown();
    SDL_GL_DeleteContext(glContext);
    SDL_DestroyWindow(window);
    SDL_Quit();
    delete zed;
    // quit
    return 0;
}
///@brief Called once a GL context has been set up.
/// Creates, per eye: the Oculus texture swap chain (sRGB RGBA8, sized by
/// ovr_GetFovTextureSize), filter/wrap state for each chain texture, and a
/// framebuffer with a 16-bit depth renderbuffer. Then creates the mirror
/// texture/FBO for the desktop window and initializes the tweakbar quad.
/// On failure (swap chain or mirror texture creation) logs and returns early,
/// leaving later members uninitialized.
void initVR()
{
    const ovrHmdDesc& hmd = m_Hmd;
    for (int eye = 0; eye < 2; ++eye)
    {
        // Ideal render target size for this eye at 1.0 pixels-per-display-pixel.
        // (Reference bound to a temporary ovrSizei; lifetime-extended, but by-value would be clearer.)
        const ovrSizei& bufferSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmd.DefaultEyeFov[eye], 1.f);
        LOG_INFO("Eye %d tex : %dx%d @ ()", eye, bufferSize.w, bufferSize.h);

        // NOTE(review): this local is never used — the chain actually created is
        // g_textureSwapChain[eye] below. Candidate for removal.
        ovrTextureSwapChain textureSwapChain = 0;

        // Describe the swap chain: single-layer 2D, sRGB, no mips, not static.
        ovrTextureSwapChainDesc desc = {};
        desc.Type = ovrTexture_2D;
        desc.ArraySize = 1;
        desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        desc.Width = bufferSize.w;
        desc.Height = bufferSize.h;
        desc.MipLevels = 1;
        desc.SampleCount = 1;
        desc.StaticImage = ovrFalse;

        // Allocate the frameBuffer that will hold the scene, and then be
        // re-rendered to the screen with distortion
        ovrTextureSwapChain& chain = g_textureSwapChain[eye];
        if (ovr_CreateTextureSwapChainGL(g_session, &desc, &chain) == ovrSuccess)
        {
            // Set sampling state on every texture in the chain (the compositor
            // cycles through them; all need identical filter/wrap settings).
            int length = 0;
            ovr_GetTextureSwapChainLength(g_session, chain, &length);
            for (int i = 0; i < length; ++i)
            {
                GLuint chainTexId;
                ovr_GetTextureSwapChainBufferGL(g_session, chain, i, &chainTexId);
                glBindTexture(GL_TEXTURE_2D, chainTexId);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            }
        }
        else
        {
            LOG_ERROR("Unable to create swap textures");
            return; // abort init; remaining eyes/mirror are left unconfigured
        }

        // Manually assemble swap FBO
        FBO& swapfbo = m_swapFBO[eye];
        swapfbo.w = bufferSize.w;
        swapfbo.h = bufferSize.h;
        glGenFramebuffers(1, &swapfbo.id);
        glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
        // NOTE(review): swapfbo.tex is attached here but is never assigned from
        // the swap chain in this function — presumably the current chain texture
        // is re-attached each frame before rendering, or tex is set elsewhere.
        // Confirm; as written the initial attachment may be a stale/zero texture.
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, swapfbo.tex, 0);

        // Depth renderbuffer (16-bit) matching the eye buffer size.
        swapfbo.depth = 0;
        glGenRenderbuffers(1, &swapfbo.depth);
        glBindRenderbuffer(GL_RENDERBUFFER, swapfbo.depth);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, bufferSize.w, bufferSize.h);
        glBindRenderbuffer(GL_RENDERBUFFER, 0);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, swapfbo.depth);

        // Check status (logged only; init continues regardless)
        const GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
        if (status != GL_FRAMEBUFFER_COMPLETE)
        {
            LOG_ERROR("Framebuffer status incomplete: %d %x", status, status);
        }
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
    }

    // Initialize mirror texture (desktop-window copy of the compositor output)
    ovrMirrorTextureDesc desc;
    memset(&desc, 0, sizeof(desc));
    desc.Width = g_mirrorWindowSz.x;
    desc.Height = g_mirrorWindowSz.y;
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    const ovrResult result = ovr_CreateMirrorTextureGL(g_session, &desc, &g_mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        LOG_ERROR("Unable to create mirror texture");
        return;
    }

    // Manually assemble mirror FBO around the SDK-owned mirror texture.
    m_mirrorFBO.w = g_mirrorWindowSz.x;
    m_mirrorFBO.h = g_mirrorWindowSz.y;
    glGenFramebuffers(1, &m_mirrorFBO.id);
    glBindFramebuffer(GL_FRAMEBUFFER, m_mirrorFBO.id);
    GLuint texId;
    ovr_GetMirrorTextureBufferGL(g_session, g_mirrorTexture, &texId);
    m_mirrorFBO.tex = texId;
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_mirrorFBO.tex, 0);

    // Fixed-size overlay quad for the tweakbar UI.
    const ovrSizei sz = { 600, 600 };
    g_tweakbarQuad.initGL(g_session, sz);

    // Leave no FBO/texture bound on exit.
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);

    g_hmdVisible = true;
}
/**
 * Wraps an Oculus texture swap chain in an FD3D11Texture2DSet.
 *
 * For every buffer in the chain this creates (when the flags request them)
 * one render target view per mip level and one shader resource view, and
 * appends them to the set via AddTexture(). The set is initialized to
 * element 0 before being returned.
 *
 * @param InD3D11RHI   RHI used to reach the D3D11 device.
 * @param InOvrSession Shared Oculus session (locked for the duration).
 * @param InTextureSet Swap chain whose buffers are wrapped; stored on the set.
 * @param InDsDesc     D3D11 description the chain was created from
 *                     (width/height/mips/sample count are taken from it).
 * @param InFormat     Engine pixel format used to derive the DXGI view formats.
 * @param InFlags      TexCreate_* flags (SRGB, RenderTargetable, ShaderResource).
 * @return The populated texture set, or nullptr if a chain buffer could not
 *         be retrieved from the Oculus runtime.
 */
FD3D11Texture2DSet* FD3D11Texture2DSet::D3D11CreateTexture2DSet(
	FD3D11DynamicRHI* InD3D11RHI,
	const FOvrSessionSharedPtr& InOvrSession,
	ovrTextureSwapChain InTextureSet,
	const D3D11_TEXTURE2D_DESC& InDsDesc,
	EPixelFormat InFormat,
	uint32 InFlags
	)
{
	FOvrSessionShared::AutoSession OvrSession(InOvrSession);
	check(InTextureSet);

	// The set starts with no views; per-buffer views are added below.
	TArray<TRefCountPtr<ID3D11RenderTargetView> > TextureSetRenderTargetViews;
	FD3D11Texture2DSet* NewTextureSet = new FD3D11Texture2DSet(
		InD3D11RHI,
		nullptr,
		nullptr,
		false,
		1,
		TextureSetRenderTargetViews,
		/*DepthStencilViews=*/ NULL,
		InDsDesc.Width,
		InDsDesc.Height,
		0,
		InDsDesc.MipLevels,
		InDsDesc.SampleDesc.Count,
		InFormat,
		/*bInCubemap=*/ false,
		InFlags,
		/*bPooledTexture=*/ false
		);

	// Zero-init: ovr_GetTextureSwapChainLength only writes on success.
	int TexCount = 0;
	ovr_GetTextureSwapChainLength(OvrSession, InTextureSet, &TexCount);
	const bool bSRGB = (InFlags & TexCreate_SRGB) != 0;

	// Derive typed view formats from the (possibly typeless) resource format.
	const DXGI_FORMAT PlatformResourceFormat = (DXGI_FORMAT)GPixelFormats[InFormat].PlatformFormat;
	const DXGI_FORMAT PlatformShaderResourceFormat = FindShaderResourceDXGIFormat(PlatformResourceFormat, bSRGB);
	const DXGI_FORMAT PlatformRenderTargetFormat = FindShaderResourceDXGIFormat(PlatformResourceFormat, bSRGB);
	D3D11_RTV_DIMENSION RenderTargetViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
	if (InDsDesc.SampleDesc.Count > 1)
	{
		RenderTargetViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMS;
	}
	for (int32 i = 0; i < TexCount; ++i)
	{
		TRefCountPtr<ID3D11Texture2D> pD3DTexture;
		ovrResult res = ovr_GetTextureSwapChainBufferDX(OvrSession, InTextureSet, i, IID_PPV_ARGS(pD3DTexture.GetInitReference()));
		if (!OVR_SUCCESS(res))
		{
			UE_LOG(LogHMD, Error, TEXT("ovr_GetTextureSwapChainBufferDX failed, error = %d"), int(res));
			// NOTE(review): NewTextureSet (and any views already added) is not
			// released on this path — confirm whether the RHI's ref-counting
			// allows an explicit release here, or whether this leak is accepted.
			return nullptr;
		}

		TArray<TRefCountPtr<ID3D11RenderTargetView> > RenderTargetViews;
		if (InFlags & TexCreate_RenderTargetable)
		{
			// Create a render target view for each mip
			for (uint32 MipIndex = 0; MipIndex < InDsDesc.MipLevels; MipIndex++)
			{
				check(!(InFlags & TexCreate_TargetArraySlicesIndependently)); // not supported
				D3D11_RENDER_TARGET_VIEW_DESC RTVDesc;
				FMemory::Memzero(&RTVDesc, sizeof(RTVDesc));
				RTVDesc.Format = PlatformRenderTargetFormat;
				RTVDesc.ViewDimension = RenderTargetViewDimension;
				RTVDesc.Texture2D.MipSlice = MipIndex;

				TRefCountPtr<ID3D11RenderTargetView> RenderTargetView;
				VERIFYD3D11RESULT_EX(InD3D11RHI->GetDevice()->CreateRenderTargetView(pD3DTexture, &RTVDesc, RenderTargetView.GetInitReference()), InD3D11RHI->GetDevice());
				RenderTargetViews.Add(RenderTargetView);
			}
		}

		TRefCountPtr<ID3D11ShaderResourceView> ShaderResourceView;

		// Create a shader resource view for the texture.
		if (InFlags & TexCreate_ShaderResource)
		{
			// NOTE(review): unlike the RTV above, the SRV dimension is not
			// switched to TEXTURE2DMS when SampleDesc.Count > 1 — confirm
			// multisampled chains are never requested as shader resources.
			D3D11_SRV_DIMENSION ShaderResourceViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
			D3D11_SHADER_RESOURCE_VIEW_DESC SRVDesc;
			// Zero the desc before filling it (parity with RTVDesc above):
			// avoids passing indeterminate padding/union bytes to D3D11.
			FMemory::Memzero(&SRVDesc, sizeof(SRVDesc));
			SRVDesc.Format = PlatformShaderResourceFormat;
			SRVDesc.ViewDimension = ShaderResourceViewDimension;
			SRVDesc.Texture2D.MostDetailedMip = 0;
			SRVDesc.Texture2D.MipLevels = InDsDesc.MipLevels;

			VERIFYD3D11RESULT_EX(InD3D11RHI->GetDevice()->CreateShaderResourceView(pD3DTexture, &SRVDesc, ShaderResourceView.GetInitReference()), InD3D11RHI->GetDevice());

			check(IsValidRef(ShaderResourceView));
		}

		NewTextureSet->AddTexture(pD3DTexture, ShaderResourceView, &RenderTargetViews);
	}

	if (InFlags & TexCreate_RenderTargetable)
	{
		NewTextureSet->SetCurrentGPUAccess(EResourceTransitionAccess::EWritable);
	}
	NewTextureSet->TextureSet = InTextureSet;
	NewTextureSet->InitWithCurrentElement(0);
	return NewTextureSet;
}