Example #1
void OculusWindow::finish_frame() {

    ovr_CommitTextureSwapChain(hmd_session_, texture_swap_chain_);

    ovrLayerHeader* layers = &color_layer_.Header;
    ovrResult       result = ovr_SubmitFrame(hmd_session_, framecount_, nullptr, &layers, 1);

    if (!OVR_SUCCESS(result)) {
        gua::Logger::LOG_WARNING << "Failed to submit frame to the Oculus Rift.\n";
    }

    GlfwWindow::finish_frame();
}
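When a submit fails, LibOVR can report why. A minimal sketch of pulling the details with ovr_GetLastErrorInfo; the helper name and the logger usage simply mirror this example and are illustrative:

void log_last_ovr_error() {
    ovrErrorInfo errorInfo;
    ovr_GetLastErrorInfo(&errorInfo); // fills in Result plus a human-readable ErrorString
    gua::Logger::LOG_WARNING << "LibOVR error: " << errorInfo.ErrorString << "\n";
}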
Example #2
File: main.cpp Project: GainLee/OculusSDK
//-------------------------------------------------------------------------------------
int WINAPI WinMain(HINSTANCE hinst, HINSTANCE, LPSTR, int)
{
    // Initialize LibOVR and the Rift
    ovrResult result = ovr_Initialize(nullptr);
    VALIDATE(OVR_SUCCESS(result), "Failed to initialize libOVR.");

    VALIDATE(DIRECTX.InitWindow(hinst, L"Oculus Room Tiny (DX11)"), "Failed to open window.");

    DIRECTX.Run(MainLoop);

    ovr_Shutdown();
    return(0);
}
Example #3
FOculusInput::FOculusInput( const TSharedRef< FGenericApplicationMessageHandler >& InMessageHandler )
	: MessageHandler( InMessageHandler )
	, ControllerPairs()
	, TriggerThreshold(0.8f)
{
	// Initializes LibOVR. 
	ovrInitParams initParams;
	FMemory::Memset(initParams, 0);
	initParams.Flags = ovrInit_RequestVersion;
	initParams.RequestedMinorVersion = OVR_MINOR_VERSION;
#if !UE_BUILD_SHIPPING
//	initParams.LogCallback = OvrLogCallback;
#endif

	ovrResult initStatus = ovr_Initialize(&initParams);
	if (!OVR_SUCCESS(initStatus) && initStatus == ovrError_LibLoad)
	{
		// fatal errors: can't load library
 		UE_LOG(LogOcInput, Log, TEXT("Can't find Oculus library %s: is proper Runtime installed? Version: %s"),
 			TEXT(OVR_FILE_DESCRIPTION_STRING), TEXT(OVR_VERSION_STRING));
		return;
	}

	FOculusTouchControllerPair& ControllerPair = *new(ControllerPairs) FOculusTouchControllerPair();

	// @todo: Unreal controller index should be assigned to us by the engine to ensure we don't contest with other devices
	ControllerPair.UnrealControllerIndex = 0; //???? NextUnrealControllerIndex++;

	// Load the config, even if we failed to initialize a controller
	LoadConfig();

	IModularFeatures::Get().RegisterModularFeature( GetModularFeatureName(), this );
	GEngine->MotionControllerDevices.AddUnique(this);

	// Register the FKeys
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Left_Thumbstick, LOCTEXT("OculusTouch_Left_Thumbstick", "Oculus Touch (L) Thumbstick CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Left_FaceButton1, LOCTEXT("OculusTouch_Left_FaceButton1", "Oculus Touch (L) X Button CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Left_Trigger, LOCTEXT("OculusTouch_Left_Trigger", "Oculus Touch (L) Trigger CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Left_FaceButton2, LOCTEXT("OculusTouch_Left_FaceButton2", "Oculus Touch (L) Y Button CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Left_IndexPointing, LOCTEXT("OculusTouch_Left_IndexPointing", "Oculus Touch (L) Pointing CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Left_ThumbUp, LOCTEXT("OculusTouch_Left_ThumbUp", "Oculus Touch (L) Thumb Up CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));

	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Right_Thumbstick, LOCTEXT("OculusTouch_Right_Thumbstick", "Oculus Touch (R) Thumbstick CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Right_FaceButton1, LOCTEXT("OculusTouch_Right_FaceButton1", "Oculus Touch (R) A Button CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Right_Trigger, LOCTEXT("OculusTouch_Right_Trigger", "Oculus Touch (R) Trigger CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Right_FaceButton2, LOCTEXT("OculusTouch_Right_FaceButton2", "Oculus Touch (R) B Button CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Right_IndexPointing, LOCTEXT("OculusTouch_Right_IndexPointing", "Oculus Touch (R) Pointing CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));
	EKeys::AddKey(FKeyDetails(FOculusTouchCapacitiveKey::OculusTouch_Right_ThumbUp, LOCTEXT("OculusTouch_Right_ThumbUp", "Oculus Touch (R) Thumb Up CapTouch"), FKeyDetails::GamepadKey | FKeyDetails::FloatAxis));

	UE_LOG(LogOcInput, Log, TEXT("OculusInput is initialized. Init status %d. Runtime version: %s"), int(initStatus), *FString(ANSI_TO_TCHAR(ovr_GetVersionString())));
}
Example #4
File: main.cpp Project: jherico/OculusSDK
void GuardianSystemDemo::Render()
{
    // Get current eye pose for rendering
    double eyePoseTime = 0;
    ovrPosef eyePose[ovrEye_Count] = {};
    ovr_GetEyePoses(mSession, mFrameIndex, ovrTrue, mHmdToEyeOffset, eyePose, &eyePoseTime);

    // Render each eye
    for (int i = 0; i < ovrEye_Count; ++i) {
        int renderTargetIndex = 0;
        ovr_GetTextureSwapChainCurrentIndex(mSession, mTextureChain[i], &renderTargetIndex);
        ID3D11RenderTargetView* renderTargetView = mEyeRenderTargets[i][renderTargetIndex];
        ID3D11DepthStencilView* depthTargetView = mEyeDepthTarget[i];

        // Clear and set render/depth target and viewport
        DIRECTX.SetAndClearRenderTarget(renderTargetView, depthTargetView, 0.2f, 0.2f, 0.2f, 1.0f);
        DIRECTX.SetViewport((float)mEyeRenderViewport[i].Pos.x, (float)mEyeRenderViewport[i].Pos.y, 
            (float)mEyeRenderViewport[i].Size.w, (float)mEyeRenderViewport[i].Size.h);

        // Eye
        XMVECTOR eyeRot = XMVectorSet(eyePose[i].Orientation.x, eyePose[i].Orientation.y, 
            eyePose[i].Orientation.z, eyePose[i].Orientation.w);
        XMVECTOR eyePos = XMVectorSet(eyePose[i].Position.x, eyePose[i].Position.y, eyePose[i].Position.z, 0);
        XMVECTOR eyeForward = XMVector3Rotate(XMVectorSet(0, 0, -1, 0), eyeRot);

        // Matrices
        XMMATRIX viewMat = XMMatrixLookAtRH(eyePos, XMVectorAdd(eyePos, eyeForward), 
            XMVector3Rotate(XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f), eyeRot));
        ovrMatrix4f proj = ovrMatrix4f_Projection(mEyeRenderLayer.Fov[i], 0.001f, 1000.0f, ovrProjection_None);
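        // ovrMatrix4f_Projection uses the column-vector convention; the transpose below converts it to the row-vector convention produced by XMMatrixLookAtRH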
        XMMATRIX projMat = XMMatrixTranspose(XMMATRIX(&proj.M[0][0]));
        XMMATRIX viewProjMat = XMMatrixMultiply(viewMat, projMat);

        // Render and commit to swap chain
        mDynamicScene.Render(&viewProjMat, 1.0f, 1.0f, 1.0f, 1.0f, true);
        ovr_CommitTextureSwapChain(mSession, mTextureChain[i]);

        // Update eye layer
        mEyeRenderLayer.ColorTexture[i] = mTextureChain[i];
        mEyeRenderLayer.RenderPose[i] = eyePose[i];
        mEyeRenderLayer.SensorSampleTime = eyePoseTime;
    }

    // Submit frames
    ovrLayerHeader* layers = &mEyeRenderLayer.Header;
    ovrResult result = ovr_SubmitFrame(mSession, mFrameIndex++, nullptr, &layers, 1);
    if (!OVR_SUCCESS(result)) {
        printf("ovr_SubmitFrame failed"); exit(-1);
    }
}
Example #5
File: OculusOVR.cpp Project: bjth/xenko
	DLL_EXPORT_API void xnOvrGetStatus(xnOvrSession* session, xnOvrSessionStatus* statusOut)
	{
		ovrSessionStatus status;
		if (!OVR_SUCCESS(ovr_GetSessionStatus(session->Session, &status)))
		{
			return;
		}

		statusOut->IsVisible = status.IsVisible;
		statusOut->HmdPresent = status.HmdPresent;
		statusOut->HmdMounted = status.HmdMounted;
		statusOut->DisplayLost = status.DisplayLost;
		statusOut->ShouldQuit = status.ShouldQuit;
		statusOut->ShouldRecenter = status.ShouldRecenter;
	}
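A sketch of how a caller might react to these flags each frame, assuming an existing ovrSession named session:

	ovrSessionStatus status;
	if (OVR_SUCCESS(ovr_GetSessionStatus(session, &status)))
	{
		if (status.ShouldRecenter)
			ovr_RecenterTrackingOrigin(session); // honor the user's recenter request
		if (status.ShouldQuit)
			; // begin an orderly, application-specific shutdown here
	}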
Example #6
File: hmd_ovr.cpp Project: lealzhan/bgfx
	bool VRImplOVR::init()
	{
		if (NULL != g_platformData.session)
		{
			return true;
		}

		ovrResult initialized = ovr_Initialize(NULL);
		if (!OVR_SUCCESS(initialized))
		{
			BX_TRACE("Unable to initialize OVR runtime.");
			return false;
		}

		return true;
	}
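A minimal sketch of requesting a specific runtime version instead of passing NULL, using the ovrInitParams fields declared in OVR_CAPI.h:

	ovrInitParams params = {};
	params.Flags = ovrInit_RequestVersion;
	params.RequestedMinorVersion = OVR_MINOR_VERSION;
	params.ConnectionTimeoutMS = 0; // 0 selects the default timeout

	if (!OVR_SUCCESS(ovr_Initialize(&params)))
	{
		BX_TRACE("Unable to initialize OVR runtime.");
	}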
Example #7
File: OculusOVR.cpp Project: bjth/xenko
	DLL_EXPORT_API xnOvrQuadLayer* xnOvrCreateQuadLayerTexturesDx(xnOvrSession* session, void* dxDevice, int* outTextureCount, int width, int height, int mipLevels, int sampleCount)
	{
		auto layer = new xnOvrQuadLayer;

		ovrTextureSwapChainDesc texDesc = {};
		texDesc.Type = ovrTexture_2D;
		texDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
		texDesc.ArraySize = 1;
		texDesc.Width = width;
		texDesc.Height = height;
		texDesc.MipLevels = mipLevels;
		texDesc.SampleCount = sampleCount;
		texDesc.StaticImage = ovrFalse;
		texDesc.MiscFlags = ovrTextureMisc_None;
		texDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

		if (!OVR_SUCCESS(ovr_CreateTextureSwapChainDX(session->Session, dxDevice, &texDesc, &layer->SwapChain)))
		{
			delete layer;
			return NULL;
		}

		auto count = 0;
		ovr_GetTextureSwapChainLength(session->Session, layer->SwapChain, &count);
		*outTextureCount = count;

		layer->Layer.Header.Type = ovrLayerType_Quad;
		layer->Layer.Header.Flags = ovrLayerFlag_HighQuality;
		layer->Layer.ColorTexture = layer->SwapChain;
		layer->Layer.Viewport.Pos.x = 0;
		layer->Layer.Viewport.Pos.y = 0;
		layer->Layer.Viewport.Size.w = width;
		layer->Layer.Viewport.Size.h = height;
		layer->Layer.QuadPoseCenter.Orientation.x = 0;
		layer->Layer.QuadPoseCenter.Orientation.y = 0;
		layer->Layer.QuadPoseCenter.Orientation.z = 0;
		layer->Layer.QuadPoseCenter.Orientation.w = 1;
		layer->Layer.QuadPoseCenter.Position.x = 0;
		layer->Layer.QuadPoseCenter.Position.y = 0;
		layer->Layer.QuadPoseCenter.Position.z = -1;
		layer->Layer.QuadSize.x = 2;
		layer->Layer.QuadSize.y = 2;

		return layer;
	}
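A sketch of how such a quad layer might later be committed and submitted, assuming a valid session and the layer returned above:

	ovr_CommitTextureSwapChain(session->Session, layer->SwapChain);
	ovrLayerHeader* layers = &layer->Layer.Header;
	long long frameIndex = 0; // in a real app, maintained and incremented by the caller
	ovrResult result = ovr_SubmitFrame(session->Session, frameIndex, nullptr, &layers, 1);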
Example #8
FOculusInput::FOculusInput( const TSharedRef< FGenericApplicationMessageHandler >& InMessageHandler )
	: MessageHandler( InMessageHandler )
	, ControllerPairs()
{
	PreInit(); // @TODO: call it sooner to avoid 'Warning InputKey Event specifies invalid' when loading a map using the custom keys

	// take care of backward compatibility of Remote with Gamepad 
	if (bRemoteKeysMappedToGamepad)
	{
		Remote.ReinitButtonsForGamepadCompat();
	}

	// Only initialize plug-in when not running a dedicated server, and Oculus service is running
	if(!IsRunningDedicatedServer() && ovr_Detect(0).IsOculusServiceRunning)
	{
		// Initializes LibOVR. 
		ovrInitParams initParams;
		FMemory::Memset(initParams, 0);
		initParams.Flags = ovrInit_RequestVersion;
		initParams.RequestedMinorVersion = OVR_MINOR_VERSION;
#if !UE_BUILD_SHIPPING
//		initParams.LogCallback = OvrLogCallback;
#endif

		ovrResult initStatus = ovr_Initialize(&initParams);
		if (!OVR_SUCCESS(initStatus) && initStatus == ovrError_LibLoad)
		{
			// fatal errors: can't load library
 			UE_LOG(LogOcInput, Log, TEXT("Can't find Oculus library %s: is proper Runtime installed? Version: %s"),
 				TEXT(OVR_FILE_DESCRIPTION_STRING), TEXT(OVR_VERSION_STRING));
			return;
		}

		FOculusTouchControllerPair& ControllerPair = *new(ControllerPairs) FOculusTouchControllerPair();

		// @todo: Unreal controller index should be assigned to us by the engine to ensure we don't contest with other devices
		ControllerPair.UnrealControllerIndex = 0; //???? NextUnrealControllerIndex++;

		IModularFeatures::Get().RegisterModularFeature( GetModularFeatureName(), this );
		GEngine->MotionControllerDevices.AddUnique(this);

		UE_LOG(LogOcInput, Log, TEXT("OculusInput is initialized. Init status %d. Runtime version: %s"), int(initStatus), *FString(ANSI_TO_TCHAR(ovr_GetVersionString())));
	}
}
Example #9
File: OculusOVR.cpp Project: bjth/xenko
	DLL_EXPORT_API xnOvrSession* xnOvrCreateSessionDx(int64_t* luidOut)
	{
		ovrSession session;
		ovrGraphicsLuid luid;
		ovrResult result = ovr_Create(&session, &luid);

		bool success = OVR_SUCCESS(result);
		if(success)
		{
			auto sessionOut = new xnOvrSession();
			sessionOut->Session = session;
			sessionOut->SwapChain = NULL;

			*luidOut = *((int64_t*)luid.Reserved);
			return sessionOut;
		}

		return NULL;
	}
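The LUID written to luidOut identifies the GPU the headset is attached to, and the D3D device should be created on that adapter. A sketch, assuming an existing IDXGIFactory* named factory:

	IDXGIAdapter* adapter = nullptr;
	for (UINT i = 0; factory->EnumAdapters(i, &adapter) != DXGI_ERROR_NOT_FOUND; ++i)
	{
		DXGI_ADAPTER_DESC desc;
		adapter->GetDesc(&desc);
		if (memcmp(&desc.AdapterLuid, &luid, sizeof(LUID)) == 0)
			break; // this is the adapter the HMD is connected to
		adapter->Release();
		adapter = nullptr;
	}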
Example #10
void handleOVREvents() {
    if (!session) {
        return;
    }

    ovrSessionStatus status;
    if (!OVR_SUCCESS(ovr_GetSessionStatus(session, &status))) {
        return;
    }

    _quitRequested = status.ShouldQuit;
    _reorientRequested = status.ShouldRecenter;

#ifdef OCULUS_APP_ID

    if (qApp->property(hifi::properties::OCULUS_STORE).toBool()) {
        // pop messages to see if we got a return for an entitlement check
        ovrMessageHandle message = ovr_PopMessage();

        while (message) {
            switch (ovr_Message_GetType(message)) {
                case ovrMessage_Entitlement_GetIsViewerEntitled: {
                    if (!ovr_Message_IsError(message)) {
                        // this viewer is entitled, no need to flag anything
                        qCDebug(oculus) << "Oculus Platform entitlement check succeeded, proceeding normally";
                    } else {
                        // we failed the entitlement check, set our flag so the app can stop
                        qCDebug(oculus) << "Oculus Platform entitlement check failed, app will now quit" << OCULUS_APP_ID;
                        _quitRequested = true;
                    }
                }
            }

            // free the message handle to cleanup and not leak
            ovr_FreeMessage(message);

            // pop the next message to check, if there is one
            message = ovr_PopMessage();
        }
    }
#endif
}
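The entitlement message handled above only arrives if a check was started earlier, typically once at startup. A sketch, assuming the Oculus Platform SDK has already been initialized with the app ID:

    ovr_Entitlement_GetIsViewerEntitled(); // queues the async check whose reply ovr_PopMessage returns later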
Example #11
int main()
{
	// Register a callback to clean up when the process exits.
    atexit(Shutdown);

    while (true)
    {
		// Retry initialization each frame until it succeeds. It will early-out thereafter.
		Initialize();

		// Sleep 10 ms each iteration to keep CPU usage low.
        Sleep(10);

		// Handle button input from LibOVR.

        ovrInputState inputState;
        memset(&inputState, 0, sizeof(ovrInputState));

        if (OVR_SUCCESS(ovr_GetInputState(g_session, g_controllerType, &inputState)))
			SendKeysForInputState(inputState);
    }
}
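A sketch of the kind of tests SendKeysForInputState (not shown in this example) might perform, using the standard ovrButton_* masks and per-hand trigger values from ovrInputState:

    if (inputState.Buttons & ovrButton_A)
        ; // the A button is held down
    if (inputState.IndexTrigger[ovrHand_Right] > 0.5f)
        ; // the right index trigger is pulled past the halfway point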
Example #12
File: OculusOVR.cpp Project: bjth/xenko
	DLL_EXPORT_API void xnOvrGetInputProperties(xnOvrSession* session, xnOvrInputProperties* properties)
	{
		ovrInputState state;
		auto res = ovr_GetInputState(session->Session, ovrControllerType_Touch, &state);
		if(OVR_SUCCESS(res))
		{
			properties->Valid = true;
			properties->Buttons = state.Buttons;
			properties->Touches = state.Touches;
			properties->HandTriggerLeft = state.HandTrigger[0];
			properties->HandTriggerRight = state.HandTrigger[1];
			properties->IndexTriggerLeft = state.IndexTrigger[0];
			properties->IndexTriggerRight = state.IndexTrigger[1];
			properties->ThumbstickLeft[0] = state.Thumbstick[0].x;
			properties->ThumbstickLeft[1] = state.Thumbstick[0].y;
			properties->ThumbstickRight[0] = state.Thumbstick[1].x;
			properties->ThumbstickRight[1] = state.Thumbstick[1].y;
		}
		else
		{
			properties->Valid = false;
		}
	}
Example #13
File: main.cpp Project: GainLee/OculusSDK
    bool Init(ovrSession session, int sizeW, int sizeH)
    {
        Session = session;

        ovrTextureSwapChainDesc desc = {};
        desc.Type = ovrTexture_2D;
        desc.ArraySize = 1;
        desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        desc.Width = sizeW;
        desc.Height = sizeH;
        desc.MipLevels = 1;
        desc.SampleCount = 1;
        desc.MiscFlags = ovrTextureMisc_DX_Typeless;
        desc.StaticImage = ovrFalse;
        desc.BindFlags = ovrTextureBind_DX_RenderTarget;

        ovrResult result = ovr_CreateTextureSwapChainDX(session, DIRECTX.Device, &desc, &TextureChain);
        if (!OVR_SUCCESS(result))
            return false;

        int textureCount = 0;
        ovr_GetTextureSwapChainLength(Session, TextureChain, &textureCount);
        VALIDATE(textureCount == TextureCount, "TextureCount mismatch.");

        for (int i = 0; i < TextureCount; ++i)
        {
            ID3D11Texture2D* tex = nullptr;
            ovr_GetTextureSwapChainBufferDX(Session, TextureChain, i, IID_PPV_ARGS(&tex));
            D3D11_RENDER_TARGET_VIEW_DESC rtvd = {};
            rtvd.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
            rtvd.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
            DIRECTX.Device->CreateRenderTargetView(tex, &rtvd, &TexRtv[i]);
            tex->Release();
        }

        return true;
    }
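Because the chain is created with ovrTextureMisc_DX_Typeless, each view chooses its own sRGB-vs-linear interpretation; the render target view above deliberately uses the non-sRGB DXGI_FORMAT_R8G8B8A8_UNORM. An sRGB view would instead be declared like this:

        D3D11_RENDER_TARGET_VIEW_DESC rtvdSrgb = {};
        rtvdSrgb.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
        rtvdSrgb.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;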
Example #14
int main(int argc, char **argv)
{
	// Initialize SDL2's context
	SDL_Init(SDL_INIT_VIDEO);
	// Initialize Oculus' context
	ovrResult result = ovr_Initialize(nullptr);
	if (OVR_FAILURE(result))
	{
		std::cout << "ERROR: Failed to initialize libOVR" << std::endl;
		SDL_Quit();
		return -1;
	}
	
	ovrSession  session;
	ovrGraphicsLuid luid;
	// Connect to the Oculus headset
	result = ovr_Create(&session, &luid);
	if (OVR_FAILURE(result))
	{
		std::cout << "ERROR: Oculus Rift not detected" << std::endl;
		ovr_Shutdown();
		SDL_Quit();
		return -1;
	}
	
	int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED;
	int winWidth = 1280;
	int winHeight = 720;
	Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN;
	// Create SDL2 Window
	SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags);
	// Create OpenGL context
	SDL_GLContext glContext = SDL_GL_CreateContext(window);
	// Initialize GLEW
	glewInit();
	// Turn off vsync to let the compositor do its magic
	SDL_GL_SetSwapInterval(0);

	// Initialize the ZED Camera
	sl::zed::Camera* zed = 0;
	zed = new sl::zed::Camera(sl::zed::HD720);
	sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0);
	int zedWidth = zed->getImageSize().width;
	int zedHeight = zed->getImageSize().height;
	if (zederr != sl::zed::SUCCESS)
	{
		std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl;
		ovr_Destroy(session);
		ovr_Shutdown();
		SDL_GL_DeleteContext(glContext);
		SDL_DestroyWindow(window);
		SDL_Quit();
		delete zed;
		return -1;
	}

	GLuint zedTextureID_L, zedTextureID_R;
	// Generate OpenGL texture for left images of the ZED camera
	glGenTextures(1, &zedTextureID_L);
	glBindTexture(GL_TEXTURE_2D, zedTextureID_L);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	// Generate OpenGL texture for right images of the ZED camera
	glGenTextures(1, &zedTextureID_R);
	glBindTexture(GL_TEXTURE_2D, zedTextureID_R);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glBindTexture(GL_TEXTURE_2D, 0);

#if OPENGL_GPU_INTEROP
	cudaGraphicsResource* cimg_L;
	cudaGraphicsResource* cimg_R;
	cudaError_t errL, errR;
	errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
	errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
	if (errL != cudaSuccess || errR != cudaSuccess)
	{
		std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl;
	}
#endif

	ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);
	// Get the texture sizes of Oculus eyes
	ovrSizei textureSize0 = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
	ovrSizei textureSize1 = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
	// Compute the final size of the render buffer
	ovrSizei bufferSize;
	bufferSize.w = textureSize0.w + textureSize1.w;
	bufferSize.h = std::max(textureSize0.h, textureSize1.h);
	// Initialize OpenGL swap textures to render
	ovrTextureSwapChain textureChain = nullptr;
	// Description of the swap chain
	ovrTextureSwapChainDesc descTextureSwap = {};
	descTextureSwap.Type = ovrTexture_2D;
	descTextureSwap.ArraySize = 1;
	descTextureSwap.Width = bufferSize.w;
	descTextureSwap.Height = bufferSize.h;
	descTextureSwap.MipLevels = 1;
	descTextureSwap.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
	descTextureSwap.SampleCount = 1;
	descTextureSwap.StaticImage = ovrFalse;
	// Create the OpenGL texture swap chain
	result = ovr_CreateTextureSwapChainGL(session, &descTextureSwap, &textureChain);

	int length = 0;
	ovr_GetTextureSwapChainLength(session, textureChain, &length);
	
	if (OVR_SUCCESS(result))
	{
		for (int i = 0; i < length; ++i)
		{
			GLuint chainTexId;
			ovr_GetTextureSwapChainBufferGL(session, textureChain, i, &chainTexId);
			glBindTexture(GL_TEXTURE_2D, chainTexId);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		}
	}
	else
	{
		std::cout << "ERROR: failed creating swap texture" << std::endl;
		ovr_Destroy(session);
		ovr_Shutdown();
		SDL_GL_DeleteContext(glContext);
		SDL_DestroyWindow(window);
		SDL_Quit();
		delete zed;
		return -1;
	}
	// Generate frame buffer to render
	GLuint fboID;
	glGenFramebuffers(1, &fboID);
	// Generate depth buffer of the frame buffer
	GLuint depthBuffID;
	glGenTextures(1, &depthBuffID);
	glBindTexture(GL_TEXTURE_2D, depthBuffID);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	GLenum internalFormat = GL_DEPTH_COMPONENT24;
	GLenum type = GL_UNSIGNED_INT;
	glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL);

	// Create a mirror texture to display the render result in the SDL2 window
	ovrMirrorTextureDesc descMirrorTexture;
	memset(&descMirrorTexture, 0, sizeof(descMirrorTexture));
	descMirrorTexture.Width = winWidth;
	descMirrorTexture.Height = winHeight;
	descMirrorTexture.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;

	ovrMirrorTexture mirrorTexture = nullptr;
	result = ovr_CreateMirrorTextureGL(session, &descMirrorTexture, &mirrorTexture);
	if (!OVR_SUCCESS(result))
	{
		std::cout << "ERROR: Failed to create mirror texture" << std::endl;
	}
	GLuint mirrorTextureId;
	ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTextureId);

	GLuint mirrorFBOID;
	glGenFramebuffers(1, &mirrorFBOID);
	glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
	glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
	glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
	glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
	// Frame index used by the compositor
	// it needs to be updated each new frame
	long long frameIndex = 0;

	// FloorLevel will give tracking poses where the floor height is 0
	ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel);

	// Initialize a default Pose
	ovrPosef eyeRenderPose[2];

	// Get the render description of the left and right "eyes" of the Oculus headset
	ovrEyeRenderDesc eyeRenderDesc[2];
	eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
	eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
	// Get the Oculus view scale description
	ovrVector3f hmdToEyeOffset[2];
	double sensorSampleTime;

	// Create and compile the shader's sources
	Shader shader(OVR_ZED_VS, OVR_ZED_FS);

	// Compute the ZED image field of view with the ZED parameters
	float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx *2.f)) * 2.f;
	// Compute the Horizontal Oculus' field of view with its parameters
	float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan));
	// Compute the useful part of the ZED image
	unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH;
	// Compute the size of the final image displayed in the headset with the ZED image's aspect-ratio kept
	unsigned int widthFinal = bufferSize.w / 2;
	float heightGL = 1.f;
	float widthGL = 1.f;
	if (usefulWidth > 0.f)
	{
		unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth;
		// Convert this size to OpenGL viewport's frame's coordinates
		heightGL = (heightFinal) / (float)(bufferSize.h);
		widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal);
	}
	else
	{
		std::cout << "WARNING: ZED parameters got wrong values."
			"Default vertical and horizontal FOV are used.\n"
			"Check your calibration file or check if your ZED is not too close to a surface or an object."
			<< std::endl;
	}

	// Compute the Vertical Oculus' field of view with its parameters
	float ovrFovV = (atanf(hmdDesc.DefaultEyeFov[0].UpTan) + atanf(hmdDesc.DefaultEyeFov[0].DownTan));

	// Compute the center of the optical lenses of the headset
	float offsetLensCenterX = ((atanf(hmdDesc.DefaultEyeFov[0].LeftTan)) / ovrFovH) * 2.f - 1.f;
	float offsetLensCenterY = ((atanf(hmdDesc.DefaultEyeFov[0].UpTan)) / ovrFovV) * 2.f - 1.f;


	// Create a rectangle with the computed coordinates and push it in GPU memory.
	struct GLScreenCoordinates
	{
		float left, up, right, down;
	} screenCoord;
	screenCoord.up    = heightGL + offsetLensCenterY;
	screenCoord.down  = heightGL - offsetLensCenterY;
	screenCoord.right = widthGL + offsetLensCenterX;
	screenCoord.left  = widthGL - offsetLensCenterX;

	float rectVertices[12] = { -screenCoord.left,  -screenCoord.up,   0,
								screenCoord.right, -screenCoord.up,   0, 
								screenCoord.right,  screenCoord.down, 0, 
							   -screenCoord.left,   screenCoord.down, 0 };
	GLuint rectVBO[3];
	glGenBuffers(1, &rectVBO[0]);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
	glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW);

	float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 };
	glGenBuffers(1, &rectVBO[1]);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
	glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW);

	unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 };
	glGenBuffers(1, &rectVBO[2]);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
	glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW);

	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	
	// Initialize hit value
	float hit = 0.02f;
	// Initialize a boolean that will be used to stop the application’s loop and another one to pause/unpause rendering
	bool end = false;
	bool refresh = true;
	// SDL variable that will be used to store input events
	SDL_Event events;
	// Initialize time variables. They will be used to limit the number of frames rendered per second.
	// Frame counter
	unsigned int riftc = 0, zedc = 1;
	// Chronometer
	unsigned int rifttime = 0, zedtime = 0, zedFPS = 0;
	int time1 = 0, timePerFrame = 0;
	int frameRate = (int)(1000 / MAX_FPS);

	// This boolean is used to test if the application is focused
	bool isVisible = true;

	// Enable the shader
	glUseProgram(shader.getProgramId());
	// Bind the Vertex Buffer Objects of the rectangle that displays ZED images
	// vertices
	glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
	glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0);
	// indices
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
	// texture coordinates
	glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
	glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0);

	// Main loop
	while (!end)
	{
		// Compute the time used to render the previous frame
		timePerFrame = SDL_GetTicks() - time1;
		// If the previous frame has been rendered too fast
		if (timePerFrame < frameRate)
		{
			// Pause the loop to have a max FPS equal to MAX_FPS
			SDL_Delay(frameRate - timePerFrame);
			timePerFrame = frameRate;
		}
		// Increment the ZED chronometer
		zedtime += timePerFrame;
		// If ZED chronometer reached 1 second
		if (zedtime > 1000)
		{
			zedFPS = zedc;
			zedc = 0;
			zedtime = 0;
		}
		// Increment the Rift chronometer and the Rift frame counter
		rifttime += timePerFrame;
		riftc++;
		// If Rift chronometer reached 200 milliseconds
		if (rifttime > 200)
		{
			// Display FPS
			std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS;
			// Reset Rift chronometer
			rifttime = 0;
			// Reset Rift frame counter
			riftc = 0;			
		}
		// Start frame chronometer
		time1 = SDL_GetTicks();
		
		// While there are pending events to process
		while (SDL_PollEvent(&events))
		{
			// If a key is released
			if (events.type == SDL_KEYUP)
			{
				// If Q is released, quit the application
				if (events.key.keysym.scancode == SDL_SCANCODE_Q)
					end = true;
				// If R is released, reset the hit value
				else if (events.key.keysym.scancode == SDL_SCANCODE_R)
					hit = 0.0f;
				// If C is released, pause/unpause rendering
				else if (events.key.keysym.scancode == SDL_SCANCODE_C)
					refresh = !refresh;
			}
			// If the mouse wheel is used
			if (events.type == SDL_MOUSEWHEEL)
			{
				// Increase or decrease hit value
				float s;
				events.wheel.y > 0 ? s = 1.0f : s = -1.0f;
				hit += 0.005f * s;
			}
		}

		// Get texture swap index where we must draw our frame
		GLuint curTexId;
		int curIndex;
		ovr_GetTextureSwapChainCurrentIndex(session, textureChain, &curIndex);
		ovr_GetTextureSwapChainBufferGL(session, textureChain, curIndex, &curTexId);

		// Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime.
		eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
		eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
		hmdToEyeOffset[0] = eyeRenderDesc[0].HmdToEyeOffset;
		hmdToEyeOffset[1] = eyeRenderDesc[1].HmdToEyeOffset;
		// Get eye poses, feeding in correct IPD offset
		ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyeRenderPose, &sensorSampleTime);

		// If the application is focused
		if (isVisible)
		{
			// If successful grab a new ZED image
			if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false))
			{
				// Update the ZED frame counter
				zedc++;
				if (refresh)
				{
#if OPENGL_GPU_INTEROP
					sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT);
					cudaArray_t arrIm;
					cudaGraphicsMapResources(1, &cimg_L, 0);
					cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0);
					cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice);
					cudaGraphicsUnmapResources(1, &cimg_L, 0);

					m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT);
					cudaGraphicsMapResources(1, &cimg_R, 0);
					cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0);
					cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); // *4 = 4 channels * 1 bytes (uint)
					cudaGraphicsUnmapResources(1, &cimg_R, 0);
#endif

					// Bind the frame buffer
					glBindFramebuffer(GL_FRAMEBUFFER, fboID);
					// Set its color layer 0 as the current swap texture
					glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);
					// Set its depth layer as our depth buffer
					glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0);
					// Clear the frame buffer
					glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
					glClearColor(0, 0, 0, 1);

					// Render for each Oculus eye the equivalent ZED image
					for (int eye = 0; eye < 2; eye++)
					{
						// Set the left or right vertical half of the buffer as the viewport
						glViewport(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
						// Bind the left or right ZED image
						glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R);
#if !OPENGL_GPU_INTEROP
						glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data);
#endif
						// Bind the hit value
						glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit);
						// Bind the isLeft value
						glUniform1ui(glGetUniformLocation(shader.getProgramId(), "isLeft"), eye == ovrEye_Left ? 1U : 0U);
						// Draw the ZED image
						glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
					}

					// Avoids an error when calling SetAndClearRenderSurface during next iteration.
					// Without this, during the next while loop iteration SetAndClearRenderSurface
					// would bind a framebuffer with an invalid COLOR_ATTACHMENT0 because the texture ID
					// associated with COLOR_ATTACHMENT0 had been unlocked by calling wglDXUnlockObjectsNV.
					glBindFramebuffer(GL_FRAMEBUFFER, fboID);
					glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
					glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
					// Commit changes to the textures so they get picked up this frame
					ovr_CommitTextureSwapChain(session, textureChain);
				}

				// Do not forget to increment the frameIndex!
				frameIndex++;
			}
		}
		/*
		Note: Even if we don't ask to refresh the framebuffer, or if Camera::grab()
		      doesn't catch a new frame, we still have to submit an image to the Rift:
		      it needs a steady 75 Hz refresh. Otherwise there will be jumps, black
		      frames and/or glitches in the headset.
		*/

		ovrLayerEyeFov ld;
		ld.Header.Type = ovrLayerType_EyeFov;
		// Tell the Oculus compositor that our texture origin is at the bottom left (because OpenGL)
		ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;
		// Set the Oculus layer eye field of view for each view
		for (int eye = 0; eye < 2; ++eye)
		{
			// Set the color texture as the current swap texture
			ld.ColorTexture[eye] = textureChain;
			// Set the viewport as the right or left vertical half part of the color texture
			ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
			// Set the field of view
			ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
			// Set the pose matrix
			ld.RenderPose[eye] = eyeRenderPose[eye];
		}

		ld.SensorSampleTime = sensorSampleTime;

		ovrLayerHeader* layers = &ld.Header;
		// Submit the frame to the Oculus compositor
		// which will display the frame in the Oculus headset
		result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
		
		if (!OVR_SUCCESS(result))
		{
			std::cout << "ERROR: failed to submit frame" << std::endl;
			glDeleteBuffers(3, rectVBO);
			ovr_DestroyTextureSwapChain(session, textureChain);
			ovr_DestroyMirrorTexture(session, mirrorTexture);
			ovr_Destroy(session);
			ovr_Shutdown();
			SDL_GL_DeleteContext(glContext);
			SDL_DestroyWindow(window);
			SDL_Quit();
			delete zed;
			return -1;
		}
		
		if (result == ovrSuccess && !isVisible)
		{
			std::cout << "The application is now shown in the headset." << std::endl;
		}
		isVisible = (result == ovrSuccess);

		// This is not strictly needed for this application, but it may be useful for a more advanced one
		ovrSessionStatus sessionStatus;
		ovr_GetSessionStatus(session, &sessionStatus);
		if (sessionStatus.ShouldRecenter)
		{
			std::cout << "Recenter Tracking asked by Session" << std::endl;
			ovr_RecenterTrackingOrigin(session);
		}

		// Copy the frame to the mirror buffer
		// which will be drawn in the SDL2 image
		glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
		glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
		GLint w = winWidth;
		GLint h = winHeight;
		glBlitFramebuffer(0, h, w, 0,
			0, 0, w, h,
			GL_COLOR_BUFFER_BIT, GL_NEAREST);
		glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
		// Swap the SDL2 window
		SDL_GL_SwapWindow(window);
	}
	
	// Disable all OpenGL buffer
	glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
	glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, 0);
	glUseProgram(0);
	glBindVertexArray(0);
	// Delete the Vertex Buffer Objects of the rectangle
	glDeleteBuffers(3, rectVBO);
	// Delete SDL, OpenGL, Oculus and ZED context
	ovr_DestroyTextureSwapChain(session, textureChain);
	ovr_DestroyMirrorTexture(session, mirrorTexture);
	ovr_Destroy(session);
	ovr_Shutdown();
	SDL_GL_DeleteContext(glContext);
	SDL_DestroyWindow(window);
	SDL_Quit();
	delete zed;
	// quit
	return 0;
}
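Note on the visibility handling above: OVR_SUCCESS() accepts any non-negative result, including success codes other than ovrSuccess. In particular, ovr_SubmitFrame can return ovrSuccess_NotVisible, which is why the sample compares against ovrSuccess directly:

	if (OVR_SUCCESS(result) && result != ovrSuccess)
	{
		// e.g. ovrSuccess_NotVisible: keep submitting frames, but expensive rendering can be skipped
	}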
Example #15
// return true to retry later (e.g. after display lost)
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device lost failures cleanly
    ovrMirrorTexture            mirrorTexture = nullptr;
    OculusEyeTexture*           pEyeRenderTexture[2] = { nullptr, nullptr };
    Scene*                      roomScene = nullptr; 
    Camera*                     mainCam = nullptr;
    ovrMirrorTextureDesc        mirrorDesc = {};

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    // Make the eye render buffers (caution if actual size < requested due to HW limits). 
    ovrRecti eyeRenderViewport[2];

    for (int eye = 0; eye < 2; ++eye)
    {
        ovrSizei idealSize = ovr_GetFovTextureSize(session, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
        pEyeRenderTexture[eye] = new OculusEyeTexture();
        if (!pEyeRenderTexture[eye]->Init(session, idealSize.w, idealSize.h, true))
        {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create eye texture.");
        }

        eyeRenderViewport[eye].Pos.x = 0;
        eyeRenderViewport[eye].Pos.y = 0;
        eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureChain)
        {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create texture.");
        }
    }

    // Create a mirror to see on the monitor.
    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    mirrorDesc.Width = DIRECTX.WinSizeW;
    mirrorDesc.Height = DIRECTX.WinSizeH;
    result = ovr_CreateMirrorTextureDX(session, DIRECTX.CommandQueue, &mirrorDesc, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        FATALERROR("Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);

    // Create camera
    mainCam = new Camera(XMVectorSet(0.0f, 1.6f, 5.0f, 0), XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    long long frameIndex = 0;

    bool drawMirror = true;

    DIRECTX.InitFrame(drawMirror);

    // Main loop
    while (DIRECTX.HandleMessages())
    {
        ovrSessionStatus sessionStatus;
        ovr_GetSessionStatus(session, &sessionStatus);
        if (sessionStatus.ShouldQuit)
        {
            // The application was asked to quit, so do not request a retry
            retryCreate = false;
            break;
        }
        if (sessionStatus.ShouldRecenter)
            ovr_RecenterTrackingOrigin(session);

        if (sessionStatus.IsVisible)
        {
            XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->GetRotVec());
            XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0),  mainCam->GetRotVec());
            XMVECTOR mainCamPos = mainCam->GetPosVec();
            XMVECTOR mainCamRot = mainCam->GetRotVec();
            if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])      mainCamPos = XMVectorAdd(     mainCamPos, forward);
            if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN])    mainCamPos = XMVectorSubtract(mainCamPos, forward);
            if (DIRECTX.Key['D'])                            mainCamPos = XMVectorAdd(     mainCamPos, right);
            if (DIRECTX.Key['A'])                            mainCamPos = XMVectorSubtract(mainCamPos, right);
            static float Yaw = 0;
            if (DIRECTX.Key[VK_LEFT])  mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
            if (DIRECTX.Key[VK_RIGHT]) mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

            mainCam->SetPosVec(mainCamPos);
            mainCam->SetRotVec(mainCamRot);

            // Animate the cube
            static float cubeClock = 0;
            roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

            // Get both eye poses simultaneously, with IPD offset already included. 
            ovrPosef    EyeRenderPose[2];
            ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
                                              eyeRenderDesc[1].HmdToEyeOffset };

            double sensorSampleTime;    // sensorSampleTime is fed into the layer later
            ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime);

            // Render Scene to Eye Buffers
            for (int eye = 0; eye < 2; ++eye)
            {
                DIRECTX.SetActiveContext(eye == 0 ? DrawContext_EyeRenderLeft : DrawContext_EyeRenderRight);

                DIRECTX.SetActiveEye(eye);

                CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(pEyeRenderTexture[eye]->GetD3DResource(),
                                                                                       D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
                                                                                       D3D12_RESOURCE_STATE_RENDER_TARGET);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->GetRtv(), pEyeRenderTexture[eye]->GetDsv());
                DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);
                                
                //Get the pose information in XM format
                XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
                                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                Camera finalCam(XMVectorAdd(mainCamPos, XMVector3Rotate(eyePos, mainCamRot)), XMQuaternionMultiply(eyeQuat, mainCamRot));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None);
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
                XMMATRIX prod = XMMatrixMultiply(view, proj);

                roomScene->Render(&prod, 1, 1, 1, 1, true);

                resBar = CD3DX12_RESOURCE_BARRIER::Transition(pEyeRenderTexture[eye]->GetD3DResource(),
                                                              D3D12_RESOURCE_STATE_RENDER_TARGET,
                                                              D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                // Commit rendering to the swap chain
                pEyeRenderTexture[eye]->Commit();

                // kick off eye render command lists before ovr_SubmitFrame()
                DIRECTX.SubmitCommandList(DIRECTX.ActiveContext);
            }

            // Initialize our single full screen Fov layer.
            ovrLayerEyeFov ld = {};
            ld.Header.Type = ovrLayerType_EyeFov;
            ld.Header.Flags = 0;

            for (int eye = 0; eye < 2; ++eye)
            {
                ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureChain;
                ld.Viewport[eye] = eyeRenderViewport[eye];
                ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
                ld.RenderPose[eye] = EyeRenderPose[eye];
                ld.SensorSampleTime = sensorSampleTime;
            }

            ovrLayerHeader* layers = &ld.Header;
            result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
            // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
            if (!OVR_SUCCESS(result))
                goto Done;
            
            frameIndex++;
        }
        
        if (drawMirror)
        {
            DIRECTX.SetActiveContext(DrawContext_Final);

            DIRECTX.SetViewport(0.0f, 0.0f, (float)hmdDesc.Resolution.w / 2, (float)hmdDesc.Resolution.h / 2);

            // Render mirror
            ID3D12Resource* mirrorTexRes = nullptr;
            ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&mirrorTexRes));

            //DIRECTX.SetAndClearRenderTarget(DIRECTX.CurrentFrameResources().SwapChainRtvHandle, nullptr, 1.0f, 0.5f, 0.0f, 1.0f);

            CD3DX12_RESOURCE_BARRIER preMirrorBlitBar[] =
            {
                CD3DX12_RESOURCE_BARRIER::Transition(DIRECTX.CurrentFrameResources().SwapChainBuffer, D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_DEST),
                CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes, D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE)
            };

            // Indicate that the back buffer will now be copied into
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(ARRAYSIZE(preMirrorBlitBar), preMirrorBlitBar);

            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->CopyResource(DIRECTX.CurrentFrameResources().SwapChainBuffer, mirrorTexRes);

            CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes,
                                                                                   D3D12_RESOURCE_STATE_COPY_SOURCE,
                                                                                   D3D12_RESOURCE_STATE_RENDER_TARGET);
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);
        }

        DIRECTX.SubmitCommandListAndPresent(drawMirror);
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(session, mirrorTexture);

    for (int eye = 0; eye < 2; ++eye)
    {
        delete pEyeRenderTexture[eye];
    }
    DIRECTX.ReleaseDevice();
    ovr_Destroy(session);

    // Retry on ovrError_DisplayLost
    return retryCreate || (result == ovrError_DisplayLost);
}
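A sketch of how a caller could drive this retry convention, assuming ovr_Initialize has already succeeded and a short pause between attempts is acceptable:

    while (MainLoop(true))
        Sleep(10); // e.g. after ovrError_DisplayLost, wait briefly and try again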
Example #16
OVR_PUBLIC_FUNCTION(ovrResult) ovrHmd_CreateSwapTextureSetD3D11(ovrHmd hmd,
	ID3D11Device* device,
	const D3D11_TEXTURE2D_DESC* desc,
	ovrSwapTextureSet** outTextureSet) {
	BOOST_LOG_TRIVIAL(trace) << "ovrHmd_CreateSwapTextureSetD3D11 format " << desc->Format << " samples " << desc->SampleDesc.Count << " bindflags " << desc->BindFlags << " miscflags " << desc->MiscFlags;

	D3D11_TEXTURE2D_DESC descClone;
	memcpy(&descClone, desc, sizeof(D3D11_TEXTURE2D_DESC));

	ovrTextureSwapChainDesc1_3 d;
	d.Type = ovrTexture_2D;
	d.ArraySize = desc->ArraySize;
	
	d.Format = getOVRFormat(desc->Format);

	if (d.Format == 0) {
		BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateSwapTextureSetD3D11 unknown format";
		return -1;
	}
	
	d.Width = desc->Width;
	d.Height = desc->Height;
	d.MipLevels = desc->MipLevels;
	d.SampleCount = desc->SampleDesc.Count;
	
	d.MiscFlags = 0;
	
	switch (d.Format) {
	case OVR_FORMAT_R8G8B8A8_UNORM_SRGB:	
	case OVR_FORMAT_B8G8R8A8_UNORM_SRGB:	
	case OVR_FORMAT_B8G8R8X8_UNORM_SRGB:
		d.MiscFlags |= ovrTextureMisc_DX_Typeless;		
		break;
	}

	if (getWrapperSettings()->srgbCorrectionEnabled) {
		switch (d.Format) {
		case OVR_FORMAT_R8G8B8A8_UNORM_SRGB:
			descClone.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
			break;
		case OVR_FORMAT_B8G8R8A8_UNORM_SRGB:
			descClone.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
			break;
		case OVR_FORMAT_B8G8R8X8_UNORM_SRGB:
			descClone.Format = DXGI_FORMAT_B8G8R8X8_UNORM;
			break;
		}
	}	
	
	bool makeShaderView = false;
	d.BindFlags = 0;
	/*if (desc->BindFlags & D3D11_BIND_RENDER_TARGET) {
		d.BindFlags |= ovrTextureBind_DX_RenderTarget;
	}
	if (desc->BindFlags & D3D11_BIND_UNORDERED_ACCESS) {
		d.BindFlags |= ovrTextureBind_DX_UnorderedAccess;
	}*/
	if (desc->BindFlags & D3D11_BIND_DEPTH_STENCIL) {
		d.MiscFlags |= ovrTextureMisc_DX_Typeless;
		d.BindFlags |= ovrTextureBind_DX_DepthStencil;
	}	
	if (desc->BindFlags & D3D11_BIND_SHADER_RESOURCE) {
		
		makeShaderView = true;
	}
	
	d.StaticImage = ovrFalse;

	ovrTextureSwapChainWrapper* chainwrapper = (ovrTextureSwapChainWrapper*)malloc(sizeof(ovrTextureSwapChainWrapper));
	device->GetImmediateContext(&chainwrapper->pContext);

	ovrResult result = ovr_CreateTextureSwapChainDX1_3((ovrSession1_3)hmd->Handle, (IUnknown*)device, &d, &chainwrapper->swapChain);

	if (!OVR_SUCCESS(result)) {
		BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateSwapTextureSetD3D11 could not create TextureSwapChain";
		return result;
	}
	
	ovrSwapTextureSet* ts = (ovrSwapTextureSet*)malloc(sizeof(ovrSwapTextureSet));

	setChain((ovrSession1_3)hmd->Handle, ts, chainwrapper);

	ovr_GetTextureSwapChainLength1_3((ovrSession1_3)hmd->Handle, chainwrapper->swapChain, &chainwrapper->textureCount);

	chainwrapper->textures = (ID3D11Texture2D**)calloc(chainwrapper->textureCount, sizeof(ID3D11Texture2D*));

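	// Note: the legacy ovrSwapTextureSet interface exposes a fixed pair of application-side textures here, independent of the 1.3 chain length retrieved above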
	ts->TextureCount = 2;
	ts->CurrentIndex = 0;	
	ts->Textures = (ovrTexture*)calloc(ts->TextureCount, sizeof(ovrD3D11Texture));

	for (int i = 0; i < chainwrapper->textureCount; ++i)
	{		
		result = ovr_GetTextureSwapChainBufferDX1_3((ovrSession1_3)hmd->Handle, chainwrapper->swapChain, i, IID_ID3D11Texture2D, (void**)&chainwrapper->textures[i]);
		if (!OVR_SUCCESS(result)) {
			BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateSwapTextureSetD3D11 could not allocate TextureSwapChainBuffer";
			return result;
		}
	}

	for (int i = 0; i < 2; i++) {
		ovrD3D11Texture* ovrtext = (ovrD3D11Texture*)&ts->Textures[i];

		HRESULT hr = device->CreateTexture2D(&descClone, nullptr, &ovrtext->D3D11.pTexture);

		if (hr < 0) {
			BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateSwapTextureSetD3D11 could not create texture";
			return ovrError_ServiceError;
		}

		if (makeShaderView) {
			HRESULT rs;

			D3D11_SHADER_RESOURCE_VIEW_DESC depthSrv;
			ZeroMemory(&depthSrv, sizeof(D3D11_SHADER_RESOURCE_VIEW_DESC));
			depthSrv.Format = getShaderResourceFormat(descClone.Format);
			depthSrv.ViewDimension = desc->SampleDesc.Count > 1 ? D3D11_SRV_DIMENSION_TEXTURE2DMS : D3D11_SRV_DIMENSION_TEXTURE2D;
			depthSrv.Texture2D.MostDetailedMip = 0;
			depthSrv.Texture2D.MipLevels = desc->MipLevels;
			
			rs = device->CreateShaderResourceView((ID3D11Resource*)ovrtext->D3D11.pTexture, &depthSrv, &(ovrtext->D3D11.pSRView));
			
			if (rs < 0) {
				BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateSwapTextureSetD3D11 could not create ShaderResourceView";
				return ovrError_ServiceError;
			}
		}

		ovrtext->D3D11.Header.API = ovrRenderAPI_D3D11;
		ovrtext->D3D11.Header.TextureSize.w = d.Width;
		ovrtext->D3D11.Header.TextureSize.h = d.Height;
	}
	
	*outTextureSet = ts;	

	return result;
}
Example #17
OVR_PUBLIC_FUNCTION(ovrResult) ovrHmd_CreateMirrorTextureD3D11(ovrHmd hmd,
	ID3D11Device* device,
	const D3D11_TEXTURE2D_DESC* desc,
	ovrTexture** outMirrorTexture) {
	BOOST_LOG_TRIVIAL(trace) << "ovrHmd_CreateMirrorTextureD3D11 format " << desc->Format << " samples " << desc->SampleDesc.Count << " bindflags " << desc->BindFlags << " miscflags " << desc->MiscFlags;

	ovrMirrorTextureDesc1_3 d;
	
	d.Format = getOVRFormat(desc->Format);

	d.Width = desc->Width;
	d.Height = desc->Height;

	d.MiscFlags = 0;

	switch (d.Format) {
	case OVR_FORMAT_R8G8B8A8_UNORM_SRGB:
	case OVR_FORMAT_B8G8R8A8_UNORM_SRGB:
	case OVR_FORMAT_B8G8R8X8_UNORM_SRGB:
		d.MiscFlags |= ovrTextureMisc_DX_Typeless;		
		break;
	}

	ovrMirrorTexture1_3* mirror = (ovrMirrorTexture1_3*)malloc(sizeof(ovrMirrorTexture1_3));

	ovrResult result = ovr_CreateMirrorTextureDX1_3((ovrSession1_3)hmd->Handle, (IUnknown*)device, &d, mirror);

	if (!OVR_SUCCESS(result)) {
		ovrErrorInfo1_3 info;
		ovr_GetLastErrorInfo1_3(&info);

		BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateMirrorTextureD3D11 could not allocate Mirrortexture:" << info.ErrorString;
		return result;
	}

	ovrD3D11Texture* ovrtext = (ovrD3D11Texture*)malloc(sizeof(ovrD3D11Texture));

	ID3D11Texture2D* texture = 0;	
	ovr_GetMirrorTextureBufferDX1_3((ovrSession1_3)hmd->Handle, *mirror, IID_ID3D11Texture2D, (void**)&texture);

	ovrtext->D3D11.pTexture = texture;

	if (desc->BindFlags & D3D11_BIND_SHADER_RESOURCE) {
		HRESULT rs;

		D3D11_SHADER_RESOURCE_VIEW_DESC depthSrv;
		ZeroMemory(&depthSrv, sizeof(D3D11_SHADER_RESOURCE_VIEW_DESC));
		depthSrv.Format = getShaderResourceFormat(desc->Format);
		depthSrv.ViewDimension = desc->SampleDesc.Count > 1 ? D3D11_SRV_DIMENSION_TEXTURE2DMS : D3D11_SRV_DIMENSION_TEXTURE2D;
		depthSrv.Texture2D.MostDetailedMip = 0;
		depthSrv.Texture2D.MipLevels = desc->MipLevels;

		rs = device->CreateShaderResourceView((ID3D11Resource*)ovrtext->D3D11.pTexture, &depthSrv, &(ovrtext->D3D11.pSRView));

		if (rs < 0) {
			BOOST_LOG_TRIVIAL(error) << "ovrHmd_CreateMirrorTextureD3D11 could not create ShaderResourceView";
			return ovrError_ServiceError;
		}
	}

	ovrtext->D3D11.Header.API = ovrRenderAPI_D3D11;
	ovrtext->D3D11.Header.TextureSize.w = d.Width;
	ovrtext->D3D11.Header.TextureSize.h = d.Height;

	*outMirrorTexture = (ovrTexture*)ovrtext;

	setMirror(mirror);

	return result;

}
Example #18
int main(int argc, char **argv)
{
	// Initialize SDL2's context
	SDL_Init(SDL_INIT_VIDEO);
	// Initialize Oculus' context
	ovrResult result = ovr_Initialize(nullptr);
	if (OVR_FAILURE(result))
	{
		std::cout << "ERROR: Failed to initialize libOVR" << std::endl;
		SDL_Quit();
		return -1;
	}
	
	ovrSession  hmd;
	ovrGraphicsLuid luid;
	// Connect to the Oculus headset
	result = ovr_Create(&hmd, &luid);
	if (OVR_FAILURE(result))
	{
		std::cout << "ERROR: Oculus Rift not detected" << std::endl;
		ovr_Shutdown();
		SDL_Quit();
		return -1;
	}
	
	int x = SDL_WINDOWPOS_CENTERED, y = SDL_WINDOWPOS_CENTERED;
	int winWidth = 1280;
	int winHeight = 720;
	Uint32 flags = SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN;
	// Create SDL2 Window
	SDL_Window* window = SDL_CreateWindow("OVR ZED App", x, y, winWidth, winHeight, flags);
	// Create OpenGL context
	SDL_GLContext glContext = SDL_GL_CreateContext(window);
	// Initialize GLEW
	glewInit();
	// Turn off vsync to let the compositor do its magic
	SDL_GL_SetSwapInterval(0);

	// Initialize the ZED Camera
	sl::zed::Camera* zed = 0;
	zed = new sl::zed::Camera(sl::zed::HD720);
	sl::zed::ERRCODE zederr = zed->init(sl::zed::MODE::PERFORMANCE, 0);
	if (zederr != sl::zed::SUCCESS)
	{
		std::cout << "ERROR: " << sl::zed::errcode2str(zederr) << std::endl;
		ovr_Destroy(hmd);
		ovr_Shutdown();
		SDL_GL_DeleteContext(glContext);
		SDL_DestroyWindow(window);
		SDL_Quit();
		delete zed;
		return -1;
	}
	// Query the image size only after init has succeeded
	int zedWidth = zed->getImageSize().width;
	int zedHeight = zed->getImageSize().height;

	GLuint zedTextureID_L, zedTextureID_R;
	// Generate OpenGL texture for left images of the ZED camera
	glGenTextures(1, &zedTextureID_L);
	glBindTexture(GL_TEXTURE_2D, zedTextureID_L);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	// Generate OpenGL texture for right images of the ZED camera
	glGenTextures(1, &zedTextureID_R);
	glBindTexture(GL_TEXTURE_2D, zedTextureID_R);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glBindTexture(GL_TEXTURE_2D, 0);

#if OPENGL_GPU_INTEROP
	cudaGraphicsResource* cimg_L;
	cudaGraphicsResource* cimg_R;
	cudaError_t errL, errR;
	errL = cudaGraphicsGLRegisterImage(&cimg_L, zedTextureID_L, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
	errR = cudaGraphicsGLRegisterImage(&cimg_R, zedTextureID_R, GL_TEXTURE_2D, cudaGraphicsMapFlagsNone);
	if (errL != cudaSuccess || errR != cudaSuccess)
	{
		std::cout << "ERROR: cannot create CUDA texture : " << errL << "|" << errR << std::endl;
	}
#endif

	ovrHmdDesc hmdDesc = ovr_GetHmdDesc(hmd);
	// Get the texture sizes of Oculus eyes
	ovrSizei textureSize0 = ovr_GetFovTextureSize(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
	ovrSizei textureSize1 = ovr_GetFovTextureSize(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
	// Compute the final size of the render buffer
	ovrSizei bufferSize;
	bufferSize.w = textureSize0.w + textureSize1.w;
	bufferSize.h = std::max(textureSize0.h, textureSize1.h);
	// Initialize OpenGL swap textures to render
	ovrSwapTextureSet* ptextureSet = 0;
	
	if (OVR_SUCCESS(ovr_CreateSwapTextureSetGL(hmd, GL_SRGB8_ALPHA8, bufferSize.w, bufferSize.h, &ptextureSet)))
	{
		for (int i = 0; i < ptextureSet->TextureCount; ++i)
		{
			ovrGLTexture* tex = (ovrGLTexture*)&ptextureSet->Textures[i];
			glBindTexture(GL_TEXTURE_2D, tex->OGL.TexId);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
		}
	}
	else
	{
		std::cout << "ERROR: failed creating swap texture" << std::endl;
		ovr_Destroy(hmd);
		ovr_Shutdown();
		SDL_GL_DeleteContext(glContext);
		SDL_DestroyWindow(window);
		SDL_Quit();
		delete zed;
		return -1;
	}
	// Generate frame buffer to render
	GLuint fboID;
	glGenFramebuffers(1, &fboID);
	// Generate depth buffer of the frame buffer
	GLuint depthBuffID;
	glGenTextures(1, &depthBuffID);
	glBindTexture(GL_TEXTURE_2D, depthBuffID);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	GLenum internalFormat = GL_DEPTH_COMPONENT24;
	GLenum type = GL_UNSIGNED_INT;
	glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, bufferSize.w, bufferSize.h, 0, GL_DEPTH_COMPONENT, type, NULL);

	// Create a mirror texture to display the render result in the SDL2 window
	ovrGLTexture* mirrorTexture = nullptr;
	result = ovr_CreateMirrorTextureGL(hmd, GL_SRGB8_ALPHA8, winWidth, winHeight, reinterpret_cast<ovrTexture**>(&mirrorTexture));
	if (!OVR_SUCCESS(result))
	{
		std::cout << "ERROR: Failed to create mirror texture" << std::endl;
	}
	GLuint mirrorFBOID;
	glGenFramebuffers(1, &mirrorFBOID);
	glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
	glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexture->OGL.TexId, 0);
	glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
	glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

	// Initialize a default Pose
	ovrPosef eyeRenderPose;
	// Set Identity quaternion
	eyeRenderPose.Orientation.x = 0;
	eyeRenderPose.Orientation.y = 0;
	eyeRenderPose.Orientation.z = 0;
	eyeRenderPose.Orientation.w = 1;
	// Set World's origin position
	eyeRenderPose.Position.x = 0.f;
	eyeRenderPose.Position.y = 0.f;
	eyeRenderPose.Position.z = 0;

	ovrLayerEyeFov ld;
	ld.Header.Type = ovrLayerType_EyeFov;
	// Tell to the Oculus compositor that our texture origin is at the bottom left
	ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft | ovrLayerFlag_HeadLocked;   // Because OpenGL | Disable head tracking
	// Set the Oculus layer eye field of view for each view
	for (int eye = 0; eye < 2; ++eye)
	{
		// Set the color texture as the current swap texture
		ld.ColorTexture[eye] = ptextureSet;
		// Set the viewport as the right or left vertical half part of the color texture
		ld.Viewport[eye] = OVR::Recti(eye == ovrEye_Left ? 0 : bufferSize.w / 2, 0, bufferSize.w / 2, bufferSize.h);
		// Set the field of view
		ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
		// Set the pose matrix
		ld.RenderPose[eye] = eyeRenderPose;
	}
	double sensorSampleTime = ovr_GetTimeInSeconds();
	ld.SensorSampleTime = sensorSampleTime;

	// Get the render description of the left and right "eyes" of the Oculus headset
	ovrEyeRenderDesc eyeRenderDesc[2];
	eyeRenderDesc[0] = ovr_GetRenderDesc(hmd, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
	eyeRenderDesc[1] = ovr_GetRenderDesc(hmd, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
	// Get the Oculus view scale description
	ovrVector3f viewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset, eyeRenderDesc[1].HmdToEyeViewOffset };
	ovrViewScaleDesc viewScaleDesc;
	viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
	viewScaleDesc.HmdToEyeViewOffset[0] = viewOffset[0];
	viewScaleDesc.HmdToEyeViewOffset[1] = viewOffset[1];

	// Create and compile the shader's sources
	Shader shader(OVR_ZED_VS, OVR_ZED_FS);

	// Compute the ZED image field of view with the ZED parameters
	float zedFovH = atanf(zed->getImageSize().width / (zed->getParameters()->LeftCam.fx *2.f)) * 2.f;
	// Compute the Oculus' field of view with its parameters
	float ovrFovH = (atanf(hmdDesc.DefaultEyeFov[0].LeftTan) + atanf(hmdDesc.DefaultEyeFov[0].RightTan));
	// Compute the useful part of the ZED image
	unsigned int usefulWidth = zed->getImageSize().width * ovrFovH / zedFovH;
	// Compute the size of the final image displayed in the headset with the ZED image's aspect-ratio kept
	unsigned int widthFinal = bufferSize.w / 2;
	unsigned int heightFinal = zed->getImageSize().height * widthFinal / usefulWidth;
	// Convert this size to OpenGL viewport's frame's coordinates
	float heightGL = (heightFinal) / (float)(bufferSize.h);
	float widthGL = ((zed->getImageSize().width * (heightFinal / (float)zed->getImageSize().height)) / (float)widthFinal);

	// Create a rectangle with the coordinates computed and push it in GPU memory.
	float rectVertices[12] = { -widthGL, -heightGL, 0, widthGL, -heightGL, 0, widthGL, heightGL, 0, -widthGL, heightGL, 0 };
	GLuint rectVBO[3];
	glGenBuffers(1, &rectVBO[0]);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
	glBufferData(GL_ARRAY_BUFFER, sizeof(rectVertices), rectVertices, GL_STATIC_DRAW);

	float rectTexCoord[8] = { 0, 1, 1, 1, 1, 0, 0, 0 };
	glGenBuffers(1, &rectVBO[1]);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
	glBufferData(GL_ARRAY_BUFFER, sizeof(rectTexCoord), rectTexCoord, GL_STATIC_DRAW);

	unsigned int rectIndices[6] = { 0, 1, 2, 0, 2, 3 };
	glGenBuffers(1, &rectVBO[2]);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
	glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(rectIndices), rectIndices, GL_STATIC_DRAW);

	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	
	// Initialize the per-eye horizontal image offset ('hit'), adjustable with the mouse wheel
	float hit = 0.02f;
	// Initialize a boolean that will be used to stop the application’s loop and another one to pause/unpause rendering
	bool end = false;
	bool refresh = true;
	// SDL variable that will be used to store input events
	SDL_Event events;
	// Initialize time variables. They will be used to limit the number of frames rendered per second.
	// Frame counter
	unsigned int riftc = 0, zedc = 1;
	// Chronometer
	unsigned int rifttime = 0, zedtime = 0, zedFPS = 0;
	int time1 = 0, timePerFrame = 0;
	int frameRate = (int)(1000 / MAX_FPS);

	// Enable the shader
	glUseProgram(shader.getProgramId());
	// Bind the Vertex Buffer Objects of the rectangle that displays ZED images
	// vertices
	glEnableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[0]);
	glVertexAttribPointer(Shader::ATTRIB_VERTICES_POS, 3, GL_FLOAT, GL_FALSE, 0, 0);
	// indices
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, rectVBO[2]);
	// texture coordinates
	glEnableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
	glBindBuffer(GL_ARRAY_BUFFER, rectVBO[1]);
	glVertexAttribPointer(Shader::ATTRIB_TEXTURE2D_POS, 2, GL_FLOAT, GL_FALSE, 0, 0);

	// Main loop
	while (!end)
	{
		// Compute the time used to render the previous frame
		timePerFrame = SDL_GetTicks() - time1;
		// If the previous frame has been rendered too fast
		if (timePerFrame < frameRate)
		{
			// Pause the loop to have a max FPS equal to MAX_FPS
			SDL_Delay(frameRate - timePerFrame);
			timePerFrame = frameRate;
		}
		// Increment the ZED chronometer
		zedtime += timePerFrame;
		// If ZED chronometer reached 1 second
		if (zedtime > 1000)
		{
			zedFPS = zedc;
			zedc = 0;
			zedtime = 0;
		}
		// Increment the Rift chronometer and the Rift frame counter
		rifttime += timePerFrame;
		riftc++;
		// If Rift chronometer reached 200 milliseconds
		if (rifttime > 200)
		{
			// Display FPS
			std::cout << "\rRIFT FPS: " << 1000 / (rifttime / riftc) << " | ZED FPS: " << zedFPS;
			// Reset Rift chronometer
			rifttime = 0;
			// Reset Rift frame counter
			riftc = 0;			
		}
		// Start frame chronometer
		time1 = SDL_GetTicks();
		
		// While there are caught events left to process
		while (SDL_PollEvent(&events))
		{
			// If a key is released
			if (events.type == SDL_KEYUP)
			{
				// If Q quit the application
				if (events.key.keysym.scancode == SDL_SCANCODE_Q)
					end = true;
				// If R reset the hit value
				else if (events.key.keysym.scancode == SDL_SCANCODE_R)
					hit = 0.0f;
				// If C pause/unpause rendering
				else if (events.key.keysym.scancode == SDL_SCANCODE_C)
					refresh = !refresh;
			}
			// If the mouse wheel is used
			if (events.type == SDL_MOUSEWHEEL)
			{
				// Increase or decrease hit value
				float s;
				events.wheel.y > 0 ? s = 1.0f : s = -1.0f;
				hit += 0.005f * s;
			}
		}

		// Grab a new ZED frame; grab() returns 0 on success
		if (!zed->grab(sl::zed::SENSING_MODE::RAW, false, false))
		{
			// Update the ZED frame counter
			zedc++;
			if (refresh)
			{
#if OPENGL_GPU_INTEROP
				sl::zed::Mat m = zed->retrieveImage_gpu(sl::zed::SIDE::LEFT);
				cudaArray_t arrIm;
				cudaGraphicsMapResources(1, &cimg_L, 0);
				cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_L, 0, 0);
				cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice);
				cudaGraphicsUnmapResources(1, &cimg_L, 0);

				m = zed->retrieveImage_gpu(sl::zed::SIDE::RIGHT);
				cudaGraphicsMapResources(1, &cimg_R, 0);
				cudaGraphicsSubResourceGetMappedArray(&arrIm, cimg_R, 0, 0);
				cudaMemcpy2DToArray(arrIm, 0, 0, m.data, m.step, zedWidth * 4, zedHeight, cudaMemcpyDeviceToDevice); // *4 = 4 channels * 1 bytes (uint)
				cudaGraphicsUnmapResources(1, &cimg_R, 0);
#endif
				// Increment the CurrentIndex to point to the next texture within the output swap texture set.
				// CurrentIndex must be advanced round-robin fashion every time we draw a new frame
				ptextureSet->CurrentIndex = (ptextureSet->CurrentIndex + 1) % ptextureSet->TextureCount;
				// Get the current swap texture pointer
				auto tex = reinterpret_cast<ovrGLTexture*>(&ptextureSet->Textures[ptextureSet->CurrentIndex]);
				// Bind the frame buffer
				glBindFramebuffer(GL_FRAMEBUFFER, fboID);
				// Set its color layer 0 as the current swap texture
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex->OGL.TexId, 0);
				// Set its depth layer as our depth buffer
				glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthBuffID, 0);
				// Set the clear color, then clear the frame buffer
				glClearColor(0, 0, 0, 1);
				glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

				// Render for each Oculus eye the equivalent ZED image
				for (int eye = 0; eye < 2; eye++)
				{
					// Set the left or right vertical half of the buffer as the viewport
					glViewport(ld.Viewport[eye].Pos.x, ld.Viewport[eye].Pos.y, ld.Viewport[eye].Size.w, ld.Viewport[eye].Size.h);
					// Bind the left or right ZED image
					glBindTexture(GL_TEXTURE_2D, eye == ovrEye_Left ? zedTextureID_L : zedTextureID_R);
#if !OPENGL_GPU_INTEROP
					glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, zedWidth, zedHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, zed->retrieveImage(eye == ovrEye_Left ? sl::zed::SIDE::LEFT : sl::zed::SIDE::RIGHT).data);
#endif
					// Bind the hit value
					glUniform1f(glGetUniformLocation(shader.getProgramId(), "hit"), eye == ovrEye_Left ? hit : -hit);
					// Draw the ZED image
					glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
				}
			}
		}
		/*
		Note: Even if we don't ask to refresh the framebuffer, or if Camera::grab()
		      doesn't catch a new frame, we still have to submit an image to the Rift:
		      the compositor needs its 75 Hz refresh. Otherwise there will be judder,
		      black frames and/or glitches in the headset.
		*/
		ovrLayerHeader* layers = &ld.Header;
		// Submit the frame to the Oculus compositor
		// which will display the frame in the Oculus headset
		result = ovr_SubmitFrame(hmd, 0, &viewScaleDesc, &layers, 1);

		if (!OVR_SUCCESS(result))
		{
			std::cout << "ERROR: failed to submit frame" << std::endl;
			glDeleteBuffers(3, rectVBO);
			ovr_DestroySwapTextureSet(hmd, ptextureSet);
			ovr_DestroyMirrorTexture(hmd, &mirrorTexture->Texture);
			ovr_Destroy(hmd);
			ovr_Shutdown();
			SDL_GL_DeleteContext(glContext);
			SDL_DestroyWindow(window);
			SDL_Quit();
			delete zed;
			return -1;
		}

		// Copy the frame to the mirror buffer
		// which will be drawn in the SDL2 image
		glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBOID);
		glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
		GLint w = mirrorTexture->OGL.Header.TextureSize.w;
		GLint h = mirrorTexture->OGL.Header.TextureSize.h;
		glBlitFramebuffer(0, h, w, 0,
			0, 0, w, h,
			GL_COLOR_BUFFER_BIT, GL_NEAREST);
		glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
		// Swap the SDL2 window
		SDL_GL_SwapWindow(window);
	}
	
	// Disable all OpenGL buffer
	glDisableVertexAttribArray(Shader::ATTRIB_TEXTURE2D_POS);
	glDisableVertexAttribArray(Shader::ATTRIB_VERTICES_POS);
	glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, 0);
	glUseProgram(0);
	glBindVertexArray(0);
	// Delete the Vertex Buffer Objects of the rectangle
	glDeleteBuffers(3, rectVBO);
	// Delete SDL, OpenGL, Oculus and ZED context
	ovr_DestroySwapTextureSet(hmd, ptextureSet);
	ovr_DestroyMirrorTexture(hmd, &mirrorTexture->Texture);
	ovr_Destroy(hmd);
	ovr_Shutdown();
	SDL_GL_DeleteContext(glContext);
	SDL_DestroyWindow(window);
	SDL_Quit();
	delete zed;
	// quit
	return 0;
}
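
The zedFovH / usefulWidth arithmetic in the setup above is worth seeing in isolation. The following hedged, self-contained restatement of that math uses hypothetical sample values: a 1280x720 ZED image with fx = 700 px gives zedFovH = 2 * atan(1280 / 1400), roughly 1.48 rad (about 85 degrees).

#include <cmath>

struct QuadSize { float widthGL; float heightGL; };

// Mirror of the sizing math in the sample: crop the ZED image to the Rift's
// horizontal FOV, then scale it to half the shared eye buffer while keeping
// the ZED aspect ratio.
QuadSize computeQuad(float zedW, float zedH, float fx,
                     float ovrFovH, float bufW, float bufH)
{
    float zedFovH = atanf(zedW / (fx * 2.f)) * 2.f;
    float usefulWidth = zedW * ovrFovH / zedFovH;   // part of the image the Rift can show
    float widthFinal = bufW / 2.f;                  // one eye = half the buffer
    float heightFinal = zedH * widthFinal / usefulWidth;
    QuadSize q;
    q.heightGL = heightFinal / bufH;
    q.widthGL = (zedW * (heightFinal / zedH)) / widthFinal;
    return q;
}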
Example #19
0
void OculusBaseDisplayPlugin::activate() {
    if (!OVR_SUCCESS(ovr_Initialize(nullptr))) {
        qFatal("Could not init OVR");
    }

    if (!OVR_SUCCESS(ovr_Create(&_session, &_luid))) {
        qFatal("Failed to acquire HMD");
    }

    WindowOpenGLDisplayPlugin::activate();

    _hmdDesc = ovr_GetHmdDesc(_session);

    _ipd = ovr_GetFloat(_session, OVR_KEY_IPD, _ipd);

    glm::uvec2 eyeSizes[2];
    ovr_for_each_eye([&](ovrEyeType eye) {
        _eyeFovs[eye] = _hmdDesc.DefaultEyeFov[eye];
        ovrEyeRenderDesc& erd = _eyeRenderDescs[eye] = ovr_GetRenderDesc(_session, eye, _eyeFovs[eye]);
        ovrMatrix4f ovrPerspectiveProjection =
            ovrMatrix4f_Projection(erd.Fov, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_RightHanded);
        _eyeProjections[eye] = toGlm(ovrPerspectiveProjection);

        ovrPerspectiveProjection =
            ovrMatrix4f_Projection(erd.Fov, 0.001f, 10.0f, ovrProjection_RightHanded);
        _compositeEyeProjections[eye] = toGlm(ovrPerspectiveProjection);

        _eyeOffsets[eye] = erd.HmdToEyeViewOffset;
        eyeSizes[eye] = toGlm(ovr_GetFovTextureSize(_session, eye, erd.Fov, 1.0f));
    });
    ovrFovPort combined = _eyeFovs[Left];
    combined.LeftTan = std::max(_eyeFovs[Left].LeftTan, _eyeFovs[Right].LeftTan);
    combined.RightTan = std::max(_eyeFovs[Left].RightTan, _eyeFovs[Right].RightTan);
    ovrMatrix4f ovrPerspectiveProjection =
        ovrMatrix4f_Projection(combined, DEFAULT_NEAR_CLIP, DEFAULT_FAR_CLIP, ovrProjection_RightHanded);
    _eyeProjections[Mono] = toGlm(ovrPerspectiveProjection);



    _desiredFramebufferSize = uvec2(
        eyeSizes[0].x + eyeSizes[1].x,
        std::max(eyeSizes[0].y, eyeSizes[1].y));

    if (!OVR_SUCCESS(ovr_ConfigureTracking(_session,
        ovrTrackingCap_Orientation | ovrTrackingCap_Position | ovrTrackingCap_MagYawCorrection, 0))) {
        qFatal("Could not attach to sensor device");
    }

    // Parent class relies on our _session initialization, so it must come after that.
    memset(&_sceneLayer, 0, sizeof(ovrLayerEyeFov));
    _sceneLayer.Header.Type = ovrLayerType_EyeFov;
    _sceneLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;
    ovr_for_each_eye([&](ovrEyeType eye) {
        ovrFovPort & fov = _sceneLayer.Fov[eye] = _eyeRenderDescs[eye].Fov;
        ovrSizei & size = _sceneLayer.Viewport[eye].Size = ovr_GetFovTextureSize(_session, eye, fov, 1.0f);
        _sceneLayer.Viewport[eye].Pos = { eye == ovrEye_Left ? 0 : size.w, 0 };
    });

}
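
The render-target sizing in activate() above, eyes packed side by side with the height of the tallest eye, is the stock LibOVR pattern. A minimal hedged sketch of just that computation, with a hypothetical helper name and plain calls instead of the plugin's ovr_for_each_eye:

// Assumes a valid session and HMD description, as in activate() above.
ovrSizei computeTargetSize(ovrSession session, const ovrHmdDesc& desc)
{
    ovrSizei left  = ovr_GetFovTextureSize(session, ovrEye_Left,  desc.DefaultEyeFov[0], 1.0f);
    ovrSizei right = ovr_GetFovTextureSize(session, ovrEye_Right, desc.DefaultEyeFov[1], 1.0f);
    ovrSizei target;
    target.w = left.w + right.w;                       // eyes side by side
    target.h = (left.h > right.h) ? left.h : right.h;  // tallest eye wins
    return target;
}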
Example #20
0
void FOculusInput::SendControllerEvents()
{
	const double CurrentTime = FPlatformTime::Seconds();

	// @todo: Should be made configurable and unified with other controllers handling of repeat
	const float InitialButtonRepeatDelay = 0.2f;
	const float ButtonRepeatDelay = 0.1f;
	const float AnalogButtonPressThreshold = TriggerThreshold;

	if(IOculusRiftPlugin::IsAvailable())
	{
		IOculusRiftPlugin& OculusRiftPlugin = IOculusRiftPlugin::Get();
		FOvrSessionShared::AutoSession OvrSession(IOculusRiftPlugin::Get().GetSession());
		UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: OvrSession = %p"), ovrSession(OvrSession));
		if (OvrSession && MessageHandler.IsValid() && FApp::HasVRFocus())
		{
			ovrInputState OvrInput;
			ovrTrackingState OvrTrackingState;

			ovrResult OvrRes = ovr_GetInputState(OvrSession, ovrControllerType_Remote, &OvrInput);
			UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: ovr_GetInputState(Remote) ret = %d"), int(OvrRes));
			if (OVR_SUCCESS(OvrRes) && OvrInput.ControllerType == ovrControllerType_Remote)
			{
				for (int32 ButtonIndex = 0; ButtonIndex < (int32)EOculusRemoteControllerButton::TotalButtonCount; ++ButtonIndex)
				{
					FOculusButtonState& ButtonState = Remote.Buttons[ButtonIndex];
					check(!ButtonState.Key.IsNone()); // is button's name initialized?

					// Determine if the button is pressed down
					bool bButtonPressed = false;
					switch ((EOculusRemoteControllerButton)ButtonIndex)
					{
					case EOculusRemoteControllerButton::DPad_Up:
						bButtonPressed = (OvrInput.Buttons & ovrButton_Up) != 0;
						break;

					case EOculusRemoteControllerButton::DPad_Down:
						bButtonPressed = (OvrInput.Buttons & ovrButton_Down) != 0;
						break;

					case EOculusRemoteControllerButton::DPad_Left:
						bButtonPressed = (OvrInput.Buttons & ovrButton_Left) != 0;
						break;

					case EOculusRemoteControllerButton::DPad_Right:
						bButtonPressed = (OvrInput.Buttons & ovrButton_Right) != 0;
						break;

					case EOculusRemoteControllerButton::Enter:
						bButtonPressed = (OvrInput.Buttons & ovrButton_Enter) != 0;
						break;

					case EOculusRemoteControllerButton::Back:
						bButtonPressed = (OvrInput.Buttons & ovrButton_Back) != 0;
						break;

					case EOculusRemoteControllerButton::VolumeUp:
						#ifdef SUPPORT_INTERNAL_BUTTONS
						bButtonPressed = (OvrInput.Buttons & ovrButton_VolUp) != 0;
						#endif
						break;

					case EOculusRemoteControllerButton::VolumeDown:
						#ifdef SUPPORT_INTERNAL_BUTTONS
						bButtonPressed = (OvrInput.Buttons & ovrButton_VolDown) != 0;
						#endif
						break;

					case EOculusRemoteControllerButton::Home:
						#ifdef SUPPORT_INTERNAL_BUTTONS
						bButtonPressed = (OvrInput.Buttons & ovrButton_Home) != 0;
						#endif
						break;

					default:
						check(0); // unhandled button, shouldn't happen
						break;
					}

					// Update button state
					if (bButtonPressed != ButtonState.bIsPressed)
					{
						const bool bIsRepeat = false;

						ButtonState.bIsPressed = bButtonPressed;
						if (ButtonState.bIsPressed)
						{
							MessageHandler->OnControllerButtonPressed(ButtonState.Key, 0, bIsRepeat);

							// Set the timer for the first repeat (longer than subsequent repeats)
							ButtonState.NextRepeatTime = CurrentTime + InitialButtonRepeatDelay;
						}
						else
						{
							MessageHandler->OnControllerButtonReleased(ButtonState.Key, 0, bIsRepeat);
						}
					}

					// Apply key repeat, if it's time for that
					if (ButtonState.bIsPressed && ButtonState.NextRepeatTime <= CurrentTime)
					{
						const bool bIsRepeat = true;
						MessageHandler->OnControllerButtonPressed(ButtonState.Key, 0, bIsRepeat);

						// Set the timer for the next repeat
						ButtonState.NextRepeatTime = CurrentTime + ButtonRepeatDelay;
					}
				}
			}

			OvrRes = ovr_GetInputState(OvrSession, ovrControllerType_Touch, &OvrInput);
			const bool bOvrGCTRes = OculusRiftPlugin.GetCurrentTrackingState(&OvrTrackingState);

			UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: ovr_GetInputState(Touch) ret = %d, GetCurrentTrackingState ret = %d"), int(OvrRes), int(bOvrGCTRes));

			if (OVR_SUCCESS(OvrRes) && bOvrGCTRes)
			{
				UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: ButtonState = 0x%X"), OvrInput.Buttons);
				UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: Touches = 0x%X"), OvrInput.Touches);

				for (FOculusTouchControllerPair& ControllerPair : ControllerPairs)
				{
					for( int32 HandIndex = 0; HandIndex < ARRAY_COUNT( ControllerPair.ControllerStates ); ++HandIndex )
					{
						FOculusTouchControllerState& State = ControllerPair.ControllerStates[ HandIndex ];

						const bool bIsLeft = (HandIndex == (int32)EControllerHand::Left);
						bool bIsCurrentlyTracked = (bIsLeft ? (OvrInput.ControllerType & ovrControllerType_LTouch) != 0 : (OvrInput.ControllerType & ovrControllerType_RTouch) != 0);

#if OVR_TESTING
						bIsCurrentlyTracked = true;
						static float _angle = 0;
						OvrTrackingState.HandPoses[HandIndex].ThePose.Orientation = OVR::Quatf(OVR::Vector3f(0, 0, 1), _angle);
						_angle += 0.1f;

						OvrTrackingState.HandPoses[HandIndex].ThePose = OvrTrackingState.HeadPose.ThePose;
						UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Error, TEXT("SendControllerEvents: OVR_TESTING is enabled!"));
#endif

						if (bIsCurrentlyTracked)
						{
							State.bIsCurrentlyTracked = true;

							const float OvrTriggerAxis = OvrInput.IndexTrigger[HandIndex];
							const float OvrGripAxis = OvrInput.HandTrigger[HandIndex];

							UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: IndexTrigger[%d] = %f"), int(HandIndex), OvrTriggerAxis);
							UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: HandTrigger[%d] = %f"), int(HandIndex), OvrGripAxis);
							UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: ThumbStick[%d] = { %f, %f }"), int(HandIndex), OvrInput.Thumbstick[HandIndex].x, OvrInput.Thumbstick[HandIndex].y );

							if (OvrTriggerAxis != State.TriggerAxis)
							{
								State.TriggerAxis = OvrTriggerAxis;
								MessageHandler->OnControllerAnalog(bIsLeft ? FGamepadKeyNames::MotionController_Left_TriggerAxis : FGamepadKeyNames::MotionController_Right_TriggerAxis, ControllerPair.UnrealControllerIndex, State.TriggerAxis);
							}

							if (OvrGripAxis != State.GripAxis)
							{
								State.GripAxis = OvrGripAxis;
								MessageHandler->OnControllerAnalog(bIsLeft ? FGamepadKeyNames::MotionController_Left_Grip1Axis : FGamepadKeyNames::MotionController_Right_Grip1Axis, ControllerPair.UnrealControllerIndex, State.GripAxis);
							}

							if (OvrInput.Thumbstick[HandIndex].x != State.ThumbstickAxes.X)
							{
								State.ThumbstickAxes.X = OvrInput.Thumbstick[HandIndex].x;
								MessageHandler->OnControllerAnalog(bIsLeft ? FGamepadKeyNames::MotionController_Left_Thumbstick_X : FGamepadKeyNames::MotionController_Right_Thumbstick_X, ControllerPair.UnrealControllerIndex, State.ThumbstickAxes.X);
							}

							if (OvrInput.Thumbstick[HandIndex].y != State.ThumbstickAxes.Y)
							{
								State.ThumbstickAxes.Y = OvrInput.Thumbstick[HandIndex].y;
								// we need to negate Y value to match XBox controllers
								MessageHandler->OnControllerAnalog(bIsLeft ? FGamepadKeyNames::MotionController_Left_Thumbstick_Y : FGamepadKeyNames::MotionController_Right_Thumbstick_Y, ControllerPair.UnrealControllerIndex, -State.ThumbstickAxes.Y);
							}

							for (int32 ButtonIndex = 0; ButtonIndex < (int32)EOculusTouchControllerButton::TotalButtonCount; ++ButtonIndex)
							{
								FOculusButtonState& ButtonState = State.Buttons[ButtonIndex];
								check(!ButtonState.Key.IsNone()); // is button's name initialized?

								// Determine if the button is pressed down
								bool bButtonPressed = false;
								switch ((EOculusTouchControllerButton)ButtonIndex)
								{
								case EOculusTouchControllerButton::Trigger:
									bButtonPressed = State.TriggerAxis >= AnalogButtonPressThreshold;
									break;

								case EOculusTouchControllerButton::Grip:
									bButtonPressed = State.GripAxis >= AnalogButtonPressThreshold;
									break;

								case EOculusTouchControllerButton::XA:
									bButtonPressed = bIsLeft ? (OvrInput.Buttons & ovrButton_X) != 0 : (OvrInput.Buttons & ovrButton_A) != 0;
									break;

								case EOculusTouchControllerButton::YB:
									bButtonPressed = bIsLeft ? (OvrInput.Buttons & ovrButton_Y) != 0 : (OvrInput.Buttons & ovrButton_B) != 0;
									break;

								case EOculusTouchControllerButton::Thumbstick:
									bButtonPressed = bIsLeft ? (OvrInput.Buttons & ovrButton_LThumb) != 0 : (OvrInput.Buttons & ovrButton_RThumb) != 0;
									break;

								default:
									check(0);
									break;
								}

								// Update button state
								if (bButtonPressed != ButtonState.bIsPressed)
								{
									const bool bIsRepeat = false;

									ButtonState.bIsPressed = bButtonPressed;
									if (ButtonState.bIsPressed)
									{
										MessageHandler->OnControllerButtonPressed(ButtonState.Key, ControllerPair.UnrealControllerIndex, bIsRepeat);

										// Set the timer for the first repeat (longer than subsequent repeats)
										ButtonState.NextRepeatTime = CurrentTime + InitialButtonRepeatDelay;
									}
									else
									{
										MessageHandler->OnControllerButtonReleased(ButtonState.Key, ControllerPair.UnrealControllerIndex, bIsRepeat);
									}
								}

								// Apply key repeat, if it's time for that
								if (ButtonState.bIsPressed && ButtonState.NextRepeatTime <= CurrentTime)
								{
									const bool bIsRepeat = true;
									MessageHandler->OnControllerButtonPressed(ButtonState.Key, ControllerPair.UnrealControllerIndex, bIsRepeat);

									// Set the timer for the next repeat
									ButtonState.NextRepeatTime = CurrentTime + ButtonRepeatDelay;
								}
							}

							// Handle Capacitive States
							for (int32 CapTouchIndex = 0; CapTouchIndex < (int32)EOculusTouchCapacitiveAxes::TotalAxisCount; ++CapTouchIndex)
							{
								FOculusTouchCapacitiveState& CapState = State.CapacitiveAxes[CapTouchIndex];

								float CurrentAxisVal = 0.f;
								switch ((EOculusTouchCapacitiveAxes)CapTouchIndex)
								{
								case EOculusTouchCapacitiveAxes::XA:
								{
									const uint32 mask = (bIsLeft) ? ovrTouch_X : ovrTouch_A;
									CurrentAxisVal = (OvrInput.Touches & mask) != 0 ? 1.f : 0.f;
									break;
								}
								case EOculusTouchCapacitiveAxes::YB:
								{
									const uint32 mask = (bIsLeft) ? ovrTouch_Y : ovrTouch_B;
									CurrentAxisVal = (OvrInput.Touches & mask) != 0 ? 1.f : 0.f;
									break;
								}
								case EOculusTouchCapacitiveAxes::Thumbstick:
								{
									const uint32 mask = (bIsLeft) ? ovrTouch_LThumb : ovrTouch_RThumb;
									CurrentAxisVal = (OvrInput.Touches & mask) != 0 ? 1.f : 0.f;
									break;
								}
								case EOculusTouchCapacitiveAxes::Trigger:
								{
									const uint32 mask = (bIsLeft) ? ovrTouch_LIndexTrigger : ovrTouch_RIndexTrigger;
									CurrentAxisVal = (OvrInput.Touches & mask) != 0 ? 1.f : 0.f;
									break;
								}
								case EOculusTouchCapacitiveAxes::IndexPointing:
								{
									const uint32 mask = (bIsLeft) ? ovrTouch_LIndexPointing : ovrTouch_RIndexPointing;
									CurrentAxisVal = (OvrInput.Touches & mask) != 0 ? 1.f : 0.f;
									break;
								}
								case EOculusTouchCapacitiveAxes::ThumbUp:
								{
									const uint32 mask = (bIsLeft) ? ovrTouch_LThumbUp : ovrTouch_RThumbUp;
									CurrentAxisVal = (OvrInput.Touches & mask) != 0 ? 1.f : 0.f;
									break;
								}
								default:
									check(0);
								}
							
								if (CurrentAxisVal != CapState.State)
								{
									MessageHandler->OnControllerAnalog(CapState.Axis, ControllerPair.UnrealControllerIndex, CurrentAxisVal);

									CapState.State = CurrentAxisVal;
								}
							}

							const ovrPosef& OvrHandPose = OvrTrackingState.HandPoses[HandIndex].ThePose;
							FVector NewLocation;
							FQuat NewOrientation;
							if (OculusRiftPlugin.PoseToOrientationAndPosition(OvrHandPose, /* Out */ NewOrientation, /* Out */ NewLocation))
							{
								// OK, we have up to date positional data!
								State.Orientation = NewOrientation;
								State.Location = NewLocation;

								UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: HandPOSE[%d]: Pos %.3f %.3f %.3f"), HandIndex, NewLocation.X, NewLocation.Y, NewLocation.Z);
								UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: HandPOSE[%d]: Yaw %.3f Pitch %.3f Roll %.3f"), HandIndex, NewOrientation.Rotator().Yaw, NewOrientation.Rotator().Pitch, NewOrientation.Rotator().Roll);
							}
							else
							{
								// HMD wasn't ready.  This can currently happen if we try to grab motion data before we've rendered at least one frame
								UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: PoseToOrientationAndPosition returned false"));
							}
						}
						else
						{
							// Controller isn't available right now.  Zero out input state, so that if it comes back it will send fresh event deltas
							State = FOculusTouchControllerState((EControllerHand)HandIndex);
							UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT("SendControllerEvents: Controller for the hand %d is not tracked"), int(HandIndex));
						}
					}
				}
			}
		}
	}
	UE_CLOG(OVR_DEBUG_LOGGING, LogOcInput, Log, TEXT(""));
}
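
SendControllerEvents() applies the same press/release/repeat pattern to every button. A hedged sketch of that pattern reduced to a single button follows; onPressed and onReleased are hypothetical stand-ins for the MessageHandler calls above.

// Hypothetical event sinks standing in for the MessageHandler callbacks.
void onPressed(bool bIsRepeat);
void onReleased();

struct SketchButtonState
{
	bool bIsPressed = false;
	double NextRepeatTime = 0.0;
};

void UpdateButton(SketchButtonState& State, bool bDown, double CurrentTime,
                  double InitialRepeatDelay, double RepeatDelay)
{
	if (bDown != State.bIsPressed)              // edge: pressed state changed
	{
		State.bIsPressed = bDown;
		if (bDown)
		{
			onPressed(/*bIsRepeat=*/ false);
			// Longer delay before the first synthetic repeat
			State.NextRepeatTime = CurrentTime + InitialRepeatDelay;
		}
		else
		{
			onReleased();
		}
	}
	if (State.bIsPressed && State.NextRepeatTime <= CurrentTime)
	{
		onPressed(/*bIsRepeat=*/ true);         // held: synthesize a repeat
		State.NextRepeatTime = CurrentTime + RepeatDelay;
	}
}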
Example #21
0
///@brief Called once a GL context has been set up.
void OVRSDK06AppSkeleton::initVR(bool swapBackBufferDims)
{
    if (m_Hmd == NULL)
        return;

    for (int i = 0; i < 2; ++i)
    {
        if (m_pTexSet[i])
        {
            ovrHmd_DestroySwapTextureSet(m_Hmd, m_pTexSet[i]);
            m_pTexSet[i] = nullptr;
        }
    }

    // Set up eye fields of view
    ovrLayerEyeFov& layer = m_layerEyeFov;
    layer.Header.Type = ovrLayerType_EyeFov;
    layer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;

    // Create eye render target textures and FBOs
    for (ovrEyeType eye = ovrEyeType::ovrEye_Left;
        eye < ovrEyeType::ovrEye_Count;
        eye = static_cast<ovrEyeType>(eye + 1))
    {
        const ovrFovPort& fov = layer.Fov[eye] = m_Hmd->MaxEyeFov[eye];
        const ovrSizei& size = layer.Viewport[eye].Size = ovrHmd_GetFovTextureSize(m_Hmd, eye, fov, 1.f);
        layer.Viewport[eye].Pos = { 0, 0 };
        LOG_INFO("Eye %d tex : %dx%d @ (%d,%d)", eye, size.w, size.h,
            layer.Viewport[eye].Pos.x, layer.Viewport[eye].Pos.y);

        ovrEyeRenderDesc & erd = m_eyeRenderDescs[eye];
        erd = ovrHmd_GetRenderDesc(m_Hmd, eye, m_Hmd->MaxEyeFov[eye]);
        ovrMatrix4f ovrPerspectiveProjection =
            ovrMatrix4f_Projection(erd.Fov, .1f, 10000.f, ovrProjection_RightHanded);
        m_eyeProjections[eye] = glm::transpose(glm::make_mat4(&ovrPerspectiveProjection.M[0][0]));
        m_eyeOffsets[eye] = erd.HmdToEyeViewOffset;

        // Allocate the frameBuffer that will hold the scene, and then be
        // re-rendered to the screen with distortion
        if (!OVR_SUCCESS(ovrHmd_CreateSwapTextureSetGL(m_Hmd, GL_RGBA, size.w, size.h, &m_pTexSet[eye])))
        {
            LOG_ERROR("Unable to create swap textures");
            return;
        }
        ovrSwapTextureSet& swapSet = *m_pTexSet[eye];
        for (int i = 0; i < swapSet.TextureCount; ++i)
        {
            const ovrGLTexture& ovrTex = (ovrGLTexture&)swapSet.Textures[i];
            glBindTexture(GL_TEXTURE_2D, ovrTex.OGL.TexId);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        }

        // Manually assemble swap FBO
        m_swapFBO.w = size.w;
        m_swapFBO.h = size.h;
        glGenFramebuffers(1, &m_swapFBO.id);
        glBindFramebuffer(GL_FRAMEBUFFER, m_swapFBO.id);
        const int idx = 0;
        const ovrGLTextureData* pGLData = reinterpret_cast<ovrGLTextureData*>(&swapSet.Textures[idx]);
        m_swapFBO.tex = pGLData->TexId;
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_swapFBO.tex, 0);

        m_swapFBO.depth = 0;
        glGenRenderbuffers(1, &m_swapFBO.depth);
        glBindRenderbuffer(GL_RENDERBUFFER, m_swapFBO.depth);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, size.w, size.h);
        glBindRenderbuffer(GL_RENDERBUFFER, 0);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, m_swapFBO.depth);

        // Check status
        {
            const GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
            if (status != GL_FRAMEBUFFER_COMPLETE_EXT)
            {
                LOG_ERROR("Framebuffer status incomplete: %d %x", status, status);
            }
        }
        glBindFramebuffer(GL_FRAMEBUFFER, 0);

        layer.ColorTexture[eye] = m_pTexSet[eye];
    }

    // Mirror texture for displaying to desktop window
    if (m_pMirrorTex)
    {
        ovrHmd_DestroyMirrorTexture(m_Hmd, m_pMirrorTex);
    }

    const ovrEyeType eye = ovrEyeType::ovrEye_Left;
    const ovrFovPort& fov = layer.Fov[eye] = m_Hmd->MaxEyeFov[eye];
    const ovrSizei& size = layer.Viewport[eye].Size = ovrHmd_GetFovTextureSize(m_Hmd, eye, fov, 1.f);
    ovrResult result = ovrHmd_CreateMirrorTextureGL(m_Hmd, GL_RGBA, size.w, size.h, &m_pMirrorTex);
    if (!OVR_SUCCESS(result))
    {
        LOG_ERROR("Unable to create mirror texture");
        return;
    }

    // Manually assemble mirror FBO
    m_mirrorFBO.w = size.w;
    m_mirrorFBO.h = size.h;
    glGenFramebuffers(1, &m_mirrorFBO.id);
    glBindFramebuffer(GL_FRAMEBUFFER, m_mirrorFBO.id);
    const ovrGLTextureData* pMirrorGLData = reinterpret_cast<ovrGLTextureData*>(m_pMirrorTex);
    m_mirrorFBO.tex = pMirrorGLData->TexId;
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_mirrorFBO.tex, 0);

    // Check status
    {
        const GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
        if (status != GL_FRAMEBUFFER_COMPLETE_EXT)
        {
            LOG_ERROR("Framebuffer status incomplete: %d %x", status, status);
        }
    }
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);

    glEnable(GL_DEPTH_TEST);
}
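
The manual swap-FBO assembly above (color texture plus a depth renderbuffer, then a completeness check) is a generic GL recipe. A hedged, self-contained version using core (non-EXT) entry points, assuming a function loader such as GLEW is already initialized:

// Hypothetical helper: build an FBO around an existing color texture.
GLuint makeFbo(GLuint colorTex, int w, int h)
{
    GLuint fbo = 0, depth = 0;
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);
    glGenRenderbuffers(1, &depth);
    glBindRenderbuffer(GL_RENDERBUFFER, depth);
    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, w, h);
    glBindRenderbuffer(GL_RENDERBUFFER, 0);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depth);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    {
        // Incomplete framebuffer; a real implementation would log and clean up here.
    }
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    return fbo;
}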
Example #22
0
// return true to retry later (e.g. after display lost)
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device lost failures cleanly
	ovrTexture     * mirrorTexture = nullptr;
	OculusTexture  * pEyeRenderTexture[2] = { nullptr, nullptr };
	DepthBuffer    * pEyeDepthBuffer[2] = { nullptr, nullptr };
    Scene          * roomScene = nullptr; 
    Camera         * mainCam = nullptr;
	D3D11_TEXTURE2D_DESC td = {};

	ovrHmd HMD;
	ovrGraphicsLuid luid;
	ovrResult result = ovr_Create(&HMD, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD);

	// -------------------------------------------------------------------
	// Add: Make Instance that CL Eye Camera Capture Class
	CLEyeCameraCapture* cam[2] = { NULL };

	// Query for number of connected camera
	int numCams = CLEyeGetCameraCount();
	if (numCams == 0)
	{
		printf_s("No PS3Eye Camera detected\n");
		goto Done;
	}
	printf_s("Found %d cameras\n", numCams);

	for (int iCam = 0; iCam < numCams; iCam++)
	{
		char windowName[64];

		// Query unique camera uuid
		GUID guid = CLEyeGetCameraUUID(iCam);
		printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n",
			iCam + 1, guid.Data1, guid.Data2, guid.Data3,
			guid.Data4[0], guid.Data4[1], guid.Data4[2],
			guid.Data4[3], guid.Data4[4], guid.Data4[5],
			guid.Data4[6], guid.Data4[7]);
		sprintf_s(windowName, "Camera Window %d", iCam + 1);

		// Create camera capture object
		cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30);
		cam[iCam]->StartCapture();
	}
	// -------------------------------------------------------------------

	// Setup Device and Graphics
	// Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

	// Make the eye render buffers (caution if actual size < requested due to HW limits). 
	ovrRecti         eyeRenderViewport[2];

	for (int eye = 0; eye < 2; ++eye)
	{
		ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
		pEyeRenderTexture[eye] = new OculusTexture();
        if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h))
        {
            if (retryCreate) goto Done;
	        VALIDATE(false, "Failed to create eye texture."); // result still holds ovr_Create's success here
        }
		pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h);
		eyeRenderViewport[eye].Pos.x = 0;
		eyeRenderViewport[eye].Pos.y = 0;
		eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureSet)
        {
            if (retryCreate) goto Done;
            VALIDATE(false, "Failed to create texture.");
        }
	}

	// Create a mirror to see on the monitor.
	td.ArraySize = 1;
    td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
	td.Width = DIRECTX.WinSizeW;
	td.Height = DIRECTX.WinSizeH;
	td.Usage = D3D11_USAGE_DEFAULT;
	td.SampleDesc.Count = 1;
	td.MipLevels = 1;
    result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

	// Create the room model
    roomScene = new Scene(false);

	// Create camera
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

	// Setup VR components, filling out description
	ovrEyeRenderDesc eyeRenderDesc[2];
	eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
	eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    bool isVisible = true;

	DCB portConfig;
	portConfig.BaudRate = 115200;
	portConfig.Parity = EVENPARITY;

	g_seriPort.Start("\\\\.\\COM3", &portConfig);


	// Main loop
	while (DIRECTX.HandleMessages())
	{
		XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
		XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0),  mainCam->Rot);
		if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])	  mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
		if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
		if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
		if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);
		static float Yaw = 0;
		if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
		if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

		// Animate the cube
		static float cubeClock = 0;
		roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

		// Get both eye poses simultaneously, with IPD offset already included. 
		ovrPosef         EyeRenderPose[2];
		ovrVector3f      HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset,
			                                       eyeRenderDesc[1].HmdToEyeViewOffset };
        double frameTime = ovr_GetPredictedDisplayTime(HMD, 0);
        // Keeping sensorSampleTime as close to ovr_GetTrackingState as possible - fed into the layer
        double           sensorSampleTime = ovr_GetTimeInSeconds();
		ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue);
		ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose);

		// --------------------------------------------------------------------------
		// Add: Get Head Yaw Roll Pitch
		float hmdPitch = 0.0f;
		float hmdRoll = 0.0f;
		float hmdYaw = 0.0f;

		OVR::Posef HeadPose = hmdState.HeadPose.ThePose;
		HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll);

		SetPos(2, ServoRoll(hmdYaw));
		SetPos(3, ServoRoll(hmdPitch));

		// --------------------------------------------------------------------------


		// Render Scene to Eye Buffers
        if (isVisible)
        {
            for (int eye = 0; eye < 2; ++eye)
		    {
			    // Increment to use next texture, just before writing
			    pEyeRenderTexture[eye]->AdvanceToNextTexture();

			    // Clear and set up rendertarget
			    int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex;
			    DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]);
			    DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
				    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

			    //Get the pose information in XM format
			    XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
				                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
			    XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0);

			    // Get view and projection matrices for the Rift camera
			    XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
			    Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat,mainCam->Rot)));
			    XMMATRIX view = finalCam.GetViewMatrix();
			    ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded);
			    XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
				                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
				                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
				                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
			    XMMATRIX prod = XMMatrixMultiply(view, proj);
			    roomScene->Render(&prod, 1, 1, 1, 1, true);
		    }
        }

		// Initialize our single full screen Fov layer.
        ovrLayerEyeFov ld = {};
		ld.Header.Type = ovrLayerType_EyeFov;
		ld.Header.Flags = 0;

		for (int eye = 0; eye < 2; ++eye)
		{
			ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet;
			ld.Viewport[eye] = eyeRenderViewport[eye];
			ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
			ld.RenderPose[eye] = EyeRenderPose[eye];
            ld.SensorSampleTime = sensorSampleTime;
		}

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1);
        // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
        if (!OVR_SUCCESS(result))
            goto Done;

        isVisible = (result == ovrSuccess);

        // Render mirror
        ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture;
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture);
        DIRECTX.SwapChain->Present(0, 0);
	}

	// Release resources
Done:
    delete mainCam;
    delete roomScene;
	if (mirrorTexture) ovr_DestroyMirrorTexture(HMD, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye)
    {
	    delete pEyeRenderTexture[eye];
        delete pEyeDepthBuffer[eye];
    }
	DIRECTX.ReleaseDevice();
	ovr_Destroy(HMD);

	g_seriPort.End();

	for (int iCam = 0; iCam < numCams; iCam++)
	{
		cam[iCam]->StopCapture();
		delete cam[iCam];
	}

    // Retry on ovrError_DisplayLost
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
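
The loop above loads the projection returned by ovrMatrix4f_Projection into an XMMATRIX with its row and column indices swapped, in other words transposed, because LibOVR stores matrices in a convention transposed relative to what this sample's DirectXMath code expects. A small hedged helper capturing exactly that conversion (helper name hypothetical):

#include <DirectXMath.h>
// Assumes OVR_CAPI.h for ovrMatrix4f, as in the sample above.
DirectX::XMMATRIX OvrProjectionToXM(const ovrMatrix4f& p)
{
    // Transpose while loading: element [r][c] of the OVR matrix lands at [c][r].
    return DirectX::XMMatrixSet(
        p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
        p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
        p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
        p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
}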
Example #23
0
File: OculusOVR.cpp Project: bjth/xenko
	DLL_EXPORT_API npBool xnOvrStartup()
	{
		ovrResult result = ovr_Initialize(NULL);
		return OVR_SUCCESS(result);
	}
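
Every successful ovr_Initialize should eventually be balanced by exactly one ovr_Shutdown. A hedged sketch of the matching teardown export; the xnOvrShutdown name is hypothetical, chosen only to mirror the naming convention above:

	// Hypothetical counterpart to xnOvrStartup.
	DLL_EXPORT_API void xnOvrShutdown()
	{
		ovr_Shutdown();
	}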
Example #24
0
void VR::nextTracking()
{
#if defined(_DEBUG)
	// make sure we are only called once per frame:
	static vector<bool> called;
	if (xapp->getFramenum() < 50000) {
		size_t framenum = (size_t) xapp->getFramenum();
		assert(called.size() <= framenum);
		called.push_back(true);
		assert(called.size() == framenum+1);
	}
#endif

	// Get both eye poses simultaneously, with IPD offset already included. 
	ovrVector3f useHmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset };
	//ovrPosef temp_EyeRenderPose[2];
	double displayMidpointSeconds = ovr_GetPredictedDisplayTime(session, 0);
	ovrTrackingState ts = ovr_GetTrackingState(session, displayMidpointSeconds, false);
	ovr_CalcEyePoses(ts.HeadPose.ThePose, useHmdToEyeViewOffset, layer.RenderPose);
	ovrResult result;
	ovrBoundaryTestResult btest;
	ovrBool visible;
	result = ovr_GetBoundaryVisible(session, &visible);
	if (0) {
		Log("visible = " << (visible == ovrTrue) << endl);

		result = ovr_TestBoundary(session, ovrTrackedDevice_HMD, ovrBoundary_Outer, &btest);
		if (OVR_SUCCESS(result)) {
			//Log("boundary success");
			if (result == ovrSuccess) Log("success" << endl);
			if (result == ovrSuccess_BoundaryInvalid) Log("success boundary invalid" << endl);
			if (result == ovrSuccess_DeviceUnavailable) Log("success device unavailable" << endl);
		}
	}
	layer.Fov[0] = eyeRenderDesc[0].Fov;
	layer.Fov[1] = eyeRenderDesc[1].Fov;

	// Render the two undistorted eye views into their render buffers.  
	for (int eye = 0; eye < 2; eye++)
	{
		ovrPosef    * useEyePose = &EyeRenderPose[eye];
		float       * useYaw = &YawAtRender[eye];
		float Yaw = XM_PI;
		*useEyePose = layer.RenderPose[eye];
		*useYaw = Yaw;

		// Get view and projection matrices (note near Z to reduce eye strain)
		Matrix4f rollPitchYaw = Matrix4f::RotationY(Yaw);
		Matrix4f finalRollPitchYaw = rollPitchYaw * Matrix4f(useEyePose->Orientation);
		// fix finalRollPitchYaw for LH coordinate system:
		Matrix4f s = Matrix4f::Scaling(1.0f, -1.0f, -1.0f);  // 1 1 -1
		finalRollPitchYaw = s * finalRollPitchYaw * s;

		Vector3f finalUp = finalRollPitchYaw.Transform(Vector3f(0, 1, 0));
		Vector3f finalForward = finalRollPitchYaw.Transform(Vector3f(0, 0, -1));//0 0 1
		Vector3f Posf;
		Posf.x = xapp->camera.pos.x;
		Posf.y = xapp->camera.pos.y;
		Posf.z = xapp->camera.pos.z;
		Vector3f diff = rollPitchYaw.Transform(useEyePose->Position);
		//diff /= 10.0f;
		//diff.x = 0.0f;
		//diff.y = 0.0f;
		//diff.z = 0.0f;
		Vector3f shiftedEyePos;
		shiftedEyePos.x = Posf.x - diff.x;
		shiftedEyePos.y = Posf.y + diff.y;
		shiftedEyePos.z = Posf.z + diff.z;
		xapp->camera.look.x = finalForward.x;
		xapp->camera.look.y = finalForward.y;
		xapp->camera.look.z = finalForward.z;

		Matrix4f view = Matrix4f::LookAtLH(shiftedEyePos, shiftedEyePos + finalForward, finalUp);
		Matrix4f projO = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 2000.0f,  ovrProjection_LeftHanded);
		Matrix4fToXM(this->viewOVR[eye], view.Transposed());
		Matrix4fToXM(this->projOVR[eye], projO.Transposed());
	}
}
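
The s * finalRollPitchYaw * s conjugation above converts the rotation between right- and left-handed conventions. Because S = diag(1, -1, -1) is diagonal, the triple product simply multiplies each matrix entry by the sign of its row and the sign of its column. A hedged 3x3 sketch with no OVR types assumed:

// (S * M * S)[r][c] == s[r] * M[r][c] * s[c] for diagonal S.
void flipHandedness(float m[3][3])
{
	static const float s[3] = { 1.f, -1.f, -1.f };
	for (int r = 0; r < 3; ++r)
		for (int c = 0; c < 3; ++c)
			m[r][c] *= s[r] * s[c];
}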
Example #25
0
File: hmd_ovr.cpp Project: JaapSuter/bgfx
	void VRImplOVR::connect(VRDesc* _desc)
	{
		ovrGraphicsLuid luid;
		ovrResult result = ovr_Create(&m_session, &luid);
		if (!OVR_SUCCESS(result))
		{
			BX_TRACE("Failed to create OVR device.");
			return;
		}

		BX_STATIC_ASSERT(sizeof(_desc->m_adapterLuid) >= sizeof(luid));
		memcpy(&_desc->m_adapterLuid, &luid, sizeof(luid));

		ovrHmdDesc hmdDesc = ovr_GetHmdDesc(m_session);
		_desc->m_deviceType = hmdDesc.Type;
		_desc->m_refreshRate = hmdDesc.DisplayRefreshRate;
		_desc->m_deviceSize.m_w = hmdDesc.Resolution.w;
		_desc->m_deviceSize.m_h = hmdDesc.Resolution.h;

		BX_TRACE("OVR HMD: %s, %s, firmware: %d.%d"
			, hmdDesc.ProductName
			, hmdDesc.Manufacturer
			, hmdDesc.FirmwareMajor
			, hmdDesc.FirmwareMinor
			);

		ovrSizei eyeSize[2] =
		{
			ovr_GetFovTextureSize(m_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f),
			ovr_GetFovTextureSize(m_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f),
		};

		for (int eye = 0; eye < 2; ++eye)
		{
			BX_STATIC_ASSERT(sizeof(_desc->m_eyeFov[eye]) == sizeof(hmdDesc.DefaultEyeFov[eye]));
			memcpy(&_desc->m_eyeFov[eye], &hmdDesc.DefaultEyeFov[eye], sizeof(_desc->m_eyeFov[eye]));
			_desc->m_eyeSize[eye].m_w = eyeSize[eye].w;
			_desc->m_eyeSize[eye].m_h = eyeSize[eye].h;
		}

		float neckOffset[2] = {OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL, OVR_DEFAULT_NECK_TO_EYE_VERTICAL};
		ovr_GetFloatArray(m_session, OVR_KEY_NECK_TO_EYE_DISTANCE, neckOffset, 2);
		_desc->m_neckOffset[0] = neckOffset[0];
		_desc->m_neckOffset[1] = neckOffset[1];

		// build constant layer settings
		m_renderLayer.Header.Type = ovrLayerType_EyeFov;
		m_renderLayer.Header.Flags = 0;
		for (int eye = 0; eye < 2; ++eye)
		{
			m_renderLayer.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
			m_renderLayer.Viewport[eye].Pos.x = 0;
			m_renderLayer.Viewport[eye].Pos.y = 0;
			m_renderLayer.Viewport[eye].Size = eyeSize[eye];
		}

		m_viewScale.HmdSpaceToWorldScaleInMeters = 1.0f;
		for (int eye = 0; eye < 2; ++eye)
		{
			ovrEyeRenderDesc erd = ovr_GetRenderDesc(m_session, static_cast<ovrEyeType>(eye), hmdDesc.DefaultEyeFov[eye]);
			m_viewScale.HmdToEyeOffset[eye] = erd.HmdToEyeOffset;
			m_eyeFov[eye] = erd.Fov;
			m_pixelsPerTanAngleAtCenter[eye] = erd.PixelsPerTanAngleAtCenter;
		}
	}
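
The BX_STATIC_ASSERT plus memcpy pair above is a compile-time-guarded bit copy of the adapter LUID into a caller-owned field. The same guard in plain C++11, as a hedged sketch with a hypothetical helper name:

#include <cstring>
// Assumes OVR_CAPI.h for ovrGraphicsLuid.
template <typename Dst>
void copyAdapterLuid(Dst& dst, const ovrGraphicsLuid& luid)
{
	static_assert(sizeof(Dst) >= sizeof(ovrGraphicsLuid), "destination too small");
	std::memcpy(&dst, &luid, sizeof(luid));
}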
Example #26
0
FD3D11Texture2DSet* FD3D11Texture2DSet::D3D11CreateTexture2DSet(
	FD3D11DynamicRHI* InD3D11RHI,
	const FOvrSessionSharedPtr& InOvrSession,
	ovrTextureSwapChain InTextureSet,
	const D3D11_TEXTURE2D_DESC& InDsDesc,
	EPixelFormat InFormat,
	uint32 InFlags
	)
{
	FOvrSessionShared::AutoSession OvrSession(InOvrSession);
	check(InTextureSet);

	TArray<TRefCountPtr<ID3D11RenderTargetView> > TextureSetRenderTargetViews;
	FD3D11Texture2DSet* NewTextureSet = new FD3D11Texture2DSet(
		InD3D11RHI,
		nullptr,
		nullptr,
		false,
		1,
		TextureSetRenderTargetViews,
		/*DepthStencilViews=*/ NULL,
		InDsDesc.Width,
		InDsDesc.Height,
		0,
		InDsDesc.MipLevels,
		InDsDesc.SampleDesc.Count,
		InFormat,
		/*bInCubemap=*/ false,
		InFlags,
		/*bPooledTexture=*/ false
		);

	int TexCount;
	ovr_GetTextureSwapChainLength(OvrSession, InTextureSet, &TexCount);
	const bool bSRGB = (InFlags & TexCreate_SRGB) != 0;

	const DXGI_FORMAT PlatformResourceFormat = (DXGI_FORMAT)GPixelFormats[InFormat].PlatformFormat;
	const DXGI_FORMAT PlatformShaderResourceFormat = FindShaderResourceDXGIFormat(PlatformResourceFormat, bSRGB);
	const DXGI_FORMAT PlatformRenderTargetFormat = FindShaderResourceDXGIFormat(PlatformResourceFormat, bSRGB);
	D3D11_RTV_DIMENSION RenderTargetViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
	if (InDsDesc.SampleDesc.Count > 1)
	{
		RenderTargetViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMS;
	}
	for (int32 i = 0; i < TexCount; ++i)
	{
		TRefCountPtr<ID3D11Texture2D> pD3DTexture;
		ovrResult res = ovr_GetTextureSwapChainBufferDX(OvrSession, InTextureSet, i, IID_PPV_ARGS(pD3DTexture.GetInitReference()));
		if (!OVR_SUCCESS(res))
		{
			UE_LOG(LogHMD, Error, TEXT("ovr_GetTextureSwapChainBufferDX failed, error = %d"), int(res));
			return nullptr;
		}

		TArray<TRefCountPtr<ID3D11RenderTargetView> > RenderTargetViews;
		if (InFlags & TexCreate_RenderTargetable)
		{
			// Create a render target view for each mip
			for (uint32 MipIndex = 0; MipIndex < InDsDesc.MipLevels; MipIndex++)
			{
				check(!(InFlags & TexCreate_TargetArraySlicesIndependently)); // not supported
				D3D11_RENDER_TARGET_VIEW_DESC RTVDesc;
				FMemory::Memzero(&RTVDesc, sizeof(RTVDesc));
				RTVDesc.Format = PlatformRenderTargetFormat;
				RTVDesc.ViewDimension = RenderTargetViewDimension;
				RTVDesc.Texture2D.MipSlice = MipIndex;

				TRefCountPtr<ID3D11RenderTargetView> RenderTargetView;
				VERIFYD3D11RESULT_EX(InD3D11RHI->GetDevice()->CreateRenderTargetView(pD3DTexture, &RTVDesc, RenderTargetView.GetInitReference()), InD3D11RHI->GetDevice());
				RenderTargetViews.Add(RenderTargetView);
			}
		}

		TRefCountPtr<ID3D11ShaderResourceView> ShaderResourceView;

		// Create a shader resource view for the texture.
		if (InFlags & TexCreate_ShaderResource)
		{
			D3D11_SRV_DIMENSION ShaderResourceViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
			D3D11_SHADER_RESOURCE_VIEW_DESC SRVDesc;
			SRVDesc.Format = PlatformShaderResourceFormat;

			SRVDesc.ViewDimension = ShaderResourceViewDimension;
			SRVDesc.Texture2D.MostDetailedMip = 0;
			SRVDesc.Texture2D.MipLevels = InDsDesc.MipLevels;

			VERIFYD3D11RESULT_EX(InD3D11RHI->GetDevice()->CreateShaderResourceView(pD3DTexture, &SRVDesc, ShaderResourceView.GetInitReference()), InD3D11RHI->GetDevice());

			check(IsValidRef(ShaderResourceView));
		}

		NewTextureSet->AddTexture(pD3DTexture, ShaderResourceView, &RenderTargetViews);
	}

	if (InFlags & TexCreate_RenderTargetable)
	{
		NewTextureSet->SetCurrentGPUAccess(EResourceTransitionAccess::EWritable);
	}
	NewTextureSet->TextureSet = InTextureSet;
	NewTextureSet->InitWithCurrentElement(0);
	return NewTextureSet;
}
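
The RTV loop above creates one render-target view per mip level of each swap-chain buffer. Stripped of the engine wrappers, the same loop in plain D3D11 looks like this hedged sketch (helper name hypothetical):

#include <d3d11.h>
#include <vector>

std::vector<ID3D11RenderTargetView*> makeMipRtvs(
	ID3D11Device* dev, ID3D11Texture2D* tex, DXGI_FORMAT fmt, UINT mipLevels)
{
	std::vector<ID3D11RenderTargetView*> rtvs;
	for (UINT mip = 0; mip < mipLevels; ++mip)
	{
		D3D11_RENDER_TARGET_VIEW_DESC rd = {};
		rd.Format = fmt;
		rd.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
		rd.Texture2D.MipSlice = mip;            // one view per mip
		ID3D11RenderTargetView* rtv = nullptr;
		if (SUCCEEDED(dev->CreateRenderTargetView(tex, &rd, &rtv)))
			rtvs.push_back(rtv);
	}
	return rtvs;
}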
Example #27
0
File: OculusOVR.cpp Project: bjth/xenko
	DLL_EXPORT_API npBool xnOvrCreateTexturesDx(xnOvrSession* session, void* dxDevice, int* outTextureCount, float pixelPerDisplayPixel, int mirrorBufferWidth, int mirrorBufferHeight)
	{
		session->HmdDesc = ovr_GetHmdDesc(session->Session);
		ovrSizei sizel = ovr_GetFovTextureSize(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0], pixelPerDisplayPixel);
		ovrSizei sizer = ovr_GetFovTextureSize(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1], pixelPerDisplayPixel);
		ovrSizei bufferSize;
		bufferSize.w = sizel.w + sizer.w;
		bufferSize.h = sizel.h > sizer.h ? sizel.h : sizer.h; // integer max (fmax would round-trip through double)

		ovrTextureSwapChainDesc texDesc = {};
		texDesc.Type = ovrTexture_2D;
		texDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
		texDesc.ArraySize = 1;
		texDesc.Width = bufferSize.w;
		texDesc.Height = bufferSize.h;
		texDesc.MipLevels = 1;
		texDesc.SampleCount = 1;
		texDesc.StaticImage = ovrFalse;
		texDesc.MiscFlags = ovrTextureMisc_None;
		texDesc.BindFlags = ovrTextureBind_DX_RenderTarget;

		if(!OVR_SUCCESS(ovr_CreateTextureSwapChainDX(session->Session, dxDevice, &texDesc, &session->SwapChain)))
		{
			return false;
		}

		auto count = 0;
		ovr_GetTextureSwapChainLength(session->Session, session->SwapChain, &count);
		*outTextureCount = count;
		
		//init structures
		session->EyeRenderDesc[0] = ovr_GetRenderDesc(session->Session, ovrEye_Left, session->HmdDesc.DefaultEyeFov[0]);
		session->EyeRenderDesc[1] = ovr_GetRenderDesc(session->Session, ovrEye_Right, session->HmdDesc.DefaultEyeFov[1]);
		session->HmdToEyeViewOffset[0] = session->EyeRenderDesc[0].HmdToEyeOffset;
		session->HmdToEyeViewOffset[1] = session->EyeRenderDesc[1].HmdToEyeOffset;

		session->Layer.Header.Type = ovrLayerType_EyeFov;
		session->Layer.Header.Flags = 0;
		session->Layer.ColorTexture[0] = session->SwapChain;
		session->Layer.ColorTexture[1] = session->SwapChain;
		session->Layer.Fov[0] = session->EyeRenderDesc[0].Fov;
		session->Layer.Fov[1] = session->EyeRenderDesc[1].Fov;
		session->Layer.Viewport[0].Pos.x = 0;
		session->Layer.Viewport[0].Pos.y = 0;
		session->Layer.Viewport[0].Size.w = bufferSize.w / 2;
		session->Layer.Viewport[0].Size.h = bufferSize.h;
		session->Layer.Viewport[1].Pos.x = bufferSize.w / 2;
		session->Layer.Viewport[1].Pos.y = 0;
		session->Layer.Viewport[1].Size.w = bufferSize.w / 2;
		session->Layer.Viewport[1].Size.h = bufferSize.h;

		//create mirror as well
		if (mirrorBufferHeight != 0 && mirrorBufferWidth != 0)
		{
			ovrMirrorTextureDesc mirrorDesc = {};
			mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
			mirrorDesc.Width = mirrorBufferWidth;
			mirrorDesc.Height = mirrorBufferHeight;
			if (!OVR_SUCCESS(ovr_CreateMirrorTextureDX(session->Session, dxDevice, &mirrorDesc, &session->Mirror)))
			{
				return false;
			}
		}
		
		return true;
	}
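The binding above only creates resources; how they are driven each frame is not part of this example. A plausible companion sketch in the same style (hypothetical, not an actual bjth/xenko export) could look like:

	// Hypothetical per-frame companion sketch for the resources created above.
	DLL_EXPORT_API npBool xnOvrSubmitFrameSketch(xnOvrSession* session, long long frameIndex)
	{
		// predict the head pose for the frame's display time and derive eye poses
		double displayTime = ovr_GetPredictedDisplayTime(session->Session, frameIndex);
		ovrTrackingState state = ovr_GetTrackingState(session->Session, displayTime, ovrTrue);
		ovr_CalcEyePoses(state.HeadPose.ThePose, session->HmdToEyeViewOffset, session->Layer.RenderPose);

		// ... application renders both eye viewports into the current swap chain buffer here ...

		if (!OVR_SUCCESS(ovr_CommitTextureSwapChain(session->Session, session->SwapChain)))
		{
			return false;
		}
		ovrLayerHeader* layers = &session->Layer.Header;
		return OVR_SUCCESS(ovr_SubmitFrame(session->Session, frameIndex, NULL, &layers, 1));
	}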
Example #28
///@brief Called once a GL context has been set up.
void initVR()
{
    const ovrHmdDesc& hmd = m_Hmd;

    for (int eye = 0; eye < 2; ++eye)
    {
        const ovrSizei& bufferSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmd.DefaultEyeFov[eye], 1.f);
        LOG_INFO("Eye %d tex : %dx%d @ ()", eye, bufferSize.w, bufferSize.h);

        ovrTextureSwapChain textureSwapChain = 0;
        ovrTextureSwapChainDesc desc = {};
        desc.Type = ovrTexture_2D;
        desc.ArraySize = 1;
        desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
        desc.Width = bufferSize.w;
        desc.Height = bufferSize.h;
        desc.MipLevels = 1;
        desc.SampleCount = 1;
        desc.StaticImage = ovrFalse;

        // Allocate the frameBuffer that will hold the scene, and then be
        // re-rendered to the screen with distortion
        ovrTextureSwapChain& chain = g_textureSwapChain[eye];
        if (OVR_SUCCESS(ovr_CreateTextureSwapChainGL(g_session, &desc, &chain)))
        {
            int length = 0;
            ovr_GetTextureSwapChainLength(g_session, chain, &length);

            for (int i = 0; i < length; ++i)
            {
                GLuint chainTexId;
                ovr_GetTextureSwapChainBufferGL(g_session, chain, i, &chainTexId);
                glBindTexture(GL_TEXTURE_2D, chainTexId);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            }
        }
        else
        {
            LOG_ERROR("Unable to create swap textures");
            return;
        }

        // Manually assemble swap FBO
        FBO& swapfbo = m_swapFBO[eye];
        swapfbo.w = bufferSize.w;
        swapfbo.h = bufferSize.h;
        glGenFramebuffers(1, &swapfbo.id);
        glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
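        // Note: swapfbo.tex is attached here only as a placeholder; at draw time the
        // chain's current buffer is re-attached to GL_COLOR_ATTACHMENT0 each frame
        // (see displayHMD below).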
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, swapfbo.tex, 0);

        swapfbo.depth = 0;
        glGenRenderbuffers(1, &swapfbo.depth);
        glBindRenderbuffer(GL_RENDERBUFFER, swapfbo.depth);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, bufferSize.w, bufferSize.h);
        glBindRenderbuffer(GL_RENDERBUFFER, 0);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, swapfbo.depth);

        // Check status
        const GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
        if (status != GL_FRAMEBUFFER_COMPLETE)
        {
            LOG_ERROR("Framebuffer status incomplete: %d %x", status, status);
        }
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
    }

    // Initialize mirror texture
    ovrMirrorTextureDesc desc;
    memset(&desc, 0, sizeof(desc));
    desc.Width = g_mirrorWindowSz.x;
    desc.Height = g_mirrorWindowSz.y;
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;

    const ovrResult result = ovr_CreateMirrorTextureGL(g_session, &desc, &g_mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        LOG_ERROR("Unable to create mirror texture");
        return;
    }

    // Manually assemble mirror FBO
    m_mirrorFBO.w = g_mirrorWindowSz.x;
    m_mirrorFBO.h = g_mirrorWindowSz.y;
    glGenFramebuffers(1, &m_mirrorFBO.id);
    glBindFramebuffer(GL_FRAMEBUFFER, m_mirrorFBO.id);
    GLuint texId;
    ovr_GetMirrorTextureBufferGL(g_session, g_mirrorTexture, &texId);
    m_mirrorFBO.tex = texId;
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_mirrorFBO.tex, 0);

    const ovrSizei sz = { 600, 600 };
    g_tweakbarQuad.initGL(g_session, sz);

    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);

    g_hmdVisible = true;
}
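The resources created in initVR() also need a matching teardown before shutting the session down; a plausible sketch, assuming the same globals:

///@brief Plausible teardown counterpart to initVR (sketch, assuming the same globals).
void exitVR()
{
    ovr_DestroyMirrorTexture(g_session, g_mirrorTexture);
    glDeleteFramebuffers(1, &m_mirrorFBO.id);
    for (int eye = 0; eye < 2; ++eye)
    {
        glDeleteRenderbuffers(1, &m_swapFBO[eye].depth);
        glDeleteFramebuffers(1, &m_swapFBO[eye].id);
        ovr_DestroyTextureSwapChain(g_session, g_textureSwapChain[eye]);
    }
    g_hmdVisible = false;
}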
Example #29
/**
* Tracker provoking method : updates the game timer, menu state, controller input and HMD pose.
***/
void* OculusTracker::Provoke(void* pThis, int eD3D, int eD3DInterface, int eD3DMethod, DWORD dwNumberConnected, int& nProvokerIndex)
{
	// update game timer
	m_cGameTimer.Tick();

	static UINT unFrameSkip = 200;
	if (unFrameSkip > 0)
	{
		unFrameSkip--;
		return nullptr;
	}

	// #define _DEBUG_OTR
#ifdef _DEBUG_OTR
	{ wchar_t buf[128]; wsprintf(buf, L"[OTR] ifc %u mtd %u", eD3DInterface, eD3DMethod); OutputDebugString(buf); }
#endif

	// save ini file ?
	if (m_nIniFrameCount)
	{
		if (m_nIniFrameCount == 1)
			SaveIniSettings();
		m_nIniFrameCount--;
	}

	// main menu update ?
	if (m_sMenu.bOnChanged)
	{
		// set back event bool, set ini file frame count
		m_sMenu.bOnChanged = false;
		m_nIniFrameCount = 300;

		// loop through entries
		for (size_t nIx = 0; nIx < m_sMenu.asEntries.size(); nIx++)
		{
			// entry index changed ?
			if (m_sMenu.asEntries[nIx].bOnChanged)
			{
				m_sMenu.asEntries[nIx].bOnChanged = false;

				// touch entries ?
				if (nIx < 25)
				{
					// set new vk code by string
					m_aaunKeys[1][nIx] = GetVkCodeByString(m_sMenu.asEntries[nIx].astrValueEnumeration[m_sMenu.asEntries[nIx].unValue]);
				}
			}
		}
	}

	if (m_hSession)
	{
#pragma region controller
		// controller indices
		static const uint32_t s_unIndexRemote = 0;
		static const uint32_t s_unIndexTouch = 1;
		static const uint32_t s_unIndexXBox = 2;

		// get all connected input states
		ovrInputState sInputState[3] = {};
		unsigned int unControllersConnected = ovr_GetConnectedControllerTypes(m_hSession);
#pragma region Remote
		if (unControllersConnected & ovrControllerType_Remote)
		{
			ovr_GetInputState(m_hSession, ovrControllerType_Remote, &sInputState[s_unIndexRemote]);

			// handle all remote buttons except Oculus private ones
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Up)
				m_sMenu.bOnUp = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Down)
				m_sMenu.bOnDown = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Left)
				m_sMenu.bOnLeft = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Right)
				m_sMenu.bOnRight = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Enter)
				m_sMenu.bOnAccept = true;
			if (sInputState[s_unIndexRemote].Buttons & ovrButton_Back)
				m_sMenu.bOnBack = true;
		}
#pragma endregion
#pragma region touch
		if (unControllersConnected & ovrControllerType_Touch)
		{
			// get input state
			ovr_GetInputState(m_hSession, ovrControllerType_Touch, &sInputState[s_unIndexTouch]);

			// loop through controller buttons
			for (UINT unButtonIx = 0; unButtonIx < unButtonNo; unButtonIx++)
			{
				// cast keyboard event
				if (sInputState[s_unIndexTouch].Buttons & aunButtonIds[unButtonIx])
				{
					if (!m_aabKeys[s_unIndexTouch][unButtonIx])
						MapButtonDown(s_unIndexTouch, unButtonIx);
				}
				else
				if (m_aabKeys[s_unIndexTouch][unButtonIx])
					MapButtonUp(s_unIndexTouch, unButtonIx);
			}
		}
#pragma endregion
		if (unControllersConnected & ovrControllerType_XBox)
			ovr_GetInputState(m_hSession, ovrControllerType_XBox, &sInputState[s_unIndexXBox]);



#pragma endregion
#pragma region hmd
		/*// Start the sensor which informs of the Rift's pose and motion   .... obsolete for SDK 1.3.x ??
		ovr_ConfigureTracking(m_hSession, ovrTrackingCap_Orientation | ovrTrackingCap_MagYawCorrection |
		ovrTrackingCap_Position, 0);*/

		// get the current tracking state
		ovrTrackingState sTrackingState = ovr_GetTrackingState(m_hSession, ovr_GetTimeInSeconds(), false);

		if (TRUE) // tracking status check disabled: (sTrackingState.StatusFlags & (ovrStatus_OrientationTracked | ovrStatus_PositionTracked))
		{
			// get pose
			ovrPoseStatef sPoseState = sTrackingState.HeadPose;
			m_sPose = sPoseState.ThePose;
			m_sOrientation.x = m_sPose.Orientation.x;
			m_sOrientation.y = m_sPose.Orientation.y;
			m_sOrientation.z = m_sPose.Orientation.z;
			m_sOrientation.w = m_sPose.Orientation.w;

			// backup old euler angles and velocity
			float fEulerOld[3];
			float fEulerVelocityOld[3];
			memcpy(&fEulerOld[0], &m_fEuler[0], sizeof(float)* 3);
			memcpy(&fEulerVelocityOld[0], &m_fEulerVelocity[0], sizeof(float)* 3);

			// predicted euler angles ? for Oculus, due to ATW, we do not predict the euler angles
			if (FALSE)
			{
				// get angles
				m_sOrientation.GetEulerAngles<Axis::Axis_Y, Axis::Axis_X, Axis::Axis_Z, RotateDirection::Rotate_CW, HandedSystem::Handed_R >(&m_fEuler[1], &m_fEuler[0], &m_fEuler[2]);

				// quick fix here...
				m_fEuler[1] *= -1.0f;
				m_fEuler[0] *= -1.0f;
				m_fEuler[2] *= -1.0f;

				// get euler velocity + acceleration
				float fEulerAcceleration[3];
				for (UINT unI = 0; unI < 3; unI++)
				{
					// get the velocity
					m_fEulerVelocity[unI] = (m_fEuler[unI] - fEulerOld[unI]) / (float)m_cGameTimer.DeltaTime();

					// get the acceleration
					fEulerAcceleration[unI] = (m_fEulerVelocity[unI] - fEulerVelocityOld[unI]) / (float)m_cGameTimer.DeltaTime();
				}

				// get predicted euler
				for (UINT unI = 0; unI < 3; unI++)
				{
					// compute predicted euler
					m_fEulerPredicted[unI] = (0.5f * fEulerAcceleration[unI] * ((float)m_cGameTimer.DeltaTime() * (float)m_cGameTimer.DeltaTime())) + (m_fEulerVelocity[unI] * (float)m_cGameTimer.DeltaTime()) + m_fEuler[unI];
				}
			}
			else
			{
				// get angles
				m_sOrientation.GetEulerAngles<Axis::Axis_Y, Axis::Axis_X, Axis::Axis_Z, RotateDirection::Rotate_CW, HandedSystem::Handed_R >(&m_fEulerPredicted[1], &m_fEulerPredicted[0], &m_fEulerPredicted[2]);

				// quick fix here...
				m_fEulerPredicted[1] *= -1.0f;
				m_fEulerPredicted[0] *= -1.0f;
				m_fEulerPredicted[2] *= -1.0f;
			}

			// set the drawing update to true
			m_bControlUpdate = true;

			// set position
			m_afPosition[0] = (float)-m_sPose.Position.x - m_afPositionOrigin[0];
			m_afPosition[1] = (float)-m_sPose.Position.y - m_afPositionOrigin[1];
			m_afPosition[2] = (float)m_sPose.Position.z + m_afPositionOrigin[2];

			// get eye render pose and other fields
			ovrEyeRenderDesc asEyeRenderDesc[2];
			asEyeRenderDesc[0] = ovr_GetRenderDesc(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0]);
			asEyeRenderDesc[1] = ovr_GetRenderDesc(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1]);
			ovrPosef asHmdToEyePose[2] = { asEyeRenderDesc[0].HmdToEyePose,asEyeRenderDesc[1].HmdToEyePose };
			//ovrVector3f      asHmdToEyeViewOffset[2] = { asEyeRenderDesc[0].HmdToEyePose, asEyeRenderDesc[1].HmdToEyePose };
			ovrPosef         asEyeRenderPose[2];
			static long long s_frameIndex = 0;
			static double s_sensorSampleTime = 0.0;    // sensorSampleTime is fed into the layer later
			ovr_GetEyePoses(m_hSession, s_frameIndex, ovrTrue, asHmdToEyePose, asEyeRenderPose, &s_sensorSampleTime);
			// ovr_CalcEyePoses(sTrackingState.HeadPose.ThePose, asHmdToEyePose, asEyeRenderPose);

			// create rotation matrix from euler angles
			D3DXMATRIX sRotation;
			D3DXMATRIX sPitch, sYaw, sRoll;
			D3DXMatrixRotationX(&sPitch, m_fEulerPredicted[0]);
			D3DXMatrixRotationY(&sYaw, m_fEulerPredicted[1]);
			D3DXMatrixRotationZ(&sRoll, -m_fEulerPredicted[2]);
			sRotation = sYaw * sPitch * sRoll;

			// create per eye view matrix from rotation and position
			D3DXMATRIX sView[2];
			for (UINT unEye = 0; unEye < 2; unEye++)
			{
				D3DXMATRIX sTranslation;
				D3DXMatrixTranslation(&sTranslation, (float)-asEyeRenderPose[unEye].Position.x - m_afPositionOrigin[0], (float)-asEyeRenderPose[unEye].Position.y - m_afPositionOrigin[1], (float)asEyeRenderPose[unEye].Position.z + m_afPositionOrigin[2]);
				sView[unEye] = sTranslation * sRotation;
			}

			// create head pose view matrix
			D3DXMATRIX sTranslation;
			D3DXMatrixTranslation(&sTranslation, (float)-sTrackingState.HeadPose.ThePose.Position.x - m_afPositionOrigin[0], (float)-sTrackingState.HeadPose.ThePose.Position.y - m_afPositionOrigin[1], (float)sTrackingState.HeadPose.ThePose.Position.z + m_afPositionOrigin[2]);
			m_sView = sTranslation * sRotation;

			// create inverse view matrix
			D3DXMATRIX sVInv = {};
			D3DXMatrixInverse(&sVInv, nullptr, &m_sView);

			// get projection matrices left/right
			D3DXMATRIX asToEye[2];
			D3DXMATRIX asProjection[2];
			for (UINT unEye = 0; unEye < 2; unEye++)
			{
				// get ovr projection
				ovrMatrix4f sProj = ovrMatrix4f_Projection(m_sHMDDesc.DefaultEyeFov[unEye], 0.01f, 30.0f, ovrProjection_LeftHanded);

				// create dx projection
				asProjection[unEye] = D3DXMATRIX(&sProj.M[0][0]);
				D3DXMatrixTranspose(&asProjection[unEye], &asProjection[unEye]);

				// create eventual projection using inverse matrix of the head pose view matrix
				m_asProjection[unEye] = sVInv * sView[unEye] * asProjection[unEye];
			}
		}
#pragma endregion
	}
	else
	{
		// Initialize LibOVR, and the Rift... then create hmd handle
		ovrResult result = ovr_Initialize(nullptr);
		if (!OVR_SUCCESS(result))
		{
			OutputDebugString(L"[OVR] Failed to initialize libOVR.");
			return nullptr;
		}

		result = ovr_Create(&m_hSession, &m_sLuid);
		if (!OVR_SUCCESS(result))
		{
			OutputDebugString(L"[OVR] Failed to retreive HMD handle.");
			return nullptr;
		}
		else
			OutputDebugString(L"[OVR] HMD handle initialized !");

		if (m_hSession)
		{
			// get the description and set pointers
			m_sHMDDesc = ovr_GetHmdDesc(m_hSession);

			// Configure Stereo settings.
			ovrSizei sRecommenedTex0Size = ovr_GetFovTextureSize(m_hSession, ovrEye_Left,
				m_sHMDDesc.DefaultEyeFov[0], 1.0f);
			ovrSizei sRecommenedTex1Size = ovr_GetFovTextureSize(m_hSession, ovrEye_Right,
				m_sHMDDesc.DefaultEyeFov[1], 1.0f);

			ovrSizei sTextureSize;
			sTextureSize.w = max(sRecommenedTex0Size.w, sRecommenedTex1Size.w);
			sTextureSize.h = max(sRecommenedTex0Size.h, sRecommenedTex1Size.h);
			m_unRenderTextureWidth = (UINT)sTextureSize.w;
			m_unRenderTextureHeight = (UINT)sTextureSize.h;

			// get view offset
			ovrEyeRenderDesc asEyeRenderDesc[2];
			asEyeRenderDesc[0] = ovr_GetRenderDesc(m_hSession, ovrEye_Left, m_sHMDDesc.DefaultEyeFov[0]);
			asEyeRenderDesc[1] = ovr_GetRenderDesc(m_hSession, ovrEye_Right, m_sHMDDesc.DefaultEyeFov[1]);
			ovrVector3f asViewOffset[2] = { asEyeRenderDesc[0].HmdToEyePose.Position, asEyeRenderDesc[1].HmdToEyePose.Position };

			// get projection matrices left/right
			D3DXMATRIX asToEye[2];
			D3DXMATRIX asProjection[2];
			for (UINT unEye = 0; unEye < 2; unEye++)
			{
				// get ovr projection
				ovrMatrix4f sProj = ovrMatrix4f_Projection(m_sHMDDesc.DefaultEyeFov[unEye], 0.01f, 30.0f, ovrProjection_LeftHanded);

				// create dx projection
				asProjection[unEye] = D3DXMATRIX(&sProj.M[0][0]);
				D3DXMatrixTranspose(&asProjection[unEye], &asProjection[unEye]);

				// create view offset translation matrix
				D3DXMatrixTranslation(&asToEye[unEye], -asViewOffset[unEye].x, -asViewOffset[unEye].y, -asViewOffset[unEye].z);

				// create eventual projection
				m_asProjection[unEye] = asToEye[unEye] * asProjection[unEye];
			}
		}
	}

	return nullptr;
}
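The disabled prediction branch above is plain constant-acceleration kinematics applied per Euler axis; isolated, the per-axis step it computes is just:

	// Per-axis constant-acceleration prediction as used in the branch above (sketch):
	// v = (x - xOld) / dt,  a = (v - vOld) / dt,  xPred = x + v*dt + 0.5*a*dt*dt
	inline float PredictAngle(float fX, float fV, float fA, float fDeltaTime)
	{
		return fX + fV * fDeltaTime + 0.5f * fA * fDeltaTime * fDeltaTime;
	}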
Example #30
// Display to an HMD with OVR SDK backend.
void displayHMD()
{
    ovrSessionStatus sessionStatus;
    ovr_GetSessionStatus(g_session, &sessionStatus);

    if (sessionStatus.HmdPresent == false)
    {
        displayMonitor();
        return;
    }

    const ovrHmdDesc& hmdDesc = m_Hmd;
    double sensorSampleTime = 0.0; // fed into the layer below; stays 0 when the HMD is not visible
    if (g_hmdVisible)
    {
        // Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime.
        ovrEyeRenderDesc eyeRenderDesc[2];
        eyeRenderDesc[0] = ovr_GetRenderDesc(g_session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
        eyeRenderDesc[1] = ovr_GetRenderDesc(g_session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

        // Get eye poses, feeding in correct IPD offset
        ovrVector3f HmdToEyeOffset[2] = {
            eyeRenderDesc[0].HmdToEyeOffset,
            eyeRenderDesc[1].HmdToEyeOffset };
#if 0
        // Get both eye poses simultaneously, with IPD offset already included.
        double displayMidpointSeconds = ovr_GetPredictedDisplayTime(g_session, 0);
        ovrTrackingState hmdState = ovr_GetTrackingState(g_session, displayMidpointSeconds, ovrTrue);
        ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeOffset, m_eyePoses);
#else
        ovr_GetEyePoses(g_session, g_frameIndex, ovrTrue, HmdToEyeOffset, m_eyePoses, &sensorSampleTime);
#endif
        storeHmdPose(m_eyePoses[0]);

        for (int eye = 0; eye < 2; ++eye)
        {
            const FBO& swapfbo = m_swapFBO[eye];
            const ovrTextureSwapChain& chain = g_textureSwapChain[eye];

            int curIndex;
            ovr_GetTextureSwapChainCurrentIndex(g_session, chain, &curIndex);
            GLuint curTexId;
            ovr_GetTextureSwapChainBufferGL(g_session, chain, curIndex, &curTexId);

            glBindFramebuffer(GL_FRAMEBUFFER, swapfbo.id);
            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);

            glViewport(0, 0, swapfbo.w, swapfbo.h);
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            glEnable(GL_FRAMEBUFFER_SRGB);

            {
                glClearColor(0.3f, 0.3f, 0.3f, 0.f);
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

                const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale);
                ovrRecti vp = { 0, 0, downSize.w, downSize.h };
                const int texh = swapfbo.h;
                vp.Pos.y = (texh - vp.Size.h) / 2;
                glViewport(vp.Pos.x, vp.Pos.y, vp.Size.w, vp.Size.h);

                // Cinemascope - letterbox bars scissoring off pixels above and below vp center
                const float hc = .5f * m_cinemaScope;
                const int scisPx = static_cast<int>(hc * static_cast<float>(vp.Size.h));
                ovrRecti sp = vp;
                sp.Pos.y += scisPx;
                sp.Size.h -= 2 * scisPx;
                glScissor(sp.Pos.x, sp.Pos.y, sp.Size.w, sp.Size.h);
                glEnable(GL_SCISSOR_TEST);
                glEnable(GL_DEPTH_TEST);

                // Render the scene for the current eye
                const ovrPosef& eyePose = m_eyePoses[eye];
                const glm::mat4 mview =
                    makeWorldToChassisMatrix() *
                    makeMatrixFromPose(eyePose, m_headSize);
                const ovrMatrix4f ovrproj = ovrMatrix4f_Projection(hmdDesc.DefaultEyeFov[eye], 0.2f, 1000.0f, ovrProjection_None);
                const glm::mat4 proj = makeGlmMatrixFromOvrMatrix(ovrproj);
                g_pScene->RenderForOneEye(glm::value_ptr(glm::inverse(mview)), glm::value_ptr(proj));

                const ovrTextureSwapChain& chain = g_textureSwapChain[eye];
                const ovrResult commitres = ovr_CommitTextureSwapChain(g_session, chain);
                if (!OVR_SUCCESS(commitres))
                {
                    LOG_ERROR("ovr_CommitTextureSwapChain returned %d", commitres);
                    return;
                }
            }
            glDisable(GL_SCISSOR_TEST);

            // Grab a copy of the left eye's undistorted render output for presentation
            // to the desktop window instead of the barrel distorted mirror texture.
            // This blit, while cheap, could cost some framerate to the HMD.
            // An over-the-shoulder view is another option, at a greater performance cost.
            if (0)
            {
                if (eye == ovrEyeType::ovrEye_Left)
                {
                    BlitLeftEyeRenderToUndistortedMirrorTexture();
                }
            }

            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
            glBindFramebuffer(GL_FRAMEBUFFER, 0);
        }
    }

    std::vector<const ovrLayerHeader*> layerHeaders;
    {
        // Do distortion rendering, Present and flush/sync
        ovrLayerEyeFov ld;
        ld.Header.Type = ovrLayerType_EyeFov;
        ld.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL.

        for (int eye = 0; eye < 2; ++eye)
        {
            const FBO& swapfbo = m_swapFBO[eye];
            const ovrTextureSwapChain& chain = g_textureSwapChain[eye];

            ld.ColorTexture[eye] = chain;

            const ovrSizei& downSize = ovr_GetFovTextureSize(g_session, ovrEyeType(eye), hmdDesc.DefaultEyeFov[eye], m_fboScale);
            ovrRecti vp = { 0, 0, downSize.w, downSize.h };
            const int texh = swapfbo.h;
            vp.Pos.y = (texh - vp.Size.h) / 2;

            ld.Viewport[eye] = vp;
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye] = m_eyePoses[eye];
            ld.SensorSampleTime = sensorSampleTime;
        }
        layerHeaders.push_back(&ld.Header);

        // Submit layers to HMD for display
        ovrLayerQuad ql;
        if (g_tweakbarQuad.m_showQuadInWorld)
        {
            ql.Header.Type = ovrLayerType_Quad;
            ql.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL.

            ql.ColorTexture = g_tweakbarQuad.m_swapChain;
            ovrRecti vp;
            vp.Pos.x = 0;
            vp.Pos.y = 0;
            vp.Size.w = 600; ///@todo
            vp.Size.h = 600; ///@todo
            ql.Viewport = vp;
            ql.QuadPoseCenter = g_tweakbarQuad.m_QuadPoseCenter;
            ql.QuadSize = { 1.f, 1.f }; ///@todo Pass in

            g_tweakbarQuad.SetHmdEyeRay(m_eyePoses[ovrEyeType::ovrEye_Left]); // Writes to m_layerQuad.QuadPoseCenter
            g_tweakbarQuad.DrawToQuad();
            layerHeaders.push_back(&ql.Header);
        }
    }

#if 0
    ovrViewScaleDesc viewScaleDesc;
    viewScaleDesc.HmdToEyeOffset[0] = m_eyeOffsets[0];
    viewScaleDesc.HmdToEyeOffset[1] = m_eyeOffsets[1];
    viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.f;
#endif

    const ovrResult result = ovr_SubmitFrame(g_session, g_frameIndex, nullptr, &layerHeaders[0], static_cast<unsigned int>(layerHeaders.size()));
    if (result == ovrSuccess)
    {
        g_hmdVisible = true;
    }
    else if (result == ovrSuccess_NotVisible)
    {
        g_hmdVisible = false;
        ///@todo Enter a lower-power, polling "no focus/HMD not worn" mode
    }
    else if (result == ovrError_DisplayLost)
    {
        LOG_INFO("ovr_SubmitFrame returned ovrError_DisplayLost");
        g_hmdVisible = false;
        ///@todo Tear down textures and session and re-create
    }
    else
    {
        LOG_INFO("ovr_SubmitFrame returned %d", result);
        //g_hmdVisible = false;
    }

    // Handle OVR session events
    ovr_GetSessionStatus(g_session, &sessionStatus);
    if (sessionStatus.ShouldQuit)
    {
        glfwSetWindowShouldClose(g_pMirrorWindow, 1);
    }
    if (sessionStatus.ShouldRecenter)
    {
        ovr_RecenterTrackingOrigin(g_session);
    }

    // Blit mirror texture to monitor window
    if (g_hmdVisible)
    {
        glViewport(0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y);
        const FBO& srcFBO = m_mirrorFBO;
        glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO.id);
        glBlitFramebuffer(
            0, srcFBO.h, srcFBO.w, 0,
            0, 0, g_mirrorWindowSz.x, g_mirrorWindowSz.y,
            GL_COLOR_BUFFER_BIT, GL_NEAREST);
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
    }
    else
    {
        displayMonitor();
    }
    ++g_frameIndex;

#ifdef USE_ANTTWEAKBAR
    if (g_tweakbarQuad.m_showQuadInWorld)
    {
        TwDraw();
    }
#endif
}
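displayHMD() is meant to be called once per frame from the mirror window's event loop; a minimal driver sketch, assuming the g_pMirrorWindow GLFW window referenced above:

// Minimal per-frame driver sketch (assumes the g_pMirrorWindow used above).
void mainLoop()
{
    while (!glfwWindowShouldClose(g_pMirrorWindow))
    {
        glfwPollEvents();                 // pump window/input events
        displayHMD();                     // render eyes, submit layers, blit mirror
        glfwSwapBuffers(g_pMirrorWindow); // present the mirror window
    }
}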