Example #1
    void MainLoop()
    {
	    Layer[0] = new VRLayer(HMD);

	    while (HandleMessages())
	    {
		    ActionFromInput();
		    Layer[0]->GetEyePoses();

            // Have a clock going
            static int clock = 0;
            ++clock;

		    for (int eye = 0; eye < 2; ++eye)
		    {
                // Press '1'-'4' to simulate showing blank frames instead of rendered ones,
                // in order to guarantee frame rate.   Not recommended at all, but useful to see,
                // just in case anyone considers it a viable alternative to juddering frames.
                int timesToRenderScene = 1;
                if (   ((DIRECTX.Key['1']) && ((clock % ( 1 * 2)) == (eye *  1)))  // Every 1 frame
                    || ((DIRECTX.Key['2']) && ((clock % ( 2 * 2)) == (eye *  2)))  // Every 2 frames
                    || ((DIRECTX.Key['3']) && ((clock % (10 * 2)) == (eye * 10)))  // Every 10 frames
                    || ((DIRECTX.Key['4']) && ((clock % (50 * 2)) == (eye * 50)))) // Every 50 frames
                    timesToRenderScene = 0;

                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye, 0, 0, timesToRenderScene);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
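A note on the modulo gating above: for an interval of N frames, the test ((clock % (N * 2)) == (eye * N)) blanks each eye once per N*2 frames, with the two eyes staggered by half the period. A minimal standalone sketch of the same predicate, with no OVR dependencies (the helper name is ours, not the sample's):

    #include <cstdio>

    // True when the given eye (0 or 1) should show a blank frame this tick:
    // one blank per (interval * 2) frames per eye, with the two eyes
    // staggered by half the period.
    static bool ShouldBlank(int clock, int eye, int interval)
    {
        return (clock % (interval * 2)) == (eye * interval);
    }

    int main()
    {
        for (int clock = 0; clock < 8; ++clock)   // key '2' in the sample: interval = 2
            printf("frame %d: blank left=%d right=%d\n", clock,
                   ShouldBlank(clock, 0, 2), ShouldBlank(clock, 1, 2));
        return 0;
    }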
Example #2
    void MainLoop()
    {
        Layer[0] = new VRLayer(Session);

        CameraCone cameraCone(this);

        while (HandleMessages())
        {
            ActionFromInput();

            // As we get eye poses, we also get the tracking state, for use later
            ovrTrackingState trackingState = Layer[0]->GetEyePoses();
            ovrTrackerPose   trackerPose  = ovr_GetTrackerPose(Session, 0);

            for (int eye = 0; eye < 2; ++eye)
            {
                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);

                // Let's clear the depth buffer, so we can see the cone clearly,
                // even if that means it sorts over the top of the scene.
                // It also has a different z-buffer range, so it would sort strangely anyway.
                DIRECTX.Context->ClearDepthStencilView(Layer[0]->pEyeDepthBuffer[eye]->TexDsv, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1, 0);

                // Note that we vary its visibility, and also note the constant
                // update of the camera's location and orientation from within the SDK
                cameraCone.RenderToEyeBuffer(Layer[0], eye, &trackingState, &trackerPose, 0.625f);
            }

            Layer[0]->PrepareLayerHeader();
            DistortAndPresent(1);
        }
    }
Example #3
    void MainLoop()
    {
	    Layer[0] = new VRLayer(Session);

	    while (HandleMessages())
	    {
			//Need to check we're visible before proceeding with velocity changes;
			//otherwise this runs many times while we're not visible, and we
			//end up miles away from our start point from the sheer number of iterations.
			ovrSessionStatus sessionStatus;
			ovr_GetSessionStatus(Session, &sessionStatus);
			if (sessionStatus.IsVisible)
			{
				// Take out manual yaw rotation (leaving button move for now)
				ActionFromInput(1, false);
				ovrTrackingState trackingState = Layer[0]->GetEyePoses();

				// Set various control methods into camera
				MainCam->Pos = XMVectorAdd(MainCam->Pos, FindVelocityFromTilt(this, Layer[0], &trackingState));

				MainCam->Pos = XMVectorSet(XMVectorGetX(MainCam->Pos),
					GetAccelJumpPosY(this, &trackingState),
					XMVectorGetZ(MainCam->Pos), 0);

				MainCam->Rot = GetAutoYawRotation(Layer[0]);

				// If the side of the Rift is tapped, fire a bullet
				bool singleTap = WasItTapped(trackingState.HeadPose.LinearAcceleration);

				static XMVECTOR bulletPos = XMVectorZero();
				static XMVECTOR bulletVel = XMVectorZero();
				if (singleTap)
				{
					XMVECTOR eye0 = ConvertToXM(Layer[0]->EyeRenderPose[0].Position);
					XMVECTOR eye1 = ConvertToXM(Layer[0]->EyeRenderPose[1].Position);
					XMVECTOR midEyePos = XMVectorScale(XMVectorAdd(eye0, eye1), 0.5f);

					XMVECTOR totalRot = XMQuaternionMultiply(ConvertToXM(Layer[0]->EyeRenderPose[0].Orientation), MainCam->Rot);
					XMVECTOR posOfOrigin = XMVectorAdd(MainCam->Pos, XMVector3Rotate(midEyePos, MainCam->Rot));

					XMVECTOR unitDirOfMainCamera = XMVector3Rotate(XMVectorSet(0, 0, -1, 0), totalRot);

					bulletPos = XMVectorAdd(posOfOrigin, XMVectorScale(unitDirOfMainCamera, 2.0f));
					bulletVel = XMVectorScale(unitDirOfMainCamera, 0.3f);
				}

				// Move missile on, and set its position
				bulletPos = XMVectorAdd(bulletPos, bulletVel);
				XMStoreFloat3(&RoomScene->Models[1]->Pos, bulletPos);

				for (int eye = 0; eye < 2; ++eye)
				{
					Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
				}

				Layer[0]->PrepareLayerHeader();
				DistortAndPresent(1);
			}
	    }
    }
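WasItTapped isn't shown in this excerpt. A minimal sketch of the idea, assuming it simply thresholds the magnitude of the head's linear acceleration; the threshold value is an assumption, not the sample's (a real implementation would also debounce, since one tap spans several frames):

    #include <cmath>
    #include <OVR_CAPI.h>

    // Hypothetical tap detector: report a tap when the magnitude of the
    // head's linear acceleration (m/s^2) spikes above a threshold.
    bool WasItTapped(ovrVector3f accel)
    {
        const float tapThreshold = 30.0f; // assumed value
        float magnitude = sqrtf(accel.x * accel.x
                              + accel.y * accel.y
                              + accel.z * accel.z);
        return magnitude > tapThreshold;
    }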
Example #4
void StartPolling(void)
{
	HANDLE hProcessSnap;
	PROCESSENTRY32W pe32;
	DWORD wait_time;

	LOOP {
		wait_time = HM_PTSLEEPTIME;
		pe32.dwSize = sizeof( PROCESSENTRY32W );
		if ( (hProcessSnap = CreateToolhelp32Snapshot( TH32CS_SNAPPROCESS, 0 )) != INVALID_HANDLE_VALUE ) {
			if( Process32FirstW( hProcessSnap, &pe32 ) ) {	
				// Iterate over the list of active processes
				do {
					// Only try to infect 64-bit processes (other than our own!)
					if (pe32.th32ProcessID==GetCurrentProcessId() || !IsX64Process(pe32.th32ProcessID))
						continue;

					// Skip it if it's on the bypass list or isn't one of our processes
					if (IsToBypass(pe32.szExeFile) || !IsMyProcess(pe32.th32ProcessID))
						continue;

					// If it's OK, infect it (this only happens the first time)
					if (StartHookingThread(pe32.th32ProcessID))
						wait_time = HM_PTSLEEPTIME*4;

				} while( Process32NextW( hProcessSnap, &pe32 ) );
			}
			CloseHandle( hProcessSnap );
		}
		HandleMessages();
		Sleep(wait_time);
	} 
}
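IsX64Process is not shown either. A plausible sketch using the documented IsWow64Process call: on a 64-bit OS, a process is 64-bit exactly when it is not running under WOW64 (only the name comes from the snippet; the body is an assumption):

    #include <windows.h>

    BOOL IsX64Process(DWORD pid)
    {
        HANDLE hProcess = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
        if (hProcess == NULL)
            return FALSE;

        // On a 64-bit OS, a process is 64-bit iff it is NOT a WOW64 (32-bit) process.
        BOOL isWow64 = FALSE;
        BOOL ok = IsWow64Process(hProcess, &isWow64);
        CloseHandle(hProcess);
        return ok && !isWow64;
    }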
Example #5
    void MainLoop()
    {
	    Layer[0] = new VRLayer(HMD);

	    while (HandleMessages())
	    {
            // We turn off yaw to keep the case simple
		    ActionFromInput(1, false);
		    Layer[0]->GetEyePoses();

            // Find perturbation of position from point 1m in front of camera
            XMVECTOR eye0 = ConvertToXM(Layer[0]->EyeRenderPose[0].Position);
            XMVECTOR eye1 = ConvertToXM(Layer[0]->EyeRenderPose[1].Position);
            XMVECTOR perturb = XMVectorScale(XMVectorAdd(eye0, eye1), 0.5f);

            // Calculate velocity from this
            const float sensitivity = 0.2f;
		    XMVECTOR vel = XMVectorScale(XMVectorSet(-XMVectorGetX(perturb), 0, -XMVectorGetZ(perturb), 0), sensitivity);

            // Add velocity to camera
            MainCam->Pos = XMVectorAdd(MainCam->Pos, vel);

            for (int eye = 0; eye < 2; ++eye)
		    {
			    Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
Example #6
void CInputTask::Update()
{

  HandleMessages();
  
  m_cInput->update();

}
Example #7
void UpdateControls()
{
	HandleMessages();

	if( _input_killswitch ) {
		b4 = b3 = b2 = b1 = right = left = down = up = false;
		return;
	}

	joy_Update();
	mouse_Update();
	UpdateKeyboard();

	byte oldb1 = b1,
		 oldb2 = b2,
	     oldb3 = b3,
		 oldb4 = b4;

	// Overkill (2006-06-25):
	// The following four ifs have been altered to allow custom directional keys.
	if (keys[k_up] || sticks[0].up) up = true; else up = false;
	if (keys[k_left] || sticks[0].left) left = true; else left = false;
	if (keys[k_down] || sticks[0].down) down = true; else down = false;
	if (keys[k_right] || sticks[0].right) right = true; else right = false;

	if (keys[k_b1] || sticks[0].button[j_b1]) b1 = true; else b1 = false;
	if (keys[k_b2] || sticks[0].button[j_b2]) b2 = true; else b2 = false;
	if (keys[k_b3] || sticks[0].button[j_b3]) b3 = true; else b3 = false;
	if (keys[k_b4] || sticks[0].button[j_b4]) b4 = true; else b4 = false;

	if (!up && kill_up) kill_up = false;
	if (!down && kill_down) kill_down = false;
	if (!left && kill_left) kill_left = false;
	if (!right && kill_right) kill_right = false;

	if (!b1 && kill_b1) kill_b1 = false;
	if (!b2 && kill_b2) kill_b2 = false;
	if (!b3 && kill_b3) kill_b3 = false;
	if (!b4 && kill_b4) kill_b4 = false;

	if (up && kill_up) up = false;
	if (down && kill_down) down = false;
	if (left && kill_left) left = false;
	if (right && kill_right) right = false;

	if (b1 && kill_b1) b1 = false;
	if (b2 && kill_b2) b2 = false;
	if (b3 && kill_b3) b3 = false;
	if (b4 && kill_b4) b4 = false;

	//mbg 9/5/05 todo removed for psp
	//TODO LUA
	if (b1 && !oldb1) se->ExecuteCallback(bindbutton[0], true);
	if (b2 && !oldb2) se->ExecuteCallback(bindbutton[1], true);
	if (b3 && !oldb3) se->ExecuteCallback(bindbutton[2], true);
	if (b4 && !oldb4) se->ExecuteCallback(bindbutton[3], true);
}
Example #8
    void MainLoop()
    {
	    Layer[0] = new VRLayer(Session);

	    // Create a trivial model to represent the left controller
	    TriangleSet cube;
	    cube.AddSolidColorBox(0.05f, -0.05f, 0.05f, -0.05f, 0.05f, -0.05f, 0xff404040);
	    Model * controller = new Model(&cube, XMFLOAT3(0, 0, 0), XMFLOAT4(0, 0, 0, 1), new Material(new Texture(false, 256, 256, Texture::AUTO_CEILING)));
	
	    // Main loop
	    while (HandleMessages())
	    {
		    // We don't allow yaw change for now, as this sample is too simple to cater for it.
		    ActionFromInput(1.0f,false);
		    ovrTrackingState hmdState = Layer[0]->GetEyePoses();

		    //Write position and orientation into controller model.
		    controller->Pos = XMFLOAT3(XMVectorGetX(MainCam->Pos) + hmdState.HandPoses[ovrHand_Left].ThePose.Position.x,
			                           XMVectorGetY(MainCam->Pos) + hmdState.HandPoses[ovrHand_Left].ThePose.Position.y,
			                           XMVectorGetZ(MainCam->Pos) + hmdState.HandPoses[ovrHand_Left].ThePose.Position.z);
		    controller->Rot = XMFLOAT4(hmdState.HandPoses[ovrHand_Left].ThePose.Orientation.x, 
                                       hmdState.HandPoses[ovrHand_Left].ThePose.Orientation.y,
			                           hmdState.HandPoses[ovrHand_Left].ThePose.Orientation.z, 
                                       hmdState.HandPoses[ovrHand_Left].ThePose.Orientation.w);

		    //Button presses modify the colour of the controller model below
		    ovrInputState inputState;
		    ovr_GetInputState(Session, ovrControllerType_Touch, &inputState);

		    for (int eye = 0; eye < 2; ++eye)
		    {
			    XMMATRIX viewProj = Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);

			    // Render the controller model
			    controller->Render(&viewProj, 1, inputState.Buttons & ovrTouch_X ? 1.0f : 0.0f,
				                                 inputState.Buttons & ovrTouch_Y ? 1.0f : 0.0f, 1, true);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }

        delete controller;
    }
Example #9
    void MainLoop()
    {
        // Ensure symmetrical FOV in a simplistic way for now.
        // For DK2 this is more or less identical to the ideal FOV, but for
        // HMDs where it isn't, there will be performance savings from
        // drawing less of the eye texture for each eye.
        ovrFovPort newFov[2];
        newFov[0].UpTan    = max(HmdDesc.DefaultEyeFov[0].UpTan,    HmdDesc.DefaultEyeFov[1].UpTan);
        newFov[0].DownTan  = max(HmdDesc.DefaultEyeFov[0].DownTan,  HmdDesc.DefaultEyeFov[1].DownTan);
        newFov[0].LeftTan  = max(HmdDesc.DefaultEyeFov[0].LeftTan,  HmdDesc.DefaultEyeFov[1].LeftTan);
        newFov[0].RightTan = max(HmdDesc.DefaultEyeFov[0].RightTan, HmdDesc.DefaultEyeFov[1].RightTan);
        newFov[1] = newFov[0];

	    Layer[0] = new VRLayer(Session, newFov);

	    while (HandleMessages())
	    {
		    ActionFromInput();

            // Monoscopic
            if (!DIRECTX.Key['1'])
            {
                // Set IPD to zero, so we're getting the 'middle eye'
                float scaleIPD = 0.0f;
                Layer[0]->GetEyePoses(0, &scaleIPD);

                // Just do the one eye, the right one.
                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, 1);

                // And now insist that the texture used for the left eye is actually the right eye's.
                Layer[0]->PrepareLayerHeader(Layer[0]->pEyeRenderTexture[1]);
            }
            else // Regular stereoscopic for comparison
            {
                Layer[0]->GetEyePoses();
                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, 0);
                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, 1);
                Layer[0]->PrepareLayerHeader();
            }

            DistortAndPresent(1);
	    }
    }
Example #10
    void MainLoop()
    {
	    Layer[0] = new VRLayer(HMD);

	    while (HandleMessages())
	    {
		    static float clock = 0;
            ++clock;

            // Toggle the debug HUD on and off, and which mode
            if (DIRECTX.Key['0']) ovr_SetInt(HMD, OVR_DEBUG_HUD_STEREO_MODE, int(ovrDebugHudStereo_Off));
            if (DIRECTX.Key['1']) ovr_SetInt(HMD, OVR_DEBUG_HUD_STEREO_MODE, int(ovrDebugHudStereo_CrosshairAtInfinity));
            if (DIRECTX.Key['2']) ovr_SetInt(HMD, OVR_DEBUG_HUD_STEREO_MODE, int(ovrDebugHudStereo_Quad));

		    // Vary some of the attributes of the Debug HUD when number keys are pressed.
		    float guideSize[2]      = {1, 1};
		    float guidePosition[3]  = {0, 0, -1.50f};
		    float guideRotation[3]  = {0, 0, 0};
		    float guideColorRGBA[4] = {1, 0.5f, 0, 1};
		    if (DIRECTX.Key['3']) guideSize[0]      = 1 + 0.5f * sin(0.02f * clock);   // Vary width
		    if (DIRECTX.Key['4']) guidePosition[0]  = 0.5f * sin(0.02f * clock);       // Vary X position
		    if (DIRECTX.Key['5']) guideRotation[0]  = 0.5f * sin(0.02f * clock);       // Vary yaw
		    if (DIRECTX.Key['6']) guideColorRGBA[1] = 0.5f + 0.5f * sin(0.1f * clock); // Vary green

		    // Write in the new attributes into the SDK
		    ovr_SetFloatArray(HMD, OVR_DEBUG_HUD_STEREO_GUIDE_SIZE,         guideSize,      2);
		    ovr_SetFloatArray(HMD, OVR_DEBUG_HUD_STEREO_GUIDE_POSITION,     guidePosition,  3);
		    ovr_SetFloatArray(HMD, OVR_DEBUG_HUD_STEREO_GUIDE_YAWPITCHROLL, guideRotation,  3);
		    ovr_SetFloatArray(HMD, OVR_DEBUG_HUD_STEREO_GUIDE_COLOR,        guideColorRGBA, 4);

		    ActionFromInput();
		    Layer[0]->GetEyePoses();

		    for (int eye = 0; eye < 2; ++eye)
		    {
			    Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
Example #11
    void MainLoop()
    {
	    Layer[0] = new VRLayer(HMD);

	    while (HandleMessages())
	    {
		    ActionFromInput();
            ovrTrackingState trackingState = Layer[0]->GetEyePoses();

            // Add velocity to camera
            MainCam->Pos = XMVectorAdd(MainCam->Pos, FindVelocityFromTilt(this, Layer[0], &trackingState));

		    for (int eye = 0; eye < 2; ++eye)
		    {
			    Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
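FindVelocityFromTilt is defined elsewhere in these samples. A rough sketch of the likely idea: rotate the world up-axis by the tracked head orientation, and use the resulting horizontal lean as a velocity. The parameter types are guessed from the call sites, 'app' and 'layer' are unused here, and the scale factor is an assumption:

    // Hypothetical stand-in: the head's tilt away from vertical, projected
    // onto the ground plane, becomes a horizontal camera velocity.
    XMVECTOR FindVelocityFromTilt(BasicVR * app, VRLayer * layer, ovrTrackingState * trackingState)
    {
        const float sensitivity = 0.05f; // assumed
        XMVECTOR headOrientation = ConvertToXM(trackingState->HeadPose.ThePose.Orientation);
        XMVECTOR tilted = XMVector3Rotate(XMVectorSet(0, 1, 0, 0), headOrientation);
        return XMVectorScale(XMVectorSet(XMVectorGetX(tilted), 0, XMVectorGetZ(tilted), 0),
                             sensitivity);
    }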
Example #12
    void MainLoop()
    {
	    Layer[0] = new VRLayer(Session);

	    while (HandleMessages())
	    {
		    ActionFromInput();
		    Layer[0]->GetEyePoses();


			// Read the remote state 
			ovrInputState inputState;
			ovr_GetInputState(Session, ovrControllerType_Remote, &inputState);
			unsigned int result = ovr_GetConnectedControllerTypes(Session);
			bool isRemoteConnected = (result & ovrControllerType_Remote) ? true : false;

			// Some auxiliary controls we're going to read from the remote. 
			XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), MainCam->Rot);
			XMVECTOR right = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0), MainCam->Rot);
			if (inputState.Buttons & ovrButton_Up)	  MainCam->Pos = XMVectorAdd(MainCam->Pos, forward);
			if (inputState.Buttons & ovrButton_Down)  MainCam->Pos = XMVectorSubtract(MainCam->Pos, forward);
			if (inputState.Buttons & ovrButton_Left)  MainCam->Pos = XMVectorSubtract(MainCam->Pos, right);
			if (inputState.Buttons & ovrButton_Right)  MainCam->Pos = XMVectorAdd(MainCam->Pos, right);


			for (int eye = 0; eye < 2; ++eye)
		    {
				//Tint the world green if the controller is attached, otherwise red
				if (isRemoteConnected)
					Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye, 0, 0, 1,/**/ 1, 0.5f, 1, 0.5f /*green*/);
				else 
					Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye, 0, 0, 1,/**/ 1, 1, 0, 0 /*red*/);
			}

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
Example #13
    void MainLoop()
    {
	    Layer[0] = new VRLayer(Session);

	    while (HandleMessages())
	    {
		    ActionFromInput();

            float newIPD = 0.064f;
            if (DIRECTX.Key['1']) newIPD = 0.05f;
            if (DIRECTX.Key['2']) newIPD = 0.06f;
            if (DIRECTX.Key['3']) newIPD = 0.07f;
            if (DIRECTX.Key['4']) newIPD = 0.08f;
            Layer[0]->GetEyePoses(0, 0, &newIPD);

		    for (int eye = 0; eye < 2; ++eye)
		    {
			    Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
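GetEyePoses takes the new IPD by pointer; presumably it overrides the per-eye offsets before deriving poses. A sketch of that idea against the public API (ovr_GetTrackingState and ovr_CalcEyePoses are real SDK 1.x calls; the wrapper itself is our assumption, not the sample's code):

    // Hypothetical core of GetEyePoses(0, 0, &newIPD): place each eye half
    // the requested IPD either side of centre, then derive both eye poses
    // from the tracked head pose.
    void CalcEyePosesWithIPD(ovrSession session, float ipd, ovrPosef outEyePoses[2])
    {
        ovrVector3f hmdToEyeOffset[2] = { { -ipd * 0.5f, 0, 0 },
                                          {  ipd * 0.5f, 0, 0 } };
        ovrTrackingState ts = ovr_GetTrackingState(session, 0, ovrTrue);
        ovr_CalcEyePoses(ts.HeadPose.ThePose, hmdToEyeOffset, outEyePoses);
    }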
Example #14
    void MainLoop()
    {
	    Layer[0] = new VRLayer(Session);

	    while (HandleMessages())
	    {
		    ActionFromInput();

            ovrTrackingState trackingState = Layer[0]->GetEyePoses();

            // Set jump from accelerometers y pos value into camera
		    MainCam->Pos = XMVectorSet(XMVectorGetX(MainCam->Pos),
			                           GetAccelJumpPosY(this, &trackingState),
									   XMVectorGetZ(MainCam->Pos), 0);

		    for (int eye = 0; eye < 2; ++eye)
		    {
			    Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
		    }

		    Layer[0]->PrepareLayerHeader();
		    DistortAndPresent(1);
	    }
    }
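GetAccelJumpPosY is also defined elsewhere. A crude sketch of the likely mechanics: launch a jump when upward head acceleration spikes, then integrate simple per-frame ballistics. Every constant here is an assumption, as is the BasicVR parameter type (unused in this sketch):

    // Hypothetical stand-in: returns the camera's Y position, driven by
    // accelerometer-triggered jumps with toy per-frame gravity.
    float GetAccelJumpPosY(BasicVR * app, ovrTrackingState * trackingState)
    {
        const float restHeight = 2.0f;   // assumed resting camera height
        static float y   = restHeight;
        static float vel = 0;

        // Trigger a jump only when grounded and the head accelerates upward sharply
        if (y <= restHeight && trackingState->HeadPose.LinearAcceleration.y > 15.0f) // assumed threshold
            vel = 0.1f;                                                              // assumed impulse

        y   += vel;
        vel -= 0.005f;                   // assumed gravity per frame
        if (y <= restHeight) { y = restHeight; vel = 0; }
        return y;
    }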
Example #15
void CPhysicsTask::Update()
{
  HandleMessages();
  CPhysics::GetPhysicsPtr()->Update();
}
Example #16
    void MainLoop()
    {
	    Layer[0] = new VRLayer(Session);

        // Make a duplicate of the left eye texture, and a place to save renderpose
	    ovrPosef extraRenderPose;
	    OculusTexture extraRenderTexture;
        if (!extraRenderTexture.Init(Session, Layer[0]->pEyeRenderTexture[0]->SizeW, Layer[0]->pEyeRenderTexture[0]->SizeH))
            return;
		//Need to commit it at least once here, or it's possible, when going into '1',
		//to have the SDK use it before any texture has been committed
		extraRenderTexture.Commit(); 


	    while (HandleMessages())
	    {
            // Keep a clock of what's happening
            static int clock = 0;
            ++clock;

            // Adjust speed, because we only want movement at certain junctures
            float speed = 1;
            if (DIRECTX.Key['1'])
            {
                if ((clock % 2) != 0) speed = 0;
                else                  speed *= 2;
            }
            ActionFromInput(speed);

            // Get eye poses, but into a temporary buffer.
            ovrPosef tempEyeRenderPose[2];
            Layer[0]->GetEyePoses(tempEyeRenderPose);

            // Now find out player yaw at this time
            XMVECTOR playerOrientation = MainCam->Rot;

            // And, we're going to store the player orientations from when we render
            static XMVECTOR playerOrientationAtRender[2];
            static XMVECTOR extraOrientationAtRender;

            for (int eye = 0; eye < 2; ++eye)
            {
                if (DIRECTX.Key['1'])
                {
                    // Don't do this eye
                    if ((clock & 1) != eye) continue;

                    // In this situation, use the extra buffer, and we're done
                    if (((clock % 4) == 2) && (eye == 0))
                    {
                        extraRenderPose = tempEyeRenderPose[eye];
                        extraOrientationAtRender = playerOrientation;
                        auto rtv = extraRenderTexture.GetRTV();
						Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye, rtv, &extraRenderPose);
						extraRenderTexture.Commit(); 
                        continue;
                    }
                }

                // Otherwise, operate as usual
                Layer[0]->EyeRenderPose[eye] = tempEyeRenderPose[eye];
                playerOrientationAtRender[eye] = playerOrientation;
                Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);
            }

            // If this situation holds, then we want to use the spare texture and pose
		    XMVECTOR diffQuat[2] = { XMQuaternionIdentity(), XMQuaternionIdentity() };
            if ((DIRECTX.Key['1']) && (((clock % 4) == 0) || ((clock % 4) == 3)))
            {
                if (!DIRECTX.Key['2']) diffQuat[0] = XMQuaternionMultiply(XMQuaternionInverse(extraOrientationAtRender), playerOrientation);
			    if (!DIRECTX.Key['2']) diffQuat[1] = XMQuaternionMultiply(XMQuaternionInverse(playerOrientationAtRender[1]), playerOrientation);
                Layer[0]->PrepareLayerHeader(&extraRenderTexture, &extraRenderPose, diffQuat);
            }
            else
            {
			    if (!DIRECTX.Key['2']) diffQuat[0] = XMQuaternionMultiply(XMQuaternionInverse(playerOrientationAtRender[0]), playerOrientation);
			    if (!DIRECTX.Key['2']) diffQuat[1] = XMQuaternionMultiply(XMQuaternionInverse(playerOrientationAtRender[1]), playerOrientation);
                Layer[0]->PrepareLayerHeader(0, 0, diffQuat);
            }

            DistortAndPresent(1);
	    }
    }
Example #17
void CConsoleTask::Update() {
	HandleMessages();
	return;
}
Example #18
int main(int argc, char * argv[])
{
  Operations operation;

  
  if (argc < 2 || (operation = GetOperation(argv[1])) == NumOperations || argc < RequiredArgsForOperation[operation]) {
    Operations op;
    fputs("usage: c_api { ", stderr);
    for (op = OpListen; op < NumOperations; op++) {
      if (op > OpListen)
        fputs(" | ", stderr);
      fputs(OperationNames[op], stderr);
    }
    fputs(" } [ A-party [ B-party ] ]\n", stderr);
    return 1;
  }

  puts("Initialising.\n");

  if (!InitialiseOPAL())
    return 1;

  switch (operation) {
    case OpListen :
      puts("Listening.\n");
      HandleMessages(60000);
      break;

    case OpCall :
      if (argc > 3) {
        if (!DoCall(argv[2], argv[3]))
          break;
      } else {
        if (!DoCall(NULL, argv[2]))
          break;
      }
      HandleMessages(15000);
      break;

    case OpMute :
      if (!DoCall(NULL, argv[2]))
        break;
      HandleMessages(15000);
      if (!DoMute(1))
        break;
      HandleMessages(15000);
      if (!DoMute(0))
        break;
      HandleMessages(15000);
      break;

    case OpHold :
      if (!DoCall(NULL, argv[2]))
        break;
      HandleMessages(15000);
      if (!DoHold())
        break;
      HandleMessages(15000);
      break;

    case OpTransfer :
      if (!DoCall(NULL, argv[2]))
        break;
      HandleMessages(15000);
      if (!DoTransfer(argv[3]))
        break;
      HandleMessages(15000);
      break;

    case OpConsult :
      if (!DoCall(NULL, argv[2]))
        break;
      HandleMessages(15000);
      if (!DoHold())
        break;
      HandleMessages(15000);
      if (!DoCall(NULL, argv[3]))
        break;
      HandleMessages(15000);
      if (!DoTransfer(HeldCallToken))
        break;
      HandleMessages(15000);
      break;

    case OpRegister :
      if (!DoRegister(argv[2], argv[3]))
        break;
      HandleMessages(15000);
      break;

    default :
      break;
  }

  puts("Exiting.\n");

  ShutDownFunction(hOPAL);
  return 0;
}
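Unlike the render loops above, these OPAL samples pass a timeout to HandleMessages. A sketch of what such a helper plausibly looks like against the OPAL C API (OpalGetMessage and OpalFreeMessage are real C-API calls; the handling inside the switch is illustrative only):

    // Sketch: pump OPAL messages until 'timeout' milliseconds elapse with
    // no message arriving, freeing each message after handling it.
    static void HandleMessages(unsigned timeout)
    {
        OpalMessage * message;
        while ((message = OpalGetMessage(hOPAL, timeout)) != NULL) {
            switch (message->m_type) {
                case OpalIndIncomingCall : // illustrative; real samples handle many types
                    puts("Incoming call.\n");
                    break;
                default :
                    break;
            }
            OpalFreeMessage(message);
        }
    }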
Example #19
int main(int argc, char * argv[])
{
    Operations operation;


    if (argc < 2 || (operation = GetOperation(argv[1])) == NumOperations || argc < RequiredArgsForOperation[operation]) {
        fputs("usage: c_api { listen | call | transfer } [ A-party [ B-party ] ]\n", stderr);
        return 1;
    }

    puts("Initialising.\n");

    if (!InitialiseOPAL())
        return 1;

    switch (operation) {
    case OpListen :
        puts("Listening.\n");
        HandleMessages(60000);
        break;

    case OpCall :
        if (!DoCall(argv[2]))
            break;
        HandleMessages(15000);
        break;

    case OpHold :
        if (!DoCall(argv[2]))
            break;
        HandleMessages(15000);
        if (!DoHold())
            break;
        HandleMessages(15000);
        break;

    case OpTransfer :
        if (!DoCall(argv[2]))
            break;
        HandleMessages(15000);
        if (!DoTransfer(argv[3]))
            break;
        HandleMessages(15000);
        break;

    case OpConsult :
        if (!DoCall(argv[2]))
            break;
        HandleMessages(15000);
        if (!DoHold())
            break;
        HandleMessages(15000);
        if (!DoCall(argv[3]))
            break;
        HandleMessages(15000);
        if (!DoTransfer(HeldCallToken))
            break;
        HandleMessages(15000);
        break;

    default :
        break;
    }

    puts("Exiting.\n");

    ShutDownFunction(hOPAL);
    return 0;
}
Example #20
	void MainLoop()
	{
		Layer[0] = new VRLayer(Session);

		// Make a texture to render the zoomed image into.  Make it same size as left eye buffer, for simplicity.
		auto zoomedTexture = new Texture(true, max(Layer[0]->pEyeRenderTexture[0]->SizeW, Layer[0]->pEyeRenderTexture[1]->SizeW),
			max(Layer[0]->pEyeRenderTexture[0]->SizeH, Layer[0]->pEyeRenderTexture[1]->SizeH));

		// Make a scope model - it's small and close to us
		float scopeScale = 0.25f;
		auto cube = new TriangleSet();
		cube->AddQuad(Vertex(XMFLOAT3(scopeScale, scopeScale, 0), 0xffffffff, 0, 0),
			Vertex(XMFLOAT3(-scopeScale, scopeScale, 0), 0xffffffff, 1, 0),
			Vertex(XMFLOAT3(scopeScale, -scopeScale, 0), 0xffffffff, 0, 1),
			Vertex(XMFLOAT3(-scopeScale, -scopeScale, 0), 0xffffffff, 1, 1));
		auto sniperModel = new Model(cube, XMFLOAT3(0, 0, 0), XMFLOAT4(0, 0, 0, 1), new Material(zoomedTexture));

		while (HandleMessages())
		{
			ActionFromInput();
			Layer[0]->GetEyePoses();

			// Render the zoomed scene, making sure we clear the back screen with solid alpha
			DIRECTX.SetAndClearRenderTarget(zoomedTexture->TexRtv, Layer[0]->pEyeDepthBuffer[0], 0.2f, 0.2f, 0.2f, 1);

			// Let's set a slightly smaller viewport, so we get a black border
			int blackBorder = 16;
			DIRECTX.SetViewport((float)Layer[0]->EyeRenderViewport[0].Pos.x + blackBorder,
				(float)Layer[0]->EyeRenderViewport[0].Pos.y + blackBorder,
				(float)Layer[0]->EyeRenderViewport[0].Size.w - 2 * blackBorder,
				(float)Layer[0]->EyeRenderViewport[0].Size.h - 2 * blackBorder);

			// Get the pose information in XM format
			XMVECTOR eyeQuat = ConvertToXM(Layer[0]->EyeRenderPose[0].Orientation);

			// A little boost up
			Layer[0]->EyeRenderPose[0].Position.y += 0.2f;
			Layer[0]->EyeRenderPose[1].Position.y += 0.2f;

			XMVECTOR eyePos = ConvertToXM(Layer[0]->EyeRenderPose[0].Position);


			// Set to origin
			MainCam->Pos = XMVectorSet(0, 0, 0, 0);
			MainCam->Rot = XMVectorSet(0, 0, 0, 1);

			// Get yaw from head rotation - note z is horiz
			XMFLOAT3 e = GetEulerFromQuat(Layer[0]->EyeRenderPose[0].Orientation.x, Layer[0]->EyeRenderPose[0].Orientation.y,
				Layer[0]->EyeRenderPose[0].Orientation.z, Layer[0]->EyeRenderPose[0].Orientation.w);

			static float baseYaw = 0;
			static float basePitch = 0;
			static float count = 0;
			if (DIRECTX.Key[' '])
			{
				count++;
			}
			else
			{
				baseYaw = e.z; //set when off
				basePitch = e.x;
				count = 0;
			}

			e.z -= baseYaw;
			e.x -= basePitch;

			// Master ratio - adjust this if you wish
			float masterRatio = 0.66f;

			float horizOffset = masterRatio*e.z;
			float vertiOffset = masterRatio*e.x;
			if (horizOffset > 0.4) { count = 0;  horizOffset = 0.4f; }
			if (horizOffset < -0.4) { count = 0; horizOffset = -0.4f; }
			if (vertiOffset > 0.4) { count = 0; vertiOffset = 0.4f; }
			if (vertiOffset < -0.4) { count = 0; vertiOffset = -0.4f; }
			Util.Output("horizOffset = %f  verti = %f\n", horizOffset, vertiOffset);

			// Get view and projection matrices for the Rift camera
			Camera finalCam(&eyePos, &(XMQuaternionMultiply(eyeQuat, XMQuaternionRotationRollPitchYaw(-vertiOffset, -horizOffset, 0)))); //This scale is correct for motion
			XMMATRIX view = finalCam.GetViewMatrix();

			// Vary the amount of zoom with '1' and '2'; let's pick a zoomed-in FOV
			static float amountOfZoom = 0.1f;
			if (DIRECTX.Key['1']) amountOfZoom = max(amountOfZoom - 0.002f, 0.050f);
			if (DIRECTX.Key['2']) amountOfZoom = min(amountOfZoom + 0.002f, 0.500f);
			ovrFovPort zoomedFOV;
			zoomedFOV.DownTan = zoomedFOV.UpTan = zoomedFOV.LeftTan = zoomedFOV.RightTan = amountOfZoom;

			// Finally, render zoomed scene onto the texture
			XMMATRIX proj = ConvertToXM(ovrMatrix4f_Projection(zoomedFOV, 0.2f, 1000.0f, ovrProjection_None));
			XMMATRIX projView = XMMatrixMultiply(view, proj);
			RoomScene->Render(&projView, 1, 1, 1, 1, true);

			for (int eye = 0; eye < 2; ++eye)
			{
				// Render main, outer world
				Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);

				// Render scope with special static camera, always in front of us
				static float howFarAway = 0.75f;
				if (DIRECTX.Key['3']) howFarAway = max(howFarAway - 0.002f, 0.25f);
				if (DIRECTX.Key['4']) howFarAway = min(howFarAway + 0.002f, 1.00f);

				//Zero z buffer
				DIRECTX.Context->ClearDepthStencilView(Layer[0]->pEyeDepthBuffer[eye]->TexDsv, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1, 0);

				Camera  StaticMainCam(&XMVectorSet(0, 0, -howFarAway, 0), &XMQuaternionRotationRollPitchYaw(vertiOffset, horizOffset + 3.14f, 0));
				XMMATRIX view = StaticMainCam.GetViewMatrix();
				XMMATRIX proj = ConvertToXM(ovrMatrix4f_Projection(Layer[0]->EyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None));
				XMMATRIX projView = XMMatrixMultiply(view, proj);
				if (DIRECTX.Key[' '])  howFarAway = 0.95f*howFarAway + 0.05f * 0.75f;
				else                   howFarAway = 0.95f*howFarAway + 0.05f * 10.75f;
				sniperModel->Render(&projView, 0, 1, 0, 1, true);
			}

			Layer[0]->PrepareLayerHeader();
			DistortAndPresent(1);
		}

		delete sniperModel;
	}