示例#1
0
void Graphics::exe_cam_rot(uint32_t const _i_zad) {
	// Executes a camera-rotation task: reads the requested pitch/yaw angles
	// from the task with id `_i_zad`, updates the camera orientation
	// quaternion, then removes the task from the queue.
	XMFLOAT3 _katy = ((TaskCamRot*)task[_i_zad])->angles;

	// horizontal rotation (yaw about Y, angle = angles.y);
	// post-multiplied onto the current camera quaternion
	XMVECTOR _kwat_obr = XMQuaternionRotationRollPitchYaw(0.0f, _katy.y, 0.0f);
	XMVECTOR _kwat = XMLoadFloat4(&cam.quat);
	_kwat = XMQuaternionMultiply(_kwat, _kwat_obr);

	// vertical rotation (pitch about X, angle = angles.x);
	// note the reversed multiply order vs. the yaw step — the two rotations
	// are deliberately composed on opposite sides of the camera quaternion
	_kwat_obr = XMQuaternionRotationRollPitchYaw(_katy.x, 0.0f, 0.0f);
	XMStoreFloat4(&cam.quat, XMQuaternionMultiply(_kwat_obr, _kwat));

	task.erase(_i_zad);
}
示例#2
0
// Per-frame canister update: lazily creates the attached point light on the
// first call, spins the canister around Y, bobs it vertically on a sine wave,
// keeps the light just below the canister, then defers to Item::Update.
void Canister::Update(float DeltaTime, Terrain* terrain)
{
	XMFLOAT3	newPos	=	GetFloat3Value( Position );

	// Lazy one-time creation of the blue point light that follows the canister.
	if ( !gPointLight )
	{
		gPointLight	=	new PointLight();
		gPointLight->GetGPULight()->Color		=	XMFLOAT4( 0.0f, 0.0f, 1.0f, 0.0f );
		gPointLight->GetGPULight()->Position	=	newPos;
		gPointLight->GetGPULight()->Range		=	33.333f * 0.40f;	// NOTE(review): magic range constant — meaning unclear, confirm
		gPointLight->GetGPULight()->HasShadow	=	false;

		AddLight( gPointLight );
	}

	gTimeSpan	+=	DeltaTime;

	// Incremental yaw rotation proportional to frame time.
	XMVECTOR QuatV = XMQuaternionRotationRollPitchYaw(0, DeltaTime, 0);
	XMFLOAT4 Quat;

	XMStoreFloat4(&Quat, QuatV);
	AddRotation( Quat );
	// Vertical bob: sine oscillation around gOffset.y with amplitude (gOffset.y - 2).
	newPos.y	=	gOffset.y + ( gOffset.y - 2 ) * sin( 8 * gTimeSpan );
	MoveTo( newPos );
	// Light trails 1 unit below the canister (y adjusted AFTER MoveTo on purpose).
	newPos.y	-=	1.0f;
	if ( gPointLight )
		gPointLight->GetGPULight()->Position	=	newPos;

	Item::Update( DeltaTime, terrain );
}
示例#3
0
// Rotates the camera's forward vector by `amt` radians of yaw
// (rotation about the Y axis). Position is not modified.
void Camera::rotate(float amt)
{
	XMVECTOR fwd = XMLoadFloat3(&forward);
	// Pure yaw quaternion; pitch and roll stay zero.
	XMVECTOR rotation = XMQuaternionRotationRollPitchYaw(0, amt, 0);
	fwd = XMVector3Rotate(fwd, rotation);
	XMStoreFloat3(&forward, fwd);
}
示例#4
0
// Per-frame driver: captures input, steps physics, renders shadow maps,
// voxelizes the scene, runs post-processing and GUI, presents the frame,
// and integrates the directional-light rotation from keyboard state.
// Returns the continue_rendering flag (false stops the render loop).
bool MyListener::frameStarted(float timeSinceLastFrame)
{
	
	mKeyboard->capture();
	mMouse->capture();
	cameraMan->update(timeSinceLastFrame);
	debugWindow->updateFPS(timeSinceLastFrame);

	elapsedTime+=timeSinceLastFrame*0.3f;
	Light* l = mSceneMgr->mShadingMgr->directionalLight;

	
	//XMStoreFloat3(&baseDir,XMVector2Normalize(XMLoadFloat3(&baseDir)));

	//l->direction = baseDir;//XMFLOAT3(baseDir.x*cos(elapsedTime)-baseDir.z*sin(elapsedTime),baseDir.y,baseDir.x*cos(elapsedTime)+baseDir.z*sin(elapsedTime));
	//XMStoreFloat3(&l->direction,XMVector2Normalize(XMLoadFloat3(&l->direction)));
	// Light direction = base direction rotated by the quaternion accumulated
	// from xr/zr at the bottom of this function (previous frame's input).
	XMStoreFloat3(&l->direction,XMVector3Rotate(XMLoadFloat3(&baseDir),XMLoadFloat4(&lightQuat)));

	mSceneMgr->getGuiManager()->update(timeSinceLastFrame/1000.0f);

	// Physics: collect last step's results, sync entities, kick off next step.
	mPhysicsMgr->fetchResults(true);
	mPhysicsMgr->synchronizeEntities();
	mPhysicsMgr->startSimulating(timeSinceLastFrame);

	mShadowMapping->renderShadowMaps();

	//if(count%2==0)
	voxelScene->voxelizeScene(XMFLOAT3(30,30,30),XMFLOAT3(0,0,0));

	mShadowMapping->renderCaustics();

	voxelScene->endFrame(XMFLOAT3(30,30,30),XMFLOAT3(0,0,0));

	count++;

	//voxelScene->unifyVoxels();
	mSceneMgr->setCurrentCamera(mSceneMgr->getCamera("main"));
	mSceneMgr->mShadingMgr->updatePerFrameConstants(timeSinceLastFrame,mSceneMgr->getCamera(),mSceneMgr->getCamera("sun"));
	
	mRS->setBackbufferAsRenderTarget();
	mRS->clearViews();
	//mSceneMgr->renderSceneWithMaterial(mSceneMgr->getMaterial("depthWrite"));
	//mSceneMgr->renderScene();

	pp->render();


	mSceneMgr->getGuiManager()->render();

	mRS->swapChain_->Present(0,0);

	// Accumulate light rotation angles from the key-state flags
	// (xp/xm = rotate about X, zp/zm = rotate about Z).
	if(xp) xr+=timeSinceLastFrame; else if(xm) xr-=timeSinceLastFrame;
	if(zp) zr+=timeSinceLastFrame; else if(zm) zr-=timeSinceLastFrame;

	// Rebuild the light quaternion (pitch = xr, roll = zr); consumed next frame.
	XMStoreFloat4(&lightQuat,XMQuaternionRotationRollPitchYaw(xr,0,zr));

	return continue_rendering;
}
示例#5
0
// Builds a quaternion from a JSON array of three Euler angles in degrees:
// [0] = pitch (x), [1] = yaw (y), [2] = roll (z).
Quat Json2Quat(const Json::Value& value)
{
	Quat ret;
	Vector3 v3;
	// (size_t)0 disambiguates the Json::Value operator[] overload for index 0.
	v3.x = (float)value[(size_t)0].asDouble();
	v3.y = (float)value[1].asDouble();
	v3.z = (float)value[2].asDouble();
	// Degrees -> radians via Angle2Radian before building the quaternion.
	XMVECTOR q = XMQuaternionRotationRollPitchYaw(v3.x * Angle2Radian, v3.y * Angle2Radian, v3.z * Angle2Radian);
	// NOTE(review): assumes Quat is layout-compatible with XMFLOAT4
	// (four contiguous floats x,y,z,w) — confirm against Quat's definition.
	XMStoreFloat4((XMFLOAT4*)&ret, q);
	return ret;
}
// Sets the camera orientation from Euler angles: rotation.x is yaw
// (wrapped to [-pi, pi]), rotation.y is pitch (clamped to avoid flipping
// over the poles), and roll is wrapped like yaw. The stored angles are
// then baked into a quaternion and handed to SetOrientation.
void FirstPersonCamera::SetRotation(const XMFLOAT2& rotation, float roll)
{
    _rotation.x = XMScalarModAngle(rotation.x);
    _rotation.y = clamp(rotation.y, -PiOver2, PiOver2);
    _roll = XMScalarModAngle(roll);

    // Quaternion argument order is (pitch, yaw, roll) = (_rotation.y, _rotation.x, _roll).
    XMFLOAT4 orientation;
    XMStoreFloat4(&orientation,
                  XMQuaternionRotationRollPitchYaw(_rotation.y, _rotation.x, _roll));

    SetOrientation(orientation);
}
示例#7
0
// return true to retry later (e.g. after display lost)
//
// Oculus D3D12 sample main loop: creates the VR session, per-eye render
// textures, a monitor mirror texture, the scene and camera; then per frame
// handles input, renders both eyes with resource-barrier transitions,
// submits the layer to the compositor and blits the mirror to the backbuffer.
// All failure paths funnel through the Done label for cleanup.
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device lost failures cleanly
    ovrMirrorTexture            mirrorTexture = nullptr;
    OculusEyeTexture*           pEyeRenderTexture[2] = { nullptr, nullptr };
    Scene*                      roomScene = nullptr; 
    Camera*                     mainCam = nullptr;
    ovrMirrorTextureDesc        mirrorDesc = {};

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    // Make the eye render buffers (caution if actual size < requested due to HW limits). 
    ovrRecti eyeRenderViewport[2];

    for (int eye = 0; eye < 2; ++eye)
    {
        ovrSizei idealSize = ovr_GetFovTextureSize(session, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
        pEyeRenderTexture[eye] = new OculusEyeTexture();
        if (!pEyeRenderTexture[eye]->Init(session, idealSize.w, idealSize.h, true))
        {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create eye texture.");
        }

        eyeRenderViewport[eye].Pos.x = 0;
        eyeRenderViewport[eye].Pos.y = 0;
        eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureChain)
        {
            if (retryCreate) goto Done;
            FATALERROR("Failed to create texture.");
        }
    }

    // Create a mirror to see on the monitor.
    mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    mirrorDesc.Width = DIRECTX.WinSizeW;
    mirrorDesc.Height = DIRECTX.WinSizeH;
    result = ovr_CreateMirrorTextureDX(session, DIRECTX.CommandQueue, &mirrorDesc, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        FATALERROR("Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);

    // Create camera
    mainCam = new Camera(XMVectorSet(0.0f, 1.6f, 5.0f, 0), XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    long long frameIndex = 0;

    bool drawMirror = true;

    DIRECTX.InitFrame(drawMirror);

    // Main loop
    while (DIRECTX.HandleMessages())
    {
        ovrSessionStatus sessionStatus;
        ovr_GetSessionStatus(session, &sessionStatus);
        if (sessionStatus.ShouldQuit)
        {
            // Because the application is requested to quit, should not request retry
            retryCreate = false;
            break;
        }
        if (sessionStatus.ShouldRecenter)
            ovr_RecenterTrackingOrigin(session);

        if (sessionStatus.IsVisible)
        {
            // Movement step vectors in the camera's current facing direction.
            XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->GetRotVec());
            XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0),  mainCam->GetRotVec());
            XMVECTOR mainCamPos = mainCam->GetPosVec();
            XMVECTOR mainCamRot = mainCam->GetRotVec();
            if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])      mainCamPos = XMVectorAdd(     mainCamPos, forward);
            if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN])    mainCamPos = XMVectorSubtract(mainCamPos, forward);
            if (DIRECTX.Key['D'])                            mainCamPos = XMVectorAdd(     mainCamPos, right);
            if (DIRECTX.Key['A'])                            mainCamPos = XMVectorSubtract(mainCamPos, right);
            // Yaw persists across frames (static); each key press rebuilds the
            // camera rotation from the accumulated yaw angle.
            static float Yaw = 0;
            if (DIRECTX.Key[VK_LEFT])  mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
            if (DIRECTX.Key[VK_RIGHT]) mainCamRot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

            mainCam->SetPosVec(mainCamPos);
            mainCam->SetRotVec(mainCamRot);

            // Animate the cube
            static float cubeClock = 0;
            roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

            // Get both eye poses simultaneously, with IPD offset already included. 
            ovrPosef    EyeRenderPose[2];
            ovrVector3f HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
                                              eyeRenderDesc[1].HmdToEyeOffset };

            double sensorSampleTime;    // sensorSampleTime is fed into the layer later
            ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime);

            // Render Scene to Eye Buffers
            for (int eye = 0; eye < 2; ++eye)
            {
                DIRECTX.SetActiveContext(eye == 0 ? DrawContext_EyeRenderLeft : DrawContext_EyeRenderRight);

                DIRECTX.SetActiveEye(eye);

                // Transition the eye texture so it can be used as a render target.
                CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(pEyeRenderTexture[eye]->GetD3DResource(),
                                                                                       D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
                                                                                       D3D12_RESOURCE_STATE_RENDER_TARGET);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->GetRtv(), pEyeRenderTexture[eye]->GetDsv());
                DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                                    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);
                                
                //Get the pose information in XM format
                XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
                                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                // (eye pose composed onto the player camera pose).
                Camera finalCam(XMVectorAdd(mainCamPos, XMVector3Rotate(eyePos, mainCamRot)), XMQuaternionMultiply(eyeQuat, mainCamRot));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None);
                // Transpose the row-major ovrMatrix4f into XMMATRIX element order.
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
                XMMATRIX prod = XMMatrixMultiply(view, proj);

                roomScene->Render(&prod, 1, 1, 1, 1, true);

                // Transition back so the compositor can sample the eye texture.
                resBar = CD3DX12_RESOURCE_BARRIER::Transition(pEyeRenderTexture[eye]->GetD3DResource(),
                                                              D3D12_RESOURCE_STATE_RENDER_TARGET,
                                                              D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
                DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);

                // Commit rendering to the swap chain
                pEyeRenderTexture[eye]->Commit();

                // kick off eye render command lists before ovr_SubmitFrame()
                DIRECTX.SubmitCommandList(DIRECTX.ActiveContext);
            }

            // Initialize our single full screen Fov layer.
            ovrLayerEyeFov ld = {};
            ld.Header.Type = ovrLayerType_EyeFov;
            ld.Header.Flags = 0;

            for (int eye = 0; eye < 2; ++eye)
            {
                ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureChain;
                ld.Viewport[eye] = eyeRenderViewport[eye];
                ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
                ld.RenderPose[eye] = EyeRenderPose[eye];
                ld.SensorSampleTime = sensorSampleTime;
            }

            ovrLayerHeader* layers = &ld.Header;
            result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
            // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
            if (!OVR_SUCCESS(result))
                goto Done;
            
            frameIndex++;
        }
        
        if (drawMirror)
        {
            DIRECTX.SetActiveContext(DrawContext_Final);

            DIRECTX.SetViewport(0.0f, 0.0f, (float)hmdDesc.Resolution.w / 2, (float)hmdDesc.Resolution.h / 2);

            // Render mirror
            // NOTE(review): ovr_GetMirrorTextureBufferDX with IID_PPV_ARGS AddRef's
            // the returned resource, and mirrorTexRes is never Released here —
            // looks like a per-frame COM reference leak; confirm (cf. the D3D11
            // sample which calls tex->Release()).
            ID3D12Resource* mirrorTexRes = nullptr;
            ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&mirrorTexRes));

            //DIRECTX.SetAndClearRenderTarget(DIRECTX.CurrentFrameResources().SwapChainRtvHandle, nullptr, 1.0f, 0.5f, 0.0f, 1.0f);

            CD3DX12_RESOURCE_BARRIER preMirrorBlitBar[] =
            {
                CD3DX12_RESOURCE_BARRIER::Transition(DIRECTX.CurrentFrameResources().SwapChainBuffer, D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_DEST),
                CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes, D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE)
            };

            // Indicate that the back buffer will now be copied into
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(ARRAYSIZE(preMirrorBlitBar), preMirrorBlitBar);

            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->CopyResource(DIRECTX.CurrentFrameResources().SwapChainBuffer, mirrorTexRes);

            CD3DX12_RESOURCE_BARRIER resBar = CD3DX12_RESOURCE_BARRIER::Transition(mirrorTexRes,
                                                                                   D3D12_RESOURCE_STATE_COPY_SOURCE,
                                                                                   D3D12_RESOURCE_STATE_RENDER_TARGET);
            DIRECTX.CurrentFrameResources().CommandLists[DIRECTX.ActiveContext]->ResourceBarrier(1, &resBar);
        }

        DIRECTX.SubmitCommandListAndPresent(drawMirror);
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture)
        ovr_DestroyMirrorTexture(session, mirrorTexture);

    for (int eye = 0; eye < 2; ++eye)
    {
        delete pEyeRenderTexture[eye];
    }
    DIRECTX.ReleaseDevice();
    ovr_Destroy(session);

    // Retry on ovrError_DisplayLost
    return retryCreate || (result == ovrError_DisplayLost);
}
示例#8
0
//-------------------------------------------
// Quick-and-dirty IK (original note: とりあえずIK)
// CCD-style IK solver: for each IK chain, iteratively rotates each link bone
// so the chain's target bone approaches the IK goal bone, applies rotation
// limits (with a special-case hack for knee bones), and refreshes the world
// matrices of the affected bones. Finally propagates the pose to children.
void BoneModel::VMDIkAnimation()
{

	//XMStoreFloat4()
	//XMLoadFloat4()
	if (mBone.empty())return;
	if (mMotion.empty())return;

	DWORD mBoneNum = mBone.size();
	DWORD mIkNum = mIk.size();
	// IK computation
	for (DWORD i = 0; i < mIkNum; i++){
		//{
		//	int i = 0;
		Ik& ik = mIk[i];
		UINT tg_idx = ik.target_bone_index;
		UINT ik_idx = ik.bone_index;

		for (UINT ite = 0; ite<ik.iterations; ++ite){
			for (UINT chn = 0; chn<ik.chain_length; ++chn){
				UINT link_idx = ik.child_bone_index[chn];// bone index of this chain link
				if (link_idx >= mBoneNum)continue;
				Bone& link_bone = mBone[link_idx];

				//UINT link_pidx = link_bone.mIkBoneIdx;
				UINT link_pidx = link_bone.mHierarchy.mIdxParent;

				//if (link_bone.mIkBoneIdx != 0){
				//	continue;
				//}

				if (link_pidx >= mBoneNum)continue;
				Bone& link_parent = mBone[link_pidx];

				Bone& tg_bone = mBone[tg_idx];
				(void)tg_bone;
				Bone& ik_bone = mBone[ik_idx];
				(void)ik_bone;

				// World positions: row 3 of each pose matrix is the translation.
				XMVECTOR target_wpos = mBone[tg_idx].mMtxPose.r[3];
				XMVECTOR ik_wpos = mBone[ik_idx].mMtxPose.r[3];
				XMVECTOR lp_wpos = link_parent.mMtxPose.r[3];

				// Transform into the link bone's local space
				XMVECTOR Determinant;
				XMMATRIX inv_mtx = XMMatrixInverse(&Determinant, link_bone.mMtxPose);
				XMVECTOR tg_pos = XMVector4Transform(target_wpos, inv_mtx);
				XMVECTOR ik_pos = XMVector4Transform(ik_wpos, inv_mtx);
				XMVECTOR lp_pos = XMVector4Transform(lp_wpos, inv_mtx);


				// Rotation axis and angle
				XMVECTOR rot_axis = XMVectorSet(1, 0, 0, 0);
				float ang = 0.0f;
				bool same_dir = false;
				if (!RotDir(tg_pos, ik_pos, ik.control_weight, &rot_axis, &ang)){
					same_dir = true;
				}

				if (!same_dir){

					// Rotation that aligns tg_dir with ik_dir
					XMVECTOR rot = XMQuaternionRotationAxis(rot_axis, ang);

					XMVECTOR lrot = FloatToVector(link_bone.mRot);
					XMVECTOR bone_rot_before = lrot;
					link_bone.mRot = VectorToFloat(XMQuaternionMultiply(rot, lrot));

					// Distances used by the knee heuristic below.
					float dist_tg = XMVectorGetX(XMVector3Length(tg_pos));
					float dist_ik = XMVectorGetX(XMVector3Length(ik_pos));
					(void)dist_ik;
					float dist_lp = XMVectorGetX(XMVector3Length(lp_pos));
					(void)dist_lp;
					float dist_pltg = XMVectorGetX(XMVector3Length(lp_pos - tg_pos));
					float dist_plik = XMVectorGetX(XMVector3Length(lp_pos - ik_pos));
					float dot_tgik = XMVectorGetX(XMVector3Dot(XMVector3Normalize(tg_pos), XMVector3Normalize(ik_pos)));
					(void)dot_tgik;

					// Rotation limits
					if (/*link.bLimit*/ 1){
						XMVECTOR rotmax, rotmin;
						// 2 rad ≈ 114.5916 degrees
						float a = 2;// XM_PI / 180.0f * 57.25f;
						rotmax = XMVectorSet(a, a, a, 0);//link.vMax;
						rotmin = XMVectorSet(-a, -a, -a, 0);//link.vMin;

						// If the bone name contains "ひざ" (knee), restrict rotation
						// to bending about the X axis only.
						if (std::string::npos != link_bone.mStrName.find("ひざ")){
							rotmax = XMVectorSet(-XM_PI / 180.0f*0.5f, 0, 0, 0);
							rotmin = XMVectorSet(-XM_PI, 0, 0, 0);
						}
						struct IkLink{
							XMFLOAT4 mMax;
							XMFLOAT4 mMin;
						};
						IkLink link = { VectorToFloat(rotmax), VectorToFloat(rotmin) };
						//Bone& link = link_bone;
						link_bone.mRot = VectorToFloat(LimitAngle(FloatToVector(link_bone.mRot), rotmin, rotmax));

						XMVECTOR angxyz = GetAngle(rot);
						// Temporary hack to bend the knee — quite rough
						if (XMVectorGetX(angxyz) >= 0 &&
							//0.9f < dot_tgik &&
							//dist_tg > dist_ik &&
							dist_pltg > dist_plik &&
							link.mMax.x < 0 && link.mMax.y == link.mMin.y && link.mMax.z == link.mMin.z){
							// Tangent plane of the parent link's rotation (staying close
							// to this plane brings the chain closer to the IK goal)
							XMVECTOR lp_nor = XMVector3Normalize(-lp_pos);// normal of the plane
							// Aim for the position where the dot product with lp_nor becomes 0.
							// There are two candidates; pick the one whose |dot| is smaller
							// after the rotation limit is applied.
							XMVECTOR tng = XMVector3Cross(XMVectorSet(1, 0, 0, 0), lp_nor);
							// the two candidates: +tng and -tng
							XMVECTOR rot_axis0, rot_axis1;
							float ang0 = 0, ang1 = 0;

							// Restrict the rotation axis to X
							rot_axis1 = rot_axis0 = XMVectorSet(1, 0, 0, 0);
							XMVECTOR tdir = XMVector3Normalize(XMVectorSetX(tg_pos, 0));
							tng = XMVector3Normalize(XMVectorSetX(tng, 0));
							RotDir(tdir, tng, ik.control_weight, &rot_axis0, &ang0);
							RotDir(tdir, -tng, ik.control_weight, &rot_axis1, &ang1);
							if (XMVectorGetX(rot_axis0) < 0.0f)ang0 = -ang0;
							if (XMVectorGetX(rot_axis1) < 0.0f)ang1 = -ang1;

							// Original note: "this is definitely not right" —
							// damping hack to suppress twitching.
							float coef = (dist_pltg - dist_plik) / dist_tg;
							if (coef > 1)coef = 1;
							ang0 *= coef;
							ang1 *= coef;


							// ang0/ang1 are angles relative to the current pose;
							// convert to relative angles that respect the rotation limits.
							float angx_b = XMVectorGetX(GetAngle(bone_rot_before));
							float angx_a0 = angx_b + ang0;
							float angx_a1 = angx_b + ang1;
							if (angx_a0 < link.mMin.x) angx_a0 = link.mMin.x;
							if (angx_a0 > link.mMax.x) angx_a0 = link.mMax.x;
							if (angx_a1 < link.mMin.x) angx_a1 = link.mMin.x;
							if (angx_a1 > link.mMax.x) angx_a1 = link.mMax.x;
							ang0 = angx_a0 - angx_b;
							ang1 = angx_a1 - angx_b;


							XMVECTOR rot0 = XMQuaternionRotationRollPitchYaw(ang0, 0, 0);
							XMVECTOR rot1 = XMQuaternionRotationRollPitchYaw(ang1, 0, 0);

							// Evaluate both candidates and keep the one that lands
							// closer to the tangent plane (smaller |dot| with lp_nor).
							XMVECTOR tdir0 = XMVector3TransformCoord(tdir, XMMatrixRotationQuaternion(rot0));
							XMVECTOR tdir1 = XMVector3TransformCoord(tdir, XMMatrixRotationQuaternion(rot1));
							float d0 = XMVectorGetX(XMVectorAbs(XMVector3Dot(tdir0, lp_nor)));
							float d1 = XMVectorGetX(XMVectorAbs(XMVector3Dot(tdir1, lp_nor)));
							if (d0 < d1){
								link_bone.mRot = VectorToFloat(XMQuaternionMultiply(rot0, bone_rot_before));
							}
							else{
								link_bone.mRot = VectorToFloat(XMQuaternionMultiply(rot1, bone_rot_before));
							}
						}
					}

				}




				// Update the world matrix
				link_bone.mMtxPose = SQTMatrix(FloatToVector(link_bone.mScale), FloatToVector(link_bone.mRot), FloatToVector(link_bone.mPos));
				if (link_bone.mHierarchy.mIdxParent < mBoneNum){
					link_bone.mMtxPose = XMMatrixMultiply(link_bone.mMtxPose, mBone[link_bone.mHierarchy.mIdxParent].mMtxPose);
				}

				// Recompute the world matrices of links lower in the chain
				for (int lidown = chn - 1; lidown >= 0; --lidown){
					UINT idx = ik.child_bone_index[lidown];
					if (idx >= mBoneNum)continue;
					Bone& linkb = mBone[idx];
					linkb.mMtxPose = SQTMatrix(FloatToVector(linkb.mScale), FloatToVector(linkb.mRot), FloatToVector(linkb.mPos));
					if (linkb.mHierarchy.mIdxParent < mBoneNum){
						linkb.mMtxPose = XMMatrixMultiply(linkb.mMtxPose, mBone[linkb.mHierarchy.mIdxParent].mMtxPose);
					}
				}

				// Refresh the target bone's world matrix as well.
				mBone[tg_idx].mMtxPose = SQTMatrix(FloatToVector(mBone[tg_idx].mScale), FloatToVector(mBone[tg_idx].mRot), FloatToVector(mBone[tg_idx].mPos));
				if (mBone[tg_idx].mHierarchy.mIdxParent < mBoneNum){
					mBone[tg_idx].mMtxPose = XMMatrixMultiply(mBone[tg_idx].mMtxPose, mBone[mBone[tg_idx].mHierarchy.mIdxParent].mMtxPose);
				}
			}
		}


		//Bone& b = mBone[tg_idx];
		//Bone& b2 = mBone[mBone[tg_idx].mHierarchy.mIdxParent];
		//Bone& b3 = mBone[b2.mHierarchy.mIdxParent];
		//int sa = 1;

		// Propagate the IK results to child bones
		//UpdatePose();
	}
	UpdatePose();
}
示例#9
0
/**
 *  Utility/Rotations.cpp
 *  (c) Jonathan Capps
 *  Created 11 Oct. 2011
 */

#include <Windows.h>
#include <xnamath.h>

// Precomputed orientation quaternions shared across the project.
// NOTE(review): these are dynamically initialized globals — other
// translation units must not read them during their own static
// initialization (static-init-order), confirm usage.

// No rotation.
extern const XMVECTOR IDENTITY_QUAT = XMQuaternionIdentity();

// 90° rotation about the -Z axis.
extern const XMVECTOR QUARTER_TURN = XMQuaternionRotationAxis(
	XMVectorSet( 0.0f, 0.0f, -1.0f, 0.0f ),
	XM_PIDIV2 );

// 180° rotation about the -Z axis.
extern const XMVECTOR HALF_TURN = XMQuaternionRotationAxis(
	XMVectorSet( 0.0f, 0.0f, -1.0f, 0.0f ),
	XM_PI );

// 270° rotation about the -Z axis.
extern const XMVECTOR THREE_QUARTER_TURN = XMQuaternionRotationAxis(
	XMVectorSet( 0.0f, 0.0f, -1.0f, 0.0f ),
	3.0f * XM_PIDIV2 );

// Argument order is (pitch, yaw, roll): TOP_JOIN is a -90° yaw.
extern const XMVECTOR TOP_JOIN = XMQuaternionRotationRollPitchYaw(
	0.0f, -XM_PIDIV2, 0.0f );

// -90° pitch combined with -90° yaw.
extern const XMVECTOR SIDE_JOIN = XMQuaternionRotationRollPitchYaw(
	-XM_PIDIV2, -XM_PIDIV2, 0.0f );
示例#10
0
// return true to retry later (e.g. after display lost)
//
// Oculus D3D11 sample main loop using a single shared eye texture
// (both eyes rendered side-by-side, optionally via instanced stereo).
// Creates the session, render/depth buffers, mirror texture, scene and
// camera; per frame handles input, renders both eyes, submits the layer,
// and blits the mirror to the window. Failure paths funnel through Done.
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device lost failures cleanly
    ovrMirrorTexture mirrorTexture = nullptr;
    OculusTexture  * pEyeRenderTexture = nullptr;
    DepthBuffer    * pEyeDepthBuffer = nullptr;
    Scene          * roomScene = nullptr; 
    Camera         * mainCam = nullptr;
    ovrMirrorTextureDesc desc = {};

    bool isVisible          = true;
    long long frameIndex    = 0;
    bool useInstancing      = false;
    const int repeatDrawing = 1;

    ovrSession session;
    ovrGraphicsLuid luid;
    ovrResult result = ovr_Create(&session, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(session);

    // Setup Device and Graphics
    // Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

    ovrRecti eyeRenderViewport[2];

    // Make a single eye texture
    {
        ovrSizei eyeTexSizeL = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
        ovrSizei eyeTexSizeR = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
        ovrSizei textureSize;
        // Both eyes share one texture: widths add, height is the larger of the two.
        textureSize.w = eyeTexSizeL.w + eyeTexSizeR.w;
        textureSize.h = max(eyeTexSizeL.h, eyeTexSizeR.h);

        pEyeRenderTexture = new OculusTexture();
        if (!pEyeRenderTexture->Init(session, textureSize.w, textureSize.h))
        {
            if (retryCreate) goto Done;
            VALIDATE(OVR_SUCCESS(result), "Failed to create eye texture.");
        }

        pEyeDepthBuffer = new DepthBuffer(DIRECTX.Device, textureSize.w, textureSize.h);

        // set viewports: left eye in the left half, right eye in the right half.
        eyeRenderViewport[0].Pos.x = 0;
        eyeRenderViewport[0].Pos.y = 0;
        eyeRenderViewport[0].Size = eyeTexSizeL;

        eyeRenderViewport[1].Pos.x = eyeTexSizeL.w;
        eyeRenderViewport[1].Pos.y = 0;
        eyeRenderViewport[1].Size = eyeTexSizeR;
    }

    if (!pEyeRenderTexture->TextureChain)
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create texture.");
    }

    // Create a mirror to see on the monitor.
    desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
    desc.Width = DIRECTX.WinSizeW;
    desc.Height = DIRECTX.WinSizeH;
    result = ovr_CreateMirrorTextureDX(session, DIRECTX.Device, &desc, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

    // Create the room model
    roomScene = new Scene(false);

    // Create camera
    // NOTE(review): taking the address of temporaries (&XMVectorSet(...),
    // &XMQuaternionIdentity()) is a non-standard MSVC extension — confirm
    // this is intended and that Camera copies the values.
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

    // Setup VR components, filling out description
    ovrEyeRenderDesc eyeRenderDesc[2];
    eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
    eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    // Main loop
    while (DIRECTX.HandleMessages())
    {
        // Movement step vectors in the camera's current facing direction.
        XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
        XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0),  mainCam->Rot);
        XMVECTOR up      = XMVector3Rotate(XMVectorSet(0, 0.05f, 0, 0), mainCam->Rot);
        if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])	  mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
        if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
        if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
        if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);
        if (DIRECTX.Key['Q'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, up);
        if (DIRECTX.Key['E'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, up);

        // Yaw persists across frames (static); each key press rebuilds the
        // camera rotation from the accumulated yaw angle.
        static float Yaw = 0;
        if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
        if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

        // 'P' toggles the SDK's render-timing performance HUD.
        if (DIRECTX.Key['P'])
            ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_AppRenderTiming));
        else
            ovr_SetInt(session, OVR_PERF_HUD_MODE, int(ovrPerfHud_Off));

        // Hold 'I' to render both eyes in one instanced pass.
        useInstancing = DIRECTX.Key['I'];

        // Animate the cube
        static float cubeClock = 0;
        roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

        // Get both eye poses simultaneously, with IPD offset already included. 
        ovrPosef         EyeRenderPose[2];
        ovrVector3f      HmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset,
                                               eyeRenderDesc[1].HmdToEyeOffset };

        double sensorSampleTime;    // sensorSampleTime is fed into the layer later
        ovr_GetEyePoses(session, frameIndex, ovrTrue, HmdToEyeOffset, EyeRenderPose, &sensorSampleTime);

        // Render scene to eye texture
        if (isVisible)
        {
            DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture->GetRTV(), pEyeDepthBuffer);

            // calculate eye transforms
            XMMATRIX viewProjMatrix[2];
            for (int eye = 0; eye < 2; ++eye)
            {
                //Get the pose information in XM format
                XMVECTOR eyeQuat = XMLoadFloat4((XMFLOAT4 *)&EyeRenderPose[eye].Orientation.x);
                XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0);

                // Get view and projection matrices for the Rift camera
                // (eye pose composed onto the player camera pose).
                XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
                Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat, mainCam->Rot)));
                XMMATRIX view = finalCam.GetViewMatrix();
                ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.1f, 100.0f, ovrProjection_None);
                // Transpose the row-major ovrMatrix4f into XMMATRIX element order.
                XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
                    p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
                    p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
                    p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);

                if (useInstancing)
                {
                    // scale and offset projection matrix to shift image to correct part of texture for each eye
                    XMMATRIX scale = XMMatrixScaling(0.5f, 1.0f, 1.0f);
                    XMMATRIX translate = XMMatrixTranslation((eye==0) ? -0.5f : 0.5f, 0.0f, 0.0f);
                    proj = XMMatrixMultiply(proj, scale);
                    proj = XMMatrixMultiply(proj, translate);
                }

                viewProjMatrix[eye] = XMMatrixMultiply(view, proj);
            }

            if (useInstancing)
            {
                // use instancing for stereo
                DIRECTX.SetViewport(0.0f, 0.0f, (float)eyeRenderViewport[0].Size.w + eyeRenderViewport[1].Size.w, (float)eyeRenderViewport[0].Size.h);

                // render scene
                for (int i = 0; i < repeatDrawing; i++)
                    roomScene->RenderInstanced(&viewProjMatrix[0], 1, 1, 1, 1, true);
            }
            else
            {
                // non-instanced path
                for (int eye = 0; eye < 2; ++eye)
                {
                    // set viewport
                    DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
                        (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

                    // render scene
                    for (int i = 0; i < repeatDrawing; i++)
                        roomScene->Render(&viewProjMatrix[eye], 1, 1, 1, 1, true);
                }
            }

            // Commit rendering to the swap chain
            pEyeRenderTexture->Commit();
        }

        // Initialize our single full screen Fov layer.
        ovrLayerEyeFov ld = {};
        ld.Header.Type = ovrLayerType_EyeFov;
        ld.Header.Flags = 0;
        ld.SensorSampleTime = sensorSampleTime;

        for (int eye = 0; eye < 2; ++eye)
        {
            ld.ColorTexture[eye] = pEyeRenderTexture->TextureChain;
            ld.Viewport[eye] = eyeRenderViewport[eye];
            ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
            ld.RenderPose[eye] = EyeRenderPose[eye];
        }

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(session, frameIndex, nullptr, &layers, 1);
        // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
        if (!OVR_SUCCESS(result))
            goto Done;

        // ovrSuccess_NotVisible (a non-error status) means skip eye rendering next frame.
        isVisible = (result == ovrSuccess);

        // Render mirror
        ID3D11Texture2D* tex = nullptr;
        ovr_GetMirrorTextureBufferDX(session, mirrorTexture, IID_PPV_ARGS(&tex));
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex);
        tex->Release();
        DIRECTX.SwapChain->Present(0, 0);

        frameIndex++;
    }

    // Release resources
Done:
    delete mainCam;
    delete roomScene;
    if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture);
    delete pEyeRenderTexture;
    delete pEyeDepthBuffer;

    DIRECTX.ReleaseDevice();
    ovr_Destroy(session);

    // Retry on ovrError_DisplayLost
    // NOTE(review): the OVR_SUCCESS(result) term makes a clean exit
    // (window closed after a successful submit) also return true and
    // retry the whole loop — confirm intended; the sibling D3D12 sample
    // returns only retryCreate || (result == ovrError_DisplayLost).
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
示例#11
0
void AaEntity::pitch(float pitch) 
{ 
	dirtyWM=true; 
	*quaternion = XMQuaternionMultiply(*quaternion,XMQuaternionRotationRollPitchYaw(pitch,0,0));
}
示例#12
0
void AaEntity::yaw(float yaw) 
{ 
	dirtyWM=true; 
	*quaternion = XMQuaternionMultiply(*quaternion,XMQuaternionRotationRollPitchYaw(0,yaw,0));
}
示例#13
0
// Runs the full VR render loop with PS3Eye camera capture and servo-driven
// head tracking over a serial port.
// Returns true to retry later (e.g. after display lost).
static bool MainLoop(bool retryCreate)
{
    // Initialize these to nullptr here to handle device lost failures cleanly
	ovrTexture     * mirrorTexture = nullptr;
	OculusTexture  * pEyeRenderTexture[2] = { nullptr, nullptr };
	DepthBuffer    * pEyeDepthBuffer[2] = { nullptr, nullptr };
    Scene          * roomScene = nullptr; 
    Camera         * mainCam = nullptr;
	D3D11_TEXTURE2D_DESC td = {};

	ovrHmd HMD;
	ovrGraphicsLuid luid;
	ovrResult result = ovr_Create(&HMD, &luid);
    if (!OVR_SUCCESS(result))
        return retryCreate;

    ovrHmdDesc hmdDesc = ovr_GetHmdDesc(HMD);

	// -------------------------------------------------------------------
	// Add: Make Instance that CL Eye Camera Capture Class
	CLEyeCameraCapture* cam[2] = { NULL };

	// Query for number of connected camera
	int numCams = CLEyeGetCameraCount();
	if (numCams == 0)
	{
		printf_s("No PS3Eye Camera detected\n");
		goto Done;
	}
	// BUGFIX: cam[] only has room for two capture objects. Without this clamp,
	// three or more connected cameras would overflow the array below (and the
	// cleanup loop at Done: would read past it too).
	if (numCams > 2)
		numCams = 2;
	printf_s("Found %d cameras\n", numCams);

	for (int iCam = 0; iCam < numCams; iCam++)
	{
		char windowName[64];

		// Query unique camera uuid
		GUID guid = CLEyeGetCameraUUID(iCam);
		printf("Camera %d GUID: [%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x]\n",
			iCam + 1, guid.Data1, guid.Data2, guid.Data3,
			guid.Data4[0], guid.Data4[1], guid.Data4[2],
			guid.Data4[3], guid.Data4[4], guid.Data4[5],
			guid.Data4[6], guid.Data4[7]);
		sprintf_s(windowName, "Camera Window %d", iCam + 1);

		// Create camera capture object and start its capture thread
		cam[iCam] = new CLEyeCameraCapture(windowName, guid, CLEYE_COLOR_RAW, CLEYE_VGA, 30);
		cam[iCam]->StartCapture();
	}
	// -------------------------------------------------------------------

	// Setup Device and Graphics
	// Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
    if (!DIRECTX.InitDevice(hmdDesc.Resolution.w / 2, hmdDesc.Resolution.h / 2, reinterpret_cast<LUID*>(&luid)))
        goto Done;

	// Make the eye render buffers (caution if actual size < requested due to HW limits). 
	ovrRecti         eyeRenderViewport[2];

	for (int eye = 0; eye < 2; ++eye)
	{
		ovrSizei idealSize = ovr_GetFovTextureSize(HMD, (ovrEyeType)eye, hmdDesc.DefaultEyeFov[eye], 1.0f);
		pEyeRenderTexture[eye] = new OculusTexture();
        if (!pEyeRenderTexture[eye]->Init(HMD, idealSize.w, idealSize.h))
        {
            if (retryCreate) goto Done;
            // BUGFIX: 'result' still holds the success code from ovr_Create at
            // this point, so VALIDATE(OVR_SUCCESS(result), ...) could never
            // fire. Fail unconditionally, matching the TextureSet check below.
	        VALIDATE(false, "Failed to create eye texture.");
        }
		pEyeDepthBuffer[eye] = new DepthBuffer(DIRECTX.Device, idealSize.w, idealSize.h);
		eyeRenderViewport[eye].Pos.x = 0;
		eyeRenderViewport[eye].Pos.y = 0;
		eyeRenderViewport[eye].Size = idealSize;
        if (!pEyeRenderTexture[eye]->TextureSet)
        {
            if (retryCreate) goto Done;
            VALIDATE(false, "Failed to create texture.");
        }
	}

	// Create a mirror to see on the monitor.
	td.ArraySize = 1;
    td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
	td.Width = DIRECTX.WinSizeW;
	td.Height = DIRECTX.WinSizeH;
	td.Usage = D3D11_USAGE_DEFAULT;
	td.SampleDesc.Count = 1;
	td.MipLevels = 1;
    result = ovr_CreateMirrorTextureD3D11(HMD, DIRECTX.Device, &td, 0, &mirrorTexture);
    if (!OVR_SUCCESS(result))
    {
        if (retryCreate) goto Done;
        VALIDATE(false, "Failed to create mirror texture.");
    }

	// Create the room model
    roomScene = new Scene(false);

	// Create camera
    mainCam = new Camera(&XMVectorSet(0.0f, 1.6f, 5.0f, 0), &XMQuaternionIdentity());

	// Setup VR components, filling out description
	ovrEyeRenderDesc eyeRenderDesc[2];
	eyeRenderDesc[0] = ovr_GetRenderDesc(HMD, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
	eyeRenderDesc[1] = ovr_GetRenderDesc(HMD, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);

    bool isVisible = true;

	// Open the servo controller's serial port (115200 8E1 on COM3).
	DCB portConfig;
	portConfig.BaudRate = 115200;
	portConfig.Parity = EVENPARITY;

	g_seriPort.Start("\\\\.\\COM3", &portConfig);


	// Main loop
	while (DIRECTX.HandleMessages())
	{
		// Keyboard navigation: WASD/arrows move, left/right arrows yaw.
		XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, -0.05f, 0), mainCam->Rot);
		XMVECTOR right   = XMVector3Rotate(XMVectorSet(0.05f, 0, 0, 0),  mainCam->Rot);
		if (DIRECTX.Key['W'] || DIRECTX.Key[VK_UP])	  mainCam->Pos = XMVectorAdd(mainCam->Pos, forward);
		if (DIRECTX.Key['S'] || DIRECTX.Key[VK_DOWN]) mainCam->Pos = XMVectorSubtract(mainCam->Pos, forward);
		if (DIRECTX.Key['D'])                         mainCam->Pos = XMVectorAdd(mainCam->Pos, right);
		if (DIRECTX.Key['A'])                         mainCam->Pos = XMVectorSubtract(mainCam->Pos, right);
		static float Yaw = 0;
		if (DIRECTX.Key[VK_LEFT])  mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw += 0.02f, 0);
		if (DIRECTX.Key[VK_RIGHT]) mainCam->Rot = XMQuaternionRotationRollPitchYaw(0, Yaw -= 0.02f, 0);

		// Animate the cube
		static float cubeClock = 0;
		roomScene->Models[0]->Pos = XMFLOAT3(9 * sin(cubeClock), 3, 9 * cos(cubeClock += 0.015f));

		// Get both eye poses simultaneously, with IPD offset already included. 
		ovrPosef         EyeRenderPose[2];
		ovrVector3f      HmdToEyeViewOffset[2] = { eyeRenderDesc[0].HmdToEyeViewOffset,
			                                       eyeRenderDesc[1].HmdToEyeViewOffset };
        double frameTime = ovr_GetPredictedDisplayTime(HMD, 0);
        // Keeping sensorSampleTime as close to ovr_GetTrackingState as possible - fed into the layer
        double           sensorSampleTime = ovr_GetTimeInSeconds();
		ovrTrackingState hmdState = ovr_GetTrackingState(HMD, frameTime, ovrTrue);
		ovr_CalcEyePoses(hmdState.HeadPose.ThePose, HmdToEyeViewOffset, EyeRenderPose);

		// --------------------------------------------------------------------------
		// Add: Get Head Yaw Roll Pitch and forward yaw/pitch to the pan/tilt servos.
		float hmdPitch = 0.0f;
		float hmdRoll = 0.0f;
		float hmdYaw = 0.0f;

		OVR::Posef HeadPose = hmdState.HeadPose.ThePose;
		HeadPose.Rotation.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&hmdYaw, &hmdPitch, &hmdRoll);

		SetPos(2, ServoRoll(hmdYaw));
		SetPos(3, ServoRoll(hmdPitch));

		// --------------------------------------------------------------------------


		// Render Scene to Eye Buffers (skipped while the HMD is not visible)
        if (isVisible)
        {
            for (int eye = 0; eye < 2; ++eye)
		    {
			    // Increment to use next texture, just before writing
			    pEyeRenderTexture[eye]->AdvanceToNextTexture();

			    // Clear and set up rendertarget
			    int texIndex = pEyeRenderTexture[eye]->TextureSet->CurrentIndex;
			    DIRECTX.SetAndClearRenderTarget(pEyeRenderTexture[eye]->TexRtv[texIndex], pEyeDepthBuffer[eye]);
			    DIRECTX.SetViewport((float)eyeRenderViewport[eye].Pos.x, (float)eyeRenderViewport[eye].Pos.y,
				    (float)eyeRenderViewport[eye].Size.w, (float)eyeRenderViewport[eye].Size.h);

			    //Get the pose information in XM format
			    XMVECTOR eyeQuat = XMVectorSet(EyeRenderPose[eye].Orientation.x, EyeRenderPose[eye].Orientation.y,
				                               EyeRenderPose[eye].Orientation.z, EyeRenderPose[eye].Orientation.w);
			    XMVECTOR eyePos = XMVectorSet(EyeRenderPose[eye].Position.x, EyeRenderPose[eye].Position.y, EyeRenderPose[eye].Position.z, 0);

			    // Get view and projection matrices for the Rift camera:
			    // the tracked eye pose is composed onto the keyboard-driven camera.
			    XMVECTOR CombinedPos = XMVectorAdd(mainCam->Pos, XMVector3Rotate(eyePos, mainCam->Rot));
			    Camera finalCam(&CombinedPos, &(XMQuaternionMultiply(eyeQuat,mainCam->Rot)));
			    XMMATRIX view = finalCam.GetViewMatrix();
			    ovrMatrix4f p = ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_RightHanded);
			    // ovrMatrix4f is row-major relative to XMMATRIX, hence the transpose here.
			    XMMATRIX proj = XMMatrixSet(p.M[0][0], p.M[1][0], p.M[2][0], p.M[3][0],
				                            p.M[0][1], p.M[1][1], p.M[2][1], p.M[3][1],
				                            p.M[0][2], p.M[1][2], p.M[2][2], p.M[3][2],
				                            p.M[0][3], p.M[1][3], p.M[2][3], p.M[3][3]);
			    XMMATRIX prod = XMMatrixMultiply(view, proj);
			    roomScene->Render(&prod, 1, 1, 1, 1, true);
		    }
        }

		// Initialize our single full screen Fov layer.
        ovrLayerEyeFov ld = {};
		ld.Header.Type = ovrLayerType_EyeFov;
		ld.Header.Flags = 0;

		for (int eye = 0; eye < 2; ++eye)
		{
			ld.ColorTexture[eye] = pEyeRenderTexture[eye]->TextureSet;
			ld.Viewport[eye] = eyeRenderViewport[eye];
			ld.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
			ld.RenderPose[eye] = EyeRenderPose[eye];
            ld.SensorSampleTime = sensorSampleTime;
		}

        ovrLayerHeader* layers = &ld.Header;
        result = ovr_SubmitFrame(HMD, 0, nullptr, &layers, 1);
        // exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
        if (!OVR_SUCCESS(result))
            goto Done;

        isVisible = (result == ovrSuccess);

        // Render mirror (the mirror texture is owned by the SDK; no Release here)
        ovrD3D11Texture* tex = (ovrD3D11Texture*)mirrorTexture;
        DIRECTX.Context->CopyResource(DIRECTX.BackBuffer, tex->D3D11.pTexture);
        DIRECTX.SwapChain->Present(0, 0);
	}

	// Release resources
Done:
    delete mainCam;
    delete roomScene;
	if (mirrorTexture) ovr_DestroyMirrorTexture(HMD, mirrorTexture);
    for (int eye = 0; eye < 2; ++eye)
    {
	    delete pEyeRenderTexture[eye];
        delete pEyeDepthBuffer[eye];
    }
	DIRECTX.ReleaseDevice();
	ovr_Destroy(HMD);

	g_seriPort.End();

	// Stop and destroy any capture objects that were created.
	for (int iCam = 0; iCam < numCams; iCam++)
	{
		cam[iCam]->StopCapture();
		delete cam[iCam];
	}

    // Retry on ovrError_DisplayLost
    return retryCreate || OVR_SUCCESS(result) || (result == ovrError_DisplayLost);
}
示例#14
0
	// Per-frame VR loop implementing a "sniper scope": the scene is rendered a
	// second time with a narrow FOV into zoomedTexture, which is mapped onto a
	// small quad (sniperModel) floating in front of the viewer. Head yaw/pitch
	// relative to a baseline (captured while SPACE is up) pans the scope view.
	void MainLoop()
	{
		Layer[0] = new VRLayer(Session);

		// Make a texture to render the zoomed image into.  Make it same size as left eye buffer, for simplicity.
		auto zoomedTexture = new Texture(true, max(Layer[0]->pEyeRenderTexture[0]->SizeW, Layer[0]->pEyeRenderTexture[1]->SizeW),
			max(Layer[0]->pEyeRenderTexture[0]->SizeH, Layer[0]->pEyeRenderTexture[1]->SizeH));

		// Make a scope model - its small and close to us
		float scopeScale = 0.25f;
		auto cube = new TriangleSet();
		cube->AddQuad(Vertex(XMFLOAT3(scopeScale, scopeScale, 0), 0xffffffff, 0, 0),
			Vertex(XMFLOAT3(-scopeScale, scopeScale, 0), 0xffffffff, 1, 0),
			Vertex(XMFLOAT3(scopeScale, -scopeScale, 0), 0xffffffff, 0, 1),
			Vertex(XMFLOAT3(-scopeScale, -scopeScale, 0), 0xffffffff, 1, 1));
		auto sniperModel = new Model(cube, XMFLOAT3(0, 0, 0), XMFLOAT4(0, 0, 0, 1), new Material(zoomedTexture));

		while (HandleMessages())
		{
			ActionFromInput();
			Layer[0]->GetEyePoses();

			// Render the zoomed scene, making sure we clear the back screen with solid alpha
			// (re-uses eye 0's depth buffer for the off-screen pass).
			DIRECTX.SetAndClearRenderTarget(zoomedTexture->TexRtv, Layer[0]->pEyeDepthBuffer[0], 0.2f, 0.2f, 0.2f, 1);

			// Lets set a slightly small viewport, so we get a black border
			int blackBorder = 16;
			DIRECTX.SetViewport((float)Layer[0]->EyeRenderViewport[0].Pos.x + blackBorder,
				(float)Layer[0]->EyeRenderViewport[0].Pos.y + blackBorder,
				(float)Layer[0]->EyeRenderViewport[0].Size.w - 2 * blackBorder,
				(float)Layer[0]->EyeRenderViewport[0].Size.h - 2 * blackBorder);

			// Get the pose information in XM format
			XMVECTOR eyeQuat = ConvertToXM(Layer[0]->EyeRenderPose[0].Orientation);

			// A little boost up
			// NOTE(review): this mutates the stored eye poses, so the later
			// per-eye world render also sees the +0.2 offset each frame.
			Layer[0]->EyeRenderPose[0].Position.y += 0.2f;
			Layer[0]->EyeRenderPose[1].Position.y += 0.2f;

			XMVECTOR eyePos = ConvertToXM(Layer[0]->EyeRenderPose[0].Position);


			// Set to origin
			MainCam->Pos = XMVectorSet(0, 0, 0, 0);
			MainCam->Rot = XMVectorSet(0, 0, 0, 1);

			// Get yaw from head rotation - note z is horiz
			XMFLOAT3 e = GetEulerFromQuat(Layer[0]->EyeRenderPose[0].Orientation.x, Layer[0]->EyeRenderPose[0].Orientation.y,
				Layer[0]->EyeRenderPose[0].Orientation.z, Layer[0]->EyeRenderPose[0].Orientation.w);

			// While SPACE is up, keep re-capturing the current head angles as
			// the zero reference; while SPACE is held, accumulate 'count'.
			static float baseYaw = 0;
			static float basePitch = 0;
			static float count = 0;
			if (DIRECTX.Key[' '])
			{
				count++;
			}
			else
			{
				baseYaw = e.z; //set when off
				basePitch = e.x;
				count = 0;
			}

			// Offsets are relative to the captured baseline.
			e.z -= baseYaw;
			e.x -= basePitch;

			// Master ratio - adjust this if you wish
			float masterRatio = 0.66f;

			// Scale and clamp the scope pan to +/-0.4 rad; hitting the clamp
			// also resets 'count'.
			float horizOffset = masterRatio*e.z;
			float vertiOffset = masterRatio*e.x;
			if (horizOffset > 0.4) { count = 0;  horizOffset = 0.4f; }
			if (horizOffset < -0.4) { count = 0; horizOffset = -0.4f; }
			if (vertiOffset > 0.4) { count = 0; vertiOffset = 0.4f; }
			if (vertiOffset < -0.4) { count = 0; vertiOffset = -0.4f; }
			Util.Output("horizOffset = %f  verti = %f\n", horizOffset, vertiOffset);

			// Get view and projection matrices for the Rift camera
			Camera finalCam(&eyePos, &(XMQuaternionMultiply(eyeQuat, XMQuaternionRotationRollPitchYaw(-vertiOffset, -horizOffset, 0)))); //This scale is correct for motion
			XMMATRIX view = finalCam.GetViewMatrix();

			// Vary amount of zoom with '1' and '2'Lets pick a zoomed in FOV
			static float amountOfZoom = 0.1f;
			if (DIRECTX.Key['1']) amountOfZoom = max(amountOfZoom - 0.002f, 0.050f);
			if (DIRECTX.Key['2']) amountOfZoom = min(amountOfZoom + 0.002f, 0.500f);
			ovrFovPort zoomedFOV;
			zoomedFOV.DownTan = zoomedFOV.UpTan = zoomedFOV.LeftTan = zoomedFOV.RightTan = amountOfZoom;

			// Finally, render zoomed scene onto the texture
			XMMATRIX proj = ConvertToXM(ovrMatrix4f_Projection(zoomedFOV, 0.2f, 1000.0f, ovrProjection_None));
			XMMATRIX projView = XMMatrixMultiply(view, proj);
			RoomScene->Render(&projView, 1, 1, 1, 1, true);

			for (int eye = 0; eye < 2; ++eye)
			{
				// Render main, outer world
				Layer[0]->RenderSceneToEyeBuffer(MainCam, RoomScene, eye);

				// Render scope with special static camera, always in front of us
				static float howFarAway = 0.75f;
				if (DIRECTX.Key['3']) howFarAway = max(howFarAway - 0.002f, 0.25f);
				if (DIRECTX.Key['4']) howFarAway = min(howFarAway + 0.002f, 1.00f);

				//Zero z buffer so the scope quad always draws over the world
				DIRECTX.Context->ClearDepthStencilView(Layer[0]->pEyeDepthBuffer[eye]->TexDsv, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1, 0);

				Camera  StaticMainCam(&XMVectorSet(0, 0, -howFarAway, 0), &XMQuaternionRotationRollPitchYaw(vertiOffset, horizOffset + 3.14f, 0));
				XMMATRIX view = StaticMainCam.GetViewMatrix();
				XMMATRIX proj = ConvertToXM(ovrMatrix4f_Projection(Layer[0]->EyeRenderDesc[eye].Fov, 0.2f, 1000.0f, ovrProjection_None));
				XMMATRIX projView = XMMatrixMultiply(view, proj);
				// Ease the scope toward 0.75 while aiming (SPACE) or far away otherwise.
				if (DIRECTX.Key[' '])  howFarAway = 0.95f*howFarAway + 0.05f * 0.75f;
				else                   howFarAway = 0.95f*howFarAway + 0.05f * 10.75f;
				sniperModel->Render(&projView, 0, 1, 0, 1, true);
			}

			Layer[0]->PrepareLayerHeader();
			DistortAndPresent(1);
		}

		// NOTE(review): ownership of 'cube'/'zoomedTexture' presumably passes to
		// sniperModel via Model/Material - confirm they are freed by this delete.
		delete sniperModel;
	}
示例#15
0
// Builds a quaternion from Euler angles in radians; x, y, z are passed
// straight through as the pitch, yaw and roll arguments respectively.
Quaternion Quaternion::FromEuler(float x, float y, float z)
{
    return Quaternion(XMQuaternionRotationRollPitchYaw(x, y, z));
}
		Quaternion(float yaw, float pitch, float roll)
		{
			XMStoreFloat4A(this, XMQuaternionRotationRollPitchYaw(pitch, yaw, roll));
		}
示例#17
0
void AaEntity::roll(float roll) 
{ 
	dirtyWM=true; 
	*quaternion = XMQuaternionMultiply(*quaternion,XMQuaternionRotationRollPitchYaw(0,0,roll));
}