// Convenience overload: packs the scalar components into glm vectors and
// forwards to the vector-based Reset().
void FPCamera::Reset(float eyeX, float eyeY, float eyeZ, float centerX, float centerY, float centerZ, float upX, float upY, float upZ) {
    Reset(glm::vec3(eyeX, eyeY, eyeZ),
          glm::vec3(centerX, centerY, centerZ),
          glm::vec3(upX, upY, upZ));
}
void SetupMatrix(float timeDelta) { static float total = 0.0f; total += timeDelta; // translate model to origin D3DXMATRIX world ; D3DXMatrixTranslation(&world, 0.0f, 0.0f, 0.0f) ; D3DXMATRIX rotMatrix; D3DXMatrixRotationY(&rotMatrix, total); world *= rotMatrix; g_pd3dDevice->SetTransform(D3DTS_WORLD, &world) ; // set view D3DXVECTOR3 eyePt(0.0f, 0.0f, -10.0f) ; D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; g_pd3dDevice->SetTransform(D3DTS_VIEW, &view) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ; }
// Builds model/view/projection for a fixed orthographic camera and publishes
// every matrix through the shared uniform block (binding point 0).
void OrthoCamera::apply( glm::mat4x4 modelMatrix ) {
    // Fixed view: camera 10 units down +Z, looking at the origin, Y up.
    glm::vec3 eye( 0.0f, 0.0f, 10.0f );
    glm::vec3 target( 0.0f, 0.0f, 0.0f );
    glm::vec3 worldUp( 0.0f, 1.0f, 0.0f );
    glm::mat4 viewMtx = glm::lookAt( eye, target, worldUp );
    glm::mat4 modelView = viewMtx * modelMatrix;

    // Derive the ortho volume from the current GL viewport extents.
    GLint viewport[4];
    glGetIntegerv( GL_VIEWPORT, viewport );
    float aspect = (float)viewport[2] / (float)viewport[3];
    float halfHoriz = ( (float)viewport[3] * aspect ) / 2.0f; // == width / 2
    float halfVert  = (float)viewport[3] / 2.0f;
    glm::mat4 orthoMtx = glm::ortho( -halfHoriz, halfHoriz, -halfVert, halfVert, mNear, mFar );

    // Cache everything the shaders need and upload it in one go.
    mCache.cameraPosition = eye;
    mCache.model = modelMatrix;
    mCache.view = viewMtx;
    mCache.modelView = modelView;
    mCache.projection = orthoMtx;
    mCache.mvp = orthoMtx * modelView;
    mUniformBlock_Matrix.writeData( &mCache );
    mUniformBlock_Matrix.updateBuffer();
    mUniformBlock_Matrix.bindToBufferTarget( 0 );
}
// Positions the rendering camera for the current pass. Cube-map generation
// passes are delegated to the graphics manager; otherwise the camera is set
// up according to the active motion-blur mode.
void Camera::setCamera(const VisualizationParameterSet& visParams, const int generateCubeMap)
{
  // set the camera
  if(generateCubeMap > 0)
  {
    // Cube-map pass: aim at the reflection object (or the origin if none).
    graphicsManager->setCubeMapCamera(generateCubeMap, graphicsManager->reflectionObject ? graphicsManager->reflectionObject->getPosition() : Vector3d());
    return;
  }
  else
  {
    // Camera-local axes: forward is +X, up is +Z (Z-up world).
    Vector3d forwardVec(1.0,0.0,0.0);
    Vector3d upVec(0.0,0.0,1.0);

    // if the camera is bound to a physical object and motion blur is active
    // the camera needs to be transformed across the render passes
    if(visParams.mb_mode != MOTIONBLUR_OFF)
    {
      if(visParams.mb_mode == MOTIONBLUR_6PASSES || visParams.mb_mode == MOTIONBLUR_12PASSES ||
         visParams.mb_mode == MOTIONBLUR_24PASSES || visParams.mb_mode == MOTIONBLUR_ACCUMULATION)
      {
        // Multi-pass blur: interpolate the camera along its recent trajectory,
        // one sample per pass, when history is available.
        if(cycle_order_offset >= 0 && visParams.mb_renderPasses > 1)
        {
          // Offset into the position history for this pass, scaled by the
          // exposure-to-physics-time conversion.
          float pos_offset = cycle_order_offset - ((float)(visParams.mb_currentPass-1.0f)/(visParams.mb_renderPasses-1.0f))*graphicsManager->getExposure2Physic();
          GLHelper::getGLH()->setInterpolatedCameraLookAt( previous_positions, previous_rotations, pos_offset);
        }
        else
          // No usable history yet: fall back to the current pose.
          GLHelper::getGLH()->setMatrix(position, rotation);
      }
      else if(visParams.mb_mode == MOTIONBLUR_VELOCITY_BUFFER ||
              visParams.mb_mode == MOTIONBLUR_VELOCITY_BUFFER_2 ||
              visParams.mb_mode == MOTIONBLUR_VELOCITY_BUFFER_2_GEO)
      {
        // Velocity-buffer blur: a single pass from the current pose.
        forwardVec.rotate(rotation);
        upVec.rotate(rotation);
        gluLookAt(position.v[0], position.v[1], position.v[2],
                  position.v[0] + forwardVec.v[0], position.v[1] + forwardVec.v[1], position.v[2] + forwardVec.v[2],
                  upVec.v[0], upVec.v[1], upVec.v[2]);
      }
    }
    else
    {
      // No motion blur: plain look-at from the current pose.
      forwardVec.rotate(rotation);
      upVec.rotate(rotation);
      gluLookAt(position.v[0], position.v[1], position.v[2],
                position.v[0] + forwardVec.v[0], position.v[1] + forwardVec.v[1], position.v[2] + forwardVec.v[2],
                upVec.v[0], upVec.v[1], upVec.v[2]);
    }
  }
}
void DrawCube() { // translate model to origin D3DXMATRIX world ; D3DXMatrixTranslation(&world, 0.0f, 0.0f, 0.0f) ; g_pd3dDevice->SetTransform(D3DTS_WORLD, &world) ; // set view D3DXVECTOR3 eyePt(5.0f, 5.0f, -5.0f) ; D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; g_pd3dDevice->SetTransform(D3DTS_VIEW, &view) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ; D3DXMATRIX worldviewproj = world * view * proj; // Set matrix g_pEffect->SetMatrix(g_hWVP, &worldviewproj); // Set technique g_pEffect->SetTechnique(g_hTech); // Render pass UINT numPass = 0; g_pEffect->Begin(&numPass, 0); g_pEffect->BeginPass(0); // Set texture D3DXCreateTextureFromFile(g_pd3dDevice, "../Common/Media/crate.jpg", &g_pCubeTexture) ; g_pEffect->SetTexture("g_pCubeTexture", g_pCubeTexture); /*g_pd3dDevice->SetSamplerState(0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR); g_pd3dDevice->SetSamplerState(0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR); g_pd3dDevice->SetSamplerState(0, D3DSAMP_MIPFILTER, D3DTEXF_LINEAR);*/ // Set stream source, and index buffer g_pd3dDevice->SetStreamSource( 0, g_pVB, 0, sizeof(Vertex) ); g_pd3dDevice->SetIndices(g_pIB) ; g_pd3dDevice->SetFVF(VERTEX_FVF) ; // Totally 24 points and 12 triangles g_pd3dDevice->DrawIndexedPrimitive(D3DPT_TRIANGLELIST, 0, 0, 24, 0, 12) ; g_pEffect->EndPass(); g_pEffect->End(); }
// Returns this device's up direction: the canonical Y-up vector rotated by
// the device's orientation matrix.
cv::Mat QOpticalDevice::getUpVector()
{
    // Canonical "up" in the reference frame.
    cv::Vec3d localUp(0.0, 1.0, 0.0);
    // Reshape to a single-channel column so it can be matrix-multiplied.
    cv::Mat up = cv::Mat(localUp).reshape(1);
    // Rotate it into this device's frame.
    return orientation * up;
}
// Creates the Direct3D 9 device in windowed mode with an auto depth-stencil,
// enables depth testing, configures the camera's view/projection, sets
// trilinear+wrap sampling on stage 0, and creates the teapot mesh.
// Returns S_OK on success, E_FAIL on any creation failure.
HRESULT InitD3D( HWND hWnd )
{
    // Create the D3D object, which is needed to create the D3DDevice.
    if( NULL == ( g_pD3D = Direct3DCreate9( D3D_SDK_VERSION ) ) )
    {
        MessageBoxA(NULL, "Create D3D9 object failed!", "Error", 0) ;
        return E_FAIL;
    }

    D3DPRESENT_PARAMETERS d3dpp;
    ZeroMemory( &d3dpp, sizeof(d3dpp) );
    d3dpp.Windowed = TRUE; // use window mode, not full screen
    d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
    d3dpp.BackBufferFormat = D3DFMT_UNKNOWN; // match the current display mode
    d3dpp.EnableAutoDepthStencil = TRUE ;    // let D3D manage the depth buffer
    d3dpp.AutoDepthStencilFormat = D3DFMT_D16 ;

    // Create device
    if( FAILED( g_pD3D->CreateDevice( D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &d3dpp, &g_pd3dDevice ) ) )
    {
        MessageBoxA(NULL, "Create D3D9 device failed!", "Error", 0) ;
        return E_FAIL;
    }

    // Enable depth testing.
    g_pd3dDevice->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE) ;

    // Set view matrix: camera at (0, 1, -5), looking at the origin, Y up.
    D3DXVECTOR3 eyePt(0, 1.0f, -5.0f) ;
    D3DXVECTOR3 lookAt(0, 0.0f, 0) ;
    D3DXVECTOR3 upVec(0, 1.0f, 0) ;
    g_pCamera->SetViewParams(eyePt, lookAt, upVec) ;

    // Set projection matrix: 45-degree FOV, square aspect, near 1 / far 1000.
    g_pCamera->SetProjParams(D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ;

    // Trilinear filtering with wrap addressing on texture stage 0.
    g_pd3dDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR );
    g_pd3dDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR );
    g_pd3dDevice->SetSamplerState( 0, D3DSAMP_MIPFILTER, D3DTEXF_LINEAR );
    g_pd3dDevice->SetSamplerState( 0, D3DSAMP_ADDRESSU, D3DTADDRESS_WRAP );
    g_pd3dDevice->SetSamplerState( 0, D3DSAMP_ADDRESSV, D3DTADDRESS_WRAP );

    // Create teapot
    D3DXCreateTeapot(g_pd3dDevice, &g_pTeapotMesh, NULL) ;

    return S_OK;
}
// Installs fixed view and projection transforms on the device.
VOID SetupMatrix()
{
    // View: camera slightly above the origin, 15 units back on -Z, Y up.
    D3DXVECTOR3 cameraPos(0, 1.0f, -15.0f);
    D3DXVECTOR3 target(0, 0.0f, 0);
    D3DXVECTOR3 worldUp(0, 1.0f, 0);
    D3DXMATRIX viewMtx;
    D3DXMatrixLookAtLH(&viewMtx, &cameraPos, &target, &worldUp);
    g_pd3dDevice->SetTransform(D3DTS_VIEW, &viewMtx);

    // Projection: 45-degree FOV, square aspect, near 1 / far 1000.
    D3DXMATRIX projMtx;
    D3DXMatrixPerspectiveFovLH(&projMtx, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f);
    g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &projMtx);
}
// Orbital camera: the eye orbits mPosition at distance mDistance, rotated by
// the stored pitch (x) and yaw (y) angles, always looking back at the center.
void Camera::applyOrbital( glm::mat4x4 modelMatrix ) {
    // Start on the +Z axis at the orbit distance, then rotate about X and Y.
    glm::mat4 ident( 1.0f );
    glm::vec4 orbitPos( 0.0f, 0.0f, mDistance, 0.0f );
    orbitPos = glm::rotate( ident, mRotation.x, GLM_X_AXIS ) * orbitPos;
    orbitPos = glm::rotate( ident, mRotation.y, GLM_Y_AXIS ) * orbitPos;

    // The orbit is centered on the camera's stored focal position.
    glm::vec3 focus( mPosition.x, mPosition.y, mPosition.z );
    glm::vec3 worldUp( 0.0f, 1.0f, 0.0f );
    glm::mat4 viewMtx = glm::lookAt( glm::vec3( orbitPos ) + focus, focus, worldUp );
    applyModelView( modelMatrix, viewMtx );
}
// First-person camera: the eye sits at mPosition and looks along the +Z
// forward direction rotated by the stored pitch (x) and yaw (y) angles.
void Camera::apply( glm::mat4x4 modelMatrix ) {
    glm::vec3 eye( mPosition.x, mPosition.y, mPosition.z );

    // Rotate the canonical forward direction (+Z) by pitch then yaw.
    glm::mat4 ident( 1.0f );
    glm::vec4 forward( 0.0f, 0.0f, 1.0f, 0.0f );
    forward = glm::rotate( ident, mRotation.x, GLM_X_AXIS ) * forward;
    forward = glm::rotate( ident, mRotation.y, GLM_Y_AXIS ) * forward;

    // Look from the eye toward eye + forward, Y up.
    glm::vec3 worldUp( 0.0f, 1.0f, 0.0f );
    glm::mat4 viewMtx = glm::lookAt( eye, glm::vec3( forward ) + eye, worldUp );
    applyModelView( modelMatrix, viewMtx );
}
void SetupMatrix() { g_pd3dDevice->SetTransform(D3DTS_WORLD, &gWorldMatrix) ; // set view D3DXVECTOR3 eyePt(0.0f, 0.0f, -10.0f) ; D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; g_pd3dDevice->SetTransform(D3DTS_VIEW, &view) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ; }
void SetupMatrix(float timeDelta) { static float totalTime = 0; totalTime += timeDelta; // translate model to origin D3DXMATRIX matWorld ; D3DXMatrixTranslation(&matWorld, 0.0f, 0.0f, 0.0f); D3DXMATRIX rotMatrix; D3DXMatrixRotationY(&rotMatrix, totalTime); matWorld *= rotMatrix; // Matrix in shader was stored in column-major order, we need transpose it first. // Effect->SetMatrix will transpose it automatically, but SetVertexShader not, this function set the raw data. D3DXMatrixTranspose(&matWorld, &matWorld); // Set vertex shader variable g_pd3dDevice->SetVertexShaderConstantF(0, matWorld, 4); // set view // Make sure eye point and light postion at the same side beyond the teapot. D3DXVECTOR3 eyePt(0.0f, 0.0f, -10.0f) ; D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; D3DXMATRIX matViewProj = view * proj; D3DXMatrixTranspose(&matViewProj, &matViewProj); // Set vertex shader variable HRESULT hr = g_pd3dDevice->SetVertexShaderConstantF(4, matViewProj, 4); if (FAILED(hr)) { MessageBox(NULL, L"Error", L"Set vertex shader variable failed", 0); } }
// Delta3D example: loads a terrain and a BRDM vehicle model, positions the
// camera to look at the vehicle, then runs the application loop.
int main()
{
    // Build the data search path: stock Delta3D data plus the example assets.
    std::string dataPath = dtCore::GetDeltaDataPathList();
    dtCore::SetDataFilePathList(dataPath + ";" + dtCore::GetDeltaRootPath() + "/examples/data" + ";" + dtCore::GetDeltaRootPath() + "/examples/testApp/;");

    dtCore::RefPtr<dtABC::Application> app = new dtABC::Application( "config.xml" );

    //load some terrain
    dtCore::RefPtr<dtCore::Object> terrain = new dtCore::Object( "Terrain" );
    terrain->LoadFile( "models/terrain_simple.ive" );
    app->AddDrawable( terrain.get() );

    //load an object
    dtCore::RefPtr<dtCore::Object> brdm = new dtCore::Object( "BRDM" );
    brdm->LoadFile( "models/brdm.ive" );
    app->AddDrawable( brdm.get() );

    // Place the vehicle slightly above the ground, rotated 90 degrees in
    // heading (Delta3D rotations are heading/pitch/roll in degrees).
    osg::Vec3 brdmPosition( 0.0f, 0.0f, 3.5f );
    osg::Vec3 brdmRotation( 90.0f, 0.0f, 0.0f );
    dtCore::Transform trans;
    trans.SetTranslation( brdmPosition );
    trans.SetRotation( brdmRotation );
    brdm->SetTransform( trans );

    // Adjust the Camera position: 30 units back (-Y), 15 up, looking at the
    // vehicle with +Z as the up direction (Z-up world).
    dtCore::Transform camPos;
    osg::Vec3 camXYZ( 0.f, -30.f, 15.f );
    osg::Vec3 lookAtXYZ ( brdmPosition );
    osg::Vec3 upVec ( 0.f, 0.f, 1.f );
    camPos.Set( camXYZ, lookAtXYZ, upVec );
    app->GetCamera()->SetTransform( camPos );

    // Configure and enter the main loop (blocks until the app exits).
    app->Config();
    app->Run();

    return 0;
}
// Renders the scene from the sensor's point of view into the imaging buffer:
// sets viewport/projection from the sensor's opening angles and range, places
// the camera at the sensor pose, draws the scene flat-shaded, and stores the
// GL matrices needed to unproject depth values afterwards.
void DistanceSensor::renderingInstructions(ImagingBuffer<float>& buffer)
{
    // set viewport; near/far planes are the sensor's measuring range
    glViewport(0, 0, buffer.resolutionX, buffer.resolutionY);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    // aspect = tan(angleX/2)/tan(angleY/2) maps the horizontal opening angle
    // onto gluPerspective's aspect parameter.
    gluPerspective(Functions::toDeg(angleY), tan(angleX/2) / tan(angleY/2), minRange, maxRange);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    // clear the viewport to the simulation's background color
    glClearDepth(1.0);
    Surface* backgroundSurface = simulation->getBackgroundSurface();
    glClearColor(backgroundSurface->color[0], backgroundSurface->color[1], backgroundSurface->color[2], backgroundSurface->color[3]);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // set the visual parameters: flat shading is enough for a sensor pass
    VisualizationParameterSet visParams;
    visParams.surfaceStyle = VisualizationParameterSet::FLAT_SHADING;
    visParams.drawForSensor = true;
    simulation->enableStateDL(visParams.surfaceStyle);

    // set the camera: sensor-local forward is +X and up is +Z, both rotated
    // into world space by the sensor's orientation
    Vector3d forwardVec(1.0,0.0,0.0);
    Vector3d upVec(0.0,0.0,1.0);
    forwardVec.rotate(rotation);
    upVec.rotate(rotation);
    gluLookAt(position.v[0], position.v[1], position.v[2],
              position.v[0] + forwardVec.v[0], position.v[1] + forwardVec.v[1], position.v[2] + forwardVec.v[2],
              upVec.v[0], upVec.v[1], upVec.v[2]);

    // save the matrices/viewport so depth values can be unprojected later
    glGetDoublev(GL_MODELVIEW_MATRIX, buffer.modelViewMatrix);
    glGetDoublev(GL_PROJECTION_MATRIX, buffer.projectionMatrix);
    glGetIntegerv(GL_VIEWPORT, buffer.viewPort);

    rootNode->draw(visParams);
    glFlush();
    simulation->disableStateDL(visParams.surfaceStyle);
}
void SetupMatrix() { // translate model to origin D3DXMATRIX world ; D3DXMatrixTranslation(&world, 0.0f, 0.0f, 0.0f) ; g_pd3dDevice->SetTransform(D3DTS_WORLD, &world) ; // set view D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXVECTOR3 eyePt(0.0f, 0.0f, -20.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; g_pd3dDevice->SetTransform(D3DTS_VIEW, &view) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ; }
void SetupMatrix() { // Set object position D3DXMatrixTranslation(&matWorld[0], -2.0f, 0.0f, 0.0f) ; D3DXMatrixTranslation(&matWorld[1], 2.0f, 0.0f, 0.0f) ; D3DXMatrixTranslation(&matWorld[2], 0.0f, 2.0f, 0.0f) ; D3DXMatrixTranslation(&matWorld[3], 0.0f, -2.0f, 0.0f) ; // set view D3DXVECTOR3 eyePt(0.0f, 0.0f, -10.0f) ; D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; g_pd3dDevice->SetTransform(D3DTS_VIEW, &view) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ; }
// Create asteroid (called at start and whenever an asteroid is destroyed) void initAsteroid(object *o, vector *p, float r) { object A; A.verts = (vector*)malloc(sizeof(vector)*13); int i; for(i = 0; i < 12; i++) { vector vi = makeVec(cos(i*pi/6.0), sin(i*pi/6.0)); A.verts[i] = scaleVec(&vi, r*(0.75+0.25*cos(rand()))); } A.verts[i] = A.verts[0]; A.size = 13; A.pos = copyVec(p); A.dir = upVec(); A.ang = (float)(rand()%10); A.turn = 2.0; vector vel = makeVec(sin((float)rand()), cos((float)rand())); A.vel = scaleVec(&vel, 2.0+cos(rand())); A.spd = 5.0; A.type = ASTEROID; A.mode = GL_LINE_LOOP; A.state = ASTEROID_ACTIVE; A.radius = r; *o = A; }
void DrawTeapot() { // translate model to origin D3DXMATRIX world ; D3DXMatrixTranslation(&world, 0.0f, 0.0f, 0.0f) ; g_pd3dDevice->SetTransform(D3DTS_WORLD, &world) ; // set view D3DXVECTOR3 eyePt(0.0f, 0.0f, -10.0f) ; D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f) ; D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f) ; D3DXMATRIX view ; D3DXMatrixLookAtLH(&view, &eyePt, &lookCenter, &upVec) ; g_pd3dDevice->SetTransform(D3DTS_VIEW, &view) ; // set projection D3DXMATRIX proj ; D3DXMatrixPerspectiveFovLH(&proj, D3DX_PI / 4, 1.0f, 1.0f, 1000.0f) ; g_pd3dDevice->SetTransform(D3DTS_PROJECTION, &proj) ; D3DXMATRIX worldviewproj = world * view * proj; // Set matrix g_pEffect->SetMatrix(g_hWVP, &worldviewproj); // Set technique g_pEffect->SetTechnique(g_hTech); // Render pass UINT numPass = 0; g_pEffect->Begin(&numPass, 0); g_pEffect->BeginPass(0); g_pTeapotMesh->DrawSubset(0); g_pEffect->EndPass(); g_pEffect->End(); }
// Create star that twinkles occasionally void initStar(object *o, vector *l) { object star; vector* v = (vector*)malloc(sizeof(vector)*8); star.verts = v; int i; for(i=0;i<8;i+=2) { *v = makeVec(0.1*cos(pi*(float)i/4.0), 0.1*sin(pi*(float)i/4.0)); v++; *v = makeVec(0.1*cos(pi*(float)(i+1)/4.0), 0.1*sin(pi*(float)(i+1)/4.0)); v++; } star.size = 8; star.pos = copyVec(l); star.dir = upVec(); star.ang = 0.0; star.turn = 0.0; star.vel = zeroVec(); star.spd = 50.0; star.type = STAR; star.mode = GL_LINE_LOOP; star.state = 0; *o = star; }
// Create gibs (called each time an object is destroyed) // oa: array of objects to put gibs into // o: object to create gibs form // flags: state to begin gibs in void initGib(object *oa, object *o, int flags) { int i; for (i=0; i<o->size; i++) { object gib; vector *gv = (vector*)malloc(sizeof(vector)*2); gib.verts = gv; gv->x = o->verts[i].x; gv->y = o->verts[i].y; gv++; if (i==o->size-1) { gv->x = o->verts[0].x; gv->y = o->verts[0].y; } else { gv->x = o->verts[i+1].x; gv->y = o->verts[i+1].y; } vector mid = makeVec((gib.verts[0].x+gib.verts[1].x)/2.0, (gib.verts[0].y+gib.verts[1].y)/2.0); gib.pos = localToWorld(&mid, o); // vector dv = subVec(&(gib.pos), &(o->pos)); gib.verts[0] = subVec(&mid, &(gib.verts[0])); gib.verts[1] = subVec(&mid, &(gib.verts[1])); gib.size = 2; gib.dir = upVec(); gib.ang = o->ang; gib.turn = 1.0+0.5*cos((float)rand()); vector vel = makeVec(sin((float)rand()), cos((float)rand())); gib.vel = scaleVec(&vel, 0.2+cos(rand())); gib.spd = 2.0; gib.type = GIB; gib.mode = GL_LINES; gib.state = flags; gib.lifetime = 0.5; oa[i] = gib; } }
// Returns the world-space step vector (one of +/-x, +/-y, +/-z, scaled to
// roughly one screen pixel of the plot bounds) whose direction best matches
// the requested screen motion (upAndDown or leftAndRight) for the current
// camera orientation. inAndOut is delegated to the depth-translation helper.
avtVector VisitPointTool::ComputeTranslationDistance(int direction)
{
    // This shouldn't happen, but just in case
    if (direction == none)
        return avtVector(0,0,0);

    if (direction == inAndOut)
        return ComputeDepthTranslationDistance();

    vtkCamera *camera = proxy.GetCanvas()->GetActiveCamera();
    int i;
    int *size = proxy.GetCanvas()->GetSize();
    double bounds[6];
    proxy.GetBounds(bounds);
    double dx = bounds[1] - bounds[0];
    double dy = bounds[3] - bounds[2];
    double dz = bounds[5] - bounds[4];

    // Candidate step vectors along each signed axis, scaled so one step moves
    // about one pixel's worth of the bounds extent.
    // NOTE(review): all six are divided by size[1] (viewport height), even
    // the x-axis candidates — presumably intentional for uniform pixel
    // scaling, but confirm size[0] wasn't meant for x.
    std::vector<avtVector> axes;
    axes.push_back(avtVector(1., 0., 0.) * (dx / double(size[1])));
    axes.push_back(avtVector(-1., 0., 0.) * (dx / double(size[1])));
    axes.push_back(avtVector(0., 1., 0.) * (dy / double(size[1])));
    axes.push_back(avtVector(0., -1., 0.) * (dy / double(size[1])));
    axes.push_back(avtVector(0., 0., 1.) * (dz / double(size[1])));
    axes.push_back(avtVector(0., 0., -1.) * (dz / double(size[1])));

    avtVector camvec; // The vector to dot with
    const double *up = camera->GetViewUp();
    if (direction == upAndDown)
    {
        // Find what vector of {i,j,k,-i,-j,-k} best represents 'up'
        // The vector we want is the camera up vector.
        camvec.x = up[0];
        camvec.y = up[1];
        camvec.z = up[2];
    }
    else
    {
        // Find what vector best represents 'right'
        // The vector we want is the cross of the focus vector
        // and the up vector.
        avtVector upVec(up[0], up[1], up[2]);
        const double *pos = camera->GetPosition();
        const double *focus = camera->GetFocalPoint();
        avtVector focusVec(focus[0]-pos[0],focus[1]-pos[1],focus[2]-pos[2]);
        camvec = focusVec % upVec; // cross product
    }
    camvec.normalize();

    // Dot the screen-direction vector against every candidate axis.
    double dots[6];
    for (i = 0; i < 6; ++i)
        dots[i] = camvec * axes[i];

    // Find the index of the largest dot product.
    int largestDotIndex = 0;
    for(i = 1; i < 6; ++i)
    {
        if(dots[i] > dots[largestDotIndex])
            largestDotIndex = i;
    }

    return axes[largestDotIndex];
}
// Creates the Direct3D 9 device plus the resources for the render-to-texture
// demo: a 256x256 render-target texture and its surface, the teapot mesh, the
// cube geometry, and a model-view camera. Returns S_OK on success.
HRESULT InitD3D( HWND hWnd )
{
    // Create the D3D object, which is needed to create the D3DDevice.
    if( NULL == ( g_pD3D = Direct3DCreate9( D3D_SDK_VERSION ) ) )
    {
        MessageBoxA(NULL, "Create D3D9 object failed!", "Error", 0);
        return E_FAIL;
    }

    D3DPRESENT_PARAMETERS d3dpp;
    ZeroMemory( &d3dpp, sizeof(d3dpp) );
    d3dpp.Windowed = TRUE; // use window mode, not full screen
    d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
    d3dpp.BackBufferFormat = D3DFMT_UNKNOWN;
    d3dpp.EnableAutoDepthStencil = TRUE;
    d3dpp.AutoDepthStencilFormat = D3DFMT_D16;

    // Create device
    if( FAILED( g_pD3D->CreateDevice( D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &d3dpp, &g_pd3dDevice ) ) )
    {
        MessageBoxA(NULL, "Create D3D9 device failed!", "Error", 0);
        return E_FAIL;
    }

    // Enable depth testing.
    g_pd3dDevice->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE);

    // Create the 256x256 render-target texture used for render-to-texture.
    HRESULT hr = g_pd3dDevice->CreateTexture(
        256, 256, 1, D3DUSAGE_RENDERTARGET,
        //d3dpp.BackBufferFormat,
        D3DFMT_R5G6B5,
        D3DPOOL_DEFAULT, &g_pRenderTexture, NULL);
    if (FAILED(hr))
    {
        // BUG FIX: this call (and the one below) used plain MessageBox with
        // narrow string literals while the rest of the function uses
        // MessageBoxA — in a UNICODE build MessageBox expands to MessageBoxW
        // and fails to compile. Use MessageBoxA consistently.
        MessageBoxA(NULL, "Create texture failed!", "Error", 0);
        return E_FAIL;
    }

    // Get the texture's top-level surface to render onto.
    hr = g_pRenderTexture->GetSurfaceLevel(0, &g_pRenderSurface);
    if (FAILED(hr))
    {
        MessageBoxA(NULL, "Get surface on texture failed!", "Error", 0);
        return E_FAIL;
    }

    // Create teapot
    D3DXCreateTeapot(g_pd3dDevice, &g_pTeapotMesh, NULL);

    // Create Cube
    CreateCube();

    // Create camera and initialize its view/projection.
    g_ModelViewCamera = new Camera();
    D3DXVECTOR3 eyePt(0.0f, 0.0f, -5.0f);
    D3DXVECTOR3 upVec(0.0f, 1.0f, 0.0f);
    D3DXVECTOR3 lookCenter(0.0f, 0.0f, 0.0f);
    g_ModelViewCamera->SetViewParams(&eyePt, &lookCenter, &upVec);
    g_ModelViewCamera->SetProjParams(D3DX_PI / 4, 1.0f, 1.0f, 1000.0f);

    return S_OK;
}
// Per-frame update of a grabbed entity's position and rotation while an
// animated grab is in progress: snaps the entity to the grab bone (with a
// decaying IK-inaccuracy correction), offsets limb IK targets by the
// animation-vs-reality error, and mirrors the end bone's rotation onto the
// grabbed entity.
void CAnimatedGrabHandler::UpdatePosVelRot(float frameTime)
{
    IEntity *pGrab = gEnv->pEntitySystem->GetEntity(m_grabStats.grabId);
    if ( !pGrab)
        return;

    IEntity *pEnt = m_pActor->GetEntity();

    // grabDelay < epsilon means the grab itself has completed.
    if (m_grabStats.grabDelay<0.001f)
    {
        Vec3 grabWPos (GetGrabBoneWorldTM ().t);

        // NOTE Aug 3, 2007: <pvl> the second part of this test means don't enable
        // the correction if animation/ik wasn't used for grabbing in the first place
        if (m_grabStats.readIkInaccuracyCorrection && m_grabStats.grabAnimGraphSignal[0])
        {
            // NOTE Aug 2, 2007: <pvl> executed the first time this function is
            // called for a particular grabbing action: record how far the bone
            // ended up from the intended grab spot.
            m_grabStats.ikInaccuracyCorrection = grabWPos - (pGrab->GetWorldTM().GetTranslation() + m_grabStats.entityGrabSpot);
            m_grabStats.readIkInaccuracyCorrection = false;
            // FIXME Sep 13, 2007: <pvl> only putting it here because it's called
            // just once, at the instant when the object is grabbed - rename
            // readIkInaccuracyCorrection to make this clearer, or put this
            // somewhere else
            DisableGrabbedAnimatedCharacter (true);
        }
        else
        {
            // NOTE Aug 2, 2007: <pvl> phase the correction out gradually,
            // zeroing it once it is negligibly small.
            m_grabStats.ikInaccuracyCorrection *= 0.9f;
            if (m_grabStats.ikInaccuracyCorrection.len2 () < 0.01f)
                m_grabStats.ikInaccuracyCorrection = Vec3 (0.0f, 0.0f, 0.0f);
        }

        // NOTE Sep 13, 2007: <pvl> this should prevent us from calling SetWPos()
        // later so that the IK "release" phase can take over
        m_grabStats.IKActive = false;

        // Snap the grabbed entity to the grab bone, minus the decaying
        // correction and the entity-local grab-spot offset.
        Matrix34 tm(pGrab->GetWorldTM());
        tm.SetTranslation(grabWPos - (m_grabStats.ikInaccuracyCorrection + pGrab->GetRotation() * m_grabStats.entityGrabSpot));
        pGrab->SetWorldTM(tm,ENTITY_XFORM_USER);
    }

    // update IK for every limb involved in the grab
    for (int i=0;i<m_grabStats.limbNum;++i)
    {
        SIKLimb *pLimb = m_pActor->GetIKLimb(m_grabStats.limbId[i]);

        // NOTE Dez 14, 2006: <pvl> this class is always supposed to have
        // m_grabStats.usingAnimation == true
        if (m_grabStats.usingAnimation && m_grabStats.releaseIKTime>0.001f && m_grabStats.IKActive)
        {
            // NOTE Dez 15, 2006: <pvl> use IK to constantly offset the animation
            // so that the difference between where the animation expects the
            // object to be and where the object really is is taken into account.
            Vec3 animPos = pEnt->GetSlotWorldTM(0) * pLimb->lAnimPos;
            Vec3 assumedGrabPos = pEnt->GetSlotWorldTM(0) * m_grabStats.grabbedObjOfs;
            Vec3 actualGrabPos = pGrab->GetWorldPos() + m_grabStats.entityGrabSpot;
            Vec3 adjustment = actualGrabPos - assumedGrabPos;
            pLimb->SetWPos(pEnt,animPos + adjustment,ZERO,0.5f,2.0f,1000);
            //gEnv->pRenderer->GetIRenderAuxGeom()->DrawSphere(pGrab->GetWorldPos() + m_grabStats.entityGrabSpot, 0.5f, ColorB(0,255,0,100));
        }

        // if there are multiple limbs, only the first one sets the rotation of the object.
        if (m_grabStats.useIKRotation && i == 0 && m_grabStats.grabDelay<0.001f)
        {
            // NOTE Aug 8, 2007: <pvl> the idea here is to store current world
            // rotations of both the object being grabbed and the end bone of
            // a grabbing limb. Then track how the end bone rotates with respect
            // to the stored original rotation and rotate the grabbed object
            // the same way. That way, the grabbed object rotates the same as
            // the limb and appears to be "stabbed" by it.
            QuatT endBoneWorldRot = GetGrabBoneWorldTM ();
            endBoneWorldRot.q.Normalize(); // may not be necessary - just to be safe

            if ( ! m_grabStats.origRotationsValid)
            {
                // First time through: remember the reference rotations.
                m_grabStats.origRotation = pGrab->GetRotation();
                m_grabStats.origRotation.Normalize(); // may not be necessary - just to be safe
                m_grabStats.origEndBoneWorldRot = endBoneWorldRot;
                m_grabStats.origRotationsValid = true;
            }

            // Delta of the bone rotation since grab start, applied on top of
            // the entity's original rotation.
            Quat grabQuat( (endBoneWorldRot*m_grabStats.origEndBoneWorldRot.GetInverted()).q * m_grabStats.origRotation);
            grabQuat.Normalize();

            // NOTE Dez 14, 2006: <pvl> this code sets up and look vectors for the
            // grabbed entity in case it's an Actor (the player, mostly) so that the
            // player always looks roughly at the grabber. The grabber is supposed
            // to be the Hunter here so this code is somewhat Hunter-specific.
            // UPDATE Aug 7, 2007: <pvl> do the above for the player only
            // UPDATE Sep 13, 2007: <pvl> don't do it for anybody ATM, it doesn't
            // seem useful — note the 'false &&' below permanently disabling it.
            CActor *pGrabbedActor = (CActor *)g_pGame->GetIGameFramework()->GetIActorSystem()->GetActor(m_grabStats.grabId);
            if (false && pGrabbedActor && pGrabbedActor->IsClient() && pGrabbedActor->GetActorStats())
            {
                Vec3 upVec(Quat(endBoneWorldRot.q * m_grabStats.additionalRotation).GetColumn2());
                upVec.z = fabs_tpl(upVec.z) * 2.0f;
                upVec.NormalizeSafe(Vec3(0,0,1));
                SActorStats *pAS = pGrabbedActor->GetActorStats();
                if (pAS)
                {
                    pAS->forceUpVector = upVec;
                    pAS->forceLookVector = (pEnt->GetSlotWorldTM(0) * m_pActor->GetLocalEyePos(0)) - pGrabbedActor->GetEntity()->GetWorldPos();
                    float lookLen(pAS->forceLookVector.len());
                    pAS->forceLookVector *= (1.0f/lookLen)*0.33f;
                    //pAS->forceLookVector = -Quat(boneRot * m_grabStats.additionalRotation).GetColumn1();//boneRot.GetColumn2();
                }
            }
            else
            {
                pGrab->SetRotation(grabQuat,ENTITY_XFORM_USER);
            }
        }
    }

    if (m_grabStats.grabDelay<0.001f)
    {
        // NOTE Sep 16, 2007: <pvl> now that grabbed entity rotation coming from
        // a grabbing bone (if any) is computed, bone-space offset can be applied
        Matrix34 tm(pGrab->GetWorldTM());
        tm.AddTranslation(GetGrabBoneWorldTM().q * m_grabStats.boneGrabOffset);
        pGrab->SetWorldTM(tm,ENTITY_XFORM_USER);

        /* (commented-out debug drawing removed for brevity: it drew the grab
           bone as a line/sphere and full coordinate frames for both the end
           bone and the grabbed entity via gEnv->pRenderer->GetIRenderAuxGeom(),
           using GetGrabBoneWorldTM() and pGrab->GetWorldTM()/GetRotation().) */
    }

    /* (commented-out debug drawing removed for brevity: it drew the grabbed
       object's position and Z axis via gEnv->pRenderer->GetIRenderAuxGeom().) */
}
// Create message (called at start, activated on player death) void initMessage() { message = (object*)malloc(sizeof(object)*5); vector** letters = (vector**)malloc(sizeof(vector*)*5); letters[0] = (vector*)malloc(sizeof(vector)*7); vector* vi = letters[0]; vi->x = 3.0; vi->y = -5.0; vi++; vi->x = 3.0; vi->y = 4.0; vi++; vi->x = 1.0; vi->y = 5.0; vi++; vi->x = -1.0; vi->y = 5.0; vi++; vi->x = -3.0; vi->y = 4.0; vi++; vi->x = -3.0; vi->y = 4.0; vi++; vi->x = -3.0; vi->y = -5.0; letters[1] = (vector*)malloc(sizeof(vector)*7); vi = letters[1]; float n = 0.0; int i; for(i=0;i<7;i++){vi->x=n;vi->y=n;vi++;} letters[2] = (vector*)malloc(sizeof(vector)*7); vi = letters[2]; vi->x = 3.0; vi->y = -5.0; vi++; vi->x = -1.0; vi->y = -5.0; vi++; vi->x = -3.0; vi->y = -3.0; vi++; vi->x = -3.0; vi->y = 3.0; vi++; vi->x = -1.0; vi->y = 5.0; vi++; vi->x = 3.0; vi->y = 5.0; vi++; vi->x = 3.0; vi->y = -5.0; letters[3] = (vector*)malloc(sizeof(vector)*7); vi = letters[3]; vi->x = -3.0; vi->y = -5.0; vi++; vi->x = 3.0; vi->y = -5.0; vi++; vi->x = 3.0; vi->y = 0.0; vi++; vi->x = -3.0; vi->y = 0.0; vi++; vi->x = 3.0; vi->y = 0.0; vi++; vi->x = 3.0; vi->y = 5.0; vi++; vi->x = -3.0; vi->y = 5.0; letters[4] = (vector*)malloc(sizeof(vector)*7); vi = letters[4]; vi->x = 3.0; vi->y = -5.0; vi++; vi->x = -1.0; vi->y = -5.0; vi++; vi->x = -3.0; vi->y = -3.0; vi++; vi->x = -3.0; vi->y = 3.0; vi++; vi->x = -1.0; vi->y = 5.0; vi++; vi->x = 3.0; vi->y = 5.0; vi++; vi->x = 3.0; vi->y = -5.0; int li; for (li=0; li<5; li++) { object L; L.verts = letters[li]; L.size = 7; L.pos = makeVec(16.0-8.0*(float)li, 0.0); L.dir = upVec(); L.ang = 0.0; L.turn = 0.0; L.vel = zeroVec(); L.spd = 0.0; L.type = MESSAGE; L.mode = GL_LINE_STRIP; L.state = 0; *(message+li) = L; } }
//-----------------------------------------------------------------------------
// Calculate the viewing region: projects rays through the four corners of a
// 90-degree-FOV projection plane onto the z=0 plane, writing the resulting
// quad corners into f[] (and gViewRegion) and the min/max corner distances
// from the camera into *minz / *maxz.
void MapCodegenState::calc_viewingregion(pointf f[], double *minz, double *maxz)
{
    /* calculate rays through projection plane */
    // Note the sign flips: the globals apparently store negated coordinates.
    ntlVec3d camera( -gCamX, -gCamY, -gCamZ );
    ntlVec3d lookat( -gLookatX, -gLookatY, 0.0 );
    ntlVec3d direction = lookat - camera;
    double fovy = 90.0;
    double aspect = (double)gViewSizeX/gViewSizeY;

    /* calculate width of screen using perpendicular triangle given by
     * viewing direction and screen plane */
    double screenWidth = direction.getNorm()*tan( (fovy*0.5/180.0)*M_PI );

    /* calculate vector orthogonal to up and viewing direction */
    ntlVec3d upVec(0.0, 1.0, 0.0);
    ntlVec3d rightVec( upVec.crossProd(direction) );
    rightVec.normalize();

    /* calculate screen plane up vector, perpendicular to viewdir and right vec */
    upVec = ntlVec3d( rightVec.crossProd(direction) );
    upVec.normalize();

    /* check if vectors are valid (degenerate when direction is parallel to
     * the initial up vector) — FIXME what to do if not? */
    if( (upVec==ntlVec3d(0.0)) || (rightVec==ntlVec3d(0.0)) ) {
        return;
    }

    /* length from center to border of screen plane (negated to flip both
     * vectors' directions) */
    rightVec *= (screenWidth*aspect * -1.0);
    upVec *= (screenWidth * -1.0);

    /* calc edges positions */
    double zplane = 0.0;
    // Start the min/max trackers at the perpendicular camera-to-plane distance.
    double maxzcnt = ABS(camera[2]-zplane);
    double minzcnt = maxzcnt;
    // The four corner rays of the view frustum.
    ntlVec3d e[4];
    e[0] = direction + rightVec + upVec;
    e[1] = direction - rightVec + upVec;
    e[2] = direction + rightVec - upVec;
    e[3] = direction - rightVec - upVec;
    for(int i=0;i<4;i++) {
        // Only treat rays pointing sufficiently toward the plane (negative z
        // directions); normalize x/y by the z drop so the later multiply by
        // camera[2] lands the ray on z == zplane.
        if((zplane-e[i][2])>1.0) {
            e[i][0] /= (zplane-e[i][2]);
            e[i][1] /= (zplane-e[i][2]);
        }
        //ntlVec3d p1( camera[0] + e[i][0] * camera[2], camera[1] + e[i][1] * camera[2], 0.0 );
        // Corner of the viewing region on the z=0 plane.
        f[i].x = gViewRegion[i].x = camera[0] + e[i][0] * camera[2];
        f[i].y = gViewRegion[i].y = camera[1] + e[i][1] * camera[2];
        //double currz = ABS(e[i][2] + zplane);
        // Track the nearest/farthest corner distance from the camera.
        ntlVec3d fiVec = ntlVec3d(f[i].x,f[i].y,zplane);
        double currz = (fiVec-camera).getNorm();
        //cout << " f"<<i<<" "<<fiVec<<" c:"<< camera <<" d:"<<(fiVec-camera)<<" curr:"<<currz<<endl;
        if( minzcnt > currz) minzcnt = currz;
        if( maxzcnt < currz) maxzcnt = currz;
    }

    // save min./max. z distance
    *maxz = maxzcnt;
    *minz = minzcnt;
}
// Set up the GL projection and modelview matrices for a perspective view of
// the (normalized) 3D plot, with optional stereo eye separation.
//
// eyeSepMult : eye-separation factor as a fraction of the focal length;
//              0 gives a mono view, nonzero offsets the eye for one stereo pass.
//
// Side effects: overwrites GL_PROJECTION and GL_MODELVIEW; leaves the matrix
// mode on GL_MODELVIEW (callers draw in denormalized data coordinates).
void OGL3DBase::PerspectiveSetup(double eyeSepMult)
{
	// Scene extent in normalized coordinates — focal length is derived so the
	// whole (scaled) scene fits the configured field of view.
	Coord3D zeroOffset(0.0, 0.0, 0.0);
	Coord3D minCoord = plotBase.GetNormalizedCoord(currView.viewLimits.minLim, zeroOffset);
	Coord3D maxCoord = plotBase.GetNormalizedCoord(currView.viewLimits.maxLim, zeroOffset);;
	double maxSceneWidth = minCoord.Distance(maxCoord);
	double sceneWidth = maxSceneWidth / currView.scale;
	double fovRadians = Radians(plot3Dbase.fieldOfView);
	double focalLength = sceneWidth / 2.0 / tan(fovRadians / 2.0);
	// Near/far chosen relative to the focal distance; far leaves generous slack.
	double nearDist = focalLength / 5.0;
	double farDist = focalLength + maxSceneWidth * 4.0;
	// NOTE(review): focalLength uses tan(fovRadians / 2) but wd2 uses
	// tan(fovRadians) — asymmetric; looks like it doubles the effective FOV of
	// the frustum. Confirm this is intentional before "fixing".
	double wd2 = nearDist * tan(fovRadians);
	double ndfl = nearDist / focalLength;
	double eyeSep = focalLength * eyeSepMult;
	int width, height;
	plotBase.CalcAvailablePixels(width, height);
	double aspect = double(width)/ double(height);
	// Off-axis frustum: the eyeSep * ndfl term shifts the frustum horizontally
	// for the stereo pass (standard asymmetric-frustum stereo setup).
	double left = -aspect * wd2 + eyeSep * ndfl;
	double right = aspect * wd2 + eyeSep * ndfl;
	double top = wd2;
	double bottom = - wd2;
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();
	glFrustum(left, right, bottom, top, nearDist, farDist);
	// Aim point: scene center shifted by the user translation plus the
	// centering offset of the ortho spans.
	Coord3D spanOffset((1.0 - xOrthoSpan)/ 2.0 , (1.0 - yOrthoSpan)/ 2.0 , (1.0 - zOrthoSpan)/ 2.0 );
	Coord3D translation = currView.translation + spanOffset;
	translation *= (maxCoord - minCoord);
	Coord3D lookAtPoint = (minCoord + maxCoord) / 2.0;
	lookAtPoint -= translation;
	// Place the eye on a sphere of radius focalLength around the aim point,
	// at the configured elevation/azimuth.
	double elevAngle = Radians(currView.elevation);
	double rotAngle = Radians(currView.azimuth);
	double dz = focalLength * sin(elevAngle);
	double xylen = focalLength * cos(elevAngle);
	double dx = - xylen * sin(rotAngle);
	double dy = - xylen * cos(rotAngle);
	Coord3D eyeCoord = lookAtPoint;
	eyeCoord.cX += dx;
	eyeCoord.cY += dy;
	eyeCoord.cZ += dz;
	Coord3D upVec(0.0, 0.0, 1.0);
	// Near the poles (almost vertical view) +Z up degenerates; switch to an
	// up vector lying in the horizontal plane aligned with the azimuth.
	if (fabs(sin(elevAngle)) > 0.95)
		{
			upVec = Coord3D(sin(rotAngle), cos(rotAngle), 0.0);
			upVec.Normalize();
		}
	if (eyeSep > stdEps)
		{
			// Shift the eye sideways for the stereo pass.
			// NOTE(review): this crosses the eye POSITION with up; the usual
			// stereo offset crosses the VIEW DIRECTION (lookAtPoint - eyeCoord)
			// with up. Works only if eyeCoord happens to be parallel to the view
			// direction — verify against the companion eye's pass.
			Coord3D rsep = CrossProduct(eyeCoord, upVec);
			rsep.Normalize();
			rsep *= eyeSep;
			eyeCoord += rsep;
		}
	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();
	gluLookAt(eyeCoord.cX, eyeCoord.cY, eyeCoord.cZ, lookAtPoint.cX, lookAtPoint.cY, lookAtPoint.cZ, upVec.cX, upVec.cY, upVec.cZ);
	// denormalize
	// scale for normalizations
	glScaled(xOrthoSpan / normSpan.cX, yOrthoSpan / normSpan.cY, zOrthoSpan / normSpan.cZ);
	// translate again for normalizations
	glTranslated(-normMin.cX, -normMin.cY, -normMin.cZ);
}
// Per-frame step of the basic movement demo: draws the per-animation weights,
// maps gamepad input onto walk/jump animation ease-in/out and turning,
// accumulates the extracted root motion, samples/blends the animations into a
// pose, and skins the meshes with it. Always returns DEMO_OK.
hkDemo::Result BasicMovementDemo::stepDemo()
{
	// for PlayStation(R)2 metrowerks
	pushDoubleConversionCheck( false );

	// Display current settings: one weight readout line per animation control,
	// stacked from 70px above the bottom of the window.
	{
		char buf[255];
		for (int i=0; i< NUM_ANIMS; i++)
		{
			hkString::sprintf(buf, "anim%d: %0.3f", i, m_control[i]->getWeight());
			const int h = m_env->m_window->getHeight();
			m_env->m_textDisplay->outputText( buf, 20, h-70+16*i, curveCols[i], 1);
		}
	}
	popDoubleConversionCheck();

	// Handle the keys.
	// Convention in this demo: m_control[0] = walk cycle, m_control[1] = jump.
	{
		// "Jumping" means the jump control has any meaningful weight.
		bool jumping = (m_control[1]->getWeight() > 0.001f);
		if (!jumping)
		{
			hkVector4 upVec(0,0,1);
			hkReal up, turn;
			up = m_env->m_gamePad->getStickPosY( 1 ); // 1 is the only one on PSP
			const hkBool upPressed = (up > 0.0f);
			// Walk control is "engaged" if it is easing in or fully eased in.
			const hkBool easeIn = (m_control[0]->getEaseStatus() == hkaDefaultAnimationControl::EASING_IN ) || ( m_control[0]->getEaseStatus() == hkaDefaultAnimationControl::EASED_IN );
			if ( easeIn && ( !upPressed ))
			{
				// Stick released: ease the walk cycle out (back to stand).
				m_control[0]->easeOut( 0.9f );
			}
			else if (( !easeIn ) && upPressed )
			{
				// Stick pushed: ease in walk cycle.
				m_control[0]->easeIn( 0.9f );
			}
			// Turn rate: up to 5 degrees per step, scaled by walk weight so the
			// character only turns while actually walking; sign flipped so
			// stick-right turns right.
			turn = m_env->m_gamePad->getStickPosX(1);
			turn *= -5.0f * HK_REAL_PI / 180.0f * m_control[0]->getWeight();
			hkQuaternion q; q.setAxisAngle(upVec, turn);
			m_currentMotion.m_rotation.mul(q);
		}
		else
		{
			// Time left until the jump animation finishes.
			hkReal remaining = m_control[1]->getAnimationBinding()->m_animation->m_duration - m_control[1]->getLocalTime();
			// If we've played through then fade out.
			const hkBool easeIn = (m_control[1]->getEaseStatus() == hkaDefaultAnimationControl::EASING_IN ) || ( m_control[1]->getEaseStatus() == hkaDefaultAnimationControl::EASED_IN );
			if ( easeIn && (remaining < 0.2f))
			{
				// Cross-fade back to walk over the jump's remaining time; the
				// walk clip is rewound so both clips end together.
				m_control[1]->easeOut( remaining );
				m_control[0]->setLocalTime( m_control[0]->getAnimationBinding()->m_animation->m_duration - remaining );
				m_control[0]->easeIn( remaining );
			}
		}
		if (m_env->m_gamePad->wasButtonPressed( HKG_PAD_BUTTON_2) && !jumping)
		{
			// Reset jump animation and play (quick cross-fade walk -> jump).
			m_control[1]->setLocalTime(0.0f);
			m_control[1]->easeIn( 0.1f );
			m_control[0]->easeOut( 0.1f );
		}
		if (m_env->m_gamePad->wasButtonPressed( HKG_PAD_BUTTON_1))
		{
			// Teleport the character back to the origin.
			m_currentMotion.setIdentity();
		}
	}

	// Grab accumulated motion: fold this step's extracted root motion into the
	// running world transform of the character.
	{
		hkQsTransform deltaMotion; deltaMotion.setIdentity();
		m_skeletonInstance->getDeltaReferenceFrame( m_timestep, deltaMotion);
		hkQsTransform temp; temp.setMul(m_currentMotion, deltaMotion);
		m_currentMotion = temp;
	}

	const int boneCount = m_skeleton->m_numBones;

	// Advance the active animations.
	// NOTE(review): advances by a hard-coded 1/60 while the delta reference
	// frame above uses m_timestep — these drift apart if m_timestep != 1/60;
	// confirm whether the demo fixes the timestep elsewhere.
	m_skeletonInstance->stepDeltaTime( 1.0f / 60.0f );

	// Sample the active animations and combine into a single pose.
	hkaPose pose (m_skeleton);
	m_skeletonInstance->sampleAndCombineAnimations( pose.accessUnsyncedPoseLocalSpace().begin(), pose.getFloatSlotValues().begin() );
	AnimationUtils::drawPose( pose, hkQsTransform::getIdentity() );

	// Construct the composite world transform (one skinning matrix per bone).
	hkLocalArray<hkTransform> compositeWorldInverse( boneCount );
	compositeWorldInverse.setSize( boneCount );

	// Convert accumlated info to graphics matrix.
	hkTransform graphicsTransform;
	m_currentMotion.copyToTransform(graphicsTransform);

	// Skin the meshes.
	{
		const hkArray<hkQsTransform>& poseInWorld = pose.getSyncedPoseModelSpace();
		for (int i=0; i < m_numSkinBindings; i++)
		{
			// assumes either a straight map (null map) or a single one (1 palette)
			hkInt16* usedBones = m_skinBindings[i]->m_mappings? m_skinBindings[i]->m_mappings[0].m_mapping : HK_NULL;
			int numUsedBones = usedBones? m_skinBindings[i]->m_mappings[0].m_numMapping : boneCount;
			// Multiply through by the bind pose inverse world inverse matrices.
			// Note: the palette is indexed by p (palette slot) while the pose is
			// indexed by the mapped boneIndex.
			for (int p=0; p < numUsedBones; p++)
			{
				int boneIndex = usedBones? usedBones[p] : p;
				compositeWorldInverse[p].setMul( poseInWorld[ boneIndex ], m_skinBindings[i]->m_boneFromSkinMeshTransforms[ boneIndex ] );
			}
			AnimationUtils::skinMesh( *m_skinBindings[i]->m_mesh, graphicsTransform, compositeWorldInverse.begin(), *m_env->m_sceneConverter );
		}
	}
	return hkDemo::DEMO_OK;
}
// Initialize Direct3D 9 for the given window: create the device, build the
// scene geometry (teapot + terrain), configure the camera, load the billboard
// texture, and set up texture stages, sampler states, material and light.
//
// hWnd   : window the device presents into (windowed mode).
// Return : S_OK on success; E_FAIL if the D3D object or device cannot be
//          created (a message box is shown first). A texture-load failure is
//          reported via DXTRACE but does not abort initialization.
HRESULT InitD3D( HWND hWnd )
{
	// Acquire the Direct3D 9 interface; everything else hangs off it.
	g_pD3D = Direct3DCreate9( D3D_SDK_VERSION );
	if ( g_pD3D == NULL )
	{
		MessageBoxA(NULL, "Create D3D9 object failed!", "Error", 0) ;
		return E_FAIL;
	}

	// Windowed-mode present parameters with a 16-bit auto depth-stencil buffer.
	D3DPRESENT_PARAMETERS presentParams;
	ZeroMemory( &presentParams, sizeof(presentParams) );
	presentParams.Windowed               = TRUE;  // use window mode, not full screen
	presentParams.SwapEffect             = D3DSWAPEFFECT_DISCARD;
	presentParams.BackBufferFormat       = D3DFMT_UNKNOWN;
	presentParams.EnableAutoDepthStencil = TRUE;
	presentParams.AutoDepthStencilFormat = D3DFMT_D16;

	// Create the device on the default HAL adapter with software vertex processing.
	HRESULT deviceResult = g_pD3D->CreateDevice( D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd,
		D3DCREATE_SOFTWARE_VERTEXPROCESSING, &presentParams, &g_pd3dDevice );
	if ( FAILED( deviceResult ) )
	{
		MessageBoxA(NULL, "Create D3D9 device failed!", "Error", 0) ;
		return E_FAIL;
	}

	// Enable depth testing.
	g_pd3dDevice->SetRenderState(D3DRS_ZENABLE, D3DZB_TRUE);

	// Scene geometry: stock teapot mesh and the terrain grid.
	D3DXCreateTeapot(g_pd3dDevice, &g_pTeapotMesh, NULL);
	g_pTerrain->BuildGridsBuffer(g_pd3dDevice);
	D3DXMatrixIdentity(&g_matBillboardWorld);

	// Camera: eye 20 units behind the origin, aimed slightly upward, +Y up,
	// 45-degree FOV with near/far at 1/1000.
	D3DXVECTOR3 cameraPos(0, 0.0f, -20.0f);
	D3DXVECTOR3 aimPoint(0, 1.0f, 0);
	D3DXVECTOR3 worldUp(0, 1.0f, 0);
	g_pCamera->SetViewParams(cameraPos, aimPoint, worldUp);
	g_pCamera->SetProjParams(D3DX_PI / 4, 1.0f, 1.0f, 1000.0f);

	// Load the billboard texture; a failure is reported but not fatal.
	HRESULT texResult = D3DXCreateTextureFromFile(g_pd3dDevice, "../Common/Media/autumn.jpg", &g_pTexture);
	if (FAILED(texResult))
	{
		DXTRACE_ERR_MSGBOX(DXGetErrorString(texResult), texResult);
	}

	// Stage 0: modulate texture with diffuse; alpha comes straight from the texture.
	g_pd3dDevice->SetTextureStageState( 0, D3DTSS_COLOROP,   D3DTOP_MODULATE );
	g_pd3dDevice->SetTextureStageState( 0, D3DTSS_COLORARG1, D3DTA_TEXTURE );
	g_pd3dDevice->SetTextureStageState( 0, D3DTSS_COLORARG2, D3DTA_DIFFUSE );
	g_pd3dDevice->SetTextureStageState( 0, D3DTSS_ALPHAOP,   D3DTOP_SELECTARG1 );
	g_pd3dDevice->SetTextureStageState( 0, D3DTSS_ALPHAARG1, D3DTA_TEXTURE );

	// Trilinear filtering with wrap addressing on both axes.
	g_pd3dDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR );
	g_pd3dDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR );
	g_pd3dDevice->SetSamplerState( 0, D3DSAMP_MIPFILTER, D3DTEXF_LINEAR );
	g_pd3dDevice->SetSamplerState( 0, D3DSAMP_ADDRESSU,  D3DTADDRESS_WRAP );
	g_pd3dDevice->SetSamplerState( 0, D3DSAMP_ADDRESSV,  D3DTADDRESS_WRAP );

	// Material: opaque red for diffuse, ambient and specular.
	g_Material.Diffuse  = D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f);
	g_Material.Ambient  = D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f);
	g_Material.Specular = D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f);

	// Light: white directional light shining along +X.
	g_Light.Type      = D3DLIGHT_DIRECTIONAL;
	g_Light.Direction = D3DXVECTOR3(1.0f, 0, 0);
	g_Light.Diffuse   = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
	g_Light.Ambient   = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
	g_Light.Specular  = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
	g_Light.Range     = 1000.0f;

	return S_OK;
}