void QCamera::rotateAboutViewCenter( const QQuaternion& q )
{
    // Rotate the up vector and the camera -> view center vector by q, then
    // reposition the camera so that the view center itself stays fixed.
    setUpVector(q * upVector());
    QVector3D viewVector = viewCenter() - position();
    QVector3D cameraToCenter = q * viewVector;
    setPosition(viewCenter() - cameraToCenter);
    setViewCenter(position() + cameraToCenter);
}
MyGlWindow::MyGlWindow(int x, int y, int w, int h) : Fl_Gl_Window(x, y, w, h)
//==========================================================================
{
    mode( FL_RGB | FL_ALPHA | FL_DOUBLE | FL_STENCIL );

    fieldOfView = 45;
    MathVec3D<double> viewPoint( DEFAULT_VIEW_POINT );
    MathVec3D<double> viewCenter( DEFAULT_VIEW_CENTER );
    MathVec3D<double> upVector( DEFAULT_UP_VECTOR );

    // Cast to double to avoid integer division when computing the aspect ratio
    double aspect = static_cast<double>(w) / static_cast<double>(h);
    m_viewer = new Viewer( viewPoint, viewCenter, upVector, fieldOfView, aspect );

    m_bvh = new BVH();

    // Initialize the particle pool with default velocity, color and lifetime
    m_particleList.resize(1000);
    std::for_each(m_particleList.begin(), m_particleList.end(), [] (Particle& p) {
        p.velocity  = Vec3f(0, 0, 0);
        p.color     = Vec3f(0.6, 0.6, 0);
        p.timeAlive = 0;
        p.lifespan  = 5;
    });

    m_range = 0;
    m_direction = 0;
}
void QCamera::translateWorld( const QVector3D& vWorld, CameraTranslationOption option )
{
    // Update the camera position using the world-space vector
    setPosition(position() + vWorld);

    // Optionally also update the view center coordinates
    if ( option == TranslateViewCenter )
        setViewCenter(viewCenter() + vWorld);
}
int main()
{
#if defined(DEBUG) || defined(_DEBUG)
    _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
    //_crtBreakAlloc = 277;
#endif

    // Set the working directory to the directory where the executable exists,
    // so that relative paths work
    SetCurrentDirectory(GameHelper::ExecutableDirectory().c_str());

    // Create the window and its view
    sf::Vector2f viewCenter(960.f, 540.f);
    sf::Vector2f viewSize(1920.f, 1080.f);
    sf::View view(viewCenter, viewSize);
    sf::RenderWindow window(sf::VideoMode(1920, 1080), "Game of Paths");
    window.setView(view);

    auto getWindow = [&]() { return &window; };
    sf::Clock clock;
    Game game(getWindow);

    // Run the program as long as the window is open
    while (window.isOpen())
    {
        std::chrono::time_point<std::chrono::system_clock> start, end;
        start = std::chrono::system_clock::now();

        // Check all the window's events that were triggered since the last iteration of the loop
        sf::Event event;
        while (window.pollEvent(event))
        {
            // "close requested" event: we close the window
            if (event.type == sf::Event::Closed)
                window.close();
        }

        // Clear the window with a grey color
        window.clear(sf::Color(100, 100, 100));

        float deltaTime = clock.restart().asSeconds();
        game.Update(deltaTime);
        game.Draw();

        // End the current frame
        window.display();

        // Throttle the loop to roughly 60 FPS (16 ms per frame)
        end = std::chrono::system_clock::now();
        std::this_thread::sleep_for(std::chrono::microseconds(16000) - (end - start));
    }

    return 0;
}
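// Note on the frame throttle above: when a frame overruns its 16 ms budget the
// subtraction yields a negative duration, and sleep_for then returns without
// blocking on mainstream implementations. A small sketch of an explicit clamp
// that documents that intent (drop-in replacement for the sleep_for call,
// assuming the same 'start'/'end' time points):
//
//     auto remaining = std::chrono::microseconds(16000) - (end - start);
//     if (remaining.count() > 0)
//         std::this_thread::sleep_for(remaining);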
void QCamera::translate( const QVector3D& vLocal, CameraTranslationOption option )
{
    QVector3D viewVector = viewCenter() - position(); // From "camera" position to view center

    // Calculate the amount to move by in world coordinates
    QVector3D vWorld;
    if ( !qFuzzyIsNull( vLocal.x() ) )
    {
        // Calculate the vector for the local x axis
        QVector3D x = QVector3D::crossProduct(viewVector, upVector()).normalized();
        vWorld += vLocal.x() * x;
    }

    if ( !qFuzzyIsNull( vLocal.y() ) )
        vWorld += vLocal.y() * upVector();

    if ( !qFuzzyIsNull( vLocal.z() ) )
        vWorld += vLocal.z() * viewVector.normalized();

    // Update the camera position using the calculated world vector
    setPosition(position() + vWorld);

    // Optionally also update the view center coordinates
    if ( option == TranslateViewCenter )
        setViewCenter(viewCenter() + vWorld);

    // Refresh the camera -> view center vector
    viewVector = viewCenter() - position();

    // Calculate a new up vector. We do this by:
    // 1) Calculating a new local x-direction vector from the cross product of the new
    //    camera to view center vector and the old up vector.
    // 2) The local x vector is the normal to the plane in which the new up vector
    //    must lie. So we take the cross product of this normal and the new
    //    camera to view center vector; the result completes the orthonormal basis.
    QVector3D x = QVector3D::crossProduct(viewVector, upVector()).normalized();
    setUpVector(QVector3D::crossProduct(x, viewVector).normalized());
}
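// For context, a hypothetical helper showing how the translate() above might be
// used for dolly and pan moves. It assumes a QCamera* named 'camera' and that the
// CameraTranslationOption enum also provides DontTranslateViewCenter (as in
// Qt3D's QCamera); adjust the names to whatever your camera class defines.
void dollyAndPan(QCamera* camera, float forward, float sideways)
{
    // Dolly: move along the local z axis toward the view center without
    // dragging the view center along, i.e. a zoom-like motion.
    camera->translate(QVector3D(0.0f, 0.0f, forward), QCamera::DontTranslateViewCenter);

    // Pan: slide camera and view center together along the local x axis.
    camera->translate(QVector3D(sideways, 0.0f, 0.0f), QCamera::TranslateViewCenter);
}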
osg::Matrix OculusDevice::projectionOffsetMatrix(EyeSide eye) const
{
    osg::Matrix projectionMatrix;

    // Horizontal shift of the projection center for one eye, derived from the
    // lens separation and expressed as a fraction of the screen width
    float eyeProjectionShift = viewCenter() - lensSeparationDistance() * 0.5f;
    float projectionCenterOffset = 4.0f * eyeProjectionShift / hScreenSize();

    if (eye == LEFT_EYE)
    {
        projectionMatrix.makeTranslate(osg::Vec3d(projectionCenterOffset, 0, 0));
    }
    else if (eye == RIGHT_EYE)
    {
        projectionMatrix.makeTranslate(osg::Vec3d(-projectionCenterOffset, 0, 0));
    }
    else
    {
        projectionMatrix.makeIdentity();
    }

    return projectionMatrix;
}
QQuaternion QCamera::rollRotation(float angle) const
{
    QVector3D viewVector = viewCenter() - position();
    return QQuaternion::fromAxisAndAngle(viewVector, -angle);
}
QQuaternion QCamera::tiltRotation(float angle) const
{
    QVector3D viewVector = viewCenter() - position();
    QVector3D xBasis = QVector3D::crossProduct(upVector(), viewVector.normalized()).normalized();
    return QQuaternion::fromAxisAndAngle( xBasis, -angle );
}
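// A hypothetical orbit helper combining the quaternion factories above with
// rotateAboutViewCenter() from the first example. It assumes a QCamera* named
// 'camera'; the sign conventions follow the snippets shown here.
void orbitAndRoll(QCamera* camera, float tiltDegrees, float rollDegrees)
{
    // Tilt about the camera's local x axis; the view center stays fixed.
    camera->rotateAboutViewCenter(camera->tiltRotation(tiltDegrees));

    // Roll about the view direction; this mainly re-orients the up vector,
    // since the camera -> view center vector is the rotation axis itself.
    camera->rotateAboutViewCenter(camera->rollRotation(rollDegrees));
}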
bool TrackballTransformManipulator::pan()
{
    int dxScreen = getCurrentX() - getLastX();
    int dyScreen = getLastY()    - getCurrentY();

    if ( dxScreen || dyScreen )
    {
        DP_ASSERT( getViewState()->getCamera().isPtrTo<FrustumCamera>() );
        TransformSharedPtr transform = m_transformPath->getTail().staticCast<Transform>();
        FrustumCameraSharedPtr const& camera = getViewState()->getCamera().staticCast<FrustumCamera>();
        if ( camera && transform )
        {
            unsigned int rtWidth  = getRenderTarget()->getWidth();
            unsigned int rtHeight = getRenderTarget()->getHeight();
            Vec2f camWinSize = camera->getWindowSize();
            if (    ( 0 < rtHeight ) && ( 0 < rtWidth )
                 && ( FLT_EPSILON < fabs( camWinSize[0] ) )
                 && ( FLT_EPSILON < fabs( camWinSize[1] ) ) )
            {
                // get all the matrices needed here
                Mat44f m2w, w2m;
                m_transformPath->getModelToWorldMatrix(m2w, w2m);   // model->world and world->model
                Mat44f w2v = camera->getWorldToViewMatrix();        // world->view
                Mat44f v2w = camera->getViewToWorldMatrix();        // view->world

                // center of the object in view coordinates
                Vec4f center = Vec4f( transform->getBoundingSphere().getCenter(), 1.0f ) * m2w * w2v;

                // window size at distance of the center of the object
                Vec2f centerWindowSize = - center[2] / getViewState()->getTargetDistance() * camWinSize;

                checkLockAxis(dxScreen, dyScreen);
                if ( m_activeLockAxis[static_cast<size_t>(Axis::X)] )
                {
                    if ( dxScreen != 0 )
                    {
                        dyScreen = 0;
                    }
                    else
                    {
                        return false;
                    }
                }
                else if ( m_activeLockAxis[static_cast<size_t>(Axis::Y)] )
                {
                    if ( dyScreen != 0 )
                    {
                        dxScreen = 0;
                    }
                    else
                    {
                        return false;
                    }
                }

                // delta in model coordinates
                Vec4f viewCenter( centerWindowSize[0] * dxScreen / rtWidth,
                                  centerWindowSize[1] * dyScreen / rtHeight, 0.f, 0.f );
                Vec4f modelDelta = viewCenter * v2w * w2m;

                // add the delta to the translation of the transform
                Trafo trafo = transform->getTrafo();
                trafo.setTranslation( trafo.getTranslation() + Vec3f( modelDelta ) );
                transform->setTrafo( trafo );

                return true;
            }
        }
    }
    return false;
}