void myEncodeFun()
{
    sgct::SharedData::instance()->writeDouble(&curr_time);

    sharedSpeed.setVal(speed);
    sgct::SharedData::instance()->writeFloat(&sharedSpeed);

    sharedTextureOnOff.setVal(use_texture);
    sgct::SharedData::instance()->writeBool(&sharedTextureOnOff);

    sharedClearColor.setVal(glm::vec3(clear_color.x, clear_color.y, clear_color.z));
    sgct::SharedData::instance()->writeObj(&sharedClearColor);
}
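// A minimal sketch of the matching decode callback, assuming it is registered with
// sgct::SharedData::setDecodeFunction and that curr_time, sharedSpeed, sharedTextureOnOff
// and sharedClearColor are the sgct::Shared* variables written above. The client nodes
// must read the values in exactly the same order as the master writes them in myEncodeFun.
// If the navigation matrix xform (set in myPreSyncFun below) should reach the clients as
// well, it would also need a matching writeObj/readObj pair.
void myDecodeFun()
{
    sgct::SharedData::instance()->readDouble(&curr_time);
    sgct::SharedData::instance()->readFloat(&sharedSpeed);
    sgct::SharedData::instance()->readBool(&sharedTextureOnOff);
    sgct::SharedData::instance()->readObj(&sharedClearColor);
}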
void myPreSyncFun()
{
    if( gEngine->isMaster() )
    {
        //get the mouse position from the focused window
        sgct::Engine::getMousePos( gEngine->getFocusedWindowIndex(), &mouseXPos[0], &mouseYPos[0] );

        //mouse delta relative to the window center (960 x 540)
        mouseDx = mouseXPos[0] - 960/2;
        mouseDy = mouseYPos[0] - 540/2;

        //alternative drag-based look (only rotate while the left mouse button is held):
        //if( mouseLeftButton )
        //{
        //    mouseDx = mouseXPos[0] - mouseXPos[1];
        //    mouseDy = mouseYPos[0] - mouseYPos[1];
        //}
        //else
        //{
        //    mouseDx = 0.0;
        //    mouseDy = 0.0;
        //}

        //re-center the cursor so the next frame measures a fresh delta
        sgct::Engine::setMousePos( gEngine->getFocusedWindowIndex(), 960/2, 540/2 );

        static float panRot = 0.0f;
        panRot += (static_cast<float>(mouseDx) * rotationSpeed * static_cast<float>(gEngine->getDt()));

        static float vertRot = 0.0f;
        vertRot += (static_cast<float>(mouseDy) * rotationSpeed * static_cast<float>(gEngine->getDt()));

        glm::mat4 ViewRotateX = glm::rotate(
            glm::mat4(1.0f),
            panRot,
            glm::vec3(0.0f, 1.0f, 0.0f)); //rotation around the y-axis

        glm::mat4 ViewRotateY = glm::rotate(
            glm::mat4(1.0f),
            vertRot,
            glm::vec3(1.0f, 0.0f, 0.0f)); //rotation around the x-axis

        view = glm::inverse(glm::mat3(ViewRotateX)) * glm::vec3(0.0f, 0.0f, 1.0f);

        glm::vec3 right = glm::cross(view, up);

        posMutex.lock();
        if( arrowButtons[FORWARD] )
            pos += (walkingSpeed * static_cast<float>(gEngine->getDt()) * view);
        if( arrowButtons[BACKWARD] )
            pos -= (walkingSpeed * static_cast<float>(gEngine->getDt()) * view);
        if( arrowButtons[LEFT] )
            pos -= (walkingSpeed * static_cast<float>(gEngine->getDt()) * right);
        if( arrowButtons[RIGHT] )
            pos += (walkingSpeed * static_cast<float>(gEngine->getDt()) * right);
        posMutex.unlock();

        /*
            To get a first-person camera, the world needs to be transformed
            around the user's head. This is done by:

            1. Transform the user to the coordinate system origin
            2. Apply the transformation
            3. Transform the user back to the original position

            However, math-wise this process needs to be reversed
            due to the matrix multiplication order.
        */

        //std::cout << "Position: (" << pos.x << ", " << pos.y << ", " << pos.z << ")" << std::endl;

        //3. transform user back to original position
        glm::mat4 result;
        result = glm::translate( glm::mat4(1.0f), sgct::Engine::getUserPtr()->getPos() );
        //2. apply transformation
        result *= (ViewRotateY * ViewRotateX * glm::translate( glm::mat4(1.0f), pos ));
        //1. transform user to coordinate system origin
        result *= glm::translate( glm::mat4(1.0f), -sgct::Engine::getUserPtr()->getPos() );

        xform.setVal( result );
    }
}
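// A minimal sketch of how the synchronized navigation matrix could be consumed in the
// draw callback. Assumptions not present in the original: a shader with a matrix uniform
// at location matrixLoc is already bound, and drawScene() is a hypothetical helper that
// issues the actual draw calls. SGCT supplies the per-viewport model-view-projection
// matrix, which is combined with the user-controlled xform computed in myPreSyncFun.
void myDrawFun()
{
    glm::mat4 MVP = gEngine->getCurrentModelViewProjectionMatrix() * xform.getVal();
    glUniformMatrix4fv(matrixLoc, 1, GL_FALSE, &MVP[0][0]);
    drawScene(); //hypothetical scene-drawing helper
}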