/** Creates a model matrix which translates (and scales) the model so
 * that it appears at placeToPutModel, optionally fitting its bounding
 * box to the view. */
void get_model_matrix(float result[16])
{
	mat4f_identity(result);

	if(fitToView == 0)
	{
		/* Translate the model to where we were asked to put it */
		float translate[16];
		mat4f_translateVec_new(translate, placeToPutModel);

		/* Do inches to meters conversion if we are asked to. */
		float scale[16];
		mat4f_identity(scale);
		if(INCHES_TO_METERS)
		{
			float inchesToMeters = 1/39.3701;
			mat4f_scale_new(scale, inchesToMeters, inchesToMeters, inchesToMeters);
		}
		mat4f_mult_mat4f_new(result, translate, scale);
		return;
	}

	/* Get a matrix to scale+translate the model based on the bounding
	 * box. If the last parameter is 1, the bounding box will sit on
	 * the XZ plane. If it is set to 0, the bounding box will be
	 * centered at the specified point. */
	float fitMatrix[16];
	kuhl_bbox_fit(fitMatrix, bbox, 1);

	/* Get a matrix that moves the model to the correct location. */
	float moveToLookPoint[16];
	mat4f_translateVec_new(moveToLookPoint, placeToPutModel);

	/* Create a single model matrix: result = moveToLookPoint * fitMatrix */
	mat4f_mult_mat4f_new(result, moveToLookPoint, fitMatrix);
}
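/* A minimal usage sketch (not from the original file): get_model_matrix()
 * would normally be combined with a view matrix and sent to the vertex
 * program. The viewmat_get() dispatcher and the "ModelView" uniform name
 * are assumptions borrowed from the other snippets in this section, not a
 * confirmed part of this program. */
static void display_sketch(int viewportID)
{
	float viewMat[16], projMat[16];
	viewmat_get(viewMat, projMat, viewportID); // assumed dispatcher; see viewmat_get_hmd_oculus() below

	float modelMat[16], modelview[16];
	get_model_matrix(modelMat);
	mat4f_mult_mat4f_new(modelview, viewMat, modelMat); // modelview = view * model

	glUniformMatrix4fv(kuhl_get_uniform("ModelView"), 1, 0, modelview);
	kuhl_geometry_draw(modelgeom); // projMat upload omitted for brevity
}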
/** Creates a model matrix which animates the model, scales it based on
 * its bounding box so that it fits in the view frustum, and translates
 * it to placeToPutModel. */
void get_model_matrix(float result[16])
{
	mat4f_identity(result);

	if(FIT_TO_VIEW_AND_ROTATE == 0)
	{
		/* Translate the model to where we were asked to put it */
		float translate[16];
		mat4f_translateVec_new(translate, placeToPutModel);

		/* Do inches to meters conversion if we are asked to. */
		float scale[16];
		mat4f_identity(scale);
		if(INCHES_TO_METERS)
		{
			float inchesToMeters = 1/39.3701;
			mat4f_scale_new(scale, inchesToMeters, inchesToMeters, inchesToMeters);
		}
		mat4f_mult_mat4f_new(result, translate, scale);
		return;
	}

	/* Change angle for animation: get a counter that repeats every 10
	 * seconds. Share it via DGR *before* using it so that master and
	 * slave processes animate with the same value. */
	int count = glutGet(GLUT_ELAPSED_TIME) % 10000;
	dgr_setget("count", &count, sizeof(int));

	/* Animate the model if there is animation information available. */
	kuhl_update_model_file_ogl3(modelFilename, 0, count/1000.0);

	/* Calculate the width/height/depth of the bounding box and
	 * determine which one of the three is the largest. Then, scale
	 * the scene by 1/(largest value) to ensure that it fits in our
	 * view frustum. */
	float bb_min[3], bb_max[3], bb_center[3];
	kuhl_model_bounding_box(modelFilename, bb_min, bb_max, bb_center);
#define mymax(a,b) ((a)>(b)?(a):(b))
	float tmp;
	tmp = bb_max[0] - bb_min[0];
	tmp = mymax(bb_max[1] - bb_min[1], tmp);
	tmp = mymax(bb_max[2] - bb_min[2], tmp);
	tmp = 1.f / tmp;
#undef mymax

	float scaleBoundBox[16], moveToOrigin[16], moveToLookPoint[16];
	mat4f_translate_new(moveToOrigin, -bb_center[0], -bb_center[1],
	                    -bb_center[2]);            // move bounding box center to the origin
	// printf("Scaling by factor %f\n", tmp);
	mat4f_scale_new(scaleBoundBox, tmp, tmp, tmp); // scale model based on bounding box size
	mat4f_translateVec_new(moveToLookPoint, placeToPutModel);

	/* Compose result = moveToLookPoint * scaleBoundBox * moveToOrigin */
	mat4f_mult_mat4f_new(result, moveToOrigin, result);
	mat4f_mult_mat4f_new(result, scaleBoundBox, result);
	mat4f_mult_mat4f_new(result, moveToLookPoint, result);
}
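/* An equivalence sketch (not in the original): the last three
 * mat4f_mult_mat4f_new() calls above build up
 * result = moveToLookPoint * scaleBoundBox * moveToOrigin, with the
 * rightmost matrix applied to the model first. The same composition can
 * be written in one call with the NULL-terminated mat4f_mult_mat4f_many()
 * that drawObject() below uses; this assumes the two forms are
 * interchangeable. The function name here is hypothetical. */
static void compose_model_matrix_sketch(float result[16], float moveToLookPoint[16],
                                        float scaleBoundBox[16], float moveToOrigin[16])
{
	mat4f_mult_mat4f_many(result, moveToLookPoint, scaleBoundBox,
	                      moveToOrigin, NULL);
}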
/** Draw an object at the location and orientation of the tracked VRPN object */
void drawObject(const int objectIndex, float viewMat[16])
{
	const char *vrpnObject = global_argv[objectIndex];
	const float scaleFactor = .5;

	float pos[4], orient[16];
	vrpn_get(vrpnObject, NULL, pos, orient);

	float modelMat[16], translate[16], scale[16];
	mat4f_scale_new(scale, scaleFactor, scaleFactor, scaleFactor);
	mat4f_translateVec_new(translate, pos);
	mat4f_mult_mat4f_many(modelMat, translate, orient, scale, NULL);

	float modelview[16];
	mat4f_mult_mat4f_new(modelview, viewMat, modelMat); // modelview = view * model

	/* Send the modelview matrix to the vertex program. */
	glUniformMatrix4fv(kuhl_get_uniform("ModelView"),
	                   1,          // number of 4x4 float matrices
	                   0,          // transpose
	                   modelview); // value
	glUniform1i(kuhl_get_uniform("renderStyle"), 2);
	kuhl_errorcheck();
	kuhl_geometry_draw(modelgeom); /* Draw the model */
	kuhl_errorcheck();

	/* Transparency of labels may not appear right because we aren't
	 * sorting them by depth. */
	float labelScale[16];
	mat4f_scale_new(labelScale, 1, 1/labelAspectRatio[objectIndex-1], 1);
	mat4f_mult_mat4f_new(modelview, modelview, labelScale);
	glUniformMatrix4fv(kuhl_get_uniform("ModelView"), 1, 0, modelview);
	glUniform1i(kuhl_get_uniform("renderStyle"), 1);
	kuhl_geometry_texture(&quad, label[objectIndex-1], "tex", 1);
	kuhl_geometry_draw(&quad);

#if 0
	printf("%s is at\n", vrpnObject);
	vec3f_print(pos);
	mat4f_print(orient);
#endif
}
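/* A hypothetical caller sketch: drawObject() indexes global_argv, so it
 * is presumably invoked once per tracked-object name given on the command
 * line. The global_argc variable and the loop bounds are assumptions; a
 * display callback might drive it like this: */
static void draw_tracked_objects_sketch(float viewMat[16])
{
	/* Skip argv[0], the program name; one label[] and
	 * labelAspectRatio[] entry is assumed to exist per remaining
	 * argument. */
	for(int i = 1; i < global_argc; i++)
		drawObject(i, viewMat);
}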
/** Get view and projection matrices appropriate for the Oculus HMD */
static void viewmat_get_hmd_oculus(float viewmatrix[16], float projmatrix[16], int viewportID)
{
#ifndef MISSING_OVR
	/* Oculus recommends the order that we should render eyes. We
	 * assume that smaller viewportIDs are rendered first. So, we need
	 * to map the viewportIDs to the specific Oculus HMD eye. The
	 * "eye" variable will be set to either ovrEye_Left (if we are
	 * rendering the left eye) or ovrEye_Right (if we are rendering
	 * the right eye). */
	ovrEyeType eye = hmd->EyeRenderOrder[viewportID];

	/* Oculus doesn't provide us with easy access to the view
	 * frustum information. We get the projection matrix directly
	 * from libovr. */
	ovrMatrix4f ovrpersp = ovrMatrix4f_Projection(hmd->DefaultEyeFov[eye], 0.5, 500, 1);
	mat4f_setRow(projmatrix, &(ovrpersp.M[0][0]), 0);
	mat4f_setRow(projmatrix, &(ovrpersp.M[1][0]), 1);
	mat4f_setRow(projmatrix, &(ovrpersp.M[2][0]), 2);
	mat4f_setRow(projmatrix, &(ovrpersp.M[3][0]), 3);

	float offsetMat[16], rotMat[16], posMat[16], initPosMat[16];
	mat4f_identity(offsetMat);  // viewpoint offset (IPD, etc.)
	mat4f_identity(rotMat);     // tracking system rotation
	mat4f_identity(posMat);     // tracking system position
	mat4f_identity(initPosMat); // camera starting location

	/* Construct posMat and rotMat matrices which indicate the
	 * position and orientation of the HMD. */
	if(viewmat_vrpn_obj) // get position from VRPN
	{
		/* Get the offset for the left and right eyes from
		 * Oculus. If you are using a separate tracking system, you
		 * may also want to apply an offset here between the tracked
		 * point and the eye location. */
		mat4f_translate_new(offsetMat,
		                    eye_rdesc[eye].HmdToEyeViewOffset.x,  // left & right IPD offset
		                    eye_rdesc[eye].HmdToEyeViewOffset.y,  // vertical offset
		                    eye_rdesc[eye].HmdToEyeViewOffset.z); // forward/back offset

		float pos[3] = { 0,0,0 };
		vrpn_get(viewmat_vrpn_obj, NULL, pos, rotMat);
		mat4f_translate_new(posMat, -pos[0], -pos[1], -pos[2]); // position
		viewmat_fix_rotation(rotMat);
	}
	else // get position from Oculus tracker
	{
		pose[eye] = ovrHmd_GetHmdPosePerEye(hmd, eye);
		mat4f_translate_new(posMat, // position (includes IPD offset)
		                    -pose[eye].Position.x,
		                    -pose[eye].Position.y,
		                    -pose[eye].Position.z);
		mat4f_rotateQuat_new(rotMat, // rotation
		                     pose[eye].Orientation.x,
		                     pose[eye].Orientation.y,
		                     pose[eye].Orientation.z,
		                     pose[eye].Orientation.w);

		/* Starting point: Translate the world based on the initial
		 * camera position specified in viewmat_init(). You may choose
		 * to initialize the camera position with y=1.5 meters to
		 * approximate a normal standing eyeheight. */
		float initPosVec[3];
		vec3f_scalarMult_new(initPosVec, oculus_initialPos, -1.0f);
		mat4f_translateVec_new(initPosMat, initPosVec);
		// TODO: Could also get eyeheight via ovrHmd_GetFloat(hmd, OVR_KEY_EYE_HEIGHT, 1.65)
	}
	mat4f_transpose(rotMat); /* orientation sensor rotates camera, not world */

	// viewmatrix = offsetMat * rotMat * posMat * initPosMat
	mat4f_mult_mat4f_new(viewmatrix, offsetMat, rotMat); // offsetMat is identity if we are using the Oculus tracker
	mat4f_mult_mat4f_new(viewmatrix, viewmatrix, posMat);
	mat4f_mult_mat4f_new(viewmatrix, viewmatrix, initPosMat);

	if(0)
	{
		printf("ViewportID=%d; eye=%s\n", viewportID, eye == ovrEye_Left ? "left" : "right");
		printf("Eye offset according to OVR (only used if VRPN is used): ");
		mat4f_print(offsetMat);
		printf("Rotation sensing (from OVR or VRPN): ");
		mat4f_print(rotMat);
		printf("Position tracking (from OVR or VRPN): ");
		mat4f_print(posMat);
		printf("Initial position (set in viewmat_init()): ");
		mat4f_print(initPosMat);
		printf("Final view matrix: ");
		mat4f_print(viewmatrix);
	}
#else
	/* We shouldn't ever get here, but we'll generate a generic view
	 * and projection matrix just in case... */
	mat4f_lookat_new(viewmatrix, 0, 1.55, 0, 0, 1.55, -1, 0, 1, 0);
	mat4f_perspective_new(projmatrix, 50, 1, 0.5, 500);
#endif
}