/*! Set the Coin event manager for the widget.

    If an event manager already exists and \a manager is non-NULL, the
    old manager's scene graph, camera and viewport region are carried
    over to the new manager. The old manager is deleted only when it
    was created internally by this widget.
*/
void QuarterWidget::setSoEventManager(SoEventManager * manager)
{
  // Remember the old manager's state so it can be transferred below.
  bool carrydata = false;
  SoNode * scene = NULL;
  SoCamera * camera = NULL;
  SbViewportRegion vp;
  if (PRIVATE(this)->soeventmanager && (manager != NULL)) {
    scene = PRIVATE(this)->soeventmanager->getSceneGraph();
    camera = PRIVATE(this)->soeventmanager->getCamera();
    vp = PRIVATE(this)->soeventmanager->getViewportRegion();
    carrydata = true;
  }

  // ref before deleting the old scene manager to avoid that the nodes are deleted
  if (scene)
    scene->ref();
  if (camera)
    camera->ref();

  // Only delete the old manager if this widget created it itself.
  if (PRIVATE(this)->initialsoeventmanager) {
    delete PRIVATE(this)->soeventmanager;
    PRIVATE(this)->initialsoeventmanager = false;
  }

  PRIVATE(this)->soeventmanager = manager;

  // Transfer the saved scene graph, camera and viewport to the new manager.
  if (carrydata) {
    PRIVATE(this)->soeventmanager->setSceneGraph(scene);
    PRIVATE(this)->soeventmanager->setCamera(camera);
    PRIVATE(this)->soeventmanager->setViewportRegion(vp);
  }

  // Drop the temporary references taken above.
  if (scene)
    scene->unref();
  if (camera)
    camera->unref();
}
void TaskCreateNodeSet::DefineNodesCallback(void * ud, SoEventCallback * n) { // show the wait cursor because this could take quite some time Gui::WaitCursor wc; TaskCreateNodeSet *taskBox = static_cast<TaskCreateNodeSet *>(ud); // When this callback function is invoked we must in either case leave the edit mode Gui::View3DInventorViewer* view = reinterpret_cast<Gui::View3DInventorViewer*>(n->getUserData()); view->setEditing(false); view->removeEventCallback(SoMouseButtonEvent::getClassTypeId(), DefineNodesCallback,ud); n->setHandled(); SbBool clip_inner; std::vector<SbVec2f> clPoly = view->getGLPolygon(&clip_inner); if (clPoly.size() < 3) return; if (clPoly.front() != clPoly.back()) clPoly.push_back(clPoly.front()); SoCamera* cam = view->getSoRenderManager()->getCamera(); SbViewVolume vv = cam->getViewVolume(); Gui::ViewVolumeProjection proj(vv); Base::Polygon2D polygon; for (std::vector<SbVec2f>::const_iterator it = clPoly.begin(); it != clPoly.end(); ++it) polygon.Add(Base::Vector2D((*it)[0],(*it)[1])); taskBox->DefineNodes(polygon,proj,clip_inner); }
static void selectionCallback(void * ud, SoEventCallback * cb) { Gui::View3DInventorViewer* view = reinterpret_cast<Gui::View3DInventorViewer*>(cb->getUserData()); view->removeEventCallback(SoMouseButtonEvent::getClassTypeId(), selectionCallback, ud); SoNode* root = view->getSceneGraph(); static_cast<Gui::SoFCUnifiedSelection*>(root)->selectionRole.setValue(true); std::vector<SbVec2f> picked = view->getGLPolygon(); SoCamera* cam = view->getSoRenderManager()->getCamera(); SbViewVolume vv = cam->getViewVolume(); Gui::ViewVolumeProjection proj(vv); Base::Polygon2d polygon; if (picked.size() == 2) { SbVec2f pt1 = picked[0]; SbVec2f pt2 = picked[1]; polygon.Add(Base::Vector2d(pt1[0], pt1[1])); polygon.Add(Base::Vector2d(pt1[0], pt2[1])); polygon.Add(Base::Vector2d(pt2[0], pt2[1])); polygon.Add(Base::Vector2d(pt2[0], pt1[1])); } else { for (std::vector<SbVec2f>::const_iterator it = picked.begin(); it != picked.end(); ++it) polygon.Add(Base::Vector2d((*it)[0],(*it)[1])); } FaceColors* self = reinterpret_cast<FaceColors*>(ud); self->d->view = 0; if (self->d->obj && self->d->obj->getTypeId().isDerivedFrom(Part::Feature::getClassTypeId())) { cb->setHandled(); const TopoDS_Shape& shape = static_cast<Part::Feature*>(self->d->obj)->Shape.getValue(); self->d->addFacesToSelection(view, proj, polygon, shape); view->redraw(); } }
/// return the camera definition of the active view static PyObject * povViewCamera(PyObject *self, PyObject *args) { // no arguments if (!PyArg_ParseTuple(args, "")) return NULL; PY_TRY { std::string out; const char* ppReturn=0; Gui::Application::Instance->sendMsgToActiveView("GetCamera",&ppReturn); SoNode* rootNode; SoInput in; in.setBuffer((void*)ppReturn,std::strlen(ppReturn)); SoDB::read(&in,rootNode); if (!rootNode || !rootNode->getTypeId().isDerivedFrom(SoCamera::getClassTypeId())) throw Base::Exception("CmdRaytracingWriteCamera::activated(): Could not read " "camera information from ASCII stream....\n"); // root-node returned from SoDB::readAll() has initial zero // ref-count, so reference it before we start using it to // avoid premature destruction. SoCamera * Cam = static_cast<SoCamera*>(rootNode); Cam->ref(); SbRotation camrot = Cam->orientation.getValue(); SbVec3f upvec(0, 1, 0); // init to default up vector camrot.multVec(upvec, upvec); SbVec3f lookat(0, 0, -1); // init to default view direction vector camrot.multVec(lookat, lookat); SbVec3f pos = Cam->position.getValue(); float Dist = Cam->focalDistance.getValue(); // making gp out of the Coin stuff gp_Vec gpPos(pos.getValue()[0],pos.getValue()[1],pos.getValue()[2]); gp_Vec gpDir(lookat.getValue()[0],lookat.getValue()[1],lookat.getValue()[2]); lookat *= Dist; lookat += pos; gp_Vec gpLookAt(lookat.getValue()[0],lookat.getValue()[1],lookat.getValue()[2]); gp_Vec gpUp(upvec.getValue()[0],upvec.getValue()[1],upvec.getValue()[2]); // getting image format ParameterGrp::handle hGrp = App::GetApplication().GetParameterGroupByPath("User parameter:BaseApp/Preferences/Mod/Raytracing"); int width = hGrp->GetInt("OutputWidth", 800); int height = hGrp->GetInt("OutputHeight", 600); // call the write method of PovTools.... out = PovTools::getCamera(CamDef(gpPos,gpDir,gpLookAt,gpUp),width,height); return Py::new_reference_to(Py::String(out)); } PY_CATCH; }
void ViewProviderStructured::cut(const std::vector<SbVec2f>& picked, Gui::View3DInventorViewer &Viewer) { // create the polygon from the picked points Base::Polygon2D cPoly; for (std::vector<SbVec2f>::const_iterator it = picked.begin(); it != picked.end(); ++it) { cPoly.Add(Base::Vector2D((*it)[0],(*it)[1])); } // get a reference to the point feature Points::Feature* fea = static_cast<Points::Feature*>(pcObject); const Points::PointKernel& points = fea->Points.getValue(); SoCamera* pCam = Viewer.getSoRenderManager()->getCamera(); SbViewVolume vol = pCam->getViewVolume(); // search for all points inside/outside the polygon Points::PointKernel newKernel; newKernel.reserve(points.size()); bool invalidatePoints = false; double nan = std::numeric_limits<double>::quiet_NaN(); for (Points::PointKernel::const_iterator jt = points.begin(); jt != points.end(); ++jt) { // valid point? Base::Vector3d vec(*jt); if (!(boost::math::isnan(jt->x) || boost::math::isnan(jt->y) || boost::math::isnan(jt->z))) { SbVec3f pt(jt->x,jt->y,jt->z); // project from 3d to 2d vol.projectToScreen(pt, pt); if (cPoly.Contains(Base::Vector2D(pt[0],pt[1]))) { invalidatePoints = true; vec.Set(nan, nan, nan); } } newKernel.push_back(vec); } if (invalidatePoints) { //Remove the points from the cloud and open a transaction object for the undo/redo stuff Gui::Application::Instance->activeDocument()->openCommand("Cut points"); // sets the points outside the polygon to update the Inventor node fea->Points.setValue(newKernel); // unset the modified flag because we don't need the features' execute() to be called Gui::Application::Instance->activeDocument()->commitCommand(); fea->purgeTouched(); } }
/*! Handle a viewer event while in edit mode: use the picked polygon to
    carve facets out of the attached mesh.

    Returns true only when the picked polygon was degenerate (< 3 points);
    otherwise returns false so the event propagates further.
    NOTE(review): the 'ev' parameter itself is never inspected here. */
bool ViewProviderMeshNode::handleEvent(const SoEvent * const ev,Gui::View3DInventorViewer &Viewer)
{
  if ( m_bEdit )
  {
    unsetEdit();
    std::vector<SbVec2f> clPoly = Viewer.getPickedPolygon();
    if ( clPoly.size() < 3 )
      return true;
    // close the polygon if the first point is not repeated at the end
    if ( clPoly.front() != clPoly.back() )
      clPoly.push_back(clPoly.front());

    // get the normal of the front clipping plane
    SbVec3f b,n;
    Viewer.getNearPlane(b, n);
    Base::Vector3f cPoint(b[0],b[1],b[2]), cNormal(n[0],n[1],n[2]);
    SoCamera* pCam = Viewer.getCamera();
    SbViewVolume vol = pCam->getViewVolume();

    // create a tool shape from these points
    std::vector<MeshCore::MeshGeomFacet> aFaces;
    bool ok = ViewProviderMesh::createToolMesh( clPoly, vol, cNormal, aFaces );

    // Get the attached mesh property
    Mesh::PropertyMeshKernel& meshProp = ((Mesh::Feature*)pcObject)->Mesh;

    // Get the facet indices inside the tool mesh
    std::vector<unsigned long> indices;
    MeshCore::MeshKernel cToolMesh;
    cToolMesh = aFaces;
    MeshCore::MeshFacetGrid cGrid(meshProp.getValue().getKernel());
    MeshCore::MeshAlgorithm cAlg(meshProp.getValue().getKernel());
    cAlg.GetFacetsFromToolMesh(cToolMesh, cNormal, cGrid, indices);
    // NOTE(review): facets are deleted even when createToolMesh() reported
    // failure (ok == false) — confirm this is intended before changing.
    meshProp.deleteFacetIndices( indices );

    // update open edge display if needed
//    if ( pcOpenEdge )
//    {
//      showOpenEdges(false);
//      showOpenEdges(true);
//    }

    Viewer.render();
    if ( !ok ) // note: the mouse grabbing needs to be released
      //QMessageBox::warning(Viewer.getWidget(),"Invalid polygon","The picked polygon seems to have self-overlappings.\n\nThis could lead to strange rersults.");
      Base::Console().Message("The picked polygon seems to have self-overlappings. This could lead to strange results.");
  }

  return false;
}
void SoFCCSysDragger::finishDragCB(void *data, SoDragger *) { SoFCCSysDragger *sudoThis = reinterpret_cast<SoFCCSysDragger *>(data); // note: when creating a second view of the document and then closing // the first viewer it deletes the camera. However, the attached field // of the cameraSensor will be detached automatically. SoField* field = sudoThis->cameraSensor.getAttachedField(); if (field) { SoCamera* camera = static_cast<SoCamera*>(field->getContainer()); if (camera->getTypeId() == SoPerspectiveCamera::getClassTypeId()) cameraCB(sudoThis, nullptr); } }
void SoFCCSysDragger::idleCB(void *data, SoSensor *) { SoFCCSysDragger *sudoThis = reinterpret_cast<SoFCCSysDragger *>(data); SoField* field = sudoThis->cameraSensor.getAttachedField(); if (field) { SoCamera* camera = static_cast<SoCamera*>(field->getContainer()); SbMatrix localToWorld = sudoThis->getLocalToWorldMatrix(); SbVec3f origin; localToWorld.multVecMatrix(SbVec3f(0.0, 0.0, 0.0), origin); SbViewVolume viewVolume = camera->getViewVolume(); float radius = sudoThis->draggerSize.getValue() / 2.0; float localScale = viewVolume.getWorldToScreenScale(origin, radius); SbVec3f scaleVector(localScale, localScale, localScale); SoScale *localScaleNode = SO_GET_ANY_PART(sudoThis, "scaleNode", SoScale); localScaleNode->scaleFactor.setValue(scaleVector); sudoThis->autoScaleResult.setValue(localScale); } }
void ViewProviderPoints::cut(const std::vector<SbVec2f>& picked, Gui::View3DInventorViewer &Viewer) { // create the polygon from the picked points Base::Polygon2D cPoly; for (std::vector<SbVec2f>::const_iterator it = picked.begin(); it != picked.end(); ++it) { cPoly.Add(Base::Vector2D((*it)[0],(*it)[1])); } // get a reference to the point feature Points::Feature* fea = (Points::Feature*)pcObject; const Points::PointKernel& points = fea->Points.getValue(); SoCamera* pCam = Viewer.getSoRenderManager()->getCamera(); SbViewVolume vol = pCam->getViewVolume(); // search for all points inside/outside the polygon Points::PointKernel newKernel; for ( Points::PointKernel::const_iterator jt = points.begin(); jt != points.end(); ++jt ) { SbVec3f pt(jt->x,jt->y,jt->z); // project from 3d to 2d vol.projectToScreen(pt, pt); if (!cPoly.Contains(Base::Vector2D(pt[0],pt[1]))) newKernel.push_back(*jt); } if (newKernel.size() == points.size()) return; // nothing needs to be done //Remove the points from the cloud and open a transaction object for the undo/redo stuff Gui::Application::Instance->activeDocument()->openCommand("Cut points"); // sets the points outside the polygon to update the Inventor node fea->Points.setValue(newKernel); // unset the modified flag because we don't need the features' execute() to be called Gui::Application::Instance->activeDocument()->commitCommand(); fea->purgeTouched(); }
void Command::adjustCameraPosition() { Gui::Document* doc = Gui::Application::Instance->activeDocument(); if (doc) { Gui::View3DInventor* view = static_cast<Gui::View3DInventor*>(doc->getActiveView()); Gui::View3DInventorViewer* viewer = view->getViewer(); SoCamera* camera = viewer->getSoRenderManager()->getCamera(); if (!camera || !camera->isOfType(SoOrthographicCamera::getClassTypeId())) return; // get scene bounding box SoGetBoundingBoxAction action(viewer->getSoRenderManager()->getViewportRegion()); action.apply(viewer->getSceneGraph()); SbBox3f box = action.getBoundingBox(); if (box.isEmpty()) return; // get cirumscribing sphere and check if camera is inside SbVec3f cam_pos = camera->position.getValue(); SbVec3f box_cnt = box.getCenter(); SbSphere bs; bs.circumscribe(box); float radius = bs.getRadius(); float distance_to_midpoint = (box_cnt-cam_pos).length(); if (radius >= distance_to_midpoint) { // Move the camera to the edge of the bounding sphere, while still // pointing at the scene. SbVec3f direction = cam_pos - box_cnt; (void) direction.normalize(); // we know this is not a null vector camera->position.setValue(box_cnt + direction * radius); // New distance to mid point distance_to_midpoint = (camera->position.getValue() - box.getCenter()).length(); camera->nearDistance = distance_to_midpoint - radius; camera->farDistance = distance_to_midpoint + radius; camera->focalDistance = distance_to_midpoint; } } }
int main(int argc, char **argv) { using namespace std; int numObjects; long nRows, nCols; char **error; if (argc < 4) { fprintf(stderr,"Usage: lab1 <input file> <output file> <rows>\n"); return -1; } filename = (char*)malloc(sizeof(char)*strlen(argv[2])); strcpy(filename, argv[2]); error = (char**)malloc(sizeof(char**)*10); nRows = strtol(argv[3],error,10); if (**error) { fprintf(stderr,"Error! use a number for rows!\n"); return -2; } free(error); SoDB::init(); OSUInventorScene *scene = new OSUInventorScene(argv[1]); //check to see if there are objects if (numObjects = scene->Objects.getLength() < 1) { fprintf(stderr,"Error, no objects"); return -2; } //get the objects and put them into a list for (int i = 0; i < scene->Objects.getLength(); i++) { OSUObjectData *obj = (OSUObjectData *)scene->Objects[i]; /* if (!obj->Check()) { fprintf(stderr,"Error detected in OSUObjectData for object %i.\n",i); return 20; } */ SoType shape_type = obj->shape->getTypeId(); if (shape_type == SoSphere::getClassTypeId()) { Sphere *sp = new Sphere; SoSphere * sphere = (SoSphere *) obj->shape; SoTransform * transformation = obj->transformation; SbVec3f scale_vector = transformation->scaleFactor.getValue(); SbVec3f translation_vector = transformation->translation.getValue(); sp->radius = 1; sp->center = Point(0,0,0,1); //do the materials stuff SoMaterial * material = obj->material; sp->color = rgb(material->diffuseColor[0][0],material->diffuseColor[0][1],material->diffuseColor[0][2]); sp->specular = rgb(material->specularColor[0][0], material->specularColor[0][1],material->specularColor[0][2]); sp->ambient = rgb(material->ambientColor[0][0], material->ambientColor[0][1], material->ambientColor[0][2]); sp->shininess = material->shininess[0]; sp->trans = material->transparency[0]; printf("Object %d has shininess %f\n", sp->object_number, sp->shininess); //let's get our translation space matrix //void getTranslationSpaceMatrix(SbMatrix &mat, SbMatrix &inv) const SbMatrix tmat, tinv, smat, 
sinv, rmat, rinv; transformation->getTranslationSpaceMatrix(tmat, tinv); // transformation->getScaleSpaceMatrix(smat,sinv); transformation->getRotationSpaceMatrix(rmat, rinv); // SbMatrix & multRight(const SbMatrix &m) sp->M = tmat; sp->Mi = tinv; sp->M.transpose(); sp->Mi.transpose(); sp->type = eSPHERE; //texture data SoSFImage image; int nc; SbVec2s size; const unsigned char *texelArray; // get properties of object //obj = (OSUObjectData *)worldptr->scene->Objects[i]; if (obj->texture != NULL) { image = obj->texture->image; texelArray = image.getValue(size,nc); // sets 'size', 'nc', 'texelArray' sp->image = texelArray[0]; printf("\nimage value = %d\n",texelArray[0]); } objects.push_back(sp); sp->object_number = i; } if (shape_type == SoCube::getClassTypeId()) { Box *sp = new Box; SoSphere * sphere = (SoSphere *) obj->shape; SoTransform * transformation = obj->transformation; SbVec3f scale_vector = transformation->scaleFactor.getValue(); SbVec3f translation_vector = transformation->translation.getValue(); sp->center = Point(0,0,0,1); //do the materials stuff SoMaterial * material = obj->material; sp->color = rgb(material->diffuseColor[0][0],material->diffuseColor[0][1],material->diffuseColor[0][2]); sp->specular = rgb(material->specularColor[0][0], material->specularColor[0][1],material->specularColor[0][2]); sp->ambient = rgb(material->ambientColor[0][0], material->ambientColor[0][1], material->ambientColor[0][2]); sp->shininess = material->shininess[0]; sp->trans = material->transparency[0]; printf("Object %d has shininess %f\n", sp->object_number, sp->shininess); //let's get our translation space matrix //void getTranslationSpaceMatrix(SbMatrix &mat, SbMatrix &inv) const SbMatrix tmat, tinv, smat, sinv, rmat, rinv; transformation->getTranslationSpaceMatrix(tmat, tinv); // transformation->getScaleSpaceMatrix(smat,sinv); transformation->getRotationSpaceMatrix(rmat, rinv); // list triangles // SbMatrix & multRight(const SbMatrix &m) sp->M = tmat; sp->Mi = tinv; 
sp->M.transpose(); sp->Mi.transpose(); SoSFImage image; int nc; SbVec2s size; const unsigned char *texelArray; // get properties of object //obj = (OSUObjectData *)worldptr->scene->Objects[i]; if (obj->texture != NULL) { image = obj->texture->image; texelArray = image.getValue(size,nc); // sets 'size' // list triangles printf("\nimage value = %d\n",texelArray[0]); sp->image = texelArray[0]; } objects.push_back(sp); sp->type = eCUBE; sp->object_number = i; } if (shape_type == SoIndexedTriangleStripSet::getClassTypeId()) { SoIndexedTriangleStripSet * triangle_strip_set = (SoIndexedTriangleStripSet *) obj->shape; if (obj->points == NULL) { cout << " Error: Points missing for indexed triangle strip set."; } else if (obj->points->getTypeId() != SoCoordinate3::getClassTypeId()) { cout << " Error: Incorrect format for point list." << endl; cout << " Expected list of 3D coordinates."; } else { SoCoordinate3 * coord = (SoCoordinate3 *) obj->points; int numv = coord->point.getNum(); cout << " Surface has " << numv << " vertices." << endl; for (int i = 0; i < numv; i++) { cout << " Vertex " << i << " = (" << coord->point[i][0] << ", " << coord->point[i][1] << ", " << coord->point[i][2] << ")." << endl; } if (obj->normals == NULL) { cout << " No stored normals." << endl; } else { int num_normals = obj->normals->vector.getNum(); if (num_normals != numv) { // assume PER_VERTEX normal binding cout << "Error: Number of normals does not equal number of vertices." << endl; } else { for (int i = 0; i < num_normals; i++) { cout << " Normal " << i << " = (" << obj->normals->vector[i][0] << ", " << obj->normals->vector[i][1] << ", " << obj->normals->vector[i][2] << ")." 
<< endl; } } } /* // list triangles int itriangle = 0; int icoord = 0; int coord_list_length = triangle_strip_set->coordIndex.getNum(); while (icoord < coord_list_length) { int c0 = SO_END_STRIP_INDEX; int c1 = SO_END_STRIP_INDEX; int c2 = SO_END_STRIP_INDEX; while (icoord < coord_list_length && triangle_strip_set->coordIndex[icoord] != SO_END_STRIP_INDEX) { c2 = triangle_strip_set->coordIndex[icoord]; if (c0 != SO_END_STRIP_INDEX && c1 != SO_END_STRIP_INDEX) { cout << " Triangle " << itriangle << " coordinate indices = ( " << c0 << ", " << c1 << ", " << c2 << " )." << endl; itriangle++; }; icoord++; c0 = c1; c1 = c2; } } */ } } free(obj); } printf("Number of objects seen %d\n", objects.size()); printf("Setting up camera\n"); /********************************************************************* * Camera Setup * *********************************************************************/ SoCamera * cam = scene->Camera; if (scene->Camera == NULL) { printf("No camera found. Setting Default/n"); camera.aspect = 1; camera.position = Point(0,0,0,1); camera.n = Vector(0,0,-1,0); camera.v = Vector(0,1,0,0); camera.u = cross(camera.n,camera.v); camera.height_angle = M_PI/2; } else { SbVec3f camera_position = cam->position.getValue(); SbRotation cam_orientation = cam->orientation.getValue(); SbVec3f camera_rotation_axis; float cam_rotation_angle; cam_orientation.getValue(camera_rotation_axis, cam_rotation_angle); float cam_aspect_ratio = cam->aspectRatio.getValue(); SoType cam_type = cam->getTypeId(); //Let's grab the aspect ratio nCols =(int)( nRows*cam_aspect_ratio); camera.aspect = cam_aspect_ratio; printf("Number of rows is %d columns is %d with an aspect ratio of %f\n",nRows,nCols,cam_aspect_ratio); // calculate camera direction and camera up direction SbVec3f camera_direction, camera_up; cam_orientation.multVec(SbVec3f(0, 0, -1), camera_direction); cam_orientation.multVec(SbVec3f(0, 1, 0), camera_up); camera.n = 
-1*Vector(camera_direction[0],camera_direction[1],camera_direction[2],0); camera.v = -1*Vector(camera_up[0],camera_up[1],camera_up[2],0); camera.position = Point(camera_position[0],camera_position[1],camera_position[2],1); camera.u = cross(camera.n,camera.v); if (cam_type == SoPerspectiveCamera::getClassTypeId()) { // camera is a perspective camera SoPerspectiveCamera * perspective_camera = (SoPerspectiveCamera *) cam; float camera_height_angle = perspective_camera->heightAngle.getValue(); camera.height_angle = camera_height_angle; } } N = 1; pixH = 2*tan(camera.height_angle/2)/nCols; pixW = 2*tan(camera.height_angle/2)/nRows; printf("Camera position is %f %f %f\n",camera.position.x,camera.position.y, camera.position.z); printf("Camera n is %f %f %f\n",camera.n.x,camera.n.y, camera.n.z); printf("Camera v is %f %f %f\n",camera.v.x,camera.v.y, camera.v.z); printf("Camera u is %f %f %f\n",camera.u.x,camera.u.y, camera.u.z); printf("N is %f\n",N); //setup for lights // list lights for (int j = 0; j < scene->Lights.getLength(); j++) { SoLight * flight = (SoLight *) scene->Lights[j]; SoType light_type = flight->getTypeId(); SoSFColor lightColor; SbColor lightClr; float lightr,lightg,lightb; lightColor = flight->color; light *l = new light; l->color = rgb(lightClr[0],lightClr[1],lightClr[2]); l->intensity = flight->intensity.getValue(); if (light_type == SoPointLight::getClassTypeId()) { SoPointLight * point_light = (SoPointLight *) flight; SbVec3f location = point_light->location.getValue(); l->center = Point(location[0],location[1],location[2],1); l->type = ePOINT; } else if (light_type == SoSpotLight::getClassTypeId()) { SoSpotLight * spot_light = (SoSpotLight *) flight; SbVec3f location = spot_light->location.getValue(); l->center = Point(location[0], location[1], location[2],1); l->theta = spot_light->cutOffAngle.getValue(); l->drop_off = spot_light->dropOffRate.getValue(); SbVec3f direction = spot_light->direction.getValue(); Vector lightDir(direction[0], 
direction[1], direction[2],0); l->dir = lightDir; l->type=eHOOD; } else if (light_type == SoDirectionalLight::getClassTypeId()) { l->type = eDIR; SoDirectionalLight * dir_light = (SoDirectionalLight *) flight; SbVec3f direction = dir_light->direction.getValue(); Vector lightDir(direction[0], direction[1], direction[2],0); l->dir = lightDir; } lights.push_back(l); } //start our tracer trace(objects, nRows, nCols, 1); //let's free our vector pointers int x = objects.size() - 1; fprintf(stderr,"Freeing objects\n"); while (x >= 0) { delete objects[x]; // printf("freeing object %d\n",x); x--; } x = lights.size() - 1; fprintf(stderr,"Freeing lights\n"); while (x >= 0) { delete lights[x]; x--; } }
/*! Event callback fired when the user finished picking a polygon in the
    3d view: collects all FEM mesh nodes whose projection lies inside the
    polygon and creates a FemSetNodesObject holding their ids, added to
    the active analysis. */
void DefineNodesCallback(void * ud, SoEventCallback * n)
{
    Fem::FemAnalysis *Analysis;

    // bail out if no active analysis / prerequisites are missing
    if(getConstraintPrerequisits(&Analysis))
        return;

    // show the wait cursor because this could take quite some time
    Gui::WaitCursor wc;

    // When this callback function is invoked we must in either case leave the edit mode
    Gui::View3DInventorViewer* view = reinterpret_cast<Gui::View3DInventorViewer*>(n->getUserData());
    view->setEditing(false);
    view->removeEventCallback(SoMouseButtonEvent::getClassTypeId(), DefineNodesCallback,ud);
    n->setHandled();

    SbBool clip_inner;
    std::vector<SbVec2f> clPoly = view->getGLPolygon(&clip_inner);
    if (clPoly.size() < 3)
        return;
    // close the polygon if necessary
    if (clPoly.front() != clPoly.back())
        clPoly.push_back(clPoly.front());

    // projection through the current camera's view volume
    SoCamera* cam = view->getCamera();
    SbViewVolume vv = cam->getViewVolume();
    Gui::ViewVolumeProjection proj(vv);

    Base::Polygon2D polygon;
    for (std::vector<SbVec2f>::const_iterator it = clPoly.begin(); it != clPoly.end(); ++it)
        polygon.Add(Base::Vector2D((*it)[0],(*it)[1]));

    // exactly one FEM mesh object must be selected
    std::vector<App::DocumentObject*> docObj = Gui::Selection().getObjectsOfType(Fem::FemMeshObject::getClassTypeId());
    if(docObj.size() !=1)
        return;

    const SMESHDS_Mesh* data = const_cast<SMESH_Mesh*>(dynamic_cast<Fem::FemMeshObject*>(docObj[0])->FemMesh.getValue().getSMesh())->GetMeshDS();

    // collect the ids of all nodes projecting into the polygon
    SMDS_NodeIteratorPtr aNodeIter = data->nodesIterator();
    Base::Vector3f pt2d;
    std::set<int> IntSet;
    // NOTE(review): the counter 'i' is declared but never used or incremented
    for (int i=0;aNodeIter->more();) {
        const SMDS_MeshNode* aNode = aNodeIter->next();
        Base::Vector3f vec(aNode->X(),aNode->Y(),aNode->Z());
        pt2d = proj(vec);
        if (polygon.Contains(Base::Vector2D(pt2d.x, pt2d.y)) == true)
            IntSet.insert(aNode->GetID());
    }

    // build a Python list literal "[id1,id2,...]" from the node ids
    std::stringstream set;
    set << "[";
    for(std::set<int>::const_iterator it=IntSet.begin();it!=IntSet.end();++it)
        if(it==IntSet.begin())
            set << *it ;
        else
            set << "," << *it ;
    set << "]";

    // create the node set object and attach it to the analysis
    // NOTE(review): the undo label "Place robot" looks copy-pasted from the
    // Robot workbench — consider a FEM-specific label.
    Gui::Command::openCommand("Place robot");
    Gui::Command::doCommand(Gui::Command::Doc,"App.ActiveDocument.addObject('Fem::FemSetNodesObject','NodeSet')");
    Gui::Command::doCommand(Gui::Command::Doc,"App.ActiveDocument.ActiveObject.Nodes = %s",set.str().c_str());
    Gui::Command::doCommand(Gui::Command::Doc,"App.activeDocument().%s.Member = App.activeDocument().%s.Member + [App.activeDocument().NodeSet]",Analysis->getNameInDocument(),Analysis->getNameInDocument());
    ////Gui::Command::updateActive();
    Gui::Command::commitCommand();

    //std::vector<Gui::ViewProvider*> views = view->getViewProvidersOfType(ViewProviderMesh::getClassTypeId());
    //if (!views.empty()) {
    //    Gui::Application::Instance->activeDocument()->openCommand("Cut");
    //    for (std::vector<Gui::ViewProvider*>::iterator it = views.begin(); it != views.end(); ++it) {
    //        ViewProviderMesh* that = static_cast<ViewProviderMesh*>(*it);
    //        if (that->getEditingMode() > -1) {
    //            that->finishEditing();
    //            that->cutMesh(clPoly, *view, clip_inner);
    //        }
    //    }
    //    Gui::Application::Instance->activeDocument()->commitCommand();
    //    view->render();
    //}
}
/*! The string returned from this function is only valid until the next variable is requested. */ const char * SoScXMLStateMachine::getVariable(const char * key) const { if (strncmp(key, "_event.", 7) == 0) { // printf("scan for key '%s'\n", key); const char * subkey = key + 7; const ScXMLEvent * ev = this->getCurrentEvent(); if (ev->isOfType(SoScXMLEvent::getClassTypeId())) { const SoScXMLEvent * soev = static_cast<const SoScXMLEvent *>(ev); const SoEvent * coinev = soev->getSoEvent(); if (strcmp(subkey, "getTime()") == 0) { SbTime timeval = coinev->getTime(); double doubletime = timeval.getValue(); PRIVATE(this)->varstring = SbStringConvert::toString(doubletime); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getPosition().x") == 0) { SbVec2s pos = coinev->getPosition(); PRIVATE(this)->varstring = SbStringConvert::toString(static_cast<double>(pos[0])); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getPosition().y") == 0) { SbVec2s pos = coinev->getPosition(); PRIVATE(this)->varstring = SbStringConvert::toString(static_cast<double>(pos[1])); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getPosition()") == 0) { SbVec2s pos = coinev->getPosition(); PRIVATE(this)->varstring = SbStringConvert::toString(pos); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getNormalizedPosition().x") == 0) { SbVec2f pos = coinev->getNormalizedPosition(this->getViewportRegion()); PRIVATE(this)->varstring = SbStringConvert::toString(static_cast<double>(pos[0])); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getNormalizedPosition().y") == 0) { SbVec2f pos = coinev->getNormalizedPosition(this->getViewportRegion()); PRIVATE(this)->varstring = SbStringConvert::toString(static_cast<double>(pos[1])); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getNormalizedPosition()") == 0) { SbVec2f pos = 
coinev->getNormalizedPosition(this->getViewportRegion()); PRIVATE(this)->varstring = SbStringConvert::toString(pos); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "wasShiftDown()") == 0) { SbBool wasdown = coinev->wasShiftDown(); PRIVATE(this)->varstring = SbStringConvert::toString<bool>(wasdown); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "wasCtrlDown()") == 0) { SbBool wasdown = coinev->wasCtrlDown(); PRIVATE(this)->varstring = SbStringConvert::toString<bool>(wasdown); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "wasAltDown()") == 0) { SbBool wasdown = coinev->wasAltDown(); PRIVATE(this)->varstring = SbStringConvert::toString<bool>(wasdown); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getState()") == 0 && coinev->isOfType(SoButtonEvent::getClassTypeId())) { const SoButtonEvent * bevent = coin_assert_cast<const SoButtonEvent *>(coinev); SbString enumname; SoButtonEvent::enumToString(bevent->getState(), enumname); PRIVATE(this)->varstring.sprintf("'%s'", enumname.getString()); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getKey()") == 0 && coinev->isOfType(SoKeyboardEvent::getClassTypeId())) { const SoKeyboardEvent * kbevent = coin_assert_cast<const SoKeyboardEvent *>(coinev); SbString enumname; SoKeyboardEvent::enumToString(kbevent->getKey(), enumname); PRIVATE(this)->varstring.sprintf("'%s'", enumname.getString()); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getPrintableCharacter()") == 0 && coinev->isOfType(SoKeyboardEvent::getClassTypeId())) { const SoKeyboardEvent * kbevent = coin_assert_cast<const SoKeyboardEvent *>(coinev); char printable = kbevent->getPrintableCharacter(); PRIVATE(this)->varstring.sprintf("'%c'", printable); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getButton()") == 0 && coinev->isOfType(SoMouseButtonEvent::getClassTypeId())) { const 
SoMouseButtonEvent * mbevent = coin_assert_cast<const SoMouseButtonEvent *>(coinev); SbString enumname; SoMouseButtonEvent::enumToString(mbevent->getButton(), enumname); PRIVATE(this)->varstring.sprintf("'%s'", enumname.getString()); return PRIVATE(this)->varstring.getString(); } else if (strcmp(subkey, "getButton()") == 0 && coinev->isOfType(SoSpaceballButtonEvent::getClassTypeId())) { const SoSpaceballButtonEvent * mbevent = coin_assert_cast<const SoSpaceballButtonEvent *>(coinev); SbString enumname; SoSpaceballButtonEvent::enumToString(mbevent->getButton(), enumname); PRIVATE(this)->varstring.sprintf("'%s'", enumname.getString()); return PRIVATE(this)->varstring.getString(); } // FIXME: x., .y, .z else if (strcmp(subkey, "getTranslation()") == 0 && coinev->isOfType(SoMotion3Event::getClassTypeId())) { const SoMotion3Event * m3event = coin_assert_cast<const SoMotion3Event *>(coinev); SbVec3f translation = m3event->getTranslation(); PRIVATE(this)->varstring = SbStringConvert::toString(translation); return PRIVATE(this)->varstring.getString(); } // FIXME: .angle, .axis else if (strcmp(subkey, "getRotation()") == 0 && coinev->isOfType(SoMotion3Event::getClassTypeId())) { const SoMotion3Event * m3event = coin_assert_cast<const SoMotion3Event *>(coinev); SbRotation rotation = m3event->getRotation(); PRIVATE(this)->varstring = SbStringConvert::toString(rotation); return PRIVATE(this)->varstring.getString(); } // FIXME: make this into a evaluator-level RayPick(SbVec2f) function instead else if (strcmp(key + 7, "pickposition3") == 0) { SbVec2s location2 = coinev->getPosition(); SoRayPickAction rpa(this->getViewportRegion()); rpa.setPoint(location2); rpa.apply(this->getSceneGraphRoot()); SoPickedPoint * pp = rpa.getPickedPoint(); if (pp) { SbVec3f pickpos = pp->getPoint(); PRIVATE(this)->varstring = SbStringConvert::toString(pickpos); } else { PRIVATE(this)->varstring.sprintf("FALSE"); // need a valid undefined-value } return PRIVATE(this)->varstring.getString(); } } } 
else if (strncmp(key, "coin:", 5) == 0) { const char * subkey = key + 5; if (strncmp(subkey, "camera.", 7) == 0) { SoCamera * camera = this->getActiveCamera(); if (!camera) { SoDebugError::post("SoScXMLStateMachine::getVariable", "queried for camera, but no camera is set."); return NULL; } const char * detail = subkey + 7; if (strcmp(detail, "getTypeId()") == 0) { PRIVATE(this)->varstring.sprintf("'%s'", camera->getTypeId().getName().getString()); return PRIVATE(this)->varstring.getString(); } } // get generic field access working and intercept for more So-specific stuff // coin:viewport // coin:camera // coin:scene } //else { //} // couldn't resolve the symbol - try parent class to get '_data' and other '_event' // locations resolved return inherited::getVariable(key); }
void MeshSelection::selectGLCallback(void * ud, SoEventCallback * n) { // When this callback function is invoked we must leave the edit mode Gui::View3DInventorViewer* view = reinterpret_cast<Gui::View3DInventorViewer*>(n->getUserData()); MeshSelection* self = reinterpret_cast<MeshSelection*>(ud); self->stopInteractiveCallback(view); n->setHandled(); std::vector<SbVec2f> polygon = view->getGLPolygon(); if (polygon.size() < 3) return; if (polygon.front() != polygon.back()) polygon.push_back(polygon.front()); SbVec3f pnt, dir; view->getNearPlane(pnt, dir); Base::Vector3f point (pnt[0],pnt[1],pnt[2]); Base::Vector3f normal(dir[0],dir[1],dir[2]); std::list<ViewProviderMesh*> views = self->getViewProviders(); for (std::list<ViewProviderMesh*>::iterator it = views.begin(); it != views.end(); ++it) { ViewProviderMesh* vp = static_cast<ViewProviderMesh*>(*it); std::vector<unsigned long> faces; const Mesh::MeshObject& mesh = static_cast<Mesh::Feature*>((*it)->getObject())->Mesh.getValue(); const MeshCore::MeshKernel& kernel = mesh.getKernel(); // simply get all triangles under the polygon SoCamera* cam = view->getSoRenderManager()->getCamera(); SbViewVolume vv = cam->getViewVolume(); Gui::ViewVolumeProjection proj(vv); vp->getFacetsFromPolygon(polygon, proj, true, faces); if (self->onlyVisibleTriangles) { const SbVec2s& sz = view->getSoRenderManager()->getViewportRegion().getWindowSize(); short width,height; sz.getValue(width,height); std::vector<SbVec2s> pixelPoly = view->getPolygon(); SbBox2s rect; for (std::vector<SbVec2s>::iterator it = pixelPoly.begin(); it != pixelPoly.end(); ++it) { const SbVec2s& p = *it; rect.extendBy(SbVec2s(p[0],height-p[1])); } std::vector<unsigned long> rf; rf.swap(faces); std::vector<unsigned long> vf = vp->getVisibleFacetsAfterZoom (rect, view->getSoRenderManager()->getViewportRegion(), view->getSoRenderManager()->getCamera()); // get common facets of the viewport and the visible one std::sort(vf.begin(), vf.end()); std::sort(rf.begin(), 
rf.end()); std::back_insert_iterator<std::vector<unsigned long> > biit(faces); std::set_intersection(vf.begin(), vf.end(), rf.begin(), rf.end(), biit); } // if set filter out all triangles which do not point into user direction if (self->onlyPointToUserTriangles) { std::vector<unsigned long> screen; screen.reserve(faces.size()); MeshCore::MeshFacetIterator it_f(kernel); for (std::vector<unsigned long>::iterator it = faces.begin(); it != faces.end(); ++it) { it_f.Set(*it); if (it_f->GetNormal() * normal > 0.0f) { screen.push_back(*it); } } faces.swap(screen); } if (self->addToSelection) vp->addSelection(faces); else vp->removeSelection(faces); } view->redraw(); }
void CmdRaytracingWriteCamera::activated(int iMsg) { const char* ppReturn=0; getGuiApplication()->sendMsgToActiveView("GetCamera",&ppReturn); if (ppReturn) { std::string str(ppReturn); if (str.find("PerspectiveCamera") == std::string::npos) { int ret = QMessageBox::warning(Gui::getMainWindow(), qApp->translate("CmdRaytracingWriteView","No perspective camera"), qApp->translate("CmdRaytracingWriteView","The current view camera is not perspective" " and thus the result of the povray image later might look different to" " what you expect.\nDo you want to continue?"), QMessageBox::Yes|QMessageBox::No); if (ret != QMessageBox::Yes) return; } } SoInput in; in.setBuffer((void*)ppReturn,std::strlen(ppReturn)); SoNode* rootNode; SoDB::read(&in,rootNode); if (!rootNode || !rootNode->getTypeId().isDerivedFrom(SoCamera::getClassTypeId())) throw Base::Exception("CmdRaytracingWriteCamera::activated(): Could not read " "camera information from ASCII stream....\n"); // root-node returned from SoDB::readAll() has initial zero // ref-count, so reference it before we start using it to // avoid premature destruction. 
SoCamera * Cam = static_cast<SoCamera*>(rootNode); Cam->ref(); SbRotation camrot = Cam->orientation.getValue(); SbVec3f upvec(0, 1, 0); // init to default up vector camrot.multVec(upvec, upvec); SbVec3f lookat(0, 0, -1); // init to default view direction vector camrot.multVec(lookat, lookat); SbVec3f pos = Cam->position.getValue(); float Dist = Cam->focalDistance.getValue(); QStringList filter; filter << QObject::tr("Povray(*.pov)"); filter << QObject::tr("All Files (*.*)"); QString fn = Gui::FileDialog::getSaveFileName(Gui::getMainWindow(), QObject::tr("Export page"), QString(), filter.join(QLatin1String(";;"))); if (fn.isEmpty()) return; std::string cFullName = (const char*)fn.toUtf8(); // building up the python string std::stringstream out; out << "Raytracing.writeCameraFile(\"" << strToPython(cFullName) << "\"," << "(" << pos.getValue()[0] <<"," << pos.getValue()[1] <<"," << pos.getValue()[2] <<")," << "(" << lookat.getValue()[0] <<"," << lookat.getValue()[1] <<"," << lookat.getValue()[2] <<")," ; lookat *= Dist; lookat += pos; out << "(" << lookat.getValue()[0] <<"," << lookat.getValue()[1] <<"," << lookat.getValue()[2] <<")," << "(" << upvec.getValue()[0] <<"," << upvec.getValue()[1] <<"," << upvec.getValue()[2] <<") )" ; doCommand(Doc,"import Raytracing"); doCommand(Gui,out.str().c_str()); // Bring ref-count of root-node back to zero to cause the // destruction of the camera. Cam->unref(); }
void ViewProviderScattered::cut(const std::vector<SbVec2f>& picked, Gui::View3DInventorViewer &Viewer) { // create the polygon from the picked points Base::Polygon2D cPoly; for (std::vector<SbVec2f>::const_iterator it = picked.begin(); it != picked.end(); ++it) { cPoly.Add(Base::Vector2D((*it)[0],(*it)[1])); } // get a reference to the point feature Points::Feature* fea = static_cast<Points::Feature*>(pcObject); const Points::PointKernel& points = fea->Points.getValue(); SoCamera* pCam = Viewer.getSoRenderManager()->getCamera(); SbViewVolume vol = pCam->getViewVolume(); // search for all points inside/outside the polygon std::vector<unsigned long> removeIndices; removeIndices.reserve(points.size()); unsigned long index = 0; for (Points::PointKernel::const_iterator jt = points.begin(); jt != points.end(); ++jt, ++index) { SbVec3f pt(jt->x,jt->y,jt->z); // project from 3d to 2d vol.projectToScreen(pt, pt); if (cPoly.Contains(Base::Vector2D(pt[0],pt[1]))) removeIndices.push_back(index); } if (removeIndices.empty()) return; // nothing needs to be done //Remove the points from the cloud and open a transaction object for the undo/redo stuff Gui::Application::Instance->activeDocument()->openCommand("Cut points"); // sets the points outside the polygon to update the Inventor node fea->Points.removeIndices(removeIndices); std::map<std::string,App::Property*> Map; pcObject->getPropertyMap(Map); for (std::map<std::string,App::Property*>::iterator it = Map.begin(); it != Map.end(); ++it) { Base::Type type = it->second->getTypeId(); if (type == Points::PropertyNormalList::getClassTypeId()) { static_cast<Points::PropertyNormalList*>(it->second)->removeIndices(removeIndices); } else if (type == Points::PropertyGreyValueList::getClassTypeId()) { static_cast<Points::PropertyGreyValueList*>(it->second)->removeIndices(removeIndices); } else if (type == App::PropertyColorList::getClassTypeId()) { //static_cast<App::PropertyColorList*>(it->second)->removeIndices(removeIndices); const 
std::vector<App::Color>& colors = static_cast<App::PropertyColorList*>(it->second)->getValues(); if (removeIndices.size() > colors.size()) break; std::vector<App::Color> remainValue; remainValue.reserve(colors.size() - removeIndices.size()); std::vector<unsigned long>::iterator pos = removeIndices.begin(); for (std::vector<App::Color>::const_iterator jt = colors.begin(); jt != colors.end(); ++jt) { unsigned long index = jt - colors.begin(); if (pos == removeIndices.end()) remainValue.push_back( *jt ); else if (index != *pos) remainValue.push_back( *jt ); else ++pos; } static_cast<App::PropertyColorList*>(it->second)->setValues(remainValue); } } // unset the modified flag because we don't need the features' execute() to be called Gui::Application::Instance->activeDocument()->commitCommand(); fea->purgeTouched(); }
/*!
 * Create a new PartDesign Body and make it the active object of the 3D view.
 *
 * If exactly one Part feature is selected it becomes the body's BaseFeature,
 * after a series of sanity checks (not a PartDesign feature, not already in
 * a body, not itself a body, not owned by a different App::Part). Selecting
 * more than one feature aborts with a warning. With no selection the camera
 * is auto-adjusted at the end (Coin >= 4 only).
 */
void CmdPartDesignBody::activated(int iMsg)
{
    Q_UNUSED(iMsg);
    if ( !PartDesignGui::assureModernWorkflow( getDocument() ) )
        return;
    App::Part *actPart = PartDesignGui::getActivePart ();
    App::Part* partOfBaseFeature = nullptr;

    std::vector<App::DocumentObject*> features =
        getSelection().getObjectsOfType(Part::Feature::getClassTypeId());
    App::DocumentObject* baseFeature = nullptr;
    // Remember whether nothing was selected; drives the view-all camera
    // adjustment at the end.
    bool viewAll = features.empty();

    if (!features.empty()) {
        if (features.size() == 1) {
            baseFeature = features[0];
            if ( baseFeature->isDerivedFrom ( PartDesign::Feature::getClassTypeId() ) &&
                 PartDesign::Body::findBodyOf ( baseFeature ) ) {
                // Prevent creating bodies based on features already belonging to other bodies
                QMessageBox::warning(Gui::getMainWindow(),
                    QObject::tr("Bad base feature"),
                    QObject::tr("Body can't be based on a PartDesign feature."));
                baseFeature = nullptr;
            }
            else if (PartDesign::Body::findBodyOf ( baseFeature )){
                // A (non-PartDesign) feature that already belongs to a body
                // cannot serve as base feature for a second one.
                QMessageBox::warning(Gui::getMainWindow(),
                    QObject::tr("Bad base feature"),
                    QObject::tr("%1 already belongs to a body, can't use it as base feature for another body.")
                        .arg(QString::fromUtf8(baseFeature->Label.getValue())));
                baseFeature = nullptr;
            }
            else if ( baseFeature->isDerivedFrom ( Part::BodyBase::getClassTypeId() ) ) {
                // Prevent creating bodies based on bodies
                QMessageBox::warning(Gui::getMainWindow(),
                    QObject::tr("Bad base feature"),
                    QObject::tr("Body can't be based on another body."));
                baseFeature = nullptr;
            }
            else {
                partOfBaseFeature = App::Part::getPartOfObject(baseFeature);
                if (partOfBaseFeature != 0 && partOfBaseFeature != actPart){
                    // prevent cross-part mess
                    QMessageBox::warning(Gui::getMainWindow(),
                        QObject::tr("Bad base feature"),
                        QObject::tr("Base feature (%1) belongs to other part.")
                            .arg(QString::fromUtf8(baseFeature->Label.getValue())));
                    baseFeature = nullptr;
                };
            }
        } else {
            // More than one selected feature is not supported as a base.
            QMessageBox::warning(Gui::getMainWindow(),
                QObject::tr("Bad base feature"),
                QObject::tr("Body may be based no more than on one feature."));
            return;
        }
    }

    openCommand("Add a Body");

    std::string bodyName = getUniqueObjectName("Body");

    // add the Body feature itself, and make it active
    doCommand(Doc,"App.activeDocument().addObject('PartDesign::Body','%s')", bodyName.c_str());
    if (baseFeature) {
        if (partOfBaseFeature){
            // withdraw base feature from Part, otherwise visibility madness results
            doCommand(Doc,"App.activeDocument().%s.removeObject(App.activeDocument().%s)",
                    partOfBaseFeature->getNameInDocument(), baseFeature->getNameInDocument());
        }
        doCommand(Doc,"App.activeDocument().%s.BaseFeature = App.activeDocument().%s",
                 bodyName.c_str(), baseFeature->getNameInDocument());
    }
    addModule(Gui,"PartDesignGui"); // import the Gui module only once a session
    doCommand(Gui::Command::Gui,
        "Gui.activeView().setActiveObject('%s', App.activeDocument().%s)",
        PDBODYKEY, bodyName.c_str());

    // Make the "Create sketch" prompt appear in the task panel
    doCommand(Gui,"Gui.Selection.clearSelection()");
    doCommand(Gui,"Gui.Selection.addSelection(App.ActiveDocument.%s)", bodyName.c_str());
    if (actPart) {
        // Attach the new body to the currently active part, if any.
        doCommand(Doc,"App.activeDocument().%s.addObject(App.ActiveDocument.%s)",
                 actPart->getNameInDocument(), bodyName.c_str());
    }

    // The method 'SoCamera::viewBoundingBox' is still declared as protected in Coin3d versions
    // older than 4.0.
#if COIN_MAJOR_VERSION >= 4
    // if no part feature was there then auto-adjust the camera
    if (viewAll) {
        Gui::Document* doc = Gui::Application::Instance->getDocument(getDocument());
        Gui::View3DInventor* view = doc ? qobject_cast<Gui::View3DInventor*>(doc->getActiveView()) : nullptr;
        if (view) {
            SoCamera* camera = view->getViewer()->getCamera();
            SbViewportRegion vpregion = view->getViewer()->getViewportRegion();
            float aspectratio = vpregion.getViewportAspectRatio();
            // Frame a cube sized after the default origin so the new (empty)
            // body is nicely centered in the view.
            float size = Gui::ViewProviderOrigin::defaultSize();
            SbBox3f bbox;
            bbox.setBounds(-size,-size,-size,size,size,size);
            camera->viewBoundingBox(bbox, aspectratio, 1.0f);
        }
    }
#endif

    updateActive();
}
virtual void apply(SoNode* node) { if (!headlightRot) { SoSearchAction sa; sa.setNode(viewer->getHeadlight()); sa.apply(viewer->getSceneRoot()); SoFullPath* fullPath = (SoFullPath*) sa.getPath(); if (fullPath) { SoGroup *group = (SoGroup*) fullPath->getNodeFromTail(1); headlightRot = (SoRotation*) group->getChild(0); if (!headlightRot->isOfType(SoRotation::getClassTypeId())) headlightRot = 0; } } const SbViewportRegion vpr = getViewportRegion(); const SbVec2s & size = vpr.getViewportSizePixels(); const int width = size[0]; const int height = size[1]; const int vpsize = width / 2; SoCamera * camera = viewer->getCamera(); const SbVec3f position = camera->position.getValue(); const SbRotation orientation = camera->orientation.getValue(); const float nearplane = camera->nearDistance.getValue(); const float farplane = camera->farDistance.getValue(); camera->enableNotify(false); // Front View rotateCamera(SbRotation(SbVec3f(0,0,1), M_PI)); SbViewportRegion vp; vp.setViewportPixels(SbVec2s(0, height-width/2), SbVec2s(width, width/2) ); setViewportRegion(vp); SoGLRenderAction::apply(node); // Left View SbRotation r1(SbVec3f(0,0,1), -M_PI/2); rotateCamera(r1*SbRotation(SbVec3f(0,1,0), -M_PI/2)); vp.setViewportPixels(SbVec2s(0, height-width), SbVec2s(width/2, width) ); setViewportRegion(vp); SoGLRenderAction::apply(node); // Right View rotateCamera(SbRotation(SbVec3f(0,1,0), -M_PI)); vp.setViewportPixels(SbVec2s(width/2, height-width), SbVec2s(width/2, width) ); setViewportRegion(vp); SoGLRenderAction::apply(node); setViewportRegion(vpr); camera->position = position; camera->orientation = orientation; camera->enableNotify(true); // Restore original viewport region setViewportRegion(vpr); }