Code Example #1
/// return the camera definition of the active view
static PyObject *
povViewCamera(PyObject *self, PyObject *args)
{
    // no arguments
    if (!PyArg_ParseTuple(args, ""))
        return NULL;
    PY_TRY {
        std::string out;
        const char* ppReturn=0;

        Gui::Application::Instance->sendMsgToActiveView("GetCamera",&ppReturn);

        SoNode* rootNode;
        SoInput in;
        in.setBuffer((void*)ppReturn,std::strlen(ppReturn));
        SoDB::read(&in,rootNode);

        if (!rootNode || !rootNode->getTypeId().isDerivedFrom(SoCamera::getClassTypeId()))
            throw Base::Exception("CmdRaytracingWriteCamera::activated(): Could not read "
                                  "camera information from ASCII stream....\n");

        // root-node returned from SoDB::read() has an initial zero
        // ref-count, so reference it before we start using it to
        // avoid premature destruction.
        SoCamera * Cam = static_cast<SoCamera*>(rootNode);
        Cam->ref();

        SbRotation camrot = Cam->orientation.getValue();

        SbVec3f upvec(0, 1, 0); // init to default up vector
        camrot.multVec(upvec, upvec);

        SbVec3f lookat(0, 0, -1); // init to default view direction vector
        camrot.multVec(lookat, lookat);

        SbVec3f pos = Cam->position.getValue();
        float Dist = Cam->focalDistance.getValue();

        // making gp out of the Coin stuff
        gp_Vec gpPos(pos.getValue()[0],pos.getValue()[1],pos.getValue()[2]);
        gp_Vec gpDir(lookat.getValue()[0],lookat.getValue()[1],lookat.getValue()[2]);
        lookat *= Dist;
        lookat += pos;
        gp_Vec gpLookAt(lookat.getValue()[0],lookat.getValue()[1],lookat.getValue()[2]);
        gp_Vec gpUp(upvec.getValue()[0],upvec.getValue()[1],upvec.getValue()[2]);

        // getting image format
        ParameterGrp::handle hGrp = App::GetApplication().GetParameterGroupByPath("User parameter:BaseApp/Preferences/Mod/Raytracing");
        int width = hGrp->GetInt("OutputWidth", 800);
        int height = hGrp->GetInt("OutputHeight", 600);

        // call the write method of PovTools....
        out = PovTools::getCamera(CamDef(gpPos,gpDir,gpLookAt,gpUp),width,height);

        // drop our reference to the camera node again before returning
        Cam->unref();

        return Py::new_reference_to(Py::String(out));
    } PY_CATCH;
}
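
A recurring pattern throughout these examples is SbVec3f::getValue(): one overload copies the three components into separate floats, the other returns a read-only pointer to the internal float[3] (as used by pos.getValue()[0] above). A minimal sketch of both overloads, assuming only the Coin3D/Open Inventor SbLinear.h header:

#include <Inventor/SbLinear.h>
#include <cstdio>

int main()
{
    SbVec3f pos(1.0f, 2.0f, 3.0f);

    // Overload 1: copy the components into separate floats.
    float x, y, z;
    pos.getValue(x, y, z);

    // Overload 2: borrow a read-only pointer to the internal float[3].
    const float* v = pos.getValue();

    std::printf("%f %f %f\n", x, y, z);
    std::printf("%f %f %f\n", v[0], v[1], v[2]);
    return 0;
}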
Code Example #2
void METKShowClusteredObjects::setCamPosition(const int stackOrFieldNr, bool isStackNr){
	float fX,fY,fZ;
	if (!isStackNr)
		m_calcVis.getFieldMaxPos(stackOrFieldNr,fX,fY,fZ);
	else
		m_calcVis.getStackMaxPos(stackOrFieldNr,fX,fY,fZ);
    
	/*_resX->setDoubleValue(fX);
	_resY->setDoubleValue(fY);
	_resZ->setDoubleValue(fZ);*/
	std::cout << "_result = " << fX << "," << fY << "," << fZ << std::endl;
	vec3 v(fX,fY,fZ);
	
	_result->setVec3fValue(vec3(fX,fY,fZ));
	
	Cam->setNormPlump(SbVec3f(0.0,0.0,1.0));
	Cam->setUpVecAngle(0.0);
	float fMX,fMY,fMZ,fbla;
	int ibla;
	m_calcVis.getSphereValues(fMX,fMY,fMZ,fbla,ibla);
	Cam->setCamPosition(SbVec3f(fX,fY,fZ),SbVec3f(fMX,fMY,fMZ));
	Cam->setHeight(3.0);
	SbRotation rot=Cam->getOrientation();
	SbVec3f axis;
	float angle,o1,o2,o3;
	rot.getValue(axis,angle);
	axis.getValue(o1,o2,o3);
	_orient->setVec4fValue(vec4(o1,o2,o3,angle));
}
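
The tail of this example decomposes the camera orientation into axis/angle form with SbRotation::getValue() and then unpacks the axis with SbVec3f::getValue(). A standalone sketch of just that decomposition, assuming Coin3D headers (the viewer and field classes used above are project-specific and omitted):

#include <Inventor/SbLinear.h>
#include <cstdio>

int main()
{
    // 90 degrees around the z axis.
    SbRotation rot(SbVec3f(0.0f, 0.0f, 1.0f), 1.5707963f);

    SbVec3f axis;
    float angle;
    rot.getValue(axis, angle);        // axis/angle form

    float ax, ay, az;
    axis.getValue(ax, ay, az);        // unpack the axis components

    std::printf("axis=(%f,%f,%f) angle=%f rad\n", ax, ay, az, angle);
    return 0;
}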
Code Example #3
void STOP_AT_BORDER( SbVec3f pos , float speed)
{
  float posX = 0.0, posY = 0.0, posZ = 0.0;
  pos.getValue( posX, posY, posZ );

  if (posX < -2999)
  {
    user_avatar->setSpeed(0);
  }
  else if (posX > 2999)
  {
    user_avatar->setSpeed(0);
  }
  else if (posZ < -2999)
  {
    user_avatar->setSpeed(0);
  }
  else if (posZ > 2999)
  {
    user_avatar->setSpeed(0);
  }
  else
  {
    user_avatar->setSpeed(speed);
  }
}
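
The chain of comparisons simply checks whether the avatar's x and z coordinates stay inside a ±2999 square. A compact sketch of the same test, assuming Coin3D headers (user_avatar and its setSpeed() are project-specific and omitted here):

#include <Inventor/SbLinear.h>
#include <cstdio>

// True if the x/z components of pos lie inside the +/- limit square.
static bool insideBorder(const SbVec3f& pos, float limit)
{
    float x, y, z;
    pos.getValue(x, y, z);
    return x >= -limit && x <= limit && z >= -limit && z <= limit;
}

int main()
{
    std::printf("%d\n", insideBorder(SbVec3f(0, 0, 0), 2999.0f));      // 1
    std::printf("%d\n", insideBorder(SbVec3f(-3500, 0, 0), 2999.0f));  // 0
    return 0;
}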
Code Example #4
void SoColorShape::storeTriangle(const SbVec3f&  point1, const SbVec3f&  point2, const SbVec3f&  point3, 
                                 const SbVec3f& normal1, const SbVec3f& normal2, const SbVec3f& normal3,
                                 const int& colorIndex1) 
{

   // if the triangle lies completely outside, do not store it!
   if (colorIndex1 != 0) {
      // determine the barycenter and its position in the array
      SbVec3f barycenter;
      float baryX, baryY, baryZ;
      barycenter = point1 - _offSet;
      barycenter += point2 - _offSet;
      barycenter += point3 - _offSet;

      // divide by 3 because it is the sum of 3 vectors
      barycenter = barycenter / 3.0;
      // divide by HASH_PARTITION to partition space into cells
      barycenter = barycenter / float(HASH_PARTITION);
      barycenter.getValue(baryX, baryY, baryZ);

      int arrayPosition;
      arrayPosition = ((int)fabs(baryX)) +
         ((int)fabs(baryY)) * _extentX + 
         ((int)fabs(baryZ)) * _extentX * _extentY;

      // store the corner points and get their indices
      const Vertex *vertex1, *vertex2, *vertex3;
      vertex1 = insertVertex(point1, normal1, colorIndex1, arrayPosition);
      vertex2 = insertVertex(point2, normal2, colorIndex1, arrayPosition);
      vertex3 = insertVertex(point3, normal3, colorIndex1, arrayPosition);

      // find or create the edges
      Edge *edge1, *edge2, *edge3;
      edge3 = generateEdge(vertex1, vertex2, arrayPosition);
      edge1 = generateEdge(vertex2, vertex3, arrayPosition);
      edge2 = generateEdge(vertex3, vertex1, arrayPosition);

      // store the triangle
      Triangle* tri = new Triangle;
      tri->vertex1 = vertex1;
      tri->vertex2 = vertex2;
      tri->vertex3 = vertex3;
      tri->edge1 = edge1;
      tri->edge2 = edge2;
      tri->edge3 = edge3;
      _triangleSet[arrayPosition].insert(tri);

      // assign the triangle to its edges
      if (edge1->triangle1 == 0) edge1->triangle1 = tri;
      else edge1->triangle2 = tri;
      if (edge2->triangle1 == 0) edge2->triangle1 = tri;
      else edge2->triangle2 = tri;
      if (edge3->triangle1 == 0) edge3->triangle1 = tri;
      else edge3->triangle2 = tri;
   }
}
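
The hashing step above reduces a triangle to its barycenter and maps that to a bucket index. A simplified sketch of only that computation, with hypothetical extent and partition constants standing in for the node's members (the Vertex/Edge bookkeeping is omitted):

#include <Inventor/SbLinear.h>
#include <cmath>
#include <cstdio>

// Hypothetical grid parameters; the real node derives these from its data.
static const float kHashPartition = 10.0f;
static const int kExtentX = 8;
static const int kExtentY = 8;

int bucketForTriangle(const SbVec3f& p1, const SbVec3f& p2,
                      const SbVec3f& p3, const SbVec3f& offset)
{
    // Average the three (offset-corrected) corners ...
    SbVec3f barycenter = (p1 - offset) + (p2 - offset) + (p3 - offset);
    barycenter /= 3.0f;
    // ... then quantize into kHashPartition-sized cells.
    barycenter /= kHashPartition;

    float bx, by, bz;
    barycenter.getValue(bx, by, bz);
    return int(std::fabs(bx)) +
           int(std::fabs(by)) * kExtentX +
           int(std::fabs(bz)) * kExtentX * kExtentY;
}

int main()
{
    SbVec3f a(1, 2, 3), b(4, 5, 6), c(7, 8, 9), off(0, 0, 0);
    std::printf("bucket = %d\n", bucketForTriangle(a, b, c, off));
    return 0;
}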
Code Example #5
void SoXipDrawClipPlane::GLRender(SoGLRenderAction* action)
{
    if (on.getValue())
    {
	    SbVec3f vertices[8] = {
		    SbVec3f(-0.25, -0.25, -0.25),
		    SbVec3f(1.25, -0.25, -0.25),
		    SbVec3f(-0.25, 1.25, -0.25),
		    SbVec3f(1.25, 1.25, -0.25),
		    SbVec3f(-0.25, -0.25, 1.25),
		    SbVec3f(1.25, -0.25, 1.25),
		    SbVec3f(-0.25, 1.25, 1.25),
		    SbVec3f(1.25, 1.25, 1.25),
	    };

	    const SbMatrix &transform = boundingBox.getValue();
	    const SbPlane &clipPlane = plane.getValue();

	    for (int i = 0; i < 8; i++) {
		    transform.multVecMatrix(vertices[i], vertices[i]);
	    }

	    float cubeValues[8];
	    for (int i = 0; i < 8; i++) {
		    cubeValues[i] = vertices[i].dot(clipPlane.getNormal()) - clipPlane.getDistanceFromOrigin();
	    }

	    unsigned char cubeIndex = 0;
	    for (int i = 0; i < 8; i++) {
		    cubeIndex <<= 1;
		    cubeIndex |= cubeValues[i] < 0;
	    }

	    SbVec3f v;
	    glBegin(GL_TRIANGLES);
	    int nTriangles = indexList[cubeIndex][0];
	    for (int iTriangle = 0; iTriangle < nTriangles; iTriangle++) {
		    for (int iEdge = 0; iEdge < 3; iEdge++) {
			    int edge = indexList[cubeIndex][3 * iTriangle + iEdge + 1];

			    int n0 = edgeList[edge][0];
			    int n1 = edgeList[edge][1];

			    float interpAmount = cubeValues[n0] / (cubeValues[n0] - cubeValues[n1]);
			    v = vertices[n0] * (1 - interpAmount) + vertices[n1] * interpAmount;
			    glVertex3fv(v.getValue());
		    }
	    }
	    glEnd();
    }
}
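
The classification loop above evaluates each transformed corner against the clip plane as a signed distance, built from getNormal(), getDistanceFromOrigin() and SbVec3f::dot() exactly as in the example. A reduced sketch of that evaluation, assuming Coin3D headers:

#include <Inventor/SbLinear.h>
#include <cstdio>

// Signed distance of a point to a plane, positive on the normal side.
static float signedDistance(const SbPlane& plane, const SbVec3f& p)
{
    return p.dot(plane.getNormal()) - plane.getDistanceFromOrigin();
}

int main()
{
    // Plane z = 0.5 with normal +z.
    SbPlane plane(SbVec3f(0, 0, 1), 0.5f);

    SbVec3f corners[2] = { SbVec3f(0, 0, 0), SbVec3f(0, 0, 1) };
    unsigned char mask = 0;
    for (int i = 0; i < 2; i++) {
        mask <<= 1;
        mask |= signedDistance(plane, corners[i]) < 0;   // "inside" bit
        std::printf("d[%d] = %f\n", i, signedDistance(plane, corners[i]));
    }
    std::printf("mask = %u\n", (unsigned)mask);
    return 0;
}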
Code Example #6
File: IfHasher.cpp  Project: Alexpux/IvTools
int
IfHasher::addVector(const SbVec3f &newVector)
{
    ASSERT(vectorDict != NULL);
    ASSERT(dimension == 3);

    // See if we need to add this one, or is it already there
    int index;
    SbBool notThere = addIfNotThere(newVector.getValue(), index);

    // If it's not there, we need to add it to the field
    if (notThere)
	vectors3[index] = newVector;

    return index;
}
Code Example #7
File: SoRing.cpp  Project: CURG/graspit_bci
void SoRing::rayPick(SoRayPickAction * action)
{
  // Is it pickable ?
  if ( ! shouldRayPick(action) ) return;

  // compute the picking ray in our current object space
  computeObjectSpaceRay(action);

  SoPickedPoint* pp;
  SbVec3f intersection;
  SbPlane XY(SbVec3f(0,0,1),0);
  if ( XY.intersect(action->getLine(), intersection) )
  {
    float x, y, z;
    intersection.getValue(x, y, z);

    // back to the case of a disk centered at (0,0)
    float Xc, Yc;
    center.getValue().getValue(Xc,Yc);
    x -= Xc;
    y -= Yc;

    // within radius ?
    if ( sqrt(x*x+y*y+z*z) > outerRadius.getValue() ) return;
    if ( sqrt(x*x+y*y+z*z) < innerRadius.getValue() ) return;

    // within angular section ?
    float theta = sweepAngle.getValue();
    float angle = atan2(y,x);
    if ( angle < 0. ) angle += 2*M_PI;
    if ( theta != 360 && 
         angle*180.F/M_PI > theta ) return;

    if ( action->isBetweenPlanes(intersection) &&
         (pp = action->addIntersection(intersection)) != NULL )
    {
      pp->setObjectNormal(normal);
      pp->setObjectTextureCoords(SbVec4f(0.5f*(1+cos(angle)),
                                         0.5f*(1+sin(angle)), 0, 1));
    }
  }

}
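
The pick test above boils down to intersecting the pick ray with the z = 0 plane and then reasoning about the hit point in polar form. A reduced sketch of that intersection, assuming Coin3D headers and a hand-built SbLine in place of action->getLine():

#include <Inventor/SbLinear.h>
#include <cmath>
#include <cstdio>

int main()
{
    // Ray from (0.5, 0.5, 5) towards the xy-plane.
    SbLine ray(SbVec3f(0.5f, 0.5f, 5.0f), SbVec3f(0.5f, 0.5f, 0.0f));
    SbPlane xyPlane(SbVec3f(0, 0, 1), 0.0f);   // z = 0

    SbVec3f hit;
    if (xyPlane.intersect(ray, hit)) {
        float x, y, z;
        hit.getValue(x, y, z);
        float radius = std::sqrt(x * x + y * y);
        float angle  = std::atan2(y, x);       // radians, (-pi, pi]
        std::printf("hit=(%f,%f,%f) r=%f angle=%f\n", x, y, z, radius, angle);
    }
    return 0;
}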
Code Example #8
//----------------------------------------------------------------------------------
//! Handle field changes of the field \c field.
//----------------------------------------------------------------------------------
void METKShowClusteredObjects::handleNotification(Field *field){
	if (field == _init){
		init();
	}else if (field == _calc){
		kDebug::Debug("Calc viewpoint for structures ("+_currentStructures->getStringValue()+")",kDebug::DL_NONE);
		std::vector<std::string> structs;
		kBasics::split(_currentStructures->getStringValue(),' ',-1,&structs);
		omObjectContainer* oc = getObjContainer();
		if (oc){
			omObjectContainer::iterator iterObj;
			for (iterObj = oc->begin(); iterObj != oc->end(); iterObj++){
				omObject &obj = iterObj->second;
				if (obj.isValid()){// && obj.getAttribute(LAY_APPEARANCE, obj.hasAttribute(LAY_APPEARANCE, INF_VISIBLE)){
					myObjMgr->setObjAttribute( obj.getID(), LAY_APPEARANCE, INF_SILHOUETTE, new bool(false), omINFOTYPE_BOOL,true,false);
					myObjMgr->setObjAttribute( obj.getID(), LAY_APPEARANCE, INF_SILHOUETTECOLOR, new vec3(0,0,0), omINFOTYPE_VEC3,true,false);
					myObjMgr->setObjAttribute( obj.getID(), LAY_APPEARANCE, INF_SILHOUETTEWIDTH, new double(0.0), omINFOTYPE_DOUBLE,true,false);
				}
			}
		}
		std::string erg=calcPositions(structs);
		myObjMgr->setObjAttribute("CameraSolver","Camera","ClusteringResult",&erg,omINFOTYPE_STRING,true,false);
		myObjMgr->setObjAttribute("CameraSolver","Calculation","Success",new bool(1),omINFOTYPE_BOOL ,true,false);
		sendNotification();
		timerSensor->schedule();
	}

	else if (field == _calcMultiple)
	{		
		kDebug::Debug("Calc viewpoint for multiple objects",kDebug::DL_HIGH);
		calcMultiple();
	}

	else if (field == _showField || field == _sphereMode)
	{
		m_soViewer->setSphereMode(_sphereMode->getEnumValue()+1); // the enum values run 0..1 but sphereMode runs 1..2
		if (_showField->getIntValue()==0) m_soViewer->setDataField(m_calcVis.getField(VIS_FIELD));
		if (_showField->getIntValue()==1) m_soViewer->setDataField(m_calcVis.getField(STA_VIS_FIELD));
		if (_showField->getIntValue()==2) m_soViewer->setDataField(m_calcVis.getField(IMP_FIELD));
		if (_showField->getIntValue()==3) m_soViewer->setDataField(m_calcVis.getField(STA_IMP_FIELD));
		if (_showField->getIntValue()==4) m_soViewer->setDataField(m_calcVis.getField(NUM_FIELD));
		if (_showField->getIntValue()==5) m_soViewer->setDataField(m_calcVis.getField(ENT_FIELD));
		if (_showField->getIntValue()==6) m_soViewer->setDataField(m_calcVis.getField(DIS_FIELD));
		if (_showField->getIntValue()==7) m_soViewer->setDataField(m_calcVis.getField(CAM_FIELD));
		if (_showField->getIntValue()==8) m_soViewer->setDataField(m_calcVis.getField(REG_FIELD));		
		if (_showField->getIntValue()==9) m_soViewer->setDataField(m_calcVis.getField(SIL_FIELD));
		if (_showField->getIntValue()==10) m_soViewer->setDataField(m_calcVis.getField(CENTER_FIELD));
		if (_showField->getIntValue()==11) m_soViewer->setDataField(m_calcVis.getField(SUM_FIELD));
		
		if (_showField->getIntValue()==12) m_soViewer->setDataField(m_calcVis.getStackField(1));
		if (_showField->getIntValue()==13) m_soViewer->setDataField(m_calcVis.getStackField(2));
		if (_showField->getIntValue()==14) m_soViewer->setDataField(m_calcVis.getStackField(3));
		if (_showField->getIntValue()==15) m_soViewer->setDataField(m_calcVis.getStackField(4));
		if (_showField->getIntValue()==16) m_soViewer->setDataField(m_calcVis.getStackField(5));
		m_soViewer->touch();
	}

	else if (field == _dataPath){
		if (_dataPath->getStringValue() != ""){
			float fX,fY,fZ,fR;
			int iDiv;			
			m_calcVis.setData(_dataPath->getStringValue());
			//m_calcVis.logResults();
			if (m_calcVis.hasData()){
				m_calcVis.getSphereValues(fX,fY,fZ,fR,iDiv);
				m_soViewer->createSphere(fX,fY,fZ,fR,iDiv);
				m_soViewer->setSphereMode(2);
				m_vStructures = m_calcVis.getStructureNames ();
				for (int i=0; i<m_vStructures.size(); i++){
					m_calcVis.setImportance((*m_vStructures[i]),getImportance(m_vStructures[i]));
				}
				m_calcVis.setStackSize(10);
				m_calcVis.addVectorRegionToStackField(0,0,0,1,1); // actually redundant, since this can now be set via external fields
				m_calcVis.setStackFieldAsRegionField(0);
			}
			else { std::cout << "!!!!! m_calcVis has NO data" << std::endl; }
		}
	}

	else if (field == _currentStructure)
	{
		m_calcVis.setFocusObject(_currentStructure->getStringValue());
	}

	else if ( field == _currentCam )
	{
		m_calcVis.setCamPos ( _currentCam->getVec3fValue()[0], _currentCam->getVec3fValue()[1], _currentCam->getVec3fValue()[2] );
	}

	else if ( field == _camRange ) { m_calcVis.setCamRange ( _camRange->getDoubleValue() ); }
	else if ( field == _visSta ) { m_calcVis.setVisStability(_visSta->getDoubleValue() ); }
	else if ( field == _impSta ) { m_calcVis.setImpStability(_impSta->getDoubleValue() ); }
	else if ( field == _inspect ) { m_soViewer->inspectPoint(_inspect->getIntValue()); }
	else if ( field == _wVis ) { m_calcVis.setWeight (VIS_FIELD, _wVis->getDoubleValue()); }
	else if ( field == _wImp ) { m_calcVis.setWeight (IMP_FIELD, _wImp->getDoubleValue()); }
	else if ( field == _wNum ) { m_calcVis.setWeight (NUM_FIELD, _wNum->getDoubleValue()); }
	else if ( field == _wEnt ) { m_calcVis.setWeight (ENT_FIELD, _wEnt->getDoubleValue()); }
	else if ( field == _wDis ) { m_calcVis.setWeight (DIS_FIELD, _wDis->getDoubleValue()); }
	else if ( field == _wCam ) { m_calcVis.setWeight (CAM_FIELD, _wCam->getDoubleValue()); }
	else if ( field == _wReg ) { m_calcVis.setWeight (REG_FIELD, _wReg->getDoubleValue()); }
	else if ( field == _wVisSta ) { m_calcVis.setWeight (STA_VIS_FIELD, _wVisSta->getDoubleValue()); }
	else if ( field == _wImpSta ) { m_calcVis.setWeight (STA_IMP_FIELD, _wImpSta->getDoubleValue()); }
	else if ( field == _wSilhouette ) { m_calcVis.setWeight (SIL_FIELD, _wSilhouette->getDoubleValue()); }
	else if ( field == _wImageSpaceCenter ) { m_calcVis.setWeight (CENTER_FIELD, _wImageSpaceCenter->getDoubleValue()); }

	else if ( field == _prefRegionType )
	{
		if (_prefRegionType->getEnumValue()==PR_VECTOR)
			m_calcVis.setPrefRegionType(m_calcVis.PR_VECTOR);
		else
			m_calcVis.setPrefRegionType(m_calcVis.PR_POINT);
	}

	else if ( field == _prefRegionVector )
	{		
		m_calcVis.setPrefRegionVector(_prefRegionVector->getVec3fValue()[0], _prefRegionVector->getVec3fValue()[1], _prefRegionVector->getVec3fValue()[2]);
	}

	else if ( field == _prefRegionRange )
	{
		m_calcVis.setPrefRegionRange(_prefRegionRange->getDoubleValue());
	}

	else if (field == _restrictToRegion )
	{
		m_calcVis.setRestrictToRegion(_restrictToRegion->getBoolValue());
	}

	else if ( field == _calcMin)
	{
		kDebug::Debug("Calc viewpoint for minimal distance",kDebug::DL_HIGH);
		calcMultiple(false); // only the two objects for minimal distance must be in the _multipleStructures field; the sum field is calculated but the viewer cam is not set
		// as a result, the sum field of the two structures is located in StackField 1
		
		//m_calcVis.copyFieldToStack ( SUM_FIELD, 1 );
		//m_calcVis.setFocusObject(_secondStructure->getStringValue());
		//m_calcVis.calc();
		//std::cout << "calc second" << std::endl;
		//m_calcVis.copyFieldToStack ( SUM_FIELD, 2 );
		std::cout << "create region" << std::endl;
		m_calcVis.clearStack(2);
		m_calcVis.addVectorRegionToStackField ( 2, _minDistVec->getVec3fValue()[0], _minDistVec->getVec3fValue()[1], _minDistVec->getVec3fValue()[2],  _minRange->getDoubleValue() );

		//m_calcVis.addStackFields ( 1, 2, 4 );
		//std::cout << "normalize" << std::endl;
		//m_calcVis.getStackField(4)->normalize();

		m_calcVis.clearStack(3);
		m_calcVis.multiplyStackFields ( 1, 2, 3 ); //Multiply the region with the sum field and store result in stack 3		
		std::cout << "final result is in stack 3 (Field 15)" << std::endl;
		m_soViewer->touch();
		setCamPosition(3,true);
		updateObjectMgr();		
		timerSensor->schedule();
	}

	else if (field == _messageData)
	{
		std::cout << "_messageData=" << _messageData->getStringValue() << std::endl;
		vector<string> params;
		kBasics::split(_messageData->getStringValue(),' ',-1,&params);
		std::list<kBasics::optionstruct> options;
		options.push_back(kBasics::optionstruct("structures",-1,'s'));
		options.push_back(kBasics::optionstruct("objects",-1,'o'));
		options.push_back(kBasics::optionstruct("weightvis",1,'v'));
		options.push_back(kBasics::optionstruct("weightreg",1,'r'));
		options.push_back(kBasics::optionstruct("weightcam",1,'c'));
		options.push_back(kBasics::optionstruct("weightent",1,'e'));
		options.push_back(kBasics::optionstruct("weightnum",1,'n'));
		options.push_back(kBasics::optionstruct("weightimp",1,'i'));
		options.push_back(kBasics::optionstruct("weightsta",1,'a'));
		options.push_back(kBasics::optionstruct("thressta",1,'t'));
		options.push_back(kBasics::optionstruct("weightsta2",1,'w'));
		options.push_back(kBasics::optionstruct("thressta2",1,'m'));
		options.push_back(kBasics::optionstruct("weightviewplanedist",1,'p'));
		options.push_back(kBasics::optionstruct("weightsilhouette",1,'h'));
		options.push_back(kBasics::optionstruct("weightimagecenter",1,'g'));
		std::string value;
		std::map<char,std::string> values;
		char c;
		while(-1!=(c=kBasics::getOptions(params,options,value))){
			switch(c){
				case 's':	case 'o':	case 'v':
				case 'r':	case 'c':	case 'e':
				case 'n':	case 'i':	case 'a':
				case 't':	case 'w':	case 'm':
				case 'p':	case 'h':
				case 'g':values[c]=value;break;
			}
		}
		if (!myObjMgr->getObjAttributeString(O_CASEOBJECT, LAY_CASEOBJECT_CASE, INF_CASEOBJECT_CASE_DIRECTORY, path))
		{
			cout<<"FEHLER: Keine Falldaten vorhanden!"<<endl;
			_debug->setStringValue("error2");
			myObjMgr->setObjAttribute("CameraSolver","Calculation","Success",new bool(0),omINFOTYPE_BOOL ,true,false);
			sendNotification();		
			timerSensor->schedule();
		}
		else	
		{
			if (values.size()!=options.size()) 
			{
				cout << "FEHLER: MessageString Format falsch!" << values.size() << "!=" << options.size() << std::endl;
				_debug->setStringValue("error1");
				myObjMgr->setObjAttribute("CameraSolver","Calculation","Success",new bool(0),omINFOTYPE_BOOL ,true,false);
				sendNotification();		
				timerSensor->schedule();
			}
			else
			{				
				SbVec3f tempPos;
				SbVec4f tempOrientVec4;
				bool result;
				result = myObjMgr->getObjAttributeVec3f(_viewerName->getStringValue(), LAY_VIEWER_CAMERA, INF_VIEWER_CAMERA_POSITION, tempPos);
				//result = result && myObjMgr->getObjAttributeVec4f(_viewerName->getStringValue(), LAY_VIEWER_CAMERA, INF_VIEWER_CAMERA_ORIENTATION, tempOrientVec4);
				
				float x,y,z;
				tempPos.getValue(x,y,z);
				m_calcVis.setCamPos( x, y, z );

				if (!result)
				{
					std::cout << "No Camera position found in METK for " << _viewerName->getStringValue() << std::endl;
					timerSensor->schedule();
				}else{
					_currentStructures->setStringValue(values['o']);
					_wVis->setDoubleValue(kBasics::StringToDouble(values['v']));
					_wReg->setDoubleValue(kBasics::StringToDouble(values['r']));
					_wCam->setDoubleValue(kBasics::StringToDouble(values['c']));					
					_wEnt->setDoubleValue(kBasics::StringToDouble(values['e']));
					_wNum->setDoubleValue(kBasics::StringToDouble(values['n']));
					_wImp->setDoubleValue(kBasics::StringToDouble(values['i']));
					_wVisSta->setDoubleValue(kBasics::StringToDouble(values['a']));
					_visSta->setDoubleValue(kBasics::StringToDouble(values['t']));
					_wImpSta->setDoubleValue(kBasics::StringToDouble(values['w']));
					_impSta->setDoubleValue(kBasics::StringToDouble(values['m']));					
					_wDis->setDoubleValue(kBasics::StringToDouble(values['p']));
					_wSilhouette->setDoubleValue(kBasics::StringToDouble(values['h']));
					_wImageSpaceCenter->setDoubleValue(kBasics::StringToDouble(values['g']));
					_calc->notify();
				}

				//New
				//m_soViewer->touch();
				//setCamPosition(SUM_FIELD,false);
			}
		}
	}

	else if (field == getFieldContainer()->getField("inObjectContainer"))
	{
		if (getFieldContainer()->getField("inObjectContainer")->getDestinationField (0) == NULL)
		{
			// connect all inObjectContainer fields within the module!!!
			// this line cost me almost 2 days and a lot of nerves ;-) (borrowed from METKObjContainer)
			getFieldContainer()->getField("inObjectContainer")->attachField(myObjMgr->getFieldContainer()->getField("inObjectContainer"),1);
			getFieldContainer()->getField("inObjectContainer")->attachField(oReceiver->getFieldContainer()->getField("inObjectContainer"),1);
		}	
	}

	else if (field == _debugState)
	{		
		std::cout << "setDebugLevel=" << _debugState->getStringValue() << std::endl;
		kDebug::setDebugLevel(_debugState->getStringValue());
	}

	else if (field == _writeCamToObjMgr)
	{
		writeCamToObjMgr();
		sendNotification();
	}

	inherited::handleNotification(field);
}
Code Example #9
File: Command.cpp  Project: lainegates/FreeCAD
void CmdRaytracingWriteCamera::activated(int iMsg)
{
    const char* ppReturn=0;
    getGuiApplication()->sendMsgToActiveView("GetCamera",&ppReturn);
    if (ppReturn) {
        std::string str(ppReturn);
        if (str.find("PerspectiveCamera") == std::string::npos) {
            int ret = QMessageBox::warning(Gui::getMainWindow(), 
                qApp->translate("CmdRaytracingWriteView","No perspective camera"),
                qApp->translate("CmdRaytracingWriteView","The current view camera is not perspective"
                                " and thus the result of the povray image later might look different to"
                                " what you expect.\nDo you want to continue?"),
                QMessageBox::Yes|QMessageBox::No);
            if (ret != QMessageBox::Yes)
                return;
        }
    }

    SoInput in;
    in.setBuffer((void*)ppReturn,std::strlen(ppReturn));

    SoNode* rootNode;
    SoDB::read(&in,rootNode);

    if (!rootNode || !rootNode->getTypeId().isDerivedFrom(SoCamera::getClassTypeId()))
        throw Base::Exception("CmdRaytracingWriteCamera::activated(): Could not read "
                              "camera information from ASCII stream....\n");

    // root-node returned from SoDB::read() has an initial zero
    // ref-count, so reference it before we start using it to
    // avoid premature destruction.
    SoCamera * Cam = static_cast<SoCamera*>(rootNode);
    Cam->ref();

    SbRotation camrot = Cam->orientation.getValue();

    SbVec3f upvec(0, 1, 0); // init to default up vector
    camrot.multVec(upvec, upvec);

    SbVec3f lookat(0, 0, -1); // init to default view direction vector
    camrot.multVec(lookat, lookat);

    SbVec3f pos = Cam->position.getValue();
    float Dist = Cam->focalDistance.getValue();

    QStringList filter;
    filter << QObject::tr("Povray(*.pov)");
    filter << QObject::tr("All Files (*.*)");
    QString fn = Gui::FileDialog::getSaveFileName(Gui::getMainWindow(), QObject::tr("Export page"), QString(), filter.join(QLatin1String(";;")));
    if (fn.isEmpty()) 
        return;
    std::string cFullName = (const char*)fn.toUtf8();

    // building up the python string
    std::stringstream out;
    out << "Raytracing.writeCameraFile(\"" << strToPython(cFullName) << "\"," 
        << "(" << pos.getValue()[0]    <<"," << pos.getValue()[1]    <<"," << pos.getValue()[2]    <<")," 
        << "(" << lookat.getValue()[0] <<"," << lookat.getValue()[1] <<"," << lookat.getValue()[2] <<")," ;
    lookat *= Dist;
    lookat += pos;
    out << "(" << lookat.getValue()[0] <<"," << lookat.getValue()[1] <<"," << lookat.getValue()[2] <<")," 
        << "(" << upvec.getValue()[0]  <<"," << upvec.getValue()[1]  <<"," << upvec.getValue()[2]  <<") )" ;

    doCommand(Doc,"import Raytracing");
    doCommand(Gui,out.str().c_str());


    // Bring ref-count of root-node back to zero to cause the
    // destruction of the camera.
    Cam->unref();
}
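
Examples #1 and #9 share the same camera decomposition: rotate the default up and view-direction vectors by the camera orientation, then place the look-at point at focalDistance along the view direction. A standalone sketch of just that math, assuming a freshly created SoPerspectiveCamera with default orientation:

#include <Inventor/SoDB.h>
#include <Inventor/nodes/SoPerspectiveCamera.h>
#include <cstdio>

int main()
{
    SoDB::init();

    SoPerspectiveCamera* cam = new SoPerspectiveCamera;
    cam->ref();
    cam->position = SbVec3f(0, 0, 10);
    cam->focalDistance = 10.0f;

    SbRotation camrot = cam->orientation.getValue();

    SbVec3f upvec(0, 1, 0);          // default up vector
    camrot.multVec(upvec, upvec);

    SbVec3f lookdir(0, 0, -1);       // default view direction
    camrot.multVec(lookdir, lookdir);

    // Look-at point = position + focalDistance * view direction.
    SbVec3f lookat = cam->position.getValue() + lookdir * cam->focalDistance.getValue();

    std::printf("lookat = (%f, %f, %f)\n",
                lookat.getValue()[0], lookat.getValue()[1], lookat.getValue()[2]);

    cam->unref();
    return 0;
}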
Code Example #10
bool InventorViewer::computeCorrectFaceNormal(const SoPickedPoint * pick, bool ccw_face, Eigen::Vector3d& normal, int& shapeIdx)
{
    const SoDetail *pickDetail = pick->getDetail();
    if ((pickDetail != NULL) && (pickDetail->getTypeId() == SoFaceDetail::getClassTypeId()))
    {
        // Picked object is a face
        const SoFaceDetail * fd = dynamic_cast<const SoFaceDetail*>(pickDetail);
        if (!fd)
        {
            ROS_ERROR("Could not cast to face detail");
            return false;
        }

        // face index is always 0 with triangle strips
       // ROS_INFO_STREAM("Face index: "<<fd->getFaceIndex());

        if (fd->getNumPoints() < 3)
        {
            ROS_ERROR("Clicked on degenerate face, can't compute normal");
            return false;
        }
        /*else
        {
            ROS_INFO_STREAM("Clicked on face with "<<fd->getNumPoints()<<" points.");
        }*/       
        
        //ROS_INFO("Pick path:");
        //printPath(pick->getPath());

        /*SbVec3f pickNormal = pick->getNormal();
        //SbVec3f _normalObj=pick->getObjectNormal();
        float _x, _y, _z;
        pickNormal.getValue(_x, _y, _z);
        Eigen::Vector3d normalDef = Eigen::Vector3d(_x, _y, _z);
        normal = normalDef;*/

        // ROS_INFO_STREAM("Clicked on face with "<<fd->getNumPoints()<<" points.");

        int p1 = fd->getPoint(0)->getCoordinateIndex();
        int p2 = fd->getPoint(1)->getCoordinateIndex();
        int p3 = fd->getPoint(2)->getCoordinateIndex();

        // ROS_INFO_STREAM("Face part index: "<<fd->getPartIndex());

        // ROS_INFO_STREAM("First 3 coord indices: "<<p1<<", "<<p2<<", "<<p3);

        // Find the coordinate node that is used for the faces.
        // Assume that it's the last SoCoordinate3 node traversed
        // before the picked shape.
        SoSearchAction  searchCoords;
        searchCoords.setType(SoCoordinate3::getClassTypeId());
        searchCoords.setInterest(SoSearchAction::LAST);
        searchCoords.apply(pick->getPath());

        SbVec3f coord1, coord2, coord3;

        shapeIdx=pick->getPath()->getLength()-1;
        //ROS_INFO_STREAM("Len of pick path: "<<shapeIdx);

        if (searchCoords.getPath() == NULL)
        {
            // try to find SoIndexedShape instead
            // ROS_INFO("No SoCoordinate3 node found, looking for SoIndexedShape...");

            searchCoords.setType(SoIndexedShape::getClassTypeId());
            searchCoords.setInterest(SoSearchAction::LAST);
            searchCoords.apply(pick->getPath());

            if (searchCoords.getPath() == NULL)
            {
                ROS_ERROR("Failed to find coordinate node for the picked face. Returning default normal.");
                return false;
            }

            shapeIdx=searchCoords.getPath()->getLength()-1;
            // ROS_INFO_STREAM("Coords at Idx: "<<shapeIdx);

            // ROS_INFO("SearchCoords path:");
            // printPath(searchCoords.getPath());

            SoIndexedShape * vShapeNode = dynamic_cast<SoIndexedShape*>(searchCoords.getPath()->getTail());
            if (!vShapeNode)
            {
                ROS_ERROR("Could not cast SoIndexedShape");
                return false;
            }
            SoVertexProperty * vProp = dynamic_cast<SoVertexProperty*>(vShapeNode->vertexProperty.getValue());
            if (!vProp)
            {
                ROS_ERROR_STREAM("Could not cast SoVertexProperty.");
                return false;
            }
            coord1 = vProp->vertex[p1];
            coord2 = vProp->vertex[p2];
            coord3 = vProp->vertex[p3];
        }
        else
        {
            shapeIdx=searchCoords.getPath()->getLength()-1;
            
            SoCoordinate3 * coordNode = dynamic_cast<SoCoordinate3*>(searchCoords.getPath()->getTail());
            if (!coordNode)
            {
                ROS_ERROR("Could not cast SoCoordinate3");
                return false;
            }
            coord1 = coordNode->point[p1];
            coord2 = coordNode->point[p2];
            coord3 = coordNode->point[p3];
        }

        if (fd->getNumPoints() > 3)
        {
            ROS_WARN_STREAM("Face with " << fd->getNumPoints() <<
                            " points is not a triangle and may lead to wrong normal calculations.");
        }

        /*ROS_INFO_STREAM("Coords "<<p1<<", "<<p2<<", "<<p3);
        float _x, _y, _z;
        coord1.getValue(_x, _y, _z);
        ROS_INFO_STREAM("val1 "<<_x<<", "<<_y<<", "<<_z);
        coord2.getValue(_x, _y, _z);
        ROS_INFO_STREAM("val2 "<<_x<<", "<<_y<<", "<<_z);
        coord3.getValue(_x, _y, _z);
        ROS_INFO_STREAM("val3 "<<_x<<", "<<_y<<", "<<_z);*/

        SbVec3f diff1(coord2.getValue());
        diff1 -= coord1;
        SbVec3f diff2(coord3.getValue());
        diff2 -= coord1;
        SbVec3f cross = diff1.cross(diff2);
        if (!ccw_face) cross = -cross;

        float x, y, z;
        cross.getValue(x, y, z);
        double len = sqrt(x * x + y * y + z * z);
        x /= len;
        y /= len;
        z /= len;

        normal = Eigen::Vector3d(x, y, z);

        return true;
    }
    return false;
}
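
The normal computation at the end of this example is a cross product of two edge vectors followed by normalization. A reduced sketch using SbVec3f::cross() and normalize(), assuming Coin3D headers (the winding flag mirrors the ccw_face parameter above):

#include <Inventor/SbLinear.h>
#include <cstdio>

// Normal of the triangle (p1, p2, p3); flip ccw to change winding convention.
static SbVec3f faceNormal(const SbVec3f& p1, const SbVec3f& p2,
                          const SbVec3f& p3, bool ccw = true)
{
    SbVec3f cross = (p2 - p1).cross(p3 - p1);
    if (!ccw) cross = -cross;
    cross.normalize();               // in-place, returns the previous length
    return cross;
}

int main()
{
    SbVec3f n = faceNormal(SbVec3f(0, 0, 0), SbVec3f(1, 0, 0), SbVec3f(0, 1, 0));
    float x, y, z;
    n.getValue(x, y, z);
    std::printf("normal = (%f, %f, %f)\n", x, y, z);   // expect (0, 0, 1)
    return 0;
}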
Code Example #11
File: InvTelePointer.cpp  Project: nixz/covise
//======================================================================
//
// Description:
//	projection of telepointer
//
//
// Use: private
//======================================================================
void TPHandler::projectTelePointer(TelePointer *, const SbVec3f pos, float aspectRatio,
                                   SbVec3f &intersection, InvExaminerViewer *viewer)
{
    const int xOffset = 61;
    const int yOffset = 33;

    float xs, ys, zs; // normalized screen coordinates
    SbVec3f screen;

    SbVec2s size = viewer->getSize();
    if (viewer->isDecoration())
    {
        // !! Attention: SoXtRenderArea with offset
        size[0] = size[0] - xOffset;
        size[1] = size[1] - yOffset;
    }
    float aspRat = size[0] / (float)size[1];

    // set the aspect ratio explicitly
    tp_camera->aspectRatio.setValue(aspRat);
    // default setting
    tp_camera->height.setValue(2.0);
    // scale height
    tp_camera->scaleHeight(1 / aspRat);

    /*
   float h = tp_camera->height.getValue();
   fprintf(stderr, "Height Camera: %.6f\n", h);
   */

    // get the view volume -> rectangular box
    SbViewVolume viewVolume = tp_camera->getViewVolume();
    // determine mouse position
    viewVolume.projectToScreen(pos, screen);
    screen.getValue(xs, ys, zs);
    /*
   cerr << "xs: " << xs << endl;
   cerr << "ys: " << ys << endl;
   */

    // project the mouse point to a line
    SbVec3f p0, p1;
    viewVolume.projectPointToLine(SbVec2f(xs, ys), p0, p1);

    // take the midpoint of the line as telepointer position
    intersection = (p0 + p1) / 2.0f;

    // adapt telepointer position to the aspect ratio
    if (aspectRatio > 1.0)
    {
        intersection[0] = intersection[0] * aspectRatio / aspRat;
        intersection[1] = intersection[1] * aspectRatio / aspRat;
    }

    if (aspectRatio < 1.0)
    {
        // in this case the aspect ratio submitted to the function
        // (the one belonging to the delivered position) leads to a wrong
        // projection point => apply a local correction to the x and y values
        intersection[0] = intersection[0] / aspRat;
        intersection[1] = intersection[1] / aspRat;
    }

    /*
   float fx,fy,fz;
   intersection.getValue(fx,fy,fz);
   cerr << "TP: (" << fx << "," << fy << "," << fz << ")" << endl;
   */
}
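
The key call here is SbViewVolume::projectToScreen(), which maps a world-space point into normalized [0,1] screen coordinates that getValue() then unpacks. A small sketch with a hand-built orthographic view volume, assuming Coin3D headers:

#include <Inventor/SbLinear.h>
#include <Inventor/SbViewVolume.h>
#include <cstdio>

int main()
{
    SbViewVolume vv;
    // Orthographic volume spanning x,y in [-1,1], depth 1..10.
    vv.ortho(-1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 10.0f);

    SbVec3f world(0.5f, 0.5f, -5.0f);
    SbVec3f screen;
    vv.projectToScreen(world, screen);     // normalized 0..1 coordinates

    float xs, ys, zs;
    screen.getValue(xs, ys, zs);
    std::printf("screen = (%f, %f, %f)\n", xs, ys, zs);
    return 0;
}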
Code Example #12
File: SoMFColor.cpp  Project: OpenXIP/xip-libraries
void
SoMFColor::set1Value(int index, const SbVec3f &vec)
{
    set1Value(index, SbColor(vec.getValue()));
}
Code Example #13
File: SoMFColor.cpp  Project: OpenXIP/xip-libraries
void
SoMFColor::setValue(const SbVec3f &vec)
{
    setValue(SbColor(vec.getValue()));
}
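
Both wrappers rely on SbColor being constructible from the const float* returned by SbVec3f::getValue(). A minimal sketch of that conversion, assuming Coin3D headers:

#include <Inventor/SbColor.h>
#include <Inventor/SbLinear.h>
#include <cstdio>

int main()
{
    SbVec3f vec(0.2f, 0.4f, 0.8f);

    // SbColor has a constructor taking a const float[3], so the pointer
    // overload of getValue() feeds it directly.
    SbColor color(vec.getValue());

    float r, g, b;
    color.getValue(r, g, b);   // SbColor inherits getValue() from SbVec3f
    std::printf("rgb = (%f, %f, %f)\n", r, g, b);
    return 0;
}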
Code Example #14
void SoFCBoundingBox::GLRender (SoGLRenderAction *action)
{
    SbVec3f corner[2], ctr, *vptr;
    SbBool coord, dimension;

    // grab the current state
    //SoState *state = action->getState();

    if (!shouldGLRender(action))
        return;

    // get the latest values from the fields
    corner[0] = minBounds.getValue();
    corner[1] = maxBounds.getValue();
    coord     = coordsOn.getValue();
    dimension = dimensionsOn.getValue();

    // set the coordinates for the LineSet to point to
    vptr = bboxCoords->point.startEditing();
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 3; j++) {
            vptr[i][j] = corner[bBoxVerts[i][j]][j];
        }
    }

    // if coord is true then set the text nodes
    if (coord) {
        ctr = (corner[1] - corner[0]) / 2.0f;
        for (int i = 0; i < 8; i++) {
            // create the string for the text
            std::stringstream str;
            str.precision(2);
            str.setf(std::ios::fixed | std::ios::showpoint);
            str << "(" << vptr[i][0] << "," << vptr[i][1] << "," << vptr[i][2] << ")";

            SoSeparator *sep   = (SoSeparator *)textSep->getChild(i);
            SoTransform *trans = (SoTransform *)sep->getChild(0);

            trans->translation.setValue(vptr[i].getValue());
            SoText2* t = (SoText2 *)sep->getChild(1);
            t->string.setValue(str.str().c_str());
        }

        textSep->ref();
        if (root->findChild(textSep) < 0)
            root->addChild(textSep);
    } else {
        if (root->findChild(textSep) >= 0)
            root->removeChild(textSep);
    }

    // if dimension is true then set the text nodes
    if (dimension) {
        ctr = (corner[1] - corner[0]) / 2.0f;
        for (int i = 0; i < 3; i++) {
            // create the string for the text
            std::stringstream str;
            str.precision(2);
            str.setf(std::ios::fixed | std::ios::showpoint);
            str << (2.0f * ctr[i]);

            SoSeparator *sep   = (SoSeparator *)dimSep->getChild(i);
            SoTransform *trans = (SoTransform *)sep->getChild(0);

            SbVec3f point = corner[0];
            point[i] += ctr[i];
            trans->translation.setValue(point.getValue());
            SoText2* t = (SoText2 *)sep->getChild(1);
            t->string.setValue(str.str().c_str());
        }

        dimSep->ref();
        if (root->findChild(dimSep) < 0)
            root->addChild(dimSep);
    } else {
        if (root->findChild(dimSep) >= 0)
            root->removeChild(dimSep);
    }

    bboxCoords->point.finishEditing();

    // Avoid shading
    SoState * state = action->getState();
    state->push();
    SoLazyElement::setLightModel(state, SoLazyElement::BASE_COLOR);
    root->GLRender(action);
    state->pop();
}
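
The corner loop above builds the eight box vertices by picking, per axis, either the min or the max component according to a vertex table. A simplified sketch of the same indexing scheme, assuming Coin3D headers (the local boxVerts table plays the role of bBoxVerts in the node):

#include <Inventor/SbLinear.h>
#include <cstdio>

int main()
{
    SbVec3f corner[2];
    corner[0] = SbVec3f(-1, -2, -3);   // minBounds
    corner[1] = SbVec3f( 4,  5,  6);   // maxBounds

    // For each of the 8 vertices: 0 picks the min component, 1 the max.
    static const int boxVerts[8][3] = {
        {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0},
        {0,0,1}, {1,0,1}, {1,1,1}, {0,1,1}
    };

    for (int i = 0; i < 8; i++) {
        SbVec3f v;
        for (int j = 0; j < 3; j++)
            v[j] = corner[boxVerts[i][j]][j];
        std::printf("v%d = (%f, %f, %f)\n", i, v[0], v[1], v[2]);
    }
    return 0;
}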