int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    std::vector<vpPoint> point(4);
    point[0].setWorldCoordinates(-0.1, -0.1, 0);
    point[1].setWorldCoordinates( 0.1, -0.1, 0);
    point[2].setWorldCoordinates( 0.1,  0.1, 0);
    point[3].setWorldCoordinates(-0.1,  0.1, 0);

    vpServo task;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4];
    for (unsigned int i = 0; i < 4; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    vpImage<unsigned char> Iint(480, 640, 0);
    vpImage<unsigned char> Iext(480, 640, 0);
#if defined VISP_HAVE_X11
    vpDisplayX displayInt(Iint, 0, 0, "Internal view");
    vpDisplayX displayExt(Iext, 670, 0, "External view");
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt(Iint, 0, 0, "Internal view");
    vpDisplayGDI displayExt(Iext, 670, 0, "External view");
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view");
    vpDisplayOpenCV displayExt(Iext, 670, 0, "External view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    vpHomogeneousMatrix cextMo(0, 0, 3, 0, 0, 0);

    vpWireFrameSimulator sim;
    sim.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    sim.setCameraPositionRelObj(cMo);
    sim.setDesiredCameraPosition(cdMo);
    sim.setExternalCameraPosition(cextMo);
    sim.setInternalCameraParameters(cam);
    sim.setExternalCameraParameters(cam);

    while (1) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (unsigned int i = 0; i < 4; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      sim.setCameraPositionRelObj(cMo);

      vpDisplay::display(Iint);
      vpDisplay::display(Iext);
      sim.getInternalImage(Iint);
      sim.getExternalImage(Iext);
      display_trajectory(Iint, point, cMo, cam);

      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);

      // A click in the internal view to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      vpTime::wait(1000 * robot.getSamplingTime());
    }
    task.kill();
  }
  catch (vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
}
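The wireframe examples above and below call a display_trajectory() helper that is not part of this listing. A minimal sketch of what such a helper could look like, assuming four tracked points and ViSP's vpMeterPixelConversion API (the static per-point history buffer is an illustration, not necessarily the tutorial's exact code):

void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam)
{
  static std::vector<vpImagePoint> traj[4]; // per-point image trajectory history
  for (unsigned int i = 0; i < 4; i++) {
    // Project the 3D point with the current camera pose...
    point[i].project(cMo);
    // ...and convert its normalized coordinates to pixel coordinates
    vpImagePoint cog;
    vpMeterPixelConversion::convertPoint(cam, point[i].get_x(), point[i].get_y(), cog);
    traj[i].push_back(cog);
  }
  // Draw each accumulated trajectory as a green polyline
  for (unsigned int i = 0; i < 4; i++) {
    for (unsigned int j = 1; j < traj[i].size(); j++)
      vpDisplay::displayLine(I, traj[i][j - 1], traj[i][j], vpColor::green);
  }
}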
int main(int argc, const char **argv)
{
  try {
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit(-1);
    }

    // We open a display for the internal camera view, using either X11, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX displayInt;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt;
#endif

    vpImage<unsigned char> Iint(480, 640, 255);

    if (opt_display) {
      // open a display for the visualization
      displayInt.init(Iint, 700, 0, "Internal view");
    }

    vpServo task;

    std::cout << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    std::cout << " Test program for vpServo " << std::endl;
    std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl;
    std::cout << " Simulation " << std::endl;
    std::cout << " task : servo 4 points " << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    std::cout << std::endl;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(-0.05, -0.05, 0.7, vpMath::rad(10), vpMath::rad(10), vpMath::rad(-30));

    // sets the point coordinates in the object frame
    vpPoint point[4];
    point[0].setWorldCoordinates(-0.045, -0.045, 0);
    point[3].setWorldCoordinates(-0.045,  0.045, 0);
    point[2].setWorldCoordinates( 0.045,  0.045, 0);
    point[1].setWorldCoordinates( 0.045, -0.045, 0);

    // computes the point coordinates in the camera frame and its 2D coordinates
    for (unsigned int i = 0; i < 4; i++)
      point[i].track(cMo);

    // sets the current position of the feature points s
    vpFeaturePoint p[4];
    for (unsigned int i = 0; i < 4; i++)
      vpFeatureBuilder::create(p[i], point[i]); // retrieve x, y and Z of the vpPoint structure

    // sets the desired position of the feature points s*
    vpFeaturePoint pd[4];

    // Desired pose
    vpHomogeneousMatrix cdMo(0.0, 0.0, 0.8, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));

    // Projection of the points
    for (unsigned int i = 0; i < 4; i++)
      point[i].track(cdMo);

    for (unsigned int i = 0; i < 4; i++)
      vpFeatureBuilder::create(pd[i], point[i]);

    // define the task
    // - we want an eye-in-hand control law
    // - velocity is computed in the camera frame
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::DESIRED);

    // we want to see a point on a point
    for (unsigned int i = 0; i < 4; i++)
      task.addFeature(p[i], pd[i]);

    // set the gain
    task.setLambda(0.8);

    // Declaration of the robot
    vpSimulatorAfma6 robot(opt_display);

    // Initialise the robot and especially the camera
    robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);

    // Initialise the object for the display part
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);

    // Initialise the position of the object relative to the pose of the robot's camera
    robot.initialiseObjectRelativeToCamera(cMo);

    // Set the desired position (for the display part)
    robot.setDesiredCameraPosition(cdMo);

    // Get the internal robot's camera parameters
    vpCameraParameters cam;
    robot.getCameraParameters(cam, Iint);

    if (opt_display) {
      // Get the internal view
      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      vpDisplay::flush(Iint);
    }

    // Display task information
    task.print();

    unsigned int iter = 0;
    vpTRACE("\t loop");
    while (iter++ < 500) {
      std::cout << "---------------------------------------------" << iter << std::endl;
      vpColVector v;

      // Get the time at the beginning of the loop
      double t = vpTime::measureTimeMs();

      // Get the current pose of the camera
      cMo = robot.get_cMo();

      if (iter == 1) {
        std::cout << "Initial robot position with respect to the object frame:\n";
        cMo.print();
      }

      // new point position
      for (unsigned int i = 0; i < 4; i++) {
        point[i].track(cMo);
        // retrieve x, y and Z of the vpPoint structure
        vpFeatureBuilder::create(p[i], point[i]);
      }

      if (opt_display) {
        // Get the internal view and display it
        vpDisplay::display(Iint);
        robot.getInternalView(Iint);
        vpDisplay::flush(Iint);
      }

      if (opt_display && opt_click_allowed && iter == 1) {
        // suppressed for automated tests
        std::cout << "Click in the internal view window to continue..." << std::endl;
        vpDisplay::getClick(Iint);
      }

      // compute the control law
      v = task.computeControlLaw();

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl;

      // The main loop has a duration of 10 ms at minimum
      vpTime::wait(t, 10);
    }

    // Display task information
    task.print();
    task.kill();

    std::cout << "Final robot position with respect to the object frame:\n";
    cMo.print();

    if (opt_display && opt_click_allowed) {
      // suppressed for automated tests
      std::cout << "Click in the internal view window to end..." << std::endl;
      vpDisplay::getClick(Iint);
    }
    return 0;
  }
  catch (vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    return 1;
  }
}
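Several of these programs share a getOptions() helper, also not reproduced in this listing, that fills opt_click_allowed and opt_display from the command line. A minimal sketch, assuming the usual -c (disable clicks) and -d (disable displays) flags of the ViSP examples; the real helpers are built on vpParseArgv and also print a usage message:

bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
  for (int i = 1; i < argc; i++) {
    std::string arg(argv[i]);
    if (arg == "-c")
      click_allowed = false; // run without waiting for mouse clicks
    else if (arg == "-d")
      display = false;       // run without opening the display windows
    else if (arg == "-h")
      return false;          // the caller exits after a usage message
  }
  return true;
}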
int main(int argc, const char **argv)
{
  bool opt_display = true;
  bool opt_click_allowed = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit(-1);
  }

  vpImage<unsigned char> Iint(512, 512, 0);
  vpImage<unsigned char> Iext(512, 512, 0);

  // We open a window using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX displayInt;
  vpDisplayX displayExt;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK displayInt;
  vpDisplayGTK displayExt;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI displayInt;
  vpDisplayGDI displayExt;
#endif

  if (opt_display) {
    try {
      // Display size is automatically defined by the image (Iint) and (Iext) size
      displayInt.init(Iint, 100, 100, "Internal view");
      displayExt.init(Iext, (int)(130 + Iint.getWidth()), 100, "External view");
      // Display the image.
      // The image class has a member that specifies a pointer towards
      // the display that has been initialized in the display declaration,
      // therefore it is no longer necessary to make a reference to the
      // display variable.
      vpDisplay::display(Iint);
      vpDisplay::display(Iext);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }
    catch (...) {
      vpERROR_TRACE("Error while displaying the image");
      exit(-1);
    }
  }

  vpProjectionDisplay externalview;

  // Set the camera parameters
  double px, py;
  px = py = 600;
  double u0, v0;
  u0 = v0 = 256;
  vpCameraParameters cam(px, py, u0, v0);

  vpServo task;
  vpSimulatorCamera robot;

  // sets the initial camera location
  vpHomogeneousMatrix cMo(-0.2, 0.1, 2, vpMath::rad(5), vpMath::rad(5), vpMath::rad(20));

  vpHomogeneousMatrix wMc, wMo;
  robot.getPosition(wMc);
  wMo = wMc * cMo; // Compute the position of the object in the world frame

  // sets the final camera location (for simulation purpose)
  vpHomogeneousMatrix cMod(0, 0, 1, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));

  // sets the cylinder coordinates in the world frame
  vpCylinder cylinder(0, 1, 0, // direction
                      0, 0, 0, // point of the axis
                      0.1);    // radius

  externalview.insert(cylinder);

  // sets the desired position of the visual feature
  cylinder.track(cMod);
  cylinder.print();

  // Build the desired line features from the cylinder parameters expressed in the image frame
  vpFeatureLine ld[2];
  int i;
  for (i = 0; i < 2; i++)
    vpFeatureBuilder::create(ld[i], cylinder, i);

  // computes the cylinder coordinates in the camera frame and its 2D coordinates
  // sets the current position of the visual feature
  cylinder.track(cMo);
  cylinder.print();

  // Build the current line features from the cylinder parameters expressed in the image frame
  vpFeatureLine l[2];
  for (i = 0; i < 2; i++) {
    vpFeatureBuilder::create(l[i], cylinder, i);
    l[i].print();
  }

  // define the task
  // - we want an eye-in-hand control law
  // - robot is controlled in the camera frame
  task.setServo(vpServo::EYEINHAND_CAMERA);
  task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
  // it can also be interesting to test these possibilities
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);
  // task.setInteractionMatrixType(vpServo::MEAN, vpServo::PSEUDO_INVERSE);
  // task.setInteractionMatrixType(vpServo::DESIRED, vpServo::TRANSPOSE);
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::TRANSPOSE);

  // we want to see 2 lines on 2 lines
  task.addFeature(l[0], ld[0]);
  task.addFeature(l[1], ld[1]);

  // Set the point of view of the external view
  vpHomogeneousMatrix cextMo(0, 0, 6, vpMath::rad(40), vpMath::rad(10), vpMath::rad(60));

  // Display the initial scene
  vpServoDisplay::display(task, cam, Iint);
  externalview.display(Iext, cextMo, cMo, cam, vpColor::red);
  vpDisplay::flush(Iint);
  vpDisplay::flush(Iext);

  // Display task information
  task.print();

  if (opt_display && opt_click_allowed) {
    std::cout << "\n\nClick in the internal camera view window to start..." << std::endl;
    vpDisplay::getClick(Iint);
  }

  // set the gain
  task.setLambda(1);

  // Display task information
  task.print();

  unsigned int iter = 0;
  // The first loop is needed to reach the desired position
  do {
    std::cout << "---------------------------------------------" << iter++ << std::endl;
    vpColVector v;

    // get the robot position
    robot.getPosition(wMc);
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    // new line position:
    // compute the parameters of the cylinder in the camera frame and in the image frame
    cylinder.track(cMo);

    // Build the current line features from the cylinder parameters expressed in the image frame
    for (i = 0; i < 2; i++) {
      vpFeatureBuilder::create(l[i], cylinder, i);
    }

    // Display the current scene
    if (opt_display) {
      vpDisplay::display(Iint);
      vpDisplay::display(Iext);
      vpServoDisplay::display(task, cam, Iint);
      externalview.display(Iext, cextMo, cMo, cam, vpColor::red);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }

    // compute the control law
    v = task.computeControlLaw();

    // send the camera velocity to the controller
    robot.setVelocity(vpRobot::CAMERA_FRAME, v);

    std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl;
  } while ((task.getError()).sumSquare() > 1e-9);

  // The second loop computes the control law while taking the secondary task
  // into account. In this example the secondary task is divided into four steps:
  // - the first step imposes a motion of the robot along the x axis of the
  //   object frame with a velocity of 0.5,
  // - the second step imposes a motion along the y axis with a velocity of 0.5,
  // - the third step imposes a motion along the x axis with a velocity of -0.5,
  // - the last step imposes a motion along the y axis with a velocity of -0.5.
  // Each step lasts 200 iterations.
  vpColVector e1(6);
  e1 = 0;
  vpColVector e2(6);
  e2 = 0;
  vpColVector proj_e1;
  vpColVector proj_e2;
  iter = 0;
  double rapport = 0;
  double vitesse = 0.5;
  unsigned int tempo = 800;

  while (iter < tempo) {
    vpColVector v;

    robot.getPosition(wMc);
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    cylinder.track(cMo);
    for (i = 0; i < 2; i++) {
      vpFeatureBuilder::create(l[i], cylinder, i);
    }

    if (opt_display) {
      vpDisplay::display(Iint);
      vpDisplay::display(Iext);
      vpServoDisplay::display(task, cam, Iint);
      externalview.display(Iext, cextMo, cMo, cam, vpColor::red);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }

    v = task.computeControlLaw();

    if (iter % tempo < 200 /*&& iter % tempo >= 0*/) {
      e2 = 0;
      e1[0] = fabs(vitesse);
      proj_e1 = task.secondaryTask(e1);
      rapport = vitesse / proj_e1[0];
      proj_e1 *= rapport;
      v += proj_e1;
    }

    if (iter % tempo < 400 && iter % tempo >= 200) {
      e1 = 0;
      e2[1] = fabs(vitesse);
      proj_e2 = task.secondaryTask(e2);
      rapport = vitesse / proj_e2[1];
      proj_e2 *= rapport;
      v += proj_e2;
    }

    if (iter % tempo < 600 && iter % tempo >= 400) {
      e2 = 0;
      e1[0] = -fabs(vitesse);
      proj_e1 = task.secondaryTask(e1);
      rapport = -vitesse / proj_e1[0];
      proj_e1 *= rapport;
      v += proj_e1;
    }

    if (iter % tempo < 800 && iter % tempo >= 600) {
      e1 = 0;
      e2[1] = -fabs(vitesse);
      proj_e2 = task.secondaryTask(e2);
      rapport = -vitesse / proj_e2[1];
      proj_e2 *= rapport;
      v += proj_e2;
    }

    robot.setVelocity(vpRobot::CAMERA_FRAME, v);

    std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl;
    iter++;
  }

  if (opt_display && opt_click_allowed) {
    std::cout << "\nClick in the internal camera view window to end..." << std::endl;
    vpDisplay::getClick(Iint);
  }

  // Display task information
  task.print();
  task.kill();
}
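The calls to task.secondaryTask(e1) in the loop above follow the classical redundancy framework: the secondary term is projected onto the null space of the main task so that it does not disturb the regulation of s - s*. Written as an equation (a standard formulation; the exact projection operator ViSP applies may differ in details such as the large-projection variant):

\[
  v \;=\; -\lambda\, \widehat{L_s}^{+}\,(s - s^{*}) \;+\; \bigl(I_6 - \widehat{L_s}^{+}\widehat{L_s}\bigr)\, e_2 ,
\]

where \widehat{L_s}^{+} is the pseudo-inverse of the interaction matrix and (I_6 - \widehat{L_s}^{+}\widehat{L_s}) is the null-space projection operator. The rapport scaling in the code simply renormalizes the projected vector so that the requested axis keeps the desired velocity magnitude.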
int main(int argc, const char **argv)
{
  try {
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit(-1);
    }

    // We open two displays, one for the internal camera view, the other one for
    // the external view, using either X11, GTK, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX displayInt;
    vpDisplayX displayExt;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK displayInt;
    vpDisplayGTK displayExt;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt;
    vpDisplayGDI displayExt;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt;
    vpDisplayOpenCV displayExt;
#endif

    // open a display for the visualization
    vpImage<unsigned char> Iint(300, 300, 0);
    vpImage<unsigned char> Iext(300, 300, 0);

    if (opt_display) {
      displayInt.init(Iint, 0, 0, "Internal view");
      displayExt.init(Iext, 330, 0, "External view");
    }

    vpProjectionDisplay externalview;

    double px, py;
    px = py = 500;
    double u0, v0;
    u0 = 150; v0 = 160;
    vpCameraParameters cam(px, py, u0, v0);

    int i;
    vpServo task;
    vpSimulatorCamera robot;

    std::cout << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    std::cout << " Test program for vpServo " << std::endl;
    std::cout << " Eye-in-hand task control, articular velocities are computed" << std::endl;
    std::cout << " Simulation " << std::endl;
    std::cout << " task : servo 4 points " << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    std::cout << std::endl;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(-0.1, -0.1, 1, vpMath::rad(40), vpMath::rad(10), vpMath::rad(60));

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    vpHomogeneousMatrix cextMo(0, 0, 2, 0, 0, 0);

    // sets the point coordinates in the object frame
    vpPoint point[4];
    point[0].setWorldCoordinates(-0.1, -0.1, 0);
    point[1].setWorldCoordinates( 0.1, -0.1, 0);
    point[2].setWorldCoordinates( 0.1,  0.1, 0);
    point[3].setWorldCoordinates(-0.1,  0.1, 0);

    for (i = 0; i < 4; i++)
      externalview.insert(point[i]);

    // computes the point coordinates in the camera frame and its 2D coordinates
    for (i = 0; i < 4; i++)
      point[i].track(cMo);

    // sets the current position of the feature points s
    vpFeaturePoint p[4];
    for (i = 0; i < 4; i++)
      vpFeatureBuilder::create(p[i], point[i]); // retrieve x, y and Z of the vpPoint structure

    // sets the desired position of the feature points s*
    vpFeaturePoint pd[4];
    pd[0].buildFrom(-0.1, -0.1, 1);
    pd[1].buildFrom( 0.1, -0.1, 1);
    pd[2].buildFrom( 0.1,  0.1, 1);
    pd[3].buildFrom(-0.1,  0.1, 1);

    // define the task
    // - we want an eye-in-hand control law
    // - articular velocities are computed
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::MEAN);

    // Set the position of the camera in the end-effector frame
    vpHomogeneousMatrix cMe;
    vpVelocityTwistMatrix cVe(cMe);
    task.set_cVe(cVe);

    // Set the Jacobian (expressed in the end-effector frame)
    vpMatrix eJe;
    robot.get_eJe(eJe);
    task.set_eJe(eJe);

    // we want to see a point on a point
    for (i = 0; i < 4; i++)
      task.addFeature(p[i], pd[i]);

    // set the gain
    task.setLambda(1);

    // Display task information
    task.print();

    unsigned int iter = 0;
    // loop
    while (iter++ < 200) {
      std::cout << "---------------------------------------------" << iter << std::endl;
      vpColVector v;

      // Set the Jacobian (expressed in the end-effector frame);
      // since q is modified, eJe is modified
      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // get the robot position
      robot.getPosition(wMc);
      // Compute the position of the camera wrt the object frame
      cMo = wMc.inverse() * wMo;

      // update the new point positions and the corresponding features
      for (i = 0; i < 4; i++) {
        point[i].track(cMo); // retrieve x, y and Z of the vpPoint structure
        vpFeatureBuilder::create(p[i], point[i]);
      }
      // since the vpServo::MEAN interaction matrix is used, we also need to
      // update the desired features at each iteration
      pd[0].buildFrom(-0.1, -0.1, 1);
      pd[1].buildFrom( 0.1, -0.1, 1);
      pd[2].buildFrom( 0.1,  0.1, 1);
      pd[3].buildFrom(-0.1,  0.1, 1);

      if (opt_display) {
        vpDisplay::display(Iint);
        vpDisplay::display(Iext);
        vpServoDisplay::display(task, cam, Iint);
        externalview.display(Iext, cextMo, cMo, cam, vpColor::green);
        vpDisplay::flush(Iint);
        vpDisplay::flush(Iext);
      }

      // compute the control law
      v = task.computeControlLaw();

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl;
    }

    // Display task information
    task.print();
    task.kill();

    std::cout << "Final robot position with respect to the object frame:\n";
    cMo.print();

    if (opt_display && opt_click_allowed) {
      // suppressed for automated tests
      std::cout << "\n\nClick in the internal view window to end..." << std::endl;
      vpDisplay::getClick(Iint);
    }
    return 0;
  }
  catch (vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    return 1;
  }
}
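With vpServo::EYEINHAND_L_cVe_eJe the task is parameterized so that joint velocities can be computed: the interaction matrix is composed with the camera-to-end-effector velocity twist transformation and the robot Jacobian. The corresponding classical control law (the standard IBVS formulation consistent with the set_cVe()/set_eJe() calls above) is:

\[
  \dot{q} \;=\; -\lambda\,\bigl(\widehat{L_s}\;{}^{c}V_{e}\;{}^{e}J_{e}\bigr)^{+}\,(s - s^{*}) .
\]

For the free-flying vpSimulatorCamera used here, the articular and camera velocities presumably coincide (cMe is the identity and the camera is the only "joint"), which would explain why the result can still be sent in vpRobot::CAMERA_FRAME.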
int main()
{
#if defined(VISP_HAVE_PTHREAD)
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    /*
      Top view of the world frame, the camera frame and the object frame

      world, also robot base frame :  --> w_y
                                      |
                                     \|/
                                      w_x

      object :          o_y
                        /|\
                         |
                 o_x <---

      camera :          c_y
                        /|\
                         |
                 c_x <---
    */
    vpHomogeneousMatrix wMo(vpTranslationVector(0.40, 0, -0.15),
                            vpRotationMatrix(vpRxyzVector(-M_PI, 0, M_PI/2.)));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1, -0.1, 0) );
    point.push_back( vpPoint( 0.1, -0.1, 0) );
    point.push_back( vpPoint( 0.1,  0.1, 0) );
    point.push_back( vpPoint(-0.1,  0.1, 0) );

    vpServo task;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4];
    for (unsigned int i = 0; i < 4; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpSimulatorViper850 robot(true);
    robot.setVerbose(true);

    // Enlarge the default joint limits
    vpColVector qmin = robot.getJointMin();
    vpColVector qmax = robot.getJointMax();
    qmin[0] = -vpMath::rad(180);
    qmax[0] =  vpMath::rad(180);
    qmax[1] =  vpMath::rad(0);
    qmax[2] =  vpMath::rad(270);
    qmin[4] = -vpMath::rad(180);
    qmax[4] =  vpMath::rad(180);
    robot.setJointLimit(qmin, qmax);

    std::cout << "Robot joint limits: " << std::endl;
    for (unsigned int i = 0; i < qmin.size(); i++)
      std::cout << "Joint " << i << ": min " << vpMath::deg(qmin[i])
                << " max " << vpMath::deg(qmax[i]) << " (deg)" << std::endl;

    robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    robot.set_fMo(wMo);
    bool ret = true;
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
    ret =
#endif
        robot.initialiseCameraRelativeToObject(cMo);
    if (ret == false)
      return 0; // Not able to set the position
    robot.setDesiredCameraPosition(cdMo);
    // We modify the default external camera position
    robot.setExternalCameraPosition(vpHomogeneousMatrix(vpTranslationVector(-0.4, 0.4, 2),
                                                        vpRotationMatrix(vpRxyzVector(M_PI/2, 0, 0))));

    vpImage<unsigned char> Iint(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 700, 0, "Internal view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    // Modify the camera parameters to match those used in the other simulations
    robot.setCameraParameters(cam);

    bool start = true;
    //for ( ; ; )
    for (int iter = 0; iter < 275; iter++) {
      cMo = robot.get_cMo();
      for (unsigned int i = 0; i < 4; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      if (!start) {
        display_trajectory(Iint, point, cMo, cam);
        vpDisplay::displayText(Iint, 40, 120, "Click to stop the servo...", vpColor::red);
      }
      vpDisplay::flush(Iint);

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      // A click to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      if (start) {
        start = false;
        v = 0;
        robot.setVelocity(vpRobot::CAMERA_FRAME, v);
        vpDisplay::displayText(Iint, 40, 120, "Click to start the servo...", vpColor::blue);
        vpDisplay::flush(Iint);
        //vpDisplay::getClick(Iint);
      }
      vpTime::wait(1000 * robot.getSamplingTime());
    }
    task.kill();
  }
  catch (vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
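A constant gain of 0.5 makes the simulated Viper850 slow down considerably near convergence. One common variant, sketched here under the assumption that the installed ViSP provides vpAdaptiveGain (recent releases do; the three constructor arguments are the gain at zero error, the gain at infinity and the slope at zero), replaces the fixed lambda:

// Hypothetical tuning values, shown only to illustrate the call
vpAdaptiveGain lambda(4, 0.4, 30); // gain at zero, gain at infinity, slope at zero
task.setLambda(lambda);            // instead of task.setLambda(0.5)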
int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1, -0.1, 0) );
    point.push_back( vpPoint( 0.1, -0.1, 0) );
    point.push_back( vpPoint( 0.1,  0.1, 0) );
    point.push_back( vpPoint(-0.1,  0.1, 0) );

    vpServo task;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4];
    for (unsigned int i = 0; i < 4; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    vpImage<unsigned char> Iint(480, 640, 255);
    vpImage<unsigned char> Iext(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 0, 0, "Internal view");
    vpDisplayX displayExt(Iext, 670, 0, "External view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 0, 0, "Internal view");
    vpDisplayGDI displayExt(Iext, 670, 0, "External view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view");
    vpDisplayOpenCV displayExt(Iext, 670, 0, "External view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

#if defined(VISP_HAVE_DISPLAY)
    vpProjectionDisplay externalview;
    for (unsigned int i = 0; i < 4; i++)
      externalview.insert(point[i]);
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    vpHomogeneousMatrix cextMo(0, 0, 3, 0, 0, 0);

    while (1) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (unsigned int i = 0; i < 4; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      vpDisplay::display(Iint);
      vpDisplay::display(Iext);
      display_trajectory(Iint, point, cMo, cam);

      vpServoDisplay::display(task, cam, Iint, vpColor::green, vpColor::red);
#if defined(VISP_HAVE_DISPLAY)
      externalview.display(Iext, cextMo, cMo, cam, vpColor::red, true);
#endif
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);

      // A click to exit
      if (vpDisplay::getClick(Iint, false) || vpDisplay::getClick(Iext, false))
        break;

      vpTime::wait(robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch (vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
}
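All the eye-in-hand examples of this section regulate the same kind of task. The velocity returned by computeControlLaw() and sent to the simulated camera corresponds to the classical IBVS law (standard formulation):

\[
  v_c \;=\; -\lambda\, \widehat{L_s}^{+}\, e, \qquad e = s - s^{*} ,
\]

with \lambda the gain set through setLambda() and \widehat{L_s} the interaction matrix selected by setInteractionMatrixType() (here the CURRENT one, updated at each iteration from the tracked points).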