Example No. 1
int test(double x,double y,double z,double alpha){
  // Initial pose
  vpHomogeneousMatrix cMo(x, y, z, vpMath::rad(0), vpMath::rad(0), alpha);
  // Desired pose
  vpHomogeneousMatrix cdMo(0.0, 0.0, 1.0, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));

  //source and destination objects for moment manipulation
  vpMomentObject src(6);
  vpMomentObject dst(6);

  //init and run the simulation
  initScene(cMo, cdMo, src, dst); //initialize graphical scene (for interface)

  vpMatrix mat = execute(cMo, cdMo, src, dst);

  if(fabs(mat[0][0]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[0][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[0][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[1][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[1][1]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[1][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[2][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[2][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[2][2]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[2][5]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[3][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[3][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[3][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[3][5]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[4][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[4][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[4][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[4][5]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[5][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[5][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[5][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[5][5]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  return 0;
}
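The sixteen comparisons above repeat the same tolerance test against entries of the interaction matrix. As a design note, the test could be factored into a small helper; the name approxEqual is ours, purely illustrative:

static bool approxEqual(double value, double expected)
{
  // Same tolerance as used above: machine epsilon scaled by 1e10
  return fabs(value - expected) <= std::numeric_limits<double>::epsilon() * 1e10;
}
// Usage: if (!approxEqual(mat[0][0], -1)) return -1;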
int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    std::vector<vpPoint> point(4) ;
    point[0].setWorldCoordinates(-0.1,-0.1, 0);
    point[1].setWorldCoordinates( 0.1,-0.1, 0);
    point[2].setWorldCoordinates( 0.1, 0.1, 0);
    point[3].setWorldCoordinates(-0.1, 0.1, 0);

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    vpImage<unsigned char> Iint(480, 640, 0) ;
    vpImage<unsigned char> Iext(480, 640, 0) ;
#if defined VISP_HAVE_X11
    vpDisplayX displayInt(Iint, 0, 0, "Internal view");
    vpDisplayX displayExt(Iext, 670, 0, "External view");
#elif  defined VISP_HAVE_GDI
    vpDisplayGDI displayInt(Iint, 0, 0, "Internal view");
    vpDisplayGDI displayExt(Iext, 670, 0, "External view");
#elif  defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view");
    vpDisplayOpenCV displayExt(Iext, 670, 0, "External view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    vpHomogeneousMatrix cextMo(0,0,3, 0,0,0);

    vpWireFrameSimulator sim;
    sim.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    sim.setCameraPositionRelObj(cMo);
    sim.setDesiredCameraPosition(cdMo);
    sim.setExternalCameraPosition(cextMo);
    sim.setInternalCameraParameters(cam);
    sim.setExternalCameraParameters(cam);

    while(1) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      sim.setCameraPositionRelObj(cMo);

      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;

      sim.getInternalImage(Iint);
      sim.getExternalImage(Iext);

      display_trajectory(Iint, point, cMo, cam);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);

      // A click in the internal view to exit
      if (vpDisplay::getClick(Iint, false))
        break;
      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
}
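This example (and the later wire-frame and projection-display examples) calls a display_trajectory() helper that the listing does not include. A minimal sketch, modeled on the companion ViSP IBVS tutorials (treat the exact body as an assumption): it projects the four 3D points at the current pose and draws each point's image trajectory.

void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam)
{
  static std::vector<vpImagePoint> traj[4];
  vpImagePoint cog;
  for (unsigned int i = 0; i < 4; i++) {
    // Project the 3D point at the current camera pose and convert to pixel coordinates
    point[i].project(cMo);
    vpMeterPixelConversion::convertPoint(cam, point[i].get_x(), point[i].get_y(), cog);
    traj[i].push_back(cog);
  }
  for (unsigned int i = 0; i < 4; i++) {
    // Draw the trajectory of each point as a green polyline
    for (unsigned int j = 1; j < traj[i].size(); j++)
      vpDisplay::displayLine(I, traj[i][j-1], traj[i][j], vpColor::green);
  }
}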
int
main(int argc, const char ** argv)
{
  // Read the command line options
  if (getOptions(argc, argv) == false) {
    exit (-1);
  }

  // Log file creation in /tmp/$USERNAME/log.dat
  // This file contains by line:
  // - the 6 computed camera velocities (m/s, rad/s) to achieve the task
  // - the 6 values of s - s*
  std::string username;
  // Get the user login name
  vpIoTools::getUserName(username);

  // Create a log filename to save velocities...
  std::string logdirname;
#ifdef WIN32
  logdirname ="C:/temp/" + username;
#else
  logdirname ="/tmp/" + username;
#endif
  // Test if the output path exists. If not, try to create it
  if (vpIoTools::checkDirectory(logdirname) == false) {
    try {
      // Create the dirname
      vpIoTools::makeDirectory(logdirname);
    }
    catch (...) {
      std::cerr << std::endl
		<< "ERROR:" << std::endl;
      std::cerr << "  Cannot create " << logdirname << std::endl;
      exit(-1);
    }
  }
  std::string logfilename;
  logfilename = logdirname + "/log.dat";

  // Open the log file name
  std::ofstream flog(logfilename.c_str());

  vpServo task ;
  vpRobotCamera robot ;

  std::cout << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << " Test program for vpServo "  <<std::endl ;
  std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
  std::cout << " Simulation " << std::endl ;
  std::cout << " task :  3D visual servoing " << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << std::endl ;


  // Sets the initial camera location
  vpPoseVector c_r_o(// Translation tx,ty,tz
		     0.1, 0.2, 2, 
		     // ThetaU rotation 
		     vpMath::rad(20), vpMath::rad(10),  vpMath::rad(50) ) ;
  
  // From the camera pose build the corresponding homogeneous matrix
  vpHomogeneousMatrix cMo(c_r_o) ;

  // Set the robot initial position
  robot.setPosition(cMo) ;

  // Sets the desired camera location
  vpPoseVector cd_r_o(// Translation tx,ty,tz
		      0, 0, 1, 
		      // ThetaU rotation 
		      vpMath::rad(0),vpMath::rad(0),vpMath::rad(0)) ; 
  // From the camera desired pose build the corresponding homogeneous matrix
  vpHomogeneousMatrix cdMo(cd_r_o) ;

  // Compute the homogeneous transformation from the desired camera position to the initial one
  vpHomogeneousMatrix cdMc ;
  cdMc = cdMo*cMo.inverse() ;

  // Build the current visual features s = (c*t_c, thetaU(c*R_c))^T
  vpFeatureTranslation t(vpFeatureTranslation::cdMc) ;
  vpFeatureThetaU tu(vpFeatureThetaU::cdRc); // current feature
  t.buildFrom(cdMc) ;
  tu.buildFrom(cdMc) ;

  // Sets the desired features (always zero!) since s is the
  // displacement that the camera has to achieve. Here s* = (0, 0)^T
  vpFeatureTranslation td(vpFeatureTranslation::cdMc) ;
  vpFeatureThetaU tud(vpFeatureThetaU::cdRc); // desired feature

  // Define the task
  // - we want an eye-in-hand control law
  // - the robot is controlled in the camera frame
  task.setServo(vpServo::EYEINHAND_CAMERA) ;
  // - we use here the interaction matrix computed with the 
  //   current features
  task.setInteractionMatrixType(vpServo::CURRENT);

  // Add the current and desired visual features
  task.addFeature(t,td) ;   // 3D translation
  task.addFeature(tu,tud) ; // 3D rotation

  // - set the constant gain to 1.0
  task.setLambda(1) ;

  // Display task information
  task.print() ;

  int iter=0 ;
  // Start the visual servoing loop. We stop the servo after 200 iterations
  while(iter++ < 200) {
    std::cout << "-----------------------------------" << iter <<std::endl ;
    vpColVector v ;

    // get the robot position
    robot.getPosition(cMo) ;

    // new displacement to achieve
    cdMc = cdMo*cMo.inverse() ;

    // Update the current visual features
    t.buildFrom(cdMc) ;
    tu.buildFrom(cdMc) ;

    // Compute the control law
    v = task.computeControlLaw() ;

    // Display task information
    if (iter==1) task.print() ;

    // Send the camera velocity to the controller
    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
      
    // Retrieve the error 
    std::cout << task.error.sumSquare() <<std::endl ;
    
    // Save log
    flog << v.t() << " " << task.error.t() << std::endl;
  }
  // Display task information
  task.print() ;

  // Kill the task
  task.kill();

  // Close the log file
  flog.close();
}
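This example and the last one in this section call a getOptions() helper that is not reproduced in the listing. A minimal sketch under the assumption that only -h matters here (the real ViSP test programs parse more command line options):

bool getOptions(int argc, const char **argv)
{
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0] << " [-h]" << std::endl;
      return false; // the caller exits
    }
    std::cerr << "Unknown option: " << argv[i] << std::endl;
    return false;
  }
  return true;
}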
int
main(int argc, const char ** argv)
{
  try {
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit (-1);
    }

    // We open a display for the internal camera view,
    // using either X11, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX displayInt;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt;
#endif

    vpImage<unsigned char> Iint(480, 640, 255);

    if (opt_display) {
      // open a display for the visualization
      displayInt.init(Iint,700,0, "Internal view") ;
    }

    vpServo task;

    std::cout << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, articular velocity are computed"
              << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo 4 points " << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(-0.05,-0.05,0.7,
                            vpMath::rad(10),  vpMath::rad(10),  vpMath::rad(-30));

    // sets the point coordinates in the object frame
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-0.045,-0.045,0) ;
    point[3].setWorldCoordinates(-0.045,0.045,0) ;
    point[2].setWorldCoordinates(0.045,0.045,0) ;
    point[1].setWorldCoordinates(0.045,-0.045,0) ;

    // computes the point coordinates in the camera frame and its 2D coordinates
    for (unsigned int i = 0 ; i < 4 ; i++)
      point[i].track(cMo) ;

    // sets the current position of the feature point s
    vpFeaturePoint p[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i],point[i])  ;  //retrieve x,y and Z of the vpPoint structure

    // sets the desired position of the feature point s*
    vpFeaturePoint pd[4] ;

    // Desired pose
    vpHomogeneousMatrix cdMo(0.0, 0.0, 0.8, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));

    // Projection of the points
    for (unsigned int  i = 0 ; i < 4 ; i++)
      point[i].track(cdMo);

    for (unsigned int  i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(pd[i], point[i]);

    // define the task
    // - we want an eye-in-hand control law
    // - the velocity is computed in the camera frame
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::DESIRED) ;

    // we want to see a point on a point
    for (unsigned int i = 0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // set the gain
    task.setLambda(0.8) ;

    // Declaration of the robot
    vpSimulatorAfma6 robot(opt_display);

    // Initialise the robot and especially the camera
    robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);

    // Initialise the object for the display part
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);

    // Initialise the position of the object relative to the pose of the robot's camera
    robot.initialiseObjectRelativeToCamera(cMo);

    // Set the desired position (for the display part)
    robot.setDesiredCameraPosition(cdMo);

    // Get the internal robot's camera parameters
    vpCameraParameters cam;
    robot.getCameraParameters(cam,Iint);

    if (opt_display)
    {
      //Get the internal view
      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      vpDisplay::flush(Iint);
    }

    // Display task information
    task.print() ;

    unsigned int iter=0 ;
    vpTRACE("\t loop") ;
    while(iter++<500)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // Get the Time at the beginning of the loop
      double t = vpTime::measureTimeMs();

      // Get the current pose of the camera
      cMo = robot.get_cMo();

      if (iter==1) {
        std::cout <<"Initial robot position with respect to the object frame:\n";
        cMo.print();
      }

      // new point position
      for (unsigned int i = 0 ; i < 4 ; i++)
      {
        point[i].track(cMo) ;
        // retrieve x,y and Z of the vpPoint structure
        vpFeatureBuilder::create(p[i],point[i])  ;
      }

      if (opt_display)
      {
        // Get the internal view and display it
        vpDisplay::display(Iint) ;
        robot.getInternalView(Iint);
        vpDisplay::flush(Iint);
      }

      if (opt_display && opt_click_allowed && iter == 1)
      {
        // suppressed for automated tests
        std::cout << "Click in the internal view window to continue..." << std::endl;
        vpDisplay::getClick(Iint) ;
      }

      // compute the control law
      v = task.computeControlLaw() ;

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || " << ( task.getError() ).sumSquare() <<std::endl ;

      // The main loop has a duration of 10 ms at minimum
      vpTime::wait(t,10);
    }

    // Display task information
    task.print() ;
    task.kill();

    std::cout <<"Final robot position with respect to the object frame:\n";
    cMo.print();

    if (opt_display && opt_click_allowed)
    {
      // suppressed for automated tests
      std::cout << "Click in the internal view window to end..." << std::endl;
      vpDisplay::getClick(Iint) ;
    }
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    return 1;
  }
}
int main()
{
#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    vpImage<unsigned char> I(480, 640, 255);
    vpCameraParameters cam(840, 840, I.getWidth()/2, I.getHeight()/2);

    std::vector<vpPoint> point(4) ;
    point[0].setWorldCoordinates(-0.1,-0.1, 0);
    point[1].setWorldCoordinates( 0.1,-0.1, 0);
    point[2].setWorldCoordinates( 0.1, 0.1, 0);
    point[3].setWorldCoordinates(-0.1, 0.1, 0);

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpVirtualGrabber g("./target_square.pgm", cam);
    g.acquire(I, cMo);

#if defined(VISP_HAVE_X11)
    vpDisplayX d(I, 0, 0, "Current camera view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I, 0, 0, "Current camera view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I, 0, 0, "Current camera view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpDisplay::display(I);
    vpDisplay::displayText(I, 10, 10,
                           "Click in the 4 dots to initialise the tracking and start the servo",
                           vpColor::red);
    vpDisplay::flush(I);

    vpFeaturePoint p[4], pd[4];
    std::vector<vpDot2> dot(4);

    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);

      dot[i].setGraphics(true);
      dot[i].initTracking(I);
      vpDisplay::flush(I);
      vpFeatureBuilder::create(p[i], cam, dot[i].getCog());

      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    for (; ; ) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;

      g.acquire(I, cMo);

      vpDisplay::display(I);

      for (unsigned int i = 0 ; i < 4 ; i++) {
        dot[i].track(I);
        vpFeatureBuilder::create(p[i], cam, dot[i].getCog());

        vpColVector cP;
        point[i].changeFrame(cMo, cP) ;
        p[i].set_Z(cP[2]);
      }

      vpColVector v = task.computeControlLaw();

      display_trajectory(I, dot);
      vpServoDisplay::display(task, cam, I, vpColor::green, vpColor::red) ;
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;

      vpTime::wait( robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
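The example above depends on two helpers defined in the companion tutorial file (tutorial-ibvs-4pts-image-tracking.cpp in ViSP) rather than in this listing. The sketches below follow that tutorial; treat the details as assumptions. vpVirtualGrabber renders a flat 20 cm square target image as seen from a given camera pose using vpImageSimulator, and display_trajectory() draws the trajectory of the tracked dots.

class vpVirtualGrabber
{
public:
  vpVirtualGrabber(const std::string &filename, const vpCameraParameters &cam)
    : sim_(), target_(), cam_(cam)
  {
    // 3D coordinates of the corners of the 20 cm by 20 cm target
    for (int i = 0; i < 4; i++) X_[i].resize(3);
    X_[0][0] = -0.1; X_[0][1] = -0.1; X_[0][2] = 0; // top left
    X_[1][0] =  0.1; X_[1][1] = -0.1; X_[1][2] = 0; // top right
    X_[2][0] =  0.1; X_[2][1] =  0.1; X_[2][2] = 0; // bottom right
    X_[3][0] = -0.1; X_[3][1] =  0.1; X_[3][2] = 0; // bottom left

    vpImageIo::read(target_, filename); // load the target image

    sim_.setInterpolationType(vpImageSimulator::BILINEAR_INTERPOLATION);
    sim_.init(target_, X_);
  }
  // Render the target as seen from the camera at pose cMo
  void acquire(vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo)
  {
    sim_.setCleanPreviousImage(true);
    sim_.setCameraPosition(cMo);
    sim_.getImage(I, cam_);
  }
private:
  vpImageSimulator sim_;
  vpImage<unsigned char> target_;
  vpCameraParameters cam_;
  vpColVector X_[4];
};

void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpDot2> &dot)
{
  static std::vector<vpImagePoint> traj[4];
  for (unsigned int i = 0; i < dot.size(); i++)
    traj[i].push_back(dot[i].getCog()); // record the current center of gravity
  for (unsigned int i = 0; i < dot.size(); i++) {
    for (unsigned int j = 1; j < traj[i].size(); j++)
      vpDisplay::displayLine(I, traj[i][j-1], traj[i][j], vpColor::green);
  }
}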
Example No. 6
int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    // Define the target as 4 points
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-0.1,-0.1, 0);
    point[1].setWorldCoordinates( 0.1,-0.1, 0);
    point[2].setWorldCoordinates( 0.1, 0.1, 0);
    point[3].setWorldCoordinates(-0.1, 0.1, 0);

#if defined(VISP_HAVE_OGRE)
    // Color image used as background texture.
    vpImage<unsigned char> background(480, 640, 255);

    // Parameters of our camera
    vpCameraParameters cam(840, 840, background.getWidth()/2, background.getHeight()/2);

    // Our object
    // A simulator with the camera parameters defined above,
    // and the background image size
    vpAROgre ogre;
    ogre.setShowConfigDialog(false);
    ogre.setCameraParameters(cam);
    ogre.addResource("./"); // Add the path to the Sphere.mesh resource
    ogre.init(background, false, true);

    // Create the scene that contains 4 spheres
    // Sphere.mesh contains a sphere with 1 meter radius
    std::vector<std::string> name(4);
    for (unsigned int i=0; i<4; i++) {
      std::ostringstream s; s << "Sphere" <<  i; name[i] = s.str();
      ogre.load(name[i], "Sphere.mesh");
      ogre.setScale(name[i], 0.02f, 0.02f, 0.02f); // Rescale the sphere to 2 cm radius
      // Set the position of each sphere in the object frame
      ogre.setPosition(name[i], vpTranslationVector(point[i].get_oX(), point[i].get_oY(), point[i].get_oZ()));
    }

    // Add an optional point light source
    Ogre::Light * light = ogre.getSceneManager()->createLight();
    light->setDiffuseColour(1, 1, 1); // scaled RGB values
    light->setSpecularColour(1, 1, 1); // scaled RGB values
    light->setPosition((Ogre::Real)cdMo[0][3], (Ogre::Real)cdMo[1][3], (Ogre::Real)(-cdMo[2][3]));
    light->setType(Ogre::Light::LT_POINT);
#endif

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    for (unsigned int iter=0; iter < 150; iter ++) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
#if defined(VISP_HAVE_OGRE)
      // Update the scene from the new camera position
      ogre.display(background, cMo);
#endif
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);
      vpTime::wait( robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
  catch(...) {
    std::cout << "Catch an exception " << std::endl;
  }
}
Example No. 7
/*!

  \example testFeatureSegment.cpp

  Shows how to build a task with a segment visual feature.

*/
int main(int argc, const char **argv)
{  
  try {
#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
    int opt_no_display = 0;
    int opt_curves = 1;
#endif
    int opt_normalized = 1;

    // Parse the command line to set the variables
    vpParseArgv::vpArgvInfo argTable[] =
    {
  #if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
      {"-d", vpParseArgv::ARGV_CONSTANT, 0, (char *) &opt_no_display,
       "Disable display and graphics viewer."},
  #endif
      {"-normalized", vpParseArgv::ARGV_INT, (char*) NULL, (char *) &opt_normalized,
       "1 to use normalized features, 0 for non normalized."},
      {"-h", vpParseArgv::ARGV_HELP, (char*) NULL, (char *) NULL,
       "Print the help."},
      {(char*) NULL, vpParseArgv::ARGV_END, (char*) NULL, (char*) NULL, (char*) NULL}
    } ;

    // Read the command line options
    if(vpParseArgv::parse(&argc, argv, argTable,
                          vpParseArgv::ARGV_NO_LEFTOVERS |
                          vpParseArgv::ARGV_NO_ABBREV |
                          vpParseArgv::ARGV_NO_DEFAULTS)) {
      return (false);
    }

    std::cout << "Used options: " << std::endl;
#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
    opt_curves = (opt_no_display == 0) ? 1 : 0;
    std::cout << " - no display: " << opt_no_display << std::endl;
    std::cout << " - curves    : " << opt_curves << std::endl;
#endif
    std::cout << " - normalized: " << opt_normalized << std::endl;

    vpCameraParameters cam(640.,480.,320.,240.);

#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)
    vpDisplay *display = NULL;
    if (!opt_no_display) {
#if defined(VISP_HAVE_X11)
      display = new vpDisplayX;
#elif defined VISP_HAVE_GDI
      display = new vpDisplayGDI;
#endif
    }
#endif
    vpImage<unsigned char> I(480,640,0);

#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
    if (!opt_no_display)
      display->init(I);
#endif

    vpHomogeneousMatrix wMo; // Set to identity. Robot world frame is equal to object frame
    vpHomogeneousMatrix cMo (-0.5, 0.5, 2., vpMath::rad(10), vpMath::rad(20), vpMath::rad(30));
    vpHomogeneousMatrix cdMo(0., 0., 1., vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));
    vpHomogeneousMatrix wMc; // Camera location in the robot world frame

    vpPoint P[4]; // 4 points in the object frame
    P[0].setWorldCoordinates( .1,  .1, 0.);
    P[1].setWorldCoordinates(-.1,  .1, 0.);
    P[2].setWorldCoordinates(-.1, -.1, 0.);
    P[3].setWorldCoordinates( .1, -.1, 0.);

    vpPoint Pd[4]; // 4 points in the desired camera frame
    for (int i=0; i<4; i++) {
      Pd[i] = P[i];
      Pd[i].project(cdMo);
    }
    vpPoint Pc[4]; // 4 points in the current camera frame
    for (int i=0; i<4; i++) {
      Pc[i] = P[i];
      Pc[i].project(cMo);
    }

    vpFeatureSegment seg_cur[2], seg_des[2]; // Current and desired features
    for (int i=0; i <2; i++)
    {
      if (opt_normalized) {
        seg_cur[i].setNormalized(true);
        seg_des[i].setNormalized(true);
      }
      else {
        seg_cur[i].setNormalized(false);
        seg_des[i].setNormalized(false);
      }
      vpFeatureBuilder::create(seg_cur[i], Pc[i*2], Pc[i*2+1]);
      vpFeatureBuilder::create(seg_des[i], Pd[i*2], Pd[i*2+1]);
      seg_cur[i].print();
      seg_des[i].print();
    }

    //define visual servoing task
    vpServo task;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(2.) ;

    for (int i=0; i <2; i++)
      task.addFeature(seg_cur[i], seg_des[i]);

#if (defined (VISP_HAVE_X11) || defined(VISP_HAVE_GDI))
    if (!opt_no_display) {
      vpDisplay::display(I);
      for (int i=0; i <2; i++) {
        seg_cur[i].display(cam, I, vpColor::red);
        seg_des[i].display(cam, I, vpColor::green);
        vpDisplay::flush(I);
      }
    }
#endif

#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
    vpPlot *graph = NULL;
    if (opt_curves)
    {
      //Create a 500 by 500 window at position (700, 10) with two graphics
      graph = new vpPlot(2, 500, 500, 700, 10, "Curves...");

      //The first graphic contains 6 curves and the second one contains 8 curves
      graph->initGraph(0,6);
      graph->initGraph(1,8);
      //     graph->setTitle(0, "Velocities");
      //     graph->setTitle(1, "Error s-s*");
    }
#endif

    //param robot
    vpSimulatorCamera robot;
    float sampling_time = 0.02f; // Sampling period in seconds
    robot.setSamplingTime(sampling_time);
    robot.setMaxTranslationVelocity(5.);
    robot.setMaxRotationVelocity(vpMath::rad(90.));
    wMc = wMo * cMo.inverse();
    robot.setPosition(wMc);
    int iter=0;

    do {
      double t = vpTime::measureTimeMs();
      wMc = robot.getPosition();
      cMo = wMc.inverse() * wMo;
      for (int i=0; i <4; i++)
        Pc[i].project(cMo);

      for (int i=0; i <2; i++)
        vpFeatureBuilder::create(seg_cur[i], Pc[i*2], Pc[i*2+1]);

#if (defined (VISP_HAVE_X11) || defined(VISP_HAVE_GDI))
      if (!opt_no_display) {
        vpDisplay::display(I);
        for (int i=0; i <2; i++) {
          seg_cur[i].display(cam, I, vpColor::red);
          seg_des[i].display(cam, I, vpColor::green);
          vpDisplay::flush(I);
        }
      }
#endif

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
      if (opt_curves)
      {
        graph->plot(0, iter, v); // plot velocities applied to the robot
        graph->plot(1, iter, task.getError()); // plot error vector
      }
#endif

      vpTime::wait(t, sampling_time * 1000); // Wait 20 ms to respect the sampling period
      iter ++;

    } while(( task.getError() ).sumSquare() > 0.0005);

    // A call to kill() is requested here to destroy properly the current
    // and desired feature lists.
    task.kill();

#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
    if (graph != NULL)
      delete graph;
#endif
#if (defined (VISP_HAVE_X11) || defined (VISP_HAVE_GDI))
    if (!opt_no_display && display != NULL)
      delete display;
#endif

    std::cout << "final error=" << ( task.getError() ).sumSquare() << std::endl;
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return 1;
  }
}
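For reference, vpFeatureSegment describes the segment joining two points by four parameters: the center coordinates (x_c, y_c), the length l and the orientation alpha. With -normalized 1 (i.e. setNormalized(true) above), the feature vector becomes (x_c/l, y_c/l, 1/l, alpha). This is our summary of the ViSP documentation; check vpFeatureSegment there for the exact parametrization.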
int main()
{
#if defined(VISP_HAVE_PTHREAD)
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    /*
    Top view of the world frame, the camera frame and the object frame

    world, also robot base frame :  --> w_y
                                    |
                                   \|/
                                      w_x

    object :
                     o_y
                  /|\
                   |
             o_x <--


    camera :
                     c_y
                  /|\
                   |
             c_x <--

    */
    vpHomogeneousMatrix wMo(vpTranslationVector(0.40, 0, -0.15),
                            vpRotationMatrix(vpRxyzVector(-M_PI, 0, M_PI/2.)));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpSimulatorViper850 robot(true);
    robot.setVerbose(true);

    // Enlarge the default joint limits
    vpColVector qmin = robot.getJointMin();
    vpColVector qmax = robot.getJointMax();
    qmin[0] = -vpMath::rad(180);
    qmax[0] =  vpMath::rad(180);
    qmax[1] =  vpMath::rad(0);
    qmax[2] =  vpMath::rad(270);
    qmin[4] = -vpMath::rad(180);
    qmax[4] =  vpMath::rad(180);

    robot.setJointLimit(qmin, qmax);

    std::cout << "Robot joint limits: " << std::endl;
    for (unsigned int i=0; i< qmin.size(); i ++)
      std::cout << "Joint " << i << ": min " << vpMath::deg(qmin[i]) << " max " << vpMath::deg(qmax[i]) << " (deg)" << std::endl;

    robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    robot.set_fMo(wMo);
    bool ret = true;
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
    ret =
#endif
        robot.initialiseCameraRelativeToObject(cMo);
    if (ret == false)
      return 0; // Not able to set the position
    robot.setDesiredCameraPosition(cdMo);
    // We modify the default external camera position
    robot.setExternalCameraPosition(vpHomogeneousMatrix(vpTranslationVector(-0.4, 0.4, 2),
                                                        vpRotationMatrix(vpRxyzVector(M_PI/2,0,0))));

    vpImage<unsigned char> Iint(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 700, 0, "Internal view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    // Modify the camera parameters to match those used in the other simulations
    robot.setCameraParameters(cam);

    bool start = true;
    //for ( ; ; )
    for (int iter =0; iter < 275; iter ++)
    {
      cMo = robot.get_cMo();

      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }

      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      if (!start) {
        display_trajectory(Iint, point, cMo, cam);
        vpDisplay::displayText(Iint, 40, 120, "Click to stop the servo...", vpColor::red);
      }
      vpDisplay::flush(Iint);

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      // A click to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      if (start) {
        start = false;
        v = 0;
        robot.setVelocity(vpRobot::CAMERA_FRAME, v);
        vpDisplay::displayText(Iint, 40, 120, "Click to start the servo...", vpColor::blue);
        vpDisplay::flush(Iint);
        //vpDisplay::getClick(Iint);
      }

      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1.,
                            vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    vpImage<unsigned char> Iint(480, 640, 255) ;
    vpImage<unsigned char> Iext(480, 640, 255) ;
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 0, 0, "Internal view");
    vpDisplayX displayExt(Iext, 670, 0, "External view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 0, 0, "Internal view");
    vpDisplayGDI displayExt(Iext, 670, 0, "External view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view");
    vpDisplayOpenCV displayExt(Iext, 670, 0, "External view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif


#if defined(VISP_HAVE_DISPLAY)
    vpProjectionDisplay externalview;
    for (unsigned int i = 0 ; i < 4 ; i++)
      externalview.insert(point[i]) ;
#endif
    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    vpHomogeneousMatrix cextMo(0,0,3, 0,0,0);

    while(1) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      display_trajectory(Iint, point, cMo, cam);

      vpServoDisplay::display(task, cam, Iint, vpColor::green, vpColor::red);
#if defined(VISP_HAVE_DISPLAY)
      externalview.display(Iext, cextMo, cMo, cam, vpColor::red, true);
#endif
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);

      // A click to exit
      if (vpDisplay::getClick(Iint, false) || vpDisplay::getClick(Iext, false))
        break;

      vpTime::wait( robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
}
int
main(int argc, const char ** argv)
{
  // Read the command line options
  if (getOptions(argc, argv) == false) {
    exit (-1);
  }

  // Log file creation in /tmp/$USERNAME/log.dat
  // This file contains by line:
  // - the 6 computed camera velocities (m/s, rad/s) to achieve the task
  // - the 6 values of s - s*
  std::string username;
  // Get the user login name
  vpIoTools::getUserName(username);

  // Create a log filename to save velocities...
  std::string logdirname;
#ifdef WIN32
  logdirname ="C:/temp/" + username;
#else
  logdirname ="/tmp/" + username;
#endif
  // Test if the output path exists. If not, try to create it
  if (vpIoTools::checkDirectory(logdirname) == false) {
    try {
      // Create the dirname
      vpIoTools::makeDirectory(logdirname);
    }
    catch (...) {
      std::cerr << std::endl
		<< "ERROR:" << std::endl;
      std::cerr << "  Cannot create " << logdirname << std::endl;
      exit(-1);
    }
  }
  std::string logfilename;
  logfilename = logdirname + "/log.dat";

  // Open the log file name
  std::ofstream flog(logfilename.c_str());

  vpRobotCamera robot ;

  std::cout << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << " Test program for vpServo "  <<std::endl ;
  std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
  std::cout << " Simulation " << std::endl ;
  std::cout << " task :  3D visual servoing " << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << std::endl ;

  // Sets the initial camera location
  vpPoseVector c_r_o(// Translation tx,ty,tz
		     0.1, 0.2, 2, 
		     // ThetaU rotation 
		     vpMath::rad(20), vpMath::rad(10),  vpMath::rad(50) ) ;

  // From the camera pose build the corresponding homogeneous matrix
  vpHomogeneousMatrix cMo(c_r_o) ;

  // Set the robot initial position
  robot.setPosition(cMo) ;

  // Sets the desired camera location
  vpPoseVector cd_r_o(// Translation tx,ty,tz
		      0, 0, 1, 
		      // ThetaU rotation 
		      vpMath::rad(0),vpMath::rad(0),vpMath::rad(0)) ; 

  // From the camera desired pose build the corresponding homogeneous matrix
  vpHomogeneousMatrix cdMo(cd_r_o) ;

  vpHomogeneousMatrix cMcd; // Transformation between current and desired camera frame
  vpRotationMatrix cRcd; // Rotation between current and desired camera frame
 
  // Set the constant gain of the servo
  double lambda = 1;

  int iter=0 ;
  // Start the visual servoing loop. We stop the servo after 200 iterations
  while(iter++ < 200) {
    std::cout << "------------------------------------" << iter <<std::endl ;

    // get the robot position
    robot.getPosition(cMo) ;

    // new displacement to achieve
    cMcd = cMo*cdMo.inverse() ;
      
    // Extract the translation vector ctc* which is the current
    // translational visual feature. 
    vpTranslationVector ctcd;
    cMcd.extract(ctcd);
    // Compute the current theta U visual feature
    vpThetaUVector tu_cRcd(cMcd);

    // Create the identity matrix
    vpMatrix I(3,3);
    I.setIdentity();

    // Compute the camera translational velocity
    vpColVector v(3);
    v = lambda * ( I - vpColVector::skew(tu_cRcd) ) * ctcd; 
    // Compute the camera rotational velocity
    vpColVector w(3);
    w = lambda * tu_cRcd;

    // Update the complete camera velocity vector
    vpColVector velocity(6);
    for (int i=0; i<3; i++) {
      velocity[i]   = v[i]; // Translational velocity
      velocity[i+3] = w[i]; // Rotational velocity
    }

    // Send the camera velocity to the controller
    robot.setVelocity(vpRobot::CAMERA_FRAME, velocity) ;

    // Retrieve the error (s-s*)
    std::cout << ctcd.t() << " " << tu_cRcd.t() << std::endl;

    // Save log
    flog << velocity.t() << " " << ctcd.t() << " " << tu_cRcd.t() << std::endl;
  }

  // Close the log file
  flog.close();
}