Code example #1
// Headers assumed from the classes used (ViSP 2.x-style include paths)
#include <visp/vpFeatureBuilder.h>
#include <visp/vpMath.h>
#include <visp/vpServo.h>
#include <visp/vpSimulatorCamera.h>
#include <visp/vpTime.h>
#include <visp/vpWireFrameSimulator.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDisplayGDI.h>
#include <visp/vpDisplayOpenCV.h>

// Trajectory-drawing helper used in the servo loop; a sketch is given after this example
void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam);

int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    std::vector<vpPoint> point(4) ;
    point[0].setWorldCoordinates(-0.1,-0.1, 0);
    point[1].setWorldCoordinates( 0.1,-0.1, 0);
    point[2].setWorldCoordinates( 0.1, 0.1, 0);
    point[3].setWorldCoordinates(-0.1, 0.1, 0);

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;
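    // wMo, the object pose in the world frame, stays constant; the servo loop
    // below recomputes cMo from it as the simulated camera moves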

    vpImage<unsigned char> Iint(480, 640, 0) ;
    vpImage<unsigned char> Iext(480, 640, 0) ;
#if defined VISP_HAVE_X11
    vpDisplayX displayInt(Iint, 0, 0, "Internal view");
    vpDisplayX displayExt(Iext, 670, 0, "External view");
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt(Iint, 0, 0, "Internal view");
    vpDisplayGDI displayExt(Iext, 670, 0, "External view");
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view");
    vpDisplayOpenCV displayExt(Iext, 670, 0, "External view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    vpHomogeneousMatrix cextMo(0,0,3, 0,0,0);

    vpWireFrameSimulator sim;
    sim.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    sim.setCameraPositionRelObj(cMo);
    sim.setDesiredCameraPosition(cdMo);
    sim.setExternalCameraPosition(cextMo);
    sim.setInternalCameraParameters(cam);
    sim.setExternalCameraParameters(cam);

    while(1) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
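      // computeControlLaw() returns the camera velocity v = -lambda * L^+ (s - s*),
      // where L is the interaction matrix type selected above (here: CURRENT)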
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      sim.setCameraPositionRelObj(cMo);

      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;

      sim.getInternalImage(Iint);
      sim.getExternalImage(Iext);

      display_trajectory(Iint, point, cMo, cam);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);

      // A click in the internal view to exit
      if (vpDisplay::getClick(Iint, false))
        break;
      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
  }
}
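
Examples #1 and #4 call a display_trajectory() helper that is not part of ViSP; the tutorials define it locally. A minimal sketch, assuming only what the call sites above show (the body here is an assumption, not the shipped helper):

#include <visp/vpMeterPixelConversion.h>

// Project each 3D point with the current camera pose and draw the image
// trajectory accumulated over the servo iterations (hypothetical sketch)
void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam)
{
  static std::vector<vpImagePoint> traj[4];
  vpImagePoint cog;
  for (unsigned int i = 0; i < 4; i++) {
    // Project the 3D point into the image plane and convert meters to pixels
    point[i].track(cMo);
    vpMeterPixelConversion::convertPoint(cam, point[i].get_x(), point[i].get_y(), cog);
    traj[i].push_back(cog);
  }
  for (unsigned int i = 0; i < 4; i++) {
    for (unsigned int j = 1; j < traj[i].size(); j++)
      vpDisplay::displayLine(I, traj[i][j-1], traj[i][j], vpColor::green);
  }
}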
Code example #2
// Headers assumed from the classes used (ViSP 2.x-style include paths)
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpMath.h>
#include <visp/vpProjectionDisplay.h>
#include <visp/vpServo.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpSimulatorCamera.h>
#include <visp/vpVelocityTwistMatrix.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDisplayGTK.h>
#include <visp/vpDisplayGDI.h>
#include <visp/vpDisplayOpenCV.h>

// Command-line parser used below; a sketch is given after this example
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);

int main(int argc, const char **argv)
{
  try {
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit (-1);
    }

    // We open two displays, one for the internal camera view, the other one for
    // the external view, using X11, GTK, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX displayInt;
    vpDisplayX displayExt;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK displayInt;
    vpDisplayGTK displayExt;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt;
    vpDisplayGDI displayExt;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt;
    vpDisplayOpenCV displayExt;
#endif

    // open a display for the visualization

    vpImage<unsigned char> Iint(300, 300, 0) ;
    vpImage<unsigned char> Iext(300, 300, 0) ;

    if (opt_display) {
      displayInt.init(Iint, 0, 0, "Internal view") ;
      displayExt.init(Iext, 330, 0, "External view") ;
    }
    vpProjectionDisplay externalview ;

    double px, py ; px = py = 500 ;
    double u0, v0 ; u0 = 150 ; v0 = 160 ;

    vpCameraParameters cam(px,py,u0,v0);

    int i ;
    vpServo task ;
    vpSimulatorCamera robot ;

    std::cout << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, articular velocity are computed"
              << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo 4 points " << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(-0.1,-0.1,1,
                            vpMath::rad(40),  vpMath::rad(10),  vpMath::rad(60))   ;

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc) ;
    wMo = wMc * cMo;

    vpHomogeneousMatrix cextMo(0, 0, 2, 0, 0, 0) ;

    // sets the point coordinates in the object frame
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-0.1,-0.1,0) ;
    point[1].setWorldCoordinates(0.1,-0.1,0) ;
    point[2].setWorldCoordinates(0.1,0.1,0) ;
    point[3].setWorldCoordinates(-0.1,0.1,0) ;

    for (i = 0 ; i < 4 ; i++)
      externalview.insert(point[i]) ;

    // computes  the point coordinates in the camera frame and its 2D coordinates
    for (i = 0 ; i < 4 ; i++)
      point[i].track(cMo) ;

    // sets the desired position of the point
    vpFeaturePoint p[4] ;
    for (i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i],point[i])  ;  //retrieve x,y and Z of the vpPoint structure

    // sets the desired position of the feature point s*
    vpFeaturePoint pd[4] ;
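    // buildFrom(x, y, Z) takes the desired coordinates x, y in the normalized
    // image plane (in meters) and the depth Z used to evaluate the interaction matrix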

    pd[0].buildFrom(-0.1,-0.1, 1) ;
    pd[1].buildFrom( 0.1,-0.1, 1) ;
    pd[2].buildFrom( 0.1, 0.1, 1) ;
    pd[3].buildFrom(-0.1, 0.1, 1) ;

    // define the task
    // - we want an eye-in-hand control law
    // - articular velocities are computed
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::MEAN) ;

    // Set the position of the camera in the end-effector frame
    vpHomogeneousMatrix cMe ;
    vpVelocityTwistMatrix cVe(cMe) ;
    task.set_cVe(cVe) ;
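    // cMe is left at identity, so camera and end-effector frames coincide and
    // cVe is the identity twist transformation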

    // Set the Jacobian (expressed in the end-effector frame)
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;

    // we want to see a point on a point
    for (i = 0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // set the gain
    task.setLambda(1) ;

    // Display task information
    task.print() ;

    unsigned int iter=0 ;
    // loop
    while(iter++<200)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // Set the Jacobian (expressed in the end-effector frame)
      // since q is modified eJe is modified
      robot.get_eJe(eJe) ;
      task.set_eJe(eJe) ;

      // get the robot position
      robot.getPosition(wMc) ;
      // Compute the position of the camera wrt the object frame
      cMo = wMc.inverse() * wMo;

      // update new point position and corresponding features
      for (i = 0 ; i < 4 ; i++)
      {
        point[i].track(cMo) ;
        //retrieve x,y and Z of the vpPoint structure
        vpFeatureBuilder::create(p[i],point[i])  ;
      }
      // since the vpServo::MEAN interaction matrix is used, we also need to update the desired features at each iteration
      pd[0].buildFrom(-0.1,-0.1, 1) ;
      pd[1].buildFrom( 0.1,-0.1, 1) ;
      pd[2].buildFrom( 0.1, 0.1, 1) ;
      pd[3].buildFrom(-0.1, 0.1, 1) ;

      if (opt_display) {
        vpDisplay::display(Iint) ;
        vpDisplay::display(Iext) ;
        vpServoDisplay::display(task,cam,Iint) ;
        externalview.display(Iext,cextMo, cMo, cam, vpColor::green) ;
        vpDisplay::flush(Iint);
        vpDisplay::flush(Iext);
      }

      // compute the control law
      v = task.computeControlLaw() ;

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
    }

    // Display task information
    task.print() ;
    task.kill();

    std::cout <<"Final robot position with respect to the object frame:\n";
    cMo.print();

    if (opt_display && opt_click_allowed) {
      // suppressed for automate test
      std::cout << "\n\nClick in the internal view window to end..." << std::endl;
      vpDisplay::getClick(Iint) ;
    }
    return 0;
  }
  catch(const vpException &e) {
    std::cout << "Caught a ViSP exception: " << e << std::endl;
    return 1;
  }
}
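
This example and the next read their command-line flags through a getOptions() helper that is not listed. A minimal sketch, assuming the usual conventions of the ViSP example suite (the body is an assumption; only -c and -d are handled):

#include <iostream>
#include <string>

// Parse the example's command-line flags: "-c" disables mouse clicks,
// "-d" disables the display windows (assumed behaviour, not the shipped parser)
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
  for (int i = 1; i < argc; i++) {
    std::string arg(argv[i]);
    if (arg == "-c")
      click_allowed = false; // run without waiting for user clicks
    else if (arg == "-d")
      display = false;       // run without opening display windows
    else {
      std::cout << "Usage: " << argv[0] << " [-c] [-d]" << std::endl;
      return false;          // caller exits when false is returned
    }
  }
  return true;
}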
Code example #3
// Headers assumed from the classes used (ViSP 2.x-style include paths)
#include <visp/vpCylinder.h>
#include <visp/vpDebug.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureLine.h>
#include <visp/vpMath.h>
#include <visp/vpProjectionDisplay.h>
#include <visp/vpServo.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpSimulatorCamera.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDisplayGTK.h>
#include <visp/vpDisplayGDI.h>

// Command-line parser shared with the previous example (see the sketch above)
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);

int main(int argc, const char **argv)
{
  bool opt_display = true;
  bool opt_click_allowed = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit (-1);
  }

  vpImage<unsigned char> Iint(512,512,0) ;
  vpImage<unsigned char> Iext(512,512,0) ;

  // We open a window using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX displayInt;
  vpDisplayX displayExt;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK displayInt;
  vpDisplayGTK displayExt;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI displayInt;
  vpDisplayGDI displayExt;
#endif

  if (opt_display) {
    try{
      // Display size is automatically defined by the image (Iint) and (Iext) size
      displayInt.init(Iint, 100, 100,"Internal view") ;
      displayExt.init(Iext, (int)(130+Iint.getWidth()), 100, "External view") ;
      // Display the image
      // The image class has a member that specifies a pointer toward
      // the display that has been initialized in the display declaration,
      // therefore it is no longer necessary to make a reference to the
      // display variable.
      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      vpDisplay::flush(Iint) ;
      vpDisplay::flush(Iext) ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error while displaying the image") ;
      exit(-1);
    }
  }

  vpProjectionDisplay externalview ;

  //Set the camera parameters
  double px, py ; px = py = 600 ;
  double u0, v0 ; u0 = v0 = 256 ;

  vpCameraParameters cam(px,py,u0,v0);

  vpServo task ;
  vpSimulatorCamera robot ;

  // sets the initial camera location
  vpHomogeneousMatrix cMo(-0.2,0.1,2,
                          vpMath::rad(5),  vpMath::rad(5),  vpMath::rad(20));

  vpHomogeneousMatrix wMc, wMo;
  robot.getPosition(wMc) ;
  wMo = wMc * cMo; // Compute the position of the object in the world frame

  // sets the final camera location (for simulation purpose)
  vpHomogeneousMatrix cMod(0,0,1,
                           vpMath::rad(0),  vpMath::rad(0),  vpMath::rad(0));

  // sets the cylinder coordinates in the world frame
  vpCylinder cylinder(0,1,0,  // direction
                      0,0,0,  // point of the axis
                      0.1) ;  // radius

  externalview.insert(cylinder) ;

  // sets the desired position of the visual feature
  cylinder.track(cMod) ;
  cylinder.print() ;

  // Build the desired line features from the cylinder parameters projected in the image frame
  vpFeatureLine ld[2] ;
  int i ;
  for(i=0 ; i < 2 ; i++)
    vpFeatureBuilder::create(ld[i],cylinder,i)  ;
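  // A cylinder projects in the image as its two limb (contour) lines, hence the
  // two vpFeatureLine features built with indices 0 and 1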

  // computes  the cylinder coordinates in the camera frame and its 2D coordinates
  // sets the current position of the visual feature
  cylinder.track(cMo) ;
  cylinder.print() ;

  // Build the current line features from the cylinder parameters projected in the image frame
  vpFeatureLine l[2] ;
  for(i=0 ; i < 2 ; i++)
  {
    vpFeatureBuilder::create(l[i],cylinder,i)  ;
    l[i].print() ;
  }

  // define the task
  // - we want an eye-in-hand control law
  // - robot is controlled in the camera frame
  task.setServo(vpServo::EYEINHAND_CAMERA) ;
  task.setInteractionMatrixType(vpServo::DESIRED,vpServo::PSEUDO_INVERSE) ;
  //  it can also be interesting to test these possibilities
  // task.setInteractionMatrixType(vpServo::CURRENT,vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::MEAN, vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::DESIRED,  vpServo::TRANSPOSE) ;
  // task.setInteractionMatrixType(vpServo::CURRENT,  vpServo::TRANSPOSE) ;

  // we want to see  2 lines on 2 lines
  task.addFeature(l[0],ld[0]) ;
  task.addFeature(l[1],ld[1]) ;

  // Set the point of view of the external view
  vpHomogeneousMatrix cextMo(0,0,6,
                             vpMath::rad(40),  vpMath::rad(10),  vpMath::rad(60))   ;

  // Display the initial scene
  vpServoDisplay::display(task,cam,Iint) ;
  externalview.display(Iext,cextMo, cMo, cam, vpColor::red);
  vpDisplay::flush(Iint) ;
  vpDisplay::flush(Iext) ;

  // Display task information
  task.print() ;

  if (opt_display && opt_click_allowed) {
    std::cout << "\n\nClick in the internal camera view window to start..." << std::endl;
    vpDisplay::getClick(Iint) ;
  }

  // set the gain
  task.setLambda(1) ;

  // Display task information
  task.print() ;

  unsigned int iter=0 ;
  // The first loop is needed to reach the desired position
  do
  {
    std::cout << "---------------------------------------------" << iter++ <<std::endl ;
    vpColVector v ;

    // get the robot position
    robot.getPosition(wMc) ;
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    // Update the line features: compute the parameters of the cylinder
    // in the camera frame and in the image frame
    cylinder.track(cMo) ;

    // Build the current line features from the cylinder parameters projected in the image frame
    for(i=0 ; i < 2 ; i++)
    {
      vpFeatureBuilder::create(l[i],cylinder,i)  ;
    }

    // Display the current scene
    if (opt_display) {
      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      vpServoDisplay::display(task,cam,Iint) ;
      externalview.display(Iext,cextMo, cMo, cam, vpColor::red);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }

    // compute the control law
    v = task.computeControlLaw() ;

    // send the camera velocity to the controller
    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

    std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
  }
  while(( task.getError() ).sumSquare() >  1e-9) ;


  // The second loop computes the control law while taking the secondary task into account.
  // In this example the secondary task is split into four steps.
  // The first step imposes a motion of the robot along the x axis of the object frame with a velocity of 0.5.
  // The second step imposes a motion along the y axis of the object frame with a velocity of 0.5.
  // The third step imposes a motion along the x axis of the object frame with a velocity of -0.5.
  // The last step imposes a motion along the y axis of the object frame with a velocity of -0.5.
  // Each step lasts 200 iterations.
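  // vpServo::secondaryTask(e) projects the desired secondary velocity e onto the
  // null space of the primary task (roughly (I - L^+ L) e), so adding the projected
  // vector to v does not disturb the regulation of the visual error. The "rapport"
  // scaling below renormalizes the projection so that its x (or y) component
  // matches the requested speed "vitesse".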
  vpColVector e1(6) ; e1 = 0 ;
  vpColVector e2(6) ; e2 = 0 ;
  vpColVector proj_e1 ;
  vpColVector proj_e2 ;
  iter = 0;
  double rapport = 0;       // scale factor applied to the projected secondary task ("rapport" = ratio)
  double vitesse = 0.5;     // requested secondary-task speed ("vitesse" = speed)
  unsigned int tempo = 800; // total number of iterations for the four secondary-task steps

  while(iter < tempo)
  {
    vpColVector v ;

    robot.getPosition(wMc) ;
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    cylinder.track(cMo) ;

    for(i=0 ; i < 2 ; i++)
    {
      vpFeatureBuilder::create(l[i],cylinder,i)  ;
    }

    if (opt_display) 
    {
      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      vpServoDisplay::display(task,cam,Iint) ;
      externalview.display(Iext,cextMo, cMo, cam, vpColor::red);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }

    v = task.computeControlLaw() ;

    if ( iter%tempo < 200 )
    {
      e2 = 0;
      e1[0] = fabs(vitesse)  ;
      proj_e1 = task.secondaryTask(e1);
      rapport = vitesse/proj_e1[0];
      proj_e1 *= rapport ;
      v += proj_e1 ;
    }

    if ( iter%tempo < 400 &&  iter%tempo >= 200)
    {
      e1 = 0;
      e2[1] = fabs(vitesse)  ;
      proj_e2 = task.secondaryTask(e2);
      rapport = vitesse/proj_e2[1];
      proj_e2 *= rapport ;
      v += proj_e2 ;
    }

    if ( iter%tempo < 600 &&  iter%tempo >= 400)
    {
      e2 = 0;
      e1[0] = -fabs(vitesse)  ;
      proj_e1 = task.secondaryTask(e1);
      rapport = -vitesse/proj_e1[0];
      proj_e1 *= rapport ;
      v += proj_e1 ;
    }

    if ( iter%tempo < 800 &&  iter%tempo >= 600)
    {
      e1 = 0;
      e2[1] = -fabs(vitesse)  ;
      proj_e2 = task.secondaryTask(e2);
      rapport = -vitesse/proj_e2[1];
      proj_e2 *= rapport ;
      v += proj_e2 ;
    }

    robot.setVelocity(vpRobot::CAMERA_FRAME, v);

    std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;

    iter++;
  }

  if (opt_display && opt_click_allowed) {
    std::cout << "\nClick in the internal camera view window to end..." << std::endl;
    vpDisplay::getClick(Iint) ;
  }

  // Display task information
  task.print() ;
  task.kill();
}
Code example #4
// Headers assumed from the classes used (ViSP 2.x-style include paths)
#include <visp/vpFeatureBuilder.h>
#include <visp/vpMath.h>
#include <visp/vpProjectionDisplay.h>
#include <visp/vpServo.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpSimulatorCamera.h>
#include <visp/vpTime.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDisplayGDI.h>
#include <visp/vpDisplayOpenCV.h>

// Same trajectory-drawing helper as in example #1 (see the sketch there)
void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam);

int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1.,
                            vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    vpImage<unsigned char> Iint(480, 640, 255) ;
    vpImage<unsigned char> Iext(480, 640, 255) ;
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 0, 0, "Internal view");
    vpDisplayX displayExt(Iext, 670, 0, "External view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 0, 0, "Internal view");
    vpDisplayGDI displayExt(Iext, 670, 0, "External view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 0, 0, "Internal view");
    vpDisplayOpenCV displayExt(Iext, 670, 0, "External view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif


#if defined(VISP_HAVE_DISPLAY)
    vpProjectionDisplay externalview;
    for (unsigned int i = 0 ; i < 4 ; i++)
      externalview.insert(point[i]) ;
#endif
    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    vpHomogeneousMatrix cextMo(0,0,3, 0,0,0);

    while(1) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      display_trajectory(Iint, point, cMo, cam);

      vpServoDisplay::display(task, cam, Iint, vpColor::green, vpColor::red);
#if defined(VISP_HAVE_DISPLAY)
      externalview.display(Iext, cextMo, cMo, cam, vpColor::red, true);
#endif
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);

      // A click to exit
      if (vpDisplay::getClick(Iint, false) || vpDisplay::getClick(Iext, false))
        break;

      vpTime::wait( robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch(const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
  }
}