// Includes for a legacy ViSP 2.x build (<visp/...> headers; recent ViSP 3.x
// uses the <visp3/...> module layout instead)
#include <visp/vpCameraParameters.h>
#include <visp/vpColVector.h>
#include <visp/vpDisplayGTK.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpMath.h>
#include <visp/vpMatrix.h>
#include <visp/vpPoint.h>
#include <visp/vpRobotCamera.h>
#include <visp/vpServo.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpTime.h>
#include <visp/vpVelocityTwistMatrix.h>

int main()
{
  //////////////////////////////////////////
  // Set the initial camera location
  vpHomogeneousMatrix cMo(0.3,0.2,3,
                          vpMath::rad(0),vpMath::rad(0),vpMath::rad(40))  ;

  ///////////////////////////////////
  // initialize the robot
  vpRobotCamera robot ;
  robot.setSamplingTime(0.04); // 40ms 
  robot.setPosition(cMo) ;

  // Initialize the camera parameters
  vpCameraParameters cam(800,800,240,180); 

  // Image definition
  unsigned int height = 360 ;
  unsigned int width  = 480 ;
  vpImage<unsigned char> I(height,width);

  // Display initialization
  vpDisplayGTK disp;
  disp.init(I,100,100,"Simulation display");

  ////////////////////////////////////////
  // Desired visual features initialization

  // Set the point coordinates in the object frame (in meters)
  vpPoint point[4] ;
  point[0].setWorldCoordinates(-0.1,-0.1,0) ;
  point[1].setWorldCoordinates(0.1,-0.1,0) ;
  point[2].setWorldCoordinates(0.1,0.1,0) ;
  point[3].setWorldCoordinates(-0.1,0.1,0) ;

  // Set the desired camera location
  vpHomogeneousMatrix cMo_d(0,0,1,0,0,0) ;
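  // i.e. the object lies 1 m straight in front of the camera, with no rotation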

  // Compute the 3D point coordinates in the camera frame and their 2D image coordinates
  for (int i = 0 ; i < 4 ; i++)
    point[i].project(cMo_d) ;

  // Create the associated features
  vpFeaturePoint pd[4] ;
  for (int i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(pd[i],point[i]) ;


  ///////////////////////////////////////
  // Current visual features initialization

  // Compute the 3D point coordinates in the camera frame and their 2D image coordinates
  for (int i = 0 ; i < 4 ; i++)
    point[i].project(cMo) ;

  // Create the associated features
  vpFeaturePoint p[4] ;
  for (int i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(p[i],point[i])  ;


  /////////////////////////////////
  // Task definition
  vpServo task ;
  // We want an eye-in-hand control law
  task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
  task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE) ;
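  // With these settings the control law computed below is
  //   qdot = -lambda * (L(s*) * cVe * eJe)^+ * (s - s*)
  // where L(s*) is the interaction matrix evaluated at the desired features
  // and ^+ is the Moore-Penrose pseudo-inverse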

  // Set the position of the camera in the end-effector frame
  vpHomogeneousMatrix cMe ;
  vpVelocityTwistMatrix cVe(cMe) ;
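  // cMe is left at identity: camera and end-effector frames coincide here,
  // so cVe reduces to the identity twist transformation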
  task.set_cVe(cVe) ;
  // Set the Jacobian (expressed in the end-effector frame)
  vpMatrix eJe ;
  robot.get_eJe(eJe) ;
  task.set_eJe(eJe) ;

  // We want to see a point on a point: each current point feature must reach its desired one
  for (int i = 0 ; i < 4 ; i++)
    task.addFeature(p[i],pd[i]) ;
  // Set the gain
  task.setLambda(1.0) ;
  // Print the current information about the task
  task.print();


  ////////////////////////////////////////////////
  // The control loop
  int k = 0;
  while(k++ < 200){
    double t = vpTime::measureTimeMs();
    
    // Display the image background
    vpDisplay::display(I);
    
    // Update the current features
    for (int i = 0 ; i < 4 ; i++)
    {
      point[i].project(cMo) ;
      vpFeatureBuilder::create(p[i],point[i])  ;
    }
    
    // Display the task features (current and desired)
    vpServoDisplay::display(task,cam,I);
    vpDisplay::flush(I);
    
    // Update the robot Jacobian
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;
    
    // Compute the control law
    vpColVector v = task.computeControlLaw() ;
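    // For this free-flying camera the articular velocity is directly the
    // 6-dimensional camera velocity (vx, vy, vz, wx, wy, wz)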
    
    // Send the computed velocity to the robot and compute the new robot position
    robot.setVelocity(vpRobot::ARTICULAR_FRAME, v) ;
    robot.getPosition(cMo) ;
    
    // Print the current information about the task
    task.print();
    
    // Wait 40 ms
    vpTime::wait(t,40); 
  }  
  task.kill();
  return 0;
}
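A possible refinement of the example above (not part of the original code): replace the constant gain set by task.setLambda(1.0) with ViSP's vpAdaptiveGain, which applies a larger gain near convergence and a smaller one for large errors to keep velocities bounded. A minimal sketch, assuming a ViSP build that provides vpAdaptiveGain:

#include <visp/vpAdaptiveGain.h>

vpAdaptiveGain lambda(4, 0.4, 30); // gain at zero error, gain at infinity, slope at zero
task.setLambda(lambda);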
Example #2
static
void *mainLoop (void *_simu)
{
  // Retrieve the vpSimulator instance passed as argument
  vpSimulator *simu = (vpSimulator *)_simu ;

  // Simulation initialization
  simu->initMainApplication() ;


  /////////////////////////////////////////
  // sets the initial camera location
  vpHomogeneousMatrix cMo(-0.3,-0.2,3,
                          vpMath::rad(0),vpMath::rad(0),vpMath::rad(40))  ;

  ///////////////////////////////////
  // Initialize the robot
  vpRobotCamera robot ;
  robot.setSamplingTime(0.04); // 40ms 
  robot.setPosition(cMo) ;
  // Send the robot position to the visualizer
  simu->setCameraPosition(cMo) ;
  // Initialize the camera parameters
  vpCameraParameters cam ;
  simu->getCameraParameters(cam);  

  ////////////////////////////////////////
  // Desired visual features initialization

  // Set the point coordinates in the object frame (in meters)
  vpPoint point[4] ;
  point[0].setWorldCoordinates(-0.1,-0.1,0) ;
  point[1].setWorldCoordinates(0.1,-0.1,0) ;
  point[2].setWorldCoordinates(0.1,0.1,0) ;
  point[3].setWorldCoordinates(-0.1,0.1,0) ;

  // Set the desired camera location
  vpHomogeneousMatrix cMo_d(0,0,1,0,0,0) ;

  // Compute the 3D point coordinates in the camera frame and their 2D image coordinates
  for (int i = 0 ; i < 4 ; i++)
    point[i].project(cMo_d) ;

  // Create the associated features
  vpFeaturePoint pd[4] ;
  for (int i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(pd[i],point[i]) ;



  ///////////////////////////////////////
  // Current visual features initialization

  // Compute the 3D point coordinates in the camera frame and their 2D image coordinates
  for (int i = 0 ; i < 4 ; i++)
    point[i].project(cMo) ;

  // Create the associated features
  vpFeaturePoint p[4] ;
  for (int i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(p[i],point[i])  ;


  /////////////////////////////////
  // Task definition
  vpServo task ;
  // We want an eye-in-hand control law
  task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
  task.setInteractionMatrixType(vpServo::DESIRED,vpServo::PSEUDO_INVERSE) ;

  // Set the position of the camera in the end-effector frame
  vpHomogeneousMatrix cMe ;
  vpVelocityTwistMatrix cVe(cMe) ;
  task.set_cVe(cVe) ;
  // Set the Jacobian (expressed in the end-effector frame)
  vpMatrix eJe ;
  robot.get_eJe(eJe) ;
  task.set_eJe(eJe) ;

  // We want to see a point on a point: each current point feature must reach its desired one
  for (int i = 0 ; i < 4 ; i++)
    task.addFeature(p[i],pd[i]) ;
  // Set the gain
  task.setLambda(1.0) ;
  // Print the current information about the task
  task.print();

  vpTime::wait(500); // give the simulator thread time to set up the scene
  ////////////////////////////////////////////////
  // The control loop
  int k = 0;
  while(k++ < 200){
    double t = vpTime::measureTimeMs();
    
    
    // Update the current features
    for (int i = 0 ; i < 4 ; i++)
    {
      point[i].project(cMo) ;
      vpFeatureBuilder::create(p[i],point[i])  ;
    }
    
    // Update the robot Jacobian
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;
    
    // Compute the control law
    vpColVector v = task.computeControlLaw() ;
    
    // Send the computed velocity to the robot and compute the new robot position
    robot.setVelocity(vpRobot::ARTICULAR_FRAME, v) ;
    robot.getPosition(cMo) ;
    
    // Send the robot position to the visualizer
    simu->setCameraPosition(cMo) ;

    // Print the current information about the task
    task.print();
    
    // Wait 40 ms
    vpTime::wait(t,40); 
  }  
  task.kill();
  simu->closeMainApplication() ;


  return NULL ;
}
Example #3
static
void *mainLoop (void *_simu)
{
  // Retrieve the vpSimulator instance passed as argument
  vpSimulator *simu = (vpSimulator *)_simu ;

  // Simulation initialization
  simu->initMainApplication() ;

  ///////////////////////////////////
  // Set the initial camera location
  vpHomogeneousMatrix cMo(0.3,0.2,3,
                          vpMath::rad(0),vpMath::rad(0),vpMath::rad(40));
  vpHomogeneousMatrix wMo; // Set to identity
  vpHomogeneousMatrix wMc; // Camera position in the world frame

  ///////////////////////////////////
  // Initialize the robot
  vpSimulatorCamera robot ;
  robot.setSamplingTime(0.04); // 40ms 
  wMc = wMo * cMo.inverse();
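  // vpSimulatorCamera is controlled through its pose in the world frame,
  // hence the composition wMc = wMo * oMc = wMo * cMo^-1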
  robot.setPosition(wMc) ;
  // Send the robot position to the visualizer
  simu->setCameraPosition(cMo) ;
  // Initialize the camera parameters
  vpCameraParameters cam ;
  simu->getCameraParameters(cam);

  ////////////////////////////////////////
  // Desired visual features initialization

  // Set the point coordinates in the object frame (in meters)
  vpPoint point[4] ;
  point[0].setWorldCoordinates(-0.1,-0.1,0) ;
  point[1].setWorldCoordinates(0.1,-0.1,0) ;
  point[2].setWorldCoordinates(0.1,0.1,0) ;
  point[3].setWorldCoordinates(-0.1,0.1,0) ;

  // Set the desired camera location
  vpHomogeneousMatrix cMo_d(0,0,1,0,0,0) ;

  // Compute the 3D point coordinates in the camera frame and their 2D image coordinates
  for (int i = 0 ; i < 4 ; i++)
    point[i].project(cMo_d) ;

  // Create the associated features
  vpFeaturePoint pd[4] ;
  for (int i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(pd[i],point[i]) ;

  ///////////////////////////////////////
  // Current visual features initialization
  unsigned int height = simu->getInternalHeight();
  unsigned int width = simu->getInternalWidth();

  // Create a greyscale image
  vpImage<unsigned char> I(height,width);

  // Display initialization
#if defined(VISP_HAVE_X11)
  vpDisplayX disp;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI disp;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK disp;
#endif
  disp.init(I,100,100,"Simulation display");
  // Get the current image
  vpTime::wait(500); // wait to be sure the image is generated
  simu->getInternalImage(I);

  // Display the current image
  vpDisplay::display(I);
  vpDisplay::flush(I);

  // Initialize the four dots tracker
  std::cout << "A click in the four dots clockwise. " << std::endl;
  vpDot2 dot[4];
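  // vpDot2 tracks a blob in the image; tracking is initialized from a user click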
  vpFeaturePoint p[4]; 
  for (int i = 0 ; i < 4 ; i++)
  {
    dot[i].setGraphics(true);
    // Call for a click
    std::cout << "A click in the dot " << i << std::endl;
    dot[i].initTracking(I);
    // Create the associated feature 
    vpFeatureBuilder::create(p[i],cam,dot[i]);
    // flush the display
    vpDisplay::flush(I);
  }  


  /////////////////////////////////
  // Task definition
  vpServo task ;
  // We want an eye-in-hand control law
  task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
  task.setInteractionMatrixType(vpServo::DESIRED) ;

  // Set the position of the camera in the end-effector frame
  vpHomogeneousMatrix cMe ;
  vpVelocityTwistMatrix cVe(cMe) ;
  task.set_cVe(cVe) ;
  // Set the Jacobian (expressed in the end-effector frame)
  vpMatrix eJe ;
  robot.get_eJe(eJe) ;
  task.set_eJe(eJe) ;

  // We want to see a point on a point: each current point feature must reach its desired one
  for (int i = 0 ; i < 4 ; i++)
    task.addFeature(p[i],pd[i]) ;
  // Set the gain
  task.setLambda(1.0) ;
  // Print the current information about the task
  task.print();

  vpTime::wait(500);

  ////////////////////////////////////////////////
  // The control loop
  int k = 0;
  while(k++ < 200){
    double t = vpTime::measureTimeMs();
    
    // Get the current internal camera view and display it
    simu->getInternalImage(I);
    vpDisplay::display(I);

    // Track the four dots and update the associated visual features
    for (int i = 0 ; i < 4 ; i++)
    {
      dot[i].track(I) ;
      vpFeatureBuilder::create(p[i],cam,dot[i]) ;
    }

    // Display the desired and current visual features
    vpServoDisplay::display(task,cam,I) ;
    vpDisplay::flush(I);
    
    // Update the robot Jacobian
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;
    
    // Compute the control law
    vpColVector v = task.computeControlLaw() ;
    
    // Send the computed velocity to the robot and compute the new robot position
    robot.setVelocity(vpRobot::ARTICULAR_FRAME, v);
    wMc = robot.getPosition();
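    // Convert the world-frame camera pose back to the camera-to-object pose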
    cMo = wMc.inverse() * wMo;
    
    // Send the robot position to the visualizer
    simu->setCameraPosition(cMo) ;

    // Wait 40 ms
    vpTime::wait(t,40); 
  }  
  // Print information about the task
  task.print();
  task.kill();
  simu->closeMainApplication() ;

  return NULL ;
}
int
main(int argc, const char ** argv)
{
  try {
    // Read the command line options
    if (getOptions(argc, argv) == false) {
      exit (-1);
    }

    int i ;
    vpServo task ;
    vpSimulatorCamera robot ;


    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control,  articular velocity are computed" << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo 4 points " << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    // Set the initial camera location with respect to the object
    vpHomogeneousMatrix cMo ;
    cMo[0][3] = 0.1 ;
    cMo[1][3] = 0.2 ;
    cMo[2][3] = 2 ;

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc) ;
    wMo = wMc * cMo;
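    // wMo stays fixed; cMo will be recomputed from the moving camera at each iteration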

    // Set the point coordinates in the object frame
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-1,-1,0) ;
    point[1].setWorldCoordinates(1,-1,0) ;
    point[2].setWorldCoordinates(1,1,0) ;
    point[3].setWorldCoordinates(-1,1,0) ;

    // Compute the point coordinates in the camera frame and their 2D coordinates
    for (i = 0 ; i < 4 ; i++)
      point[i].track(cMo) ;

    // Create the current visual features from the projected points
    vpFeaturePoint p[4] ;
    for (i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i], point[i]) ;  // retrieve x, y and Z from the vpPoint structure

    // Set the desired positions of the points (features s*)
    vpFeaturePoint pd[4] ;

    pd[0].buildFrom(-0.1,-0.1, 1) ;
    pd[1].buildFrom( 0.1,-0.1, 1) ;
    pd[2].buildFrom( 0.1, 0.1, 1) ;
    pd[3].buildFrom(-0.1, 0.1, 1) ;

    // define the task
    // - we want an eye-in-hand control law
    // - articular velocities are computed
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::MEAN) ;
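    // MEAN: use the average (L(s) + L(s*))/2 of the interaction matrices at the
    // current and desired features, which behaves better for large displacements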

    // Set the position of the camera in the end-effector frame
    vpHomogeneousMatrix cMe ;
    vpVelocityTwistMatrix cVe(cMe) ;
    task.set_cVe(cVe) ;

    // Set the Jacobian (expressed in the end-effector frame)
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;

    // We want to see a point on a point: each current point feature must reach its desired one
    for (i = 0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // set the gain
    task.setLambda(1) ;

    // Display task information
    task.print() ;

    unsigned int iter=0 ;
    // loop
    while(iter++<1500)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // Set the Jacobian (expressed in the end-effector frame)
      // since q is modified, eJe is modified too
      robot.get_eJe(eJe) ;
      task.set_eJe(eJe) ;

      // get the robot position
      robot.getPosition(wMc) ;
      // Compute the position of the camera wrt the object frame
      cMo = wMc.inverse() * wMo;

      // Update the point positions and the corresponding features
      for (i = 0 ; i < 4 ; i++)
      {
        point[i].track(cMo) ;
        // retrieve x, y and Z from the vpPoint structure
        vpFeatureBuilder::create(p[i],point[i]) ;
      }
      // Since the vpServo::MEAN interaction matrix is used, we also need to update the desired features at each iteration
      pd[0].buildFrom(-0.1,-0.1, 1) ;
      pd[1].buildFrom( 0.1,-0.1, 1) ;
      pd[2].buildFrom( 0.1, 0.1, 1) ;
      pd[3].buildFrom(-0.1, 0.1, 1) ;

      // Compute the control law
      v = task.computeControlLaw() ;

      // Send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() << std::endl;
    }

    // Display task information
    task.print() ;
    task.kill();
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Caught a ViSP exception: " << e << std::endl;
    return 1;
  }
}
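A small, hypothetical addition to the loop above (not in the original example): plot ||s - s*|| with ViSP's vpPlot class instead of only printing it, assuming the build has a display available. Sketch:

#include <visp/vpPlot.h>

vpPlot plot(1);        // one graph window
plot.initGraph(0, 1);  // one curve on graph 0
plot.setTitle(0, "Feature error");
// inside the servo loop, after computeControlLaw():
plot.plot(0, 0, iter, (task.getError()).sumSquare());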
int
main(int argc, const char ** argv)
{
  try {
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit (-1);
    }

    // We open two displays, one for the internal camera view, the other one for
    // the external view, using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
    vpDisplayX displayInt;
    vpDisplayX displayExt;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK displayInt;
    vpDisplayGTK displayExt;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt;
    vpDisplayGDI displayExt;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt;
    vpDisplayOpenCV displayExt;
#endif

    // Create the images used for the internal and external views

    vpImage<unsigned char> Iint(300, 300, 0) ;
    vpImage<unsigned char> Iext(300, 300, 0) ;

    if (opt_display) {
      displayInt.init(Iint,0,0, "Internal view") ;
      displayExt.init(Iext,330,0, "External view") ;
    }
    vpProjectionDisplay externalview ;

    double px = 500, py = 500 ;
    double u0 = 150, v0 = 160 ;

    vpCameraParameters cam(px,py,u0,v0);
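    // px, py: focal length expressed in pixels; (u0, v0): principal point,
    // close to the center of the 300 x 300 images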

    int i ;
    vpServo task ;
    vpSimulatorCamera robot ;

    std::cout << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, articular velocity are computed"
              << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo 4 points " << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    // Set the initial camera location
    vpHomogeneousMatrix cMo(-0.1,-0.1,1,
                            vpMath::rad(40), vpMath::rad(10), vpMath::rad(60)) ;

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc) ;
    wMo = wMc * cMo;

    vpHomogeneousMatrix cextMo(0,0,2,
                               0,0,0) ; // pose of the external observer camera

    // Set the point coordinates in the object frame
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-0.1,-0.1,0) ;
    point[1].setWorldCoordinates(0.1,-0.1,0) ;
    point[2].setWorldCoordinates(0.1,0.1,0) ;
    point[3].setWorldCoordinates(-0.1,0.1,0) ;

    for (i = 0 ; i < 4 ; i++)
      externalview.insert(point[i]) ;

    // Compute the point coordinates in the camera frame and their 2D coordinates
    for (i = 0 ; i < 4 ; i++)
      point[i].track(cMo) ;

    // Create the current visual features from the projected points
    vpFeaturePoint p[4] ;
    for (i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i],point[i]) ;  // retrieve x, y and Z from the vpPoint structure

    // Set the desired position of the feature points s*
    vpFeaturePoint pd[4] ;

    pd[0].buildFrom(-0.1,-0.1, 1) ;
    pd[1].buildFrom( 0.1,-0.1, 1) ;
    pd[2].buildFrom( 0.1, 0.1, 1) ;
    pd[3].buildFrom(-0.1, 0.1, 1) ;

    // define the task
    // - we want an eye-in-hand control law
    // - articular velocities are computed
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::MEAN) ;

    // Set the position of the camera in the end-effector frame
    vpHomogeneousMatrix cMe ;
    vpVelocityTwistMatrix cVe(cMe) ;
    task.set_cVe(cVe) ;

    // Set the Jacobian (expressed in the end-effector frame)
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;

    // We want to see a point on a point: each current point feature must reach its desired one
    for (i = 0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // set the gain
    task.setLambda(1) ;

    // Display task information " ) ;
    task.print() ;

    unsigned int iter=0 ;
    // loop
    while(iter++<200)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // Set the Jacobian (expressed in the end-effector frame)
      // since q is modified, eJe is modified too
      robot.get_eJe(eJe) ;
      task.set_eJe(eJe) ;

      // get the robot position
      robot.getPosition(wMc) ;
      // Compute the position of the camera wrt the object frame
      cMo = wMc.inverse() * wMo;

      // Update the point positions and the corresponding features
      for (i = 0 ; i < 4 ; i++)
      {
        point[i].track(cMo) ;
        // retrieve x, y and Z from the vpPoint structure
        vpFeatureBuilder::create(p[i],point[i]) ;
      }
      // Since the vpServo::MEAN interaction matrix is used, we also need to update the desired features at each iteration
      pd[0].buildFrom(-0.1,-0.1, 1) ;
      pd[1].buildFrom( 0.1,-0.1, 1) ;
      pd[2].buildFrom( 0.1, 0.1, 1) ;
      pd[3].buildFrom(-0.1, 0.1, 1) ;

      if (opt_display) {
        vpDisplay::display(Iint) ;
        vpDisplay::display(Iext) ;
        vpServoDisplay::display(task,cam,Iint) ;
        externalview.display(Iext,cextMo, cMo, cam, vpColor::green) ;
        vpDisplay::flush(Iint);
        vpDisplay::flush(Iext);
      }

      // compute the control law
      v = task.computeControlLaw() ;

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
    }

    // Display task information
    task.print() ;
    task.kill();

    std::cout <<"Final robot position with respect to the object frame:\n";
    cMo.print();

    if (opt_display && opt_click_allowed) {
      // suppressed for automated tests
      std::cout << "\n\nClick in the internal view window to end..." << std::endl;
      vpDisplay::getClick(Iint) ;
    }
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Caught a ViSP exception: " << e << std::endl;
    return 1;
  }
}