int
main(int argc, const char ** argv)
{
  try {
    bool opt_display = true;
    bool opt_click_allowed = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit (-1);
    }

    vpImage<unsigned char> I(512,512,0) ;

    // We open a window using either X11, GTK, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX display;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV display;
#endif

    if (opt_display) {
      try{
        // Display size is automatically defined by the image (I) size
        display.init(I, 100, 100,"Camera view...") ;
        // Display the image
        // The image class has a member that specifies a pointer toward
        // the display that has been initialized in the display declaration,
        // therefore it is no longer necessary to make a reference to the
        // display variable.
        vpDisplay::display(I) ;
        vpDisplay::flush(I) ;
      }
      catch(...)
      {
        vpERROR_TRACE("Error while displaying the image") ;
        exit(-1);
      }
    }

    double px, py ; px = py = 600 ;
    double u0, v0 ; u0 = v0 = 256 ;

    vpCameraParameters cam(px,py,u0,v0);
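    // With this perspective model (no distortion), a pixel (u,v) corresponds
    // to the normalized coordinates x = (u - u0)/px and y = (v - v0)/py in
    // which the visual features below are expressed.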

    vpServo task ;
    vpSimulatorCamera robot ;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(0,0,1,
                            vpMath::rad(0),  vpMath::rad(80),  vpMath::rad(30))   ;
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc) ;
    wMo = wMc * cMo; // Compute the position of the object in the world frame

    vpHomogeneousMatrix cMod(-0.1,-0.1,0.7,
                             vpMath::rad(40),  vpMath::rad(10),  vpMath::rad(30))   ;

    // sets the circle coordinates in the world frame
    vpCircle circle ;
    circle.setWorldCoordinates(0,0,1,
                               0,0,0,
                               0.1) ;
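    // Here (0,0,1) is the normal of the plane supporting the circle, (0,0,0)
    // its center in the object frame and 0.1 its radius: a circle of radius
    // 0.1 m lying in the plane Z = 0.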

    // sets the desired position of the visual feature
    vpFeatureEllipse pd ;
    circle.track(cMod) ;
    vpFeatureBuilder::create(pd,circle)  ;

    // project : computes the circle coordinates in the camera frame and its 2D coordinates
    // sets the current position of the visual feature
    vpFeatureEllipse p ;
    circle.track(cMo) ;
    vpFeatureBuilder::create(p,circle)  ;

    // define the task
    // - we want an eye-in-hand control law
    // - robot is controlled in the camera frame
    task.setServo(vpServo::EYEINHAND_CAMERA) ;
    task.setInteractionMatrixType(vpServo::DESIRED) ;
    // - we want to see a circle on a circle
    task.addFeature(p,pd) ;
    // - set the gain
    task.setLambda(1) ;
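    // With these settings the computed velocity follows the classical visual
    // servoing control law v = -lambda * L^+ (s - s*), where the interaction
    // matrix L is evaluated at the desired feature position s*.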

    // Display task information
    task.print() ;

    unsigned int iter=0 ;
    // loop
    while(iter++ < 200)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // get the robot position
      robot.getPosition(wMc) ;
      // Compute the position of the camera wrt the object frame
      cMo = wMc.inverse() * wMo;

      // new circle position
      // retrieve x,y and Z of the vpCircle structure
      circle.track(cMo) ;
      vpFeatureBuilder::create(p,circle);
      circle.print() ;
      p.print() ;

      if (opt_display) {
        vpDisplay::display(I) ;
        vpServoDisplay::display(task,cam,I) ;
        vpDisplay::flush(I) ;
      }

      // compute the control law
      v = task.computeControlLaw() ;
      std::cout << "task rank: " << task.getTaskRank() <<std::endl ;
      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
    }

    // Display task information
    task.print() ;
    task.kill();

    if (opt_display && opt_click_allowed) {
      std::cout << "Click in the camera view window to end..." << std::endl;
      vpDisplay::getClick(I) ;
    }
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Caught a ViSP exception: " << e << std::endl;
    return 1;
  }
}
int
main(int argc, const char ** argv)
{
  try {
    bool opt_display = true;
    bool opt_click_allowed = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit (-1);
    }

    vpImage<unsigned char> I(512,512,0) ;

    // We open a window using either X11, GTK, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX display;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV display;
#endif

    if (opt_display) {
      try{
        // Display size is automatically defined by the image (I) size
        display.init(I, 100, 100,"Camera view...") ;
        // Display the image
        // The image class has a member that specifies a pointer toward
        // the display that has been initialized in the display declaration,
        // therefore it is no longer necessary to make a reference to the
        // display variable.
        vpDisplay::display(I) ;
        vpDisplay::flush(I) ;
      }
      catch(...)
      {
        vpERROR_TRACE("Error while displaying the image") ;
        exit(-1);
      }
    }

    // Set the camera parameters
    double px, py ; px = py = 600 ;
    double u0, v0 ; u0 = v0 = 256 ;

    vpCameraParameters cam(px,py,u0,v0);

    vpServo task ;
    vpSimulatorCamera robot ;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(0.2,0.2,1,
                            vpMath::rad(45),  vpMath::rad(45),  vpMath::rad(125));

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc) ;
    wMo = wMc * cMo;

    // sets the final camera location (for simulation purpose)
    vpHomogeneousMatrix cMod(0,0,1,
                             vpMath::rad(0),  vpMath::rad(0),  vpMath::rad(0));


    int nbline = 4;

    // sets the line coordinates (2 planes) in the world frame
    vpLine line[4] ;
    line[0].setWorldCoordinates(1,0,0,0.05,0,0,1,0);
    line[1].setWorldCoordinates(0,1,0,0.05,0,0,1,0);
    line[2].setWorldCoordinates(1,0,0,-0.05,0,0,1,0);
    line[3].setWorldCoordinates(0,1,0,-0.05,0,0,1,0);
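    // Each line is the intersection of the two planes given by
    // (A1,B1,C1,D1) and (A2,B2,C2,D2), with Ai X + Bi Y + Ci Z + Di = 0:
    // here the four edges of a 10 cm square lying in the plane Z = 0 of the
    // object frame.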

    vpFeatureLine ld[4] ;
    vpFeatureLine l[4] ;

    // sets the desired position of the visual feature
    for(int i = 0; i < nbline; i++)
    {
      line[i].track(cMod) ;
      line[i].print() ;

      vpFeatureBuilder::create(ld[i],line[i])  ;
    }

    // computes the line coordinates in the camera frame and their 2D coordinates
    // sets the current position of the visual feature
    for(int i = 0; i < nbline; i++)
    {
      line[i].track(cMo) ;
      line[i].print() ;

      vpFeatureBuilder::create(l[i],line[i])  ;
      l[i].print() ;
    }

    // define the task
    // - we want an eye-in-hand control law
    // - robot is controlled in the camera frame
    task.setServo(vpServo::EYEINHAND_CAMERA) ;
    task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);
    // It could also be interesting to test the following configurations:
    //task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
    //task.setInteractionMatrixType(vpServo::MEAN, vpServo::PSEUDO_INVERSE);

    // we want to see four lines on four lines
    for(int i = 0; i < nbline; i++)
      task.addFeature(l[i],ld[i]) ;

    vpDisplay::display(I) ;
    vpServoDisplay::display(task,cam,I) ;
    vpDisplay::flush(I) ;

    // set the gain
    task.setLambda(1) ;

    // Display task information
    task.print() ;

    if (opt_display && opt_click_allowed) {
      std::cout << "\n\nClick in the camera view window to start..." << std::endl;
      vpDisplay::getClick(I) ;
    }

    unsigned int iter=0 ;
    // loop
    while(iter++<200)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // get the robot position
      robot.getPosition(wMc) ;
      // Compute the position of the camera wrt the object frame
      cMo = wMc.inverse() * wMo;

      // new line position: retrieve x,y and Z of the vpLine structure
      for(int i = 0; i < nbline; i++)
      {
        line[i].track(cMo) ;
        vpFeatureBuilder::create(l[i],line[i]);
      }

      if (opt_display) {
        vpDisplay::display(I) ;
        vpServoDisplay::display(task,cam,I) ;
        vpDisplay::flush(I) ;
      }

      // compute the control law
      v = task.computeControlLaw() ;

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ; ;

    }

    if (opt_display && opt_click_allowed) {
      std::cout << "\nClick in the camera view window to end..." << std::endl;
      vpDisplay::getClick(I) ;
    }

    // Display task information
    task.print() ;
    task.kill();
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Caught a ViSP exception: " << e << std::endl;
    return 1;
  }
}
int
main(int argc, const char ** argv)
{
  bool opt_display = true;
  bool opt_click_allowed = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit (-1);
  }

  vpImage<unsigned char> Iint(512,512,0) ;
  vpImage<unsigned char> Iext(512,512,0) ;

  // We open a window using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX displayInt;
  vpDisplayX displayExt;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK displayInt;
  vpDisplayGTK displayExt;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI displayInt;
  vpDisplayGDI displayExt;
#endif

  if (opt_display) {
    try{
      // Display size is automatically defined by the image (Iint) and (Iext) size
      displayInt.init(Iint, 100, 100,"Internal view") ;
      displayExt.init(Iext, (int)(130+Iint.getWidth()), 100, "External view") ;
      // Display the image
      // The image class has a member that specifies a pointer toward
      // the display that has been initialized in the display declaration,
      // therefore it is no longer necessary to make a reference to the
      // display variable.
      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      vpDisplay::flush(Iint) ;
      vpDisplay::flush(Iext) ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error while displaying the image") ;
      exit(-1);
    }
  }

  vpProjectionDisplay externalview ;

  //Set the camera parameters
  double px, py ; px = py = 600 ;
  double u0, v0 ; u0 = v0 = 256 ;

  vpCameraParameters cam(px,py,u0,v0);

  vpServo task ;
  vpSimulatorCamera robot ;

  // sets the initial camera location
  vpHomogeneousMatrix cMo(-0.2,0.1,2,
                          vpMath::rad(5),  vpMath::rad(5),  vpMath::rad(20));

  vpHomogeneousMatrix wMc, wMo;
  robot.getPosition(wMc) ;
  wMo = wMc * cMo; // Compute the position of the object in the world frame

  // sets the final camera location (for simulation purpose)
  vpHomogeneousMatrix cMod(0,0,1,
                           vpMath::rad(0),  vpMath::rad(0),  vpMath::rad(0));

  // sets the cylinder coordinates in the world frame
  vpCylinder cylinder(0,1,0,  // direction
                      0,0,0,  // point of the axis
                      0.1) ;  // radius

  // Insert the cylinder in the external view so it can be projected and drawn
  externalview.insert(cylinder) ;

  // sets the desired position of the visual feature
  cylinder.track(cMod) ;
  cylinder.print() ;

  // Build the desired line features from the cylinder, in particular from its parameters in the image frame
  vpFeatureLine ld[2] ;
  int i ;
  for(i=0 ; i < 2 ; i++)
    vpFeatureBuilder::create(ld[i],cylinder,i)  ;
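  // The third argument of vpFeatureBuilder::create() selects which of the two
  // limb (occluding contour) lines of the cylinder the feature is built from.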

  // computes the cylinder coordinates in the camera frame and its 2D coordinates
  // sets the current position of the visual feature
  cylinder.track(cMo) ;
  cylinder.print() ;

  // Build the current line features from the cylinder, in particular from its parameters in the image frame
  vpFeatureLine l[2] ;
  for(i=0 ; i < 2 ; i++)
  {
    vpFeatureBuilder::create(l[i],cylinder,i)  ;
    l[i].print() ;
  }

  // define the task
  // - we want an eye-in-hand control law
  // - robot is controlled in the camera frame
  task.setServo(vpServo::EYEINHAND_CAMERA) ;
  task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE) ;
  // It can also be interesting to test these possibilities:
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::MEAN, vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::DESIRED, vpServo::TRANSPOSE) ;
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::TRANSPOSE) ;

  // we want to see 2 lines on 2 lines
  task.addFeature(l[0],ld[0]) ;
  task.addFeature(l[1],ld[1]) ;

  // Set the point of view of the external view
  vpHomogeneousMatrix cextMo(0,0,6,
                             vpMath::rad(40),  vpMath::rad(10),  vpMath::rad(60))   ;

  // Display the initial scene
  vpServoDisplay::display(task,cam,Iint) ;
  externalview.display(Iext,cextMo, cMo, cam, vpColor::red);
  vpDisplay::flush(Iint) ;
  vpDisplay::flush(Iext) ;

  // Display task information
  task.print() ;

  if (opt_display && opt_click_allowed) {
    std::cout << "\n\nClick in the internal camera view window to start..." << std::endl;
    vpDisplay::getClick(Iint) ;
  }

  // set the gain
  task.setLambda(1) ;

  // Display task information
  task.print() ;

  unsigned int iter=0 ;
  // The first loop is needed to reach the desired position
  do
  {
    std::cout << "---------------------------------------------" << iter++ <<std::endl ;
    vpColVector v ;

    // get the robot position
    robot.getPosition(wMc) ;
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    // new line position
    // retrieve x,y and Z of the vpLine structure
    // Compute the parameters of the cylinder in the camera frame and in the image frame
    cylinder.track(cMo) ;

    // Build the current line features from the cylinder, in particular from its parameters in the image frame
    for(i=0 ; i < 2 ; i++)
    {
      vpFeatureBuilder::create(l[i],cylinder,i)  ;
    }

    // Display the current scene
    if (opt_display) {
      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      vpServoDisplay::display(task,cam,Iint) ;
      externalview.display(Iext,cextMo, cMo, cam, vpColor::red);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }

    // compute the control law
    v = task.computeControlLaw() ;

    // send the camera velocity to the controller
    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

    std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
  }
  while(( task.getError() ).sumSquare() >  1e-9) ;


  // The second loop computes the control law while taking a secondary task into account.
  // In this example the secondary task is cut into four steps.
  // The first one imposes a motion of the robot along the x axis of the object frame with a velocity of 0.5.
  // The second one imposes a motion of the robot along the y axis of the object frame with a velocity of 0.5.
  // The third one imposes a motion of the robot along the x axis of the object frame with a velocity of -0.5.
  // The last one imposes a motion of the robot along the y axis of the object frame with a velocity of -0.5.
  // Each step lasts 200 iterations.
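  // A rough sketch of what vpServo::secondaryTask() computes (assuming the
  // classical redundancy framework): the requested motion e is projected onto
  // the null space of the primary task interaction matrix L1, so that
  //   v = -lambda * L1^+ * (s - s*) + (I - L1^+ L1) * e
  // and the secondary motion does not disturb the regulation of the two line
  // features. The scaling by 'rapport' below renormalizes the projected vector
  // so that its useful component keeps the requested magnitude 'vitesse'.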
  vpColVector e1(6) ; e1 = 0 ;
  vpColVector e2(6) ; e2 = 0 ;
  vpColVector proj_e1 ;
  vpColVector proj_e2 ;
  iter = 0;
  double rapport = 0;       // scale factor applied to the projected secondary task
  double vitesse = 0.5;     // requested velocity along the selected axis (m/s)
  unsigned int tempo = 800; // total number of iterations (four steps of 200)

  while(iter < tempo)
  {
    vpColVector v ;

    robot.getPosition(wMc) ;
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    cylinder.track(cMo) ;

    for(i=0 ; i < 2 ; i++)
    {
      vpFeatureBuilder::create(l[i],cylinder,i)  ;
    }

    if (opt_display) 
    {
      vpDisplay::display(Iint) ;
      vpDisplay::display(Iext) ;
      vpServoDisplay::display(task,cam,Iint) ;
      externalview.display(Iext,cextMo, cMo, cam, vpColor::red);
      vpDisplay::flush(Iint);
      vpDisplay::flush(Iext);
    }

    v = task.computeControlLaw() ;

    if ( iter%tempo < 200  /*&&  iter%tempo >= 0*/)
    {
      e2 = 0;
      e1[0] = fabs(vitesse)  ;
      proj_e1 = task.secondaryTask(e1);
      rapport = vitesse/proj_e1[0];
      proj_e1 *= rapport ;
      v += proj_e1 ;
    }

    if ( iter%tempo < 400 &&  iter%tempo >= 200)
    {
      e1 = 0;
      e2[1] = fabs(vitesse)  ;
      proj_e2 = task.secondaryTask(e2);
      rapport = vitesse/proj_e2[1];
      proj_e2 *= rapport ;
      v += proj_e2 ;
    }

    if ( iter%tempo < 600 &&  iter%tempo >= 400)
    {
      e2 = 0;
      e1[0] = -fabs(vitesse)  ;
      proj_e1 = task.secondaryTask(e1);
      rapport = -vitesse/proj_e1[0];
      proj_e1 *= rapport ;
      v += proj_e1 ;
    }

    if ( iter%tempo < 800 &&  iter%tempo >= 600)
    {
      e1 = 0;
      e2[1] = -fabs(vitesse)  ;
      proj_e2 = task.secondaryTask(e2);
      rapport = -vitesse/proj_e2[1];
      proj_e2 *= rapport ;
      v += proj_e2 ;
    }

    robot.setVelocity(vpRobot::CAMERA_FRAME, v);

    std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;

    iter++;
  }

  if (opt_display && opt_click_allowed) {
    std::cout << "\nClick in the internal camera view window to end..." << std::endl;
    vpDisplay::getClick(Iint) ;
  }

  // Display task information
  task.print() ;
  task.kill();
}
int
main()
{
  try
    {
      vpImage<unsigned char> I ;

      vp1394TwoGrabber g;
      g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
      g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
      g.open(I) ;

      g.acquire(I) ;


      vpDisplayX display(I,100,100,"testDisplayX.cpp ") ;
      vpTRACE(" ") ;

      vpDisplay::display(I) ;
      vpDisplay::flush(I);

      vpServo task ;

      vpRobotAfma6 robot ;
      //robot.move("zero.pos") ;

      vpCameraParameters cam ;
      // Update camera parameters
      robot.getCameraParameters (cam, I);


      std::cout << std::endl ;
      std::cout << "-------------------------------------------------------" << std::endl ;
      std::cout << " Test program for vpServo "  <<std::endl ;
      std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
      std::cout << " Simulation " << std::endl ;
      std::cout << " task : servo a line " << std::endl ;
      std::cout << "-------------------------------------------------------" << std::endl ;
      std::cout << std::endl ;

      const int nbline = 4 ;
      const int nbpoint = 4 ;


      vpTRACE("sets the desired position of the visual feature ") ;
      vpPoint pointd[nbpoint];  // position of the four corners
      vpPoint pointcd;          // position of the center of the square
      vpFeaturePoint pd;

      double L=0.05 ;
      pointd[0].setWorldCoordinates(L,-L, 0 ) ;
      pointd[1].setWorldCoordinates(L,L, 0 ) ;
      pointd[2].setWorldCoordinates(-L,L, 0 ) ;
      pointd[3].setWorldCoordinates(-L,-L, 0 ) ;

      // The coordinates in the object frame of the point used as a feature,
      // i.e. the center of the square
      pointcd.setWorldCoordinates(0, 0, 0 ) ;

      //The desired homogeneous matrix.
      vpHomogeneousMatrix cMod(0,0,0.4,0,0,vpMath::rad(10));

      pointd[0].project(cMod);
      pointd[1].project(cMod);
      pointd[2].project(cMod);
      pointd[3].project(cMod);

      pointcd.project(cMod);

      vpFeatureBuilder::create(pd,pointcd);

      vpTRACE("Initialization of the tracking") ;
      vpMeLine line[nbline] ;
      vpPoint point[nbpoint];
      int i ;

      vpMe me ;
      me.setRange(10) ;         // seek range on both sides of the reference pixel
      me.setPointsToTrack(100) ;
      me.setThreshold(50000) ;  // likelihood threshold used to validate a moving edge
      me.setSampleStep(10);     // distance in pixels between two successive samples

      //Initialize the tracking. Define the four lines to track
      for (i=0 ; i < nbline ; i++)
      {
        line[i].setMe(&me) ;

        line[i].initTracking(I) ;
        line[i].track(I) ;
      }

      // Compute the position of the four corners. The goal is to
      // compute the pose
      vpImagePoint ip;
      for (i=0 ; i < nbline ; i++)
      {
        double x=0, y=0;

        if (!vpMeLine::intersection (line[i%nbline], line[(i+1)%nbline], ip))
        {
          exit(-1);
        }

        vpPixelMeterConversion::convertPoint(cam, ip, x, y)  ;

        point[i].set_x(x) ;
        point[i].set_y(y) ;
      }

      //Compute the pose cMo
      vpPose pose ;
      pose.clearPoint() ;
      vpHomogeneousMatrix cMo ;

      point[0].setWorldCoordinates(L,-L, 0 ) ;
      point[1].setWorldCoordinates(L,L, 0 ) ;
      point[2].setWorldCoordinates(-L,L, 0 ) ;
      point[3].setWorldCoordinates(-L,-L, 0 ) ;

      for (i=0 ; i < nbline ; i++)
      {
        pose.addPoint(point[i]) ; // and added to the pose computation point list
      }

      pose.computePose(vpPose::LAGRANGE, cMo) ;
      pose.computePose(vpPose::VIRTUAL_VS, cMo);
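      // LAGRANGE provides a linear, closed-form initialization of the pose;
      // VIRTUAL_VS then refines it with a non-linear virtual visual servoing
      // minimization.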


      vpTRACE("sets the current position of the visual feature ") ;

      //The first features are the position in the camera frame x and y of the square center
      vpPoint pointc; //The current position of the center of the square
      double xc = (point[0].get_x()+point[2].get_x())/2;
      double yc = (point[0].get_y()+point[2].get_y())/2;
      pointc.set_x(xc);
      pointc.set_y(yc);
      vpFeaturePoint p;
      pointc.project(cMo);
      vpFeatureBuilder::create(p,pointc);

      //The second feature is the depth of the current square center relative to the depth of the desired square center.
      vpFeatureDepth logZ;
      logZ.buildFrom(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z()/pointcd.get_Z()));
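      // vpFeatureDepth regulates s = log(Z/Z*): its error vanishes when the
      // current depth of the square center reaches the desired depth.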

      //The last three features are the rotations thetau between the current pose and the desired pose.
      vpHomogeneousMatrix cdMc ;
      cdMc = cMod*cMo.inverse() ;
      vpFeatureThetaU tu(vpFeatureThetaU::cdRc);
      tu.buildFrom(cdMc) ;

      vpTRACE("define the task") ;
      vpTRACE("\t we want an eye-in-hand control law") ;
      vpTRACE("\t robot is controlled in the camera frame") ;
      task.setServo(vpServo::EYEINHAND_CAMERA) ;
      task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);

      vpTRACE("\t we want to see a point on a point..") ;
      std::cout << std::endl ;
      task.addFeature(p,pd) ;
      task.addFeature(logZ) ;
      task.addFeature(tu);


      vpTRACE("\t set the gain") ;
      task.setLambda(0.2) ;


      vpTRACE("Display task information " ) ;
      task.print() ;

      robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

      int iter=0 ;
      vpTRACE("\t loop") ;
      vpColVector v ;
      vpImage<vpRGBa> Ic ;
      double lambda_av = 0.05;
      double alpha = 0.05 ;
      double beta = 3 ;
      double erreur = 1;
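      // The gain computed in the loop below follows
      //   lambda = alpha * exp(-beta * ||s - s*||^2) + lambda_av,
      // i.e. about lambda_av when the error is large and about
      // alpha + lambda_av near convergence, which speeds up the end of the
      // servo while keeping large motions slow.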

      while(1)
      {
        std::cout << "---------------------------------------------" << iter << std::endl ;

        try {
          g.acquire(I) ;
          vpDisplay::display(I) ;

          pose.clearPoint() ;

          // Track the lines and find the current position of the corners
          for (i=0 ; i < nbline ; i++)
          {
            line[i].track(I) ;
            line[i].display(I, vpColor::green) ;

            double x=0, y=0;

            if (!vpMeLine::intersection(line[i%nbline], line[(i+1)%nbline], ip))
            {
              exit(-1);
            }

            vpPixelMeterConversion::convertPoint(cam, ip, x, y) ;

            point[i].set_x(x);
            point[i].set_y(y);

            pose.addPoint(point[i]) ;
          }

          // Compute the pose
          pose.computePose(vpPose::VIRTUAL_VS, cMo) ;

          // Update the two first features x and y (position of the square center)
          xc = (point[0].get_x()+point[2].get_x())/2;
          yc = (point[0].get_y()+point[2].get_y())/2;
          pointc.set_x(xc);
          pointc.set_y(yc);
          pointc.project(cMo);
          vpFeatureBuilder::create(p,pointc);

          // Print the current and the desired position of the center of the square
          // Print the desired position of the four corners
          p.display(cam, I, vpColor::green) ;
          pd.display(cam, I, vpColor::red) ;
          for (i = 0; i < nbpoint; i++) pointd[i].display(I, cam, vpColor::red);

          // Update the second feature
          logZ.buildFrom(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z()/pointcd.get_Z()));

          // Update the last three features
          cdMc = cMod*cMo.inverse() ;
          tu.buildFrom(cdMc) ;

          // Adaptive gain
          double gain ;
          if (alpha == 0) gain = lambda_av ;
          else            gain = alpha * exp(-beta * task.error.sumSquare()) + lambda_av ;

          task.setLambda(gain) ;

          v = task.computeControlLaw() ;

          vpDisplay::flush(I) ;
          std::cout << v.sumSquare() << std::endl ;
          if (iter==0) vpDisplay::getClick(I) ;
          if (v.sumSquare() > 0.5)
          {
            v = 0 ;
            robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
            robot.stopMotion() ;
            vpDisplay::getClick(I) ;
          }

          robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
        }
        catch(...)
        {
          v = 0 ;
          robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
          robot.stopMotion() ;
          exit(1) ;
        }

        vpTRACE("\t\t || s - s* || = %f ", task.error.sumSquare()) ;
        erreur = task.error.sumSquare();
        iter++;
      }

      vpTRACE("Display task information " ) ;
      task.print() ;
      task.kill();
    }
  catch (...)
    {
      vpERROR_TRACE(" Test failed") ;
      return 0;
    }
}
int
main(int argc, const char ** argv)
{
  bool opt_display = true;
  bool opt_click_allowed = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit (-1);
  }

  vpImage<unsigned char> I(512,512,0) ;

  // We open a window using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX display;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK display;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI display;
#endif

  if (opt_display) {
    try{
      // Display size is automatically defined by the image (I) size
      display.init(I, 100, 100,"Camera view...") ;
      // Display the image
      // The image class has a member that specifies a pointer toward
      // the display that has been initialized in the display declaration,
      // therefore it is no longer necessary to make a reference to the
      // display variable.
      vpDisplay::display(I) ;
      vpDisplay::flush(I) ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error while displaying the image") ;
      exit(-1);
    }
  }

  double px, py ; px = py = 600 ;
  double u0, v0 ; u0 = v0 = 256 ;

  vpCameraParameters cam(px,py,u0,v0);

  vpServo task ;
  vpSimulatorCamera robot ;

  // sets the initial camera location
  vpHomogeneousMatrix cMo(-0.2,0.1,1,
                          vpMath::rad(5),  vpMath::rad(5),  vpMath::rad(90));

  // Compute the position of the object in the world frame
  vpHomogeneousMatrix wMc, wMo;
  robot.getPosition(wMc) ;
  wMo = wMc * cMo;

  // sets the final camera location (for simulation purpose)
  vpHomogeneousMatrix cMod(0,0,1,
                           vpMath::rad(0),  vpMath::rad(0),  vpMath::rad(0));

  // sets the line coordinates (2 planes) in the world frame
  vpColVector plane1(4) ;
  vpColVector plane2(4) ;
  plane1[0] = 0;  // plane1: z = 0
  plane1[1] = 0;
  plane1[2] = 1;
  plane1[3] = 0;
  plane2[0] = 0;  // plane2: y = 0
  plane2[1] = 1;
  plane2[2] = 0;
  plane2[3] = 0;

  vpLine line ;
  line.setWorldCoordinates(plane1, plane2) ;
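  // The resulting line is the x axis of the object frame, i.e. the
  // intersection of the planes z = 0 and y = 0.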

  // sets the desired position of the visual feature
  line.track(cMod) ;
  line.print() ;

  vpFeatureLine ld ;
  vpFeatureBuilder::create(ld,line)  ;

  // computes the line coordinates in the camera frame and its 2D coordinates
  // sets the current position of the visual feature
  line.track(cMo) ;
  line.print() ;

  vpFeatureLine l ;
  vpFeatureBuilder::create(l,line)  ;
  l.print() ;

  // define the task
  // - we want an eye-in-hand control law
  // - robot is controlled in the camera frame
  task.setServo(vpServo::EYEINHAND_CAMERA) ;

  // we want to see a line on a line

  task.addFeature(l,ld) ;
  vpDisplay::display(I) ;
  vpServoDisplay::display(task,cam,I) ;
  vpDisplay::flush(I) ; 

  // set the gain
  task.setLambda(1) ;
  // Display task information
  task.print() ;

  if (opt_display && opt_click_allowed) {
    std::cout << "\n\nClick in the camera view window to start..." << std::endl;
    vpDisplay::getClick(I) ;
  }

  unsigned int iter=0 ;
  // loop
  while(iter++<200)
  {
    std::cout << "---------------------------------------------" << iter <<std::endl ;
    vpColVector v ;

    // get the robot position
    robot.getPosition(wMc) ;
    // Compute the position of the camera wrt the object frame
    cMo = wMc.inverse() * wMo;

    // new line position
    line.track(cMo) ;
    // retrieve x,y and Z of the vpLine structure
    vpFeatureBuilder::create(l,line);

    if (opt_display) {
      vpDisplay::display(I) ;
      vpServoDisplay::display(task,cam,I) ;
      vpDisplay::flush(I) ;
    }

    // compute the control law
    v = task.computeControlLaw() ;

    // send the camera velocity to the controller
    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

    std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
  }

  if (opt_display && opt_click_allowed) {
    std::cout << "\nClick in the camera view window to end..." << std::endl;
    vpDisplay::getClick(I) ;
  }

  // Display task information
  task.print() ;
  task.kill();
}
int
main(int argc, const char ** argv)
{
  bool opt_display = true;
  bool opt_click_allowed = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit (-1);
  }

  vpImage<unsigned char> I(512,512,255) ;

  // We open a window using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX display;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK display;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI display;
#endif

  if (opt_display) {
    try{
      // Display size is automatically defined by the image (I) size
      display.init(I, 100, 100,"Camera view...") ;
      // Display the image
      // The image class has a member that specifies a pointer toward
      // the display that has been initialized in the display declaration,
      // therefore it is no longer necessary to make a reference to the
      // display variable.
      vpDisplay::display(I) ;
      vpDisplay::flush(I) ;
    }
    catch(...)
    {
      vpERROR_TRACE("Error while displaying the image") ;
      exit(-1);
    }
  }

  double px, py ; px = py = 600 ;
  double u0, v0 ; u0 = v0 = 256 ;

  vpCameraParameters cam(px,py,u0,v0);

  vpServo task ;
  vpRobotCamera robot ;

  vpTRACE("sets the initial camera location " ) ;
  vpHomogeneousMatrix cMo(-0.2,0.1,2,
			  vpMath::rad(5),  vpMath::rad(5),  vpMath::rad(20));

  robot.setPosition(cMo) ;

  vpTRACE("sets the final camera location (for simulation purpose)" ) ;
  vpHomogeneousMatrix cMod(0,0,1,
			   vpMath::rad(-60),  vpMath::rad(0),  vpMath::rad(0));



  vpTRACE("sets the cylinder coordinates in the world frame "  ) ;
  vpCylinder cylinder(0,1,0,  // direction
		      0,0,0,  // point of the axis
		      0.1) ;  // radius

  vpTRACE("sets the desired position of the visual feature ") ;
  cylinder.track(cMod) ;
  cylinder.print() ;

  vpFeatureLine ld[2] ;
  int i ;
  for(i=0 ; i < 2 ; i++)
    vpFeatureBuilder::create(ld[i],cylinder,i)  ;
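  // The index i selects one of the two limb lines of the cylinder in the
  // image (its two apparent contour lines).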


  vpTRACE("project : computes  the cylinder coordinates in the camera frame and its 2D coordinates"  ) ;
  vpTRACE("sets the current position of the visual feature ") ;
  cylinder.track(cMo) ;
  cylinder.print() ;

  vpFeatureLine l[2] ;
  for(i=0 ; i < 2 ; i++)
    {
      vpFeatureBuilder::create(l[i],cylinder,i)  ;
      l[i].print() ;
    }

  vpTRACE("define the task") ;
  vpTRACE("\t we want an eye-in-hand control law") ;
  vpTRACE("\t robot is controlled in the camera frame") ;
  task.setServo(vpServo::EYEINHAND_CAMERA) ;
  task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE) ;
  // It can also be interesting to test these possibilities:
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::MEAN, vpServo::PSEUDO_INVERSE) ;
  // task.setInteractionMatrixType(vpServo::DESIRED, vpServo::TRANSPOSE) ;
  // task.setInteractionMatrixType(vpServo::CURRENT, vpServo::TRANSPOSE) ;

  vpTRACE("\t we want to see  2 lines on 2 lines.") ;

  task.addFeature(l[0],ld[0]) ;
  task.addFeature(l[1],ld[1]) ;

  vpServoDisplay::display(task,cam,I) ;
  vpDisplay::flush(I) ;

  vpTRACE("Display task information " ) ;
  task.print() ;

  if (opt_display && opt_click_allowed) {
    std::cout << "\n\nClick in the camera view window to start..." << std::endl;
    vpDisplay::getClick(I) ;
  }

  vpTRACE("\t set the gain") ;
  task.setLambda(1) ;


  vpTRACE("Display task information " ) ;
  task.print() ;

  int iter=0 ;
  vpTRACE("\t loop") ;
  do
    {
      std::cout << "---------------------------------------------" << iter++ <<std::endl ;
      vpColVector v ;

      if (iter==1) vpTRACE("\t\t get the robot position ") ;
      robot.getPosition(cMo) ;
      if (iter==1) vpTRACE("\t\t new line position ") ;
      //retrieve x,y and Z of the vpLine structure

      cylinder.track(cMo) ;
      //    cylinder.print() ;
      for(i=0 ; i < 2 ; i++)
      {
        vpFeatureBuilder::create(l[i],cylinder,i) ;
        // l[i].print() ;
      }

      if (opt_display) {
        vpDisplay::display(I) ;
        vpServoDisplay::display(task,cam,I) ;
        vpDisplay::flush(I) ;
      }

      if (iter==1) vpTRACE("\t\t compute the control law ") ;
      v = task.computeControlLaw() ;

      if (iter==1) vpTRACE("\t\t send the camera velocity to the controller ") ;
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      vpTRACE("\t\t || s - s* || ") ;
      std::cout << task.error.sumSquare() << std::endl ;

      //   vpDisplay::getClick(I) ;
    }
  while(task.error.sumSquare() >  1e-9) ;

  if (opt_display && opt_click_allowed) {
    std::cout << "\nClick in the camera view window to end..." << std::endl;
    vpDisplay::getClick(I) ;
  }

  vpTRACE("Display task information " ) ;
  task.print() ;
  task.kill();
}