Example #1
int
main()
{
  vpTRACE("DirectShow is not available...") ;
}
Example #2
int
main()
{
  vpTRACE("OpenCV is not available...") ;
}
Example #3
int main()
{
  try {
    vpHomogeneousMatrix cMo ;
    cMo[0][3] = 0.1 ;
    cMo[1][3] = 0.2 ;
    cMo[2][3] = 2 ;

    vpPoint point ;
    vpTRACE("set point coordinates in the world  frame ") ;
    point.setWorldCoordinates(0,0,0) ;

    std::cout <<"------------------------------------------------------"<<std::endl ;
    vpTRACE("test the projection ") ;
    point.track(cMo) ;

    vpTRACE("coordinates in the world frame ") ;
    std::cout << point.oP.t() << std::endl ;
    vpTRACE("coordinates in the camera frame  ") ;
    std::cout << point.cP.t() << std::endl ;

    vpTRACE("2D coordinates ") ;
    std::cout<< point.get_x() << "  " << point.get_y() << std::endl ;

    std::cout <<"------------------------------------------------------"<<std::endl ;
    vpTRACE("test the interaction matrix ") ;

    vpFeaturePoint p ;
    vpFeatureBuilder::create(p,point) ;

    vpMatrix L ;
    L = p.interaction() ;
    std::cout << L << std::endl ;

    vpTRACE("test the interaction matrix select") ;
    vpTRACE("\t only X") ;
    L = p.interaction(vpFeaturePoint::selectX()) ;
    std::cout << L << std::endl ;

    vpTRACE("\t only Y") ;
    L = p.interaction(vpFeaturePoint::selectY()) ;
    std::cout << L << std::endl ;

    vpTRACE("\t X & Y") ;
    L = p.interaction(vpFeaturePoint::selectX() |
                      vpFeaturePoint::selectY()) ;
    std::cout << L << std::endl ;

    vpTRACE("\t selectAll") ;
    L = p.interaction(vpFeaturePoint::selectAll() ) ;
    std::cout << L << std::endl ;

    std::cout <<"------------------------------------------------------"<<std::endl ;
    vpTRACE("test the error ") ;

    try{
      vpFeaturePoint pd ;
      pd.set_x(0) ;
      pd.set_y(0) ;

      pd.print() ; std::cout << std::endl ;
      vpColVector e ;
      e = p.error(pd) ;
      std::cout << e << std::endl ;

      vpTRACE("test the interaction matrix select") ;
      vpTRACE("\t only X") ;
      e = p.error(pd,vpFeaturePoint::selectX()) ;
      std::cout << e << std::endl ;

      vpTRACE("\t only Y") ;
      e = p.error(pd,vpFeaturePoint::selectY()) ;
      std::cout << e << std::endl ;

      vpTRACE("\t X & Y") ;
      e = p.error(pd,vpFeaturePoint::selectX() | vpFeaturePoint::selectY()) ;
      std::cout << e << std::endl ;

      vpTRACE("\t selectAll") ;
      e = p.error(pd,vpFeaturePoint::selectAll() ) ;
      std::cout << e << std::endl ;
    }
    catch(vpFeatureException &me){ std::cout << me << std::endl ; }
    catch(vpException &me){ std::cout << me << std::endl ; }
    std::cout <<"------------------------------------------------------"<<std::endl ;
    vpTRACE("test the  dimension") ;
    unsigned int dim ;
    dim = p.getDimension() ;
    std::cout << "Dimension = " << dim << std::endl ;

    vpTRACE("test the dimension with  select") ;
    vpTRACE("\t only X") ;
    dim = p.getDimension(vpFeaturePoint::selectX()) ;
    std::cout << "Dimension = " << dim << std::endl ;

    vpTRACE("\t only Y") ;
    dim = p.getDimension(vpFeaturePoint::selectY()) ;
    std::cout << "Dimension = " << dim << std::endl ;

    vpTRACE("\t X & Y") ;
    dim = p.getDimension(vpFeaturePoint::selectX() | vpFeaturePoint::selectY()) ;
    std::cout << "Dimension = " << dim << std::endl ;

    vpTRACE("\t selectAll") ;
    dim = p.getDimension(vpFeaturePoint::selectAll() ) ;
    std::cout << "Dimension = " << dim << std::endl ;
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return 1;
  }
}
Example #4
/*!
  Compute and return the interaction matrix \f$ L \f$ associated to a
  subset of the possible 2D image point features with polar
  coordinates \f$(\rho,\theta)\f$.

  \f[
  L = \left[
  \begin{array}{l}
  L_{\rho} \\
  \; \\
  L_{\theta}\\
  \end{array}
  \right]
  =
  \left[
  \begin{array}{cccccc}
  \frac{-\cos \theta}{Z} & \frac{-\sin \theta}{Z}  &  \frac{\rho}{Z} & (1+\rho^2)\sin\theta  & -(1+\rho^2)\cos\theta &  0 \\
  \; \\
   \frac{\sin\theta}{\rho Z} & \frac{-\cos\theta}{\rho Z} &  0 &  \cos\theta /\rho &  \sin\theta/\rho & -1 \\
  \end{array}
  \right]
  \f]

  where \f$Z\f$ is the 3D depth of the considered point.

  \param select : Selection of a subset of the possible polar
  point coordinate features.
  - To compute the interaction matrix for both subset features
    \f$(\rho,\theta)\f$ use vpBasicFeature::FEATURE_ALL. In
    that case the dimension of the interaction matrix is \f$ [2 \times
    6] \f$.
  - To compute the interaction matrix for only one of the subset
    features (\f$\rho\f$ or \f$\theta\f$) use the corresponding function
    selectRho() or selectTheta(). In that case the returned
    interaction matrix has dimension \f$ [1 \times 6] \f$.

  \return The interaction matrix computed from the 2D point
  polar coordinate features.

  \exception vpFeatureException::badInitializationError : If the point
  is behind the camera \f$(Z < 0)\f$, or if the 3D depth is null \f$(Z
  = 0)\f$, or if the \f$\rho\f$ polar coordinate of the point is null.

  The code below shows how to compute the interaction matrix associated to 
  the visual feature \f$s = (\rho,\theta)\f$.
  \code
  vpFeaturePointPolar s;
  double rho   = 0.3;
  double theta = M_PI;
  double Z     = 1;
  // Creation of the current feature s
  s.buildFrom(rho, theta, Z);
  // Build the interaction matrix L_s
  vpMatrix L = s.interaction();
  \endcode

  The interaction matrix could also be built by:
  \code
  vpMatrix L = s.interaction( vpBasicFeature::FEATURE_ALL );
  \endcode

  In both cases, L is a 2 by 6 matrix. The first line corresponds to
  the \f$\rho\f$ visual feature while the second one to the
  \f$\theta\f$ visual feature.

  It is also possible to build the interaction matrix associated to
  one of the possible features. The code below shows how to consider
  only the \f$\theta\f$ component.

  \code
  vpMatrix L_theta = s.interaction( vpFeaturePointPolar::selectTheta() );
  \endcode

  In that case, L_theta is a 1 by 6 matrix.
*/
vpMatrix
vpFeaturePointPolar::interaction(const unsigned int select)
{
  vpMatrix L ;

  L.resize(0,6) ;

  if (deallocate == vpBasicFeature::user)
  {
    for (unsigned int i = 0; i < nbParameters; i++)
    {
      if (flags[i] == false)
      {
        switch(i){
        case 0:
          vpTRACE("Warning !!!  The interaction matrix is computed but rho was not set yet");
        break;
        case 1:
          vpTRACE("Warning !!!  The interaction matrix is computed but theta was not set yet");
        break;
        case 2:
          vpTRACE("Warning !!!  The interaction matrix is computed but Z was not set yet");
        break;
        default:
          vpTRACE("Problem during the reading of the variable flags");
        }
      }
    }
    resetFlags();
  }

  double rho   = get_rho() ;
  double theta = get_theta() ;
  double Z_    = get_Z() ;

  double c_ = cos(theta);
  double s_ = sin(theta);

  double rho2 = rho*rho;

  if (fabs(rho) < 1e-6) {
    vpERROR_TRACE("rho polar coordinate of the point is null") ;
    std::cout <<"rho = " << rho << std::endl ;

    throw(vpFeatureException(vpFeatureException::badInitializationError,
			     "rho polar coordinate of the point is null")) ;
  }

  if (Z_ < 0)
  {
    vpERROR_TRACE("Point is behind the camera ") ;
    std::cout <<"Z = " << Z_ << std::endl ;

    throw(vpFeatureException(vpFeatureException::badInitializationError,
			     "Point is behind the camera ")) ;
  }

  if (fabs(Z_) < 1e-6)
  {
    vpERROR_TRACE("Point Z coordinates is null ") ;
    std::cout <<"Z = " << Z_ << std::endl ;

    throw(vpFeatureException(vpFeatureException::badInitializationError,
			     "Point Z coordinates is null")) ;
  }

  if (vpFeaturePointPolar::selectRho() & select )
  {
    vpMatrix Lrho(1,6) ; Lrho = 0;

    Lrho[0][0] = -c_/Z_  ;
    Lrho[0][1] = -s_/Z_ ;
    Lrho[0][2] = rho/Z_ ;
    Lrho[0][3] = (1+rho2)*s_ ;
    Lrho[0][4] = -(1+rho2)*c_ ;
    Lrho[0][5] = 0 ;

//     printf("Lrho: rho %f theta %f Z %f\n", rho, theta, Z);
//     std::cout << "Lrho: " << Lrho << std::endl;

    L = vpMatrix::stackMatrices(L,Lrho) ;
  }

  if (vpFeaturePointPolar::selectTheta() & select )
  {
    vpMatrix Ltheta(1,6) ; Ltheta = 0;

    Ltheta[0][0] = s_/(rho*Z_) ;
    Ltheta[0][1]  = -c_/(rho*Z_) ;
    Ltheta[0][2] = 0 ;
    Ltheta[0][3] = c_/rho ;
    Ltheta[0][4] = s_/rho ;
    Ltheta[0][5] = -1 ;

//     printf("Ltheta: rho %f theta %f Z %f\n", rho, theta, Z);
//     std::cout << "Ltheta: " << Ltheta << std::endl;
    L = vpMatrix::stackMatrices(L,Ltheta) ;
  }
  return L ;
}
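
The select masks restrict the error vector the same way they restrict the interaction matrix. Below is a minimal sketch of that pairing; it assumes vpFeaturePointPolar exposes the same error() overload as the vpFeaturePoint class exercised in Example #3, and the includes assume the visp/ header layout used by this ViSP version.

#include <cmath>
#include <iostream>
#include <visp/vpFeaturePointPolar.h>

int main()
{
  // Current and desired polar point features s = (rho, theta)
  vpFeaturePointPolar s, sd;
  s.buildFrom(0.3, M_PI/4, 1.2);   // rho, theta, Z of the current point
  sd.buildFrom(0.2, 0.0, 1.0);     // desired values

  // Full 2x6 interaction matrix and 2x1 error vector
  vpMatrix L = s.interaction(vpBasicFeature::FEATURE_ALL);
  vpColVector e = s.error(sd, vpBasicFeature::FEATURE_ALL);

  // Restricting the selection to rho gives a 1x6 matrix and a 1x1 error
  vpMatrix L_rho = s.interaction(vpFeaturePointPolar::selectRho());
  vpColVector e_rho = s.error(sd, vpFeaturePointPolar::selectRho());

  std::cout << L << std::endl << e.t() << std::endl;
  std::cout << L_rho << std::endl << e_rho.t() << std::endl;
  return 0;
}
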
/*!
  Compute and return the interaction matrix \f$ L \f$ associated to a subset
  of the possible 3D point features \f$(X,Y,Z)\f$ that
  represent the 3D point coordinates expressed in the camera frame.

  \f[
  L = \left[
  \begin{array}{rrrrrr}
  -1 &  0 &  0 &  0 & -Z &  Y \\
   0 & -1 &  0 &  Z &  0 & -X \\
   0 &  0 & -1 & -Y &  X &  0 \\
  \end{array}
  \right]
  \f]


  \param select : Selection of a subset of the possible 3D point coordinate
  features. 
  - To compute the interaction matrix for all three
    subset features \f$(X,Y,Z)\f$ use vpBasicFeature::FEATURE_ALL. In
    that case the dimension of the interaction matrix is \f$ [3 \times
    6] \f$.
  - To compute the interaction matrix for only one of the
    subset features (\f$X\f$, \f$Y\f$ or \f$Z\f$) use
    the corresponding function selectX(), selectY() or
    selectZ(). In that case the returned interaction matrix has
    dimension \f$ [1 \times 6] \f$.

  \return The interaction matrix computed from the 3D point coordinate
  features.

  The code below shows how to compute the interaction matrix
  associated to the visual feature \f$s = X \f$. 

  \code
  vpPoint point;
  ... 
  // Creation of the current feature s
  vpFeaturePoint3D s;
  s.buildFrom(point);

  vpMatrix L_X = s.interaction( vpFeaturePoint3D::selectX() );
  \endcode

  The code below shows how to compute the interaction matrix
  associated to the \f$s = (X,Y) \f$
  subset visual feature:

  \code
  vpMatrix L_XY = s.interaction( vpFeaturePoint3D::selectX() | vpFeaturePoint3D::selectY() );
  \endcode

  L_XY is now a 2 by 6 matrix. The first line corresponds to
  the \f$ X \f$ visual feature while the second one to the \f$
  Y \f$ visual feature.

  It is also possible to build the interaction matrix from all the
  3D point coordinates by:

  \code
  vpMatrix L_XYZ = s.interaction( vpBasicFeature::FEATURE_ALL );
  \endcode

  In that case, L_XYZ is a 3 by 6 interaction matrix where the last
  line corresponds to the \f$ Z \f$ visual feature.

*/
vpMatrix
vpFeaturePoint3D::interaction(const unsigned int select)
{
  vpMatrix L ;

  L.resize(0,6) ;

  if (deallocate == vpBasicFeature::user)
  {
    for (unsigned int i = 0; i < nbParameters; i++)
    {
      if (flags[i] == false)
      {
        switch(i){
        case 0:
          vpTRACE("Warning !!!  The interaction matrix is computed but X was not set yet");
        break;
        case 1:
          vpTRACE("Warning !!!  The interaction matrix is computed but Y was not set yet");
        break;
        case 2:
          vpTRACE("Warning !!!  The interaction matrix is computed but Z was not set yet");
        break;
        default:
          vpTRACE("Problem during the reading of the variable flags");
        }
      }
    }
    resetFlags();
  }

  double X = get_X() ;
  double Y = get_Y() ;
  double Z = get_Z() ;

  if (vpFeaturePoint3D::selectX() & select )
  {
    vpMatrix Lx(1,6) ; Lx = 0;

    Lx[0][0] = -1  ;
    Lx[0][1] = 0 ;
    Lx[0][2] = 0 ;
    Lx[0][3] = 0 ;
    Lx[0][4] = -Z ;
    Lx[0][5] = Y ;

    L = vpMatrix::stackMatrices(L,Lx) ;
  }

  if (vpFeaturePoint3D::selectY() & select )
  {
    vpMatrix Ly(1,6) ; Ly = 0;

    Ly[0][0] = 0 ;
    Ly[0][1] = -1 ;
    Ly[0][2] = 0 ;
    Ly[0][3] = Z ;
    Ly[0][4] = 0 ;
    Ly[0][5] = -X ;

    L = vpMatrix::stackMatrices(L,Ly) ;
  }
  if (vpFeaturePoint3D::selectZ() & select )
  {
    vpMatrix Lz(1,6) ; Lz = 0;

    Lz[0][0] = 0 ;
    Lz[0][1] = 0 ;
    Lz[0][2] = -1 ;
    Lz[0][3] = -Y ;
    Lz[0][4] = X ;
    Lz[0][5] = 0 ;

    L = vpMatrix::stackMatrices(L,Lz) ;
  }
  return L ;
}
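
A minimal sketch of using the 3D point feature in a positioning task, following the vpServo pattern of Example #6 below; the poses cMo and cdMo are illustrative values, not taken from any of the examples.

#include <iostream>
#include <visp/vpFeaturePoint3D.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpPoint.h>
#include <visp/vpServo.h>

int main()
{
  vpPoint point;
  point.setWorldCoordinates(0, 0, 0);

  vpHomogeneousMatrix cMo (0.1, 0.2, 1.5, 0, 0, 0);  // current camera-object pose
  vpHomogeneousMatrix cdMo(0.0, 0.0, 1.0, 0, 0, 0);  // desired camera-object pose

  // Current and desired 3D point features built from the projected point
  vpFeaturePoint3D s, sd;
  point.track(cMo);  s.buildFrom(point);
  point.track(cdMo); sd.buildFrom(point);

  vpServo task;
  task.setServo(vpServo::EYEINHAND_CAMERA);
  task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);
  task.setLambda(0.5);
  task.addFeature(s, sd);

  vpColVector v = task.computeControlLaw();  // 6x1 camera velocity to apply
  std::cout << v.t() << std::endl;
  task.kill();
  return 0;
}
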
Example #6
int
main()
{
  vpTRACE("IEEE 1394 grabber capabilities are not available...\n"
	  "You should install libdc1394-2 to use this binary.") ;
}
int
main(int argc, const char ** argv)
{
  bool opt_display = true;
  bool opt_click_allowed = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit (-1);
  }

  vpImage<unsigned char> I(512,512,0) ;

  // We open a window using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX display;
#elif defined VISP_HAVE_GTK
  vpDisplayGTK display;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI display;
#endif

  if (opt_display) {
    try{
      // Display size is automatically defined by the image (I) size
      display.init(I, 100, 100,"Camera view...") ;
      // Display the image
      // The image class has a member that specifies a pointer toward
      // the display that has been initialized in the display declaration,
      // therefore it is no longer necessary to make a reference to the
      // display variable.
      vpDisplay::display(I) ;
      vpDisplay::flush(I) ;
    }
    catch(...)
      {
	vpERROR_TRACE("Error while displaying the image") ;
	exit(-1);
      }
  }

  // Set the camera parameters
  double px, py ; px = py = 600 ;
  double u0, v0 ; u0 = v0 = 256 ;

  vpCameraParameters cam(px,py,u0,v0);

  vpServo task ;
  vpRobotCamera robot ;

  vpTRACE("sets the initial camera location " ) ;
  vpHomogeneousMatrix cMo(0.2,0.2,1,
  			  vpMath::rad(45),  vpMath::rad(45),  vpMath::rad(125));

  robot.setPosition(cMo) ;

  vpTRACE("sets the final camera location (for simulation purpose)" ) ;
  vpHomogeneousMatrix cMod(0,0,1,
			   vpMath::rad(0),  vpMath::rad(0),  vpMath::rad(0));


  int nbline = 4;

  vpTRACE("sets the line coordinates (2 planes) in the world frame "  ) ;
  vpLine line[4] ;
  line[0].setWorldCoordinates(1,0,0,0.05,0,0,1,0);
  line[1].setWorldCoordinates(0,1,0,0.05,0,0,1,0);
  line[2].setWorldCoordinates(1,0,0,-0.05,0,0,1,0);
  line[3].setWorldCoordinates(0,1,0,-0.05,0,0,1,0);

  vpFeatureLine ld[4] ;
  vpFeatureLine l[4] ;

  vpTRACE("sets the desired position of the visual feature ") ;
  for(int i = 0; i < nbline; i++)
  {
    line[i].track(cMod) ;
    line[i].print() ;

    vpFeatureBuilder::create(ld[i],line[i])  ;
  }


  vpTRACE("project : computes  the line coordinates in the camera frame and its 2D coordinates"  ) ;
  vpTRACE("sets the current position of the visual feature ") ;
  for(int i = 0; i < nbline; i++)
  {
    line[i].track(cMo) ;
    line[i].print() ;

    vpFeatureBuilder::create(l[i],line[i])  ;
    l[i].print() ;
  }

  vpTRACE("define the task") ;
  vpTRACE("\t we want an eye-in-hand control law") ;
  vpTRACE("\t robot is controlled in the camera frame") ;
  task.setServo(vpServo::EYEINHAND_CAMERA) ;
  task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);
  //It could be also interesting to test the following tasks
  //task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
  //task.setInteractionMatrixType(vpServo::MEAN, vpServo::PSEUDO_INVERSE);

  vpTRACE("\t we want to see four lines on four lines.\n") ;

  for(int i = 0; i < nbline; i++)
    task.addFeature(l[i],ld[i]) ;

  vpDisplay::display(I) ;
  vpServoDisplay::display(task,cam,I) ;
  vpDisplay::flush(I) ; 

  vpTRACE("\t set the gain") ;
  task.setLambda(1) ;


  vpTRACE("Display task information " ) ;
  task.print() ;

  if (opt_display && opt_click_allowed) {
    std::cout << "\n\nClick in the camera view window to start..." << std::endl;
    vpDisplay::getClick(I) ;
  }

  int iter=0 ;
  vpTRACE("\t loop") ;
  while(iter++<200)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      if (iter==1) vpTRACE("\t\t get the robot position ") ;
      robot.getPosition(cMo) ;
      if (iter==1) vpTRACE("\t\t new line position ") ;
      //retrieve x,y and Z of the vpLine structure

      for(int i = 0; i < nbline; i++)
      {
        line[i].track(cMo) ;
        vpFeatureBuilder::create(l[i],line[i]);
      }

      if (opt_display) {
	vpDisplay::display(I) ;
	vpServoDisplay::display(task,cam,I) ;
        vpDisplay::flush(I) ; 
      }

      if (iter==1) vpTRACE("\t\t compute the control law ") ;
      v = task.computeControlLaw() ;

      if (iter==1) vpTRACE("\t\t send the camera velocity to the controller ") ;
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      vpTRACE("\t\t || s - s* || ") ;

    }

  if (opt_display && opt_click_allowed) {
    std::cout << "\nClick in the camera view window to end..." << std::endl;
    vpDisplay::getClick(I) ;
  }

  vpTRACE("Display task information " ) ;
  task.print() ;
  task.kill();
}
int
main()
{
  // Log file creation in /tmp/$USERNAME/log.dat
  // This file contains by line:
  // - the 6 computed joint velocities (m/s, rad/s) to achieve the task
  // - the 6 measured joint velocities (m/s, rad/s)
  // - the 6 measured joint positions (m, rad)
  // - the 8 values of s - s*
  std::string username;
  // Get the user login name
  vpIoTools::getUserName(username);

  // Create a log filename to save velocities...
  std::string logdirname;
  logdirname ="/tmp/" + username;

  // Test if the output path exists. If not, try to create it
  if (vpIoTools::checkDirectory(logdirname) == false) {
    try {
      // Create the dirname
      vpIoTools::makeDirectory(logdirname);
    }
    catch (...) {
      std::cerr << std::endl
	   << "ERROR:" << std::endl;
      std::cerr << "  Cannot create " << logdirname << std::endl;
      return(-1);
    }
  }
  std::string logfilename;
  logfilename = logdirname + "/log.dat";

  // Open the log file name
  std::ofstream flog(logfilename.c_str());

  try {
    // Define the square CAD model
    // Square dimension
    //#define L 0.075
#define L 0.05
    // Distance between the camera and the square at the desired
    // position after visual servoing convergence
#define D 0.5

    vpRobotViper850 robot ;
    // Load the end-effector to camera frame transformation obtained
    // using a camera intrinsic model with distortion
    vpCameraParameters::vpCameraParametersProjType projModel =
      vpCameraParameters::perspectiveProjWithDistortion;
    robot.init(vpRobotViper850::TOOL_PTGREY_FLEA2_CAMERA, projModel);

    vpServo task ;

    vpImage<unsigned char> I ;
    int i ;

    bool reset = false;
    vp1394TwoGrabber g(reset);
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I) ;

    g.acquire(I) ;

    vpDisplayX display(I, 100, 100, "Camera view ") ;
    vpTRACE(" ") ;

    vpDisplay::display(I) ;
    vpDisplay::flush(I) ;

    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl ;
    std::cout << " Use of the Afma6 robot " << std::endl ;
    std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;


    vpDot dot[4] ;
    vpImagePoint cog;

    std::cout << "Click on the 4 dots clockwise starting from upper/left dot..."
	      << std::endl;

    for (i=0 ; i < 4 ; i++) {
      dot[i].initTracking(I) ;
      cog = dot[i].getCog();
      vpDisplay::displayCross(I, cog, 10, vpColor::blue) ;
      vpDisplay::flush(I);
    }

    vpCameraParameters cam ;

    // Update camera parameters
    robot.getCameraParameters (cam, I);

    cam.printParameters();

    // Sets the current position of the visual feature
    vpFeaturePoint p[4] ;
    for (i=0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i],cam, dot[i])  ;  //retrieve x,y and Z of the vpPoint structure

    // sets the desired position of the visual feature
    vpFeaturePoint pd[4] ;

    pd[0].buildFrom(-L,-L,D) ;
    pd[1].buildFrom(L,-L,D) ;
    pd[2].buildFrom(L,L,D) ;
    pd[3].buildFrom(-L,L,D) ;

    // We want to see a point on a point
    std::cout << std::endl ;
    for (i=0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // Set the proportional gain
    task.setLambda(0.4) ;

    // Display task information
    task.print() ;

    // Define the task
    // - we want an eye-in-hand control law
    // - articular velocity are computed
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE) ;
    task.print() ;

    vpVelocityTwistMatrix cVe ;
    robot.get_cVe(cVe) ;
    task.set_cVe(cVe) ;
    task.print() ;

    // Set the Jacobian (expressed in the end-effector frame)
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;
    task.print() ;

    // Initialise the velocity control of the robot
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

    std::cout << "\nHit CTRL-C to stop the loop...\n" << std::flush;
    while(1) {
      // Acquire a new image from the camera
      g.acquire(I) ;

      // Display this image
      vpDisplay::display(I) ;

      try {
	// For each point...
	for (i=0 ; i < 4 ; i++) {
	  // Achieve the tracking of the dot in the image
	  dot[i].track(I) ;
	  // Display a green cross at the center of gravity position in the
	  // image
	  cog = dot[i].getCog();
	  vpDisplay::displayCross(I, cog, 10, vpColor::green) ;
	}
      }
      catch(...) {
	flog.close() ; // Close the log file
	vpTRACE("Error detected while tracking visual features") ;
	robot.stopMotion() ;
	exit(1) ;
      }

      // Update the point feature from the dot location
      for (i=0 ; i < 4 ; i++)
	vpFeatureBuilder::create(p[i],cam, dot[i]);

      // Get the jacobian of the robot
      robot.get_eJe(eJe) ;
      // Update this jacobian in the task structure. It will be used to compute
      // the velocity skew (as an articular velocity)
      // qdot = -lambda * L^+ * cVe * eJe * (s-s*)
      task.set_eJe(eJe) ;

      vpColVector v ;
      // Compute the visual servoing skew vector
      v = task.computeControlLaw() ;

      // Display the current and desired feature points in the image display
      vpServoDisplay::display(task,cam,I) ;

      // Apply the computed joint velocities to the robot
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, v) ;

      // Save velocities applied to the robot in the log file
      // v[0], v[1], v[2] correspond to joint translation velocities in m/s
      // v[3], v[4], v[5] correspond to joint rotation velocities in rad/s
      flog << v[0] << " " << v[1] << " " << v[2] << " "
	   << v[3] << " " << v[4] << " " << v[5] << " ";

      // Get the measured joint velocities of the robot
      vpColVector qvel;
      robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel);
      // Save measured joint velocities of the robot in the log file:
      // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation
      //   velocities in m/s
      // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation
      //   velocities in rad/s
      flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " "
	   << qvel[3] << " " << qvel[4] << " " << qvel[5] << " ";

      // Get the measured joint positions of the robot
      vpColVector q;
      robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
      // Save measured joint positions of the robot in the log file
      // - q[0], q[1], q[2] correspond to measured joint translation
      //   positions in m
      // - q[3], q[4], q[5] correspond to measured joint rotation
      //   positions in rad
      flog << q[0] << " " << q[1] << " " << q[2] << " "
	   << q[3] << " " << q[4] << " " << q[5] << " ";

      // Save feature error (s-s*) for the 4 feature points. For each feature
      // point, we have 2 errors (along x and y axis).  This error is expressed
      // in meters in the camera frame
      flog << task.error[0] << " " << task.error[1] << " " // s-s* for point 1
	   << task.error[2] << " " << task.error[3] << " " // s-s* for point 2
	   << task.error[4] << " " << task.error[5] << " " // s-s* for point 3
	   << task.error[6] << " " << task.error[7]        // s-s* for point 4
	   << std::endl;

      // Flush the display
      vpDisplay::flush(I) ;

      //	vpTRACE("\t\t || s - s* || = %f ", task.error.sumSquare()) ;
    }

    vpTRACE("Display task information " ) ;
    task.print() ;
    task.kill();
    flog.close() ; // Close the log file
    return 0;
  }
  catch (...)
    {
      flog.close() ; // Close the log file
      vpERROR_TRACE(" Test failed") ;
      return 0;
    }
}
/*!

  Compute and return the interaction matrix \f$ L \f$ from a subset
  \f$(t_x, t_y, t_z)\f$ of the possible translation features that
  represent the 3D transformation \f$ ^{{\cal{F}}_2}M_{{\cal{F}}_1} \f$.

  Since three different feature types exist, the computation of the
  interaction matrix is different for each one.

  - With the feature type cdMc:

  \f[ L = [ ^{c^*}R_c \;\; 0_3] \f] 

  where \f$^{c^*}R_c\f$ is the rotation the camera has to achieve to
  move from the desired camera frame to the current camera frame.

  - With the feature type cMcd:

  \f[ L = [ -I_3 \;\; [^{c}t_{c^*}]_\times] \f]

  where \f$^{c}t_{c^*}\f$ is the position of the desired camera frame
  expressed in the current camera frame.

  - With the feature type cMo:

  \f[ L = [ -I_3 \;\; [^{c}t_o]_\times] \f]

  where \f$^{c}t_o \f$ is the position of
  the object frame relative to the current camera frame.

  \param select : Selection of a subset of the possible translation
  features. 
  - To compute the interaction matrix for all three translation
    subset features \f$(t_x,t_y,t_z)\f$ use vpBasicFeature::FEATURE_ALL. In
    that case the dimension of the interaction matrix is \f$ [3 \times
    6] \f$.
  - To compute the interaction matrix for only one of the translation
    subset features (\f$t_x\f$, \f$t_y\f$ or \f$t_z\f$) use
    the corresponding function selectTx(), selectTy() or
    selectTz(). In that case the returned interaction matrix has
    dimension \f$ [1 \times 6] \f$.

  \return The interaction matrix computed from the translation
  features.

  The code below shows how to compute the interaction matrix
  associated to the visual feature \f$s = t_x \f$ using the cdMc feature type.

  \code
  vpHomogeneousMatrix cdMc;
  ... 
  // Creation of the current feature s
  vpFeatureTranslation s(vpFeatureTranslation::cdMc);
  s.buildFrom(cdMc);

  vpMatrix L_x = s.interaction( vpFeatureTranslation::selectTx() );
  \endcode

  The code below shows how to compute the interaction matrix
  associated to the \f$s = (t_x, t_y) \f$
  subset visual feature:

  \code
  vpMatrix L_xy = s.interaction( vpFeatureTranslation::selectTx() | vpFeatureTranslation::selectTy() );
  \endcode

  L_xy is now a 2 by 6 matrix. The first line corresponds to
  the \f$ t_x \f$ visual feature while the second one to the \f$
  t_y \f$ visual feature.

  It is also possible to build the interaction matrix from all the
  translation components by:

  \code
  vpMatrix L_xyz = s.interaction( vpBasicFeature::FEATURE_ALL );
  \endcode

  In that case, L_xyz is a 3 by 6 interaction matrix where the last
  line corresponds to the \f$ t_z \f$ visual feature.

*/
vpMatrix
vpFeatureTranslation::interaction(const unsigned int select)
{

  vpMatrix L ;
  L.resize(0,6) ;

  if (deallocate == vpBasicFeature::user) {
    for (unsigned int i = 0; i < nbParameters; i++) {
      if (flags[i] == false) {
        switch(i){
        case 0:
          vpTRACE("Warning !!!  The interaction matrix is computed but f2Mf1 was not set yet");
        break;
        default:
          vpTRACE("Problem during the reading of the variable flags");
        }
      }
    }
    resetFlags();
  }

  if (translation == cdMc) {
    //This version is a simplification
    if (vpFeatureTranslation::selectTx() & select ) {
      vpMatrix Lx(1,6) ;

      for (int i=0 ; i < 3 ; i++)
	Lx[0][i] = f2Mf1[0][i] ;
      Lx[0][3] = 0 ;    Lx[0][4] = 0 ;    Lx[0][5] = 0 ;

      L = vpMatrix::stackMatrices(L,Lx) ;
    }

    if (vpFeatureTranslation::selectTy() & select ) {
      vpMatrix Ly(1,6) ;

      for (int i=0 ; i < 3 ; i++)
	Ly[0][i] = f2Mf1[1][i] ;
      Ly[0][3] = 0 ;    Ly[0][4] = 0 ;    Ly[0][5] = 0 ;

      L = vpMatrix::stackMatrices(L,Ly) ;
    }

    if (vpFeatureTranslation::selectTz() & select ) {
      vpMatrix Lz(1,6) ;

      for (int i=0 ; i < 3 ; i++)
	Lz[0][i] = f2Mf1[2][i] ;
      Lz[0][3] = 0 ;    Lz[0][4] = 0 ;    Lz[0][5] = 0 ;

      L = vpMatrix::stackMatrices(L,Lz) ;
    }
  }
  if (translation == cMcd) {
    //This version is a simplification
    if (vpFeatureTranslation::selectTx() & select ) {
      vpMatrix Lx(1,6) ;
      Lx[0][0] = -1 ;    Lx[0][1] = 0 ;    Lx[0][2] = 0 ;
      Lx[0][3] = 0 ;    Lx[0][4] = -s[2] ;    Lx[0][5] = s[1] ;

      L = vpMatrix::stackMatrices(L,Lx) ;
    }

    if (vpFeatureTranslation::selectTy() & select ) {
      vpMatrix Ly(1,6) ;
      Ly[0][0] = 0 ;    Ly[0][1] = -1 ;    Ly[0][2] = 0 ;
      Ly[0][3] = s[2] ;    Ly[0][4] = 0 ;    Ly[0][5] = -s[0] ;

      L = vpMatrix::stackMatrices(L,Ly) ;
    }

    if (vpFeatureTranslation::selectTz() & select ) {
      vpMatrix Lz(1,6) ;
      Lz[0][0] = 0 ;    Lz[0][1] = 0 ;    Lz[0][2] = -1 ;
      Lz[0][3] = -s[1] ;    Lz[0][4] = s[0] ;    Lz[0][5] = 0 ;

      L = vpMatrix::stackMatrices(L,Lz) ;
    }
  }

  if (translation == cMo) {
    //This version is a simplification
    if (vpFeatureTranslation::selectTx() & select ) {
      vpMatrix Lx(1,6) ;
      Lx[0][0] = -1 ;    Lx[0][1] = 0 ;    Lx[0][2] = 0 ;
      Lx[0][3] = 0 ;    Lx[0][4] = -s[2] ;    Lx[0][5] = s[1] ;

      L = vpMatrix::stackMatrices(L,Lx) ;
    }

    if (vpFeatureTranslation::selectTy() & select ) {
      vpMatrix Ly(1,6) ;
      Ly[0][0] = 0 ;    Ly[0][1] = -1 ;    Ly[0][2] = 0 ;
      Ly[0][3] = s[2] ;    Ly[0][4] = 0 ;    Ly[0][5] = -s[0] ;

      L = vpMatrix::stackMatrices(L,Ly) ;
    }

    if (vpFeatureTranslation::selectTz() & select ) {
      vpMatrix Lz(1,6) ;
      Lz[0][0] = 0 ;    Lz[0][1] = 0 ;    Lz[0][2] = -1 ;
      Lz[0][3] = -s[1] ;    Lz[0][4] = s[0] ;    Lz[0][5] = 0 ;

      L = vpMatrix::stackMatrices(L,Lz) ;
    }
  }

  return L ;
}
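
A short sketch for the cMo feature type, assuming the camera-object pose cMo is already available (the value below is illustrative).

#include <iostream>
#include <visp/vpFeatureTranslation.h>
#include <visp/vpHomogeneousMatrix.h>

int main()
{
  // Object pose expressed in the camera frame (illustrative value)
  vpHomogeneousMatrix cMo(0.1, -0.2, 0.8, 0, 0, 0);

  // Translation feature of type cMo built from that pose
  vpFeatureTranslation s(vpFeatureTranslation::cMo);
  s.buildFrom(cMo);

  // 3x6 interaction matrix [-I_3  [c_t_o]_x], then the single row for t_z
  vpMatrix L   = s.interaction(vpBasicFeature::FEATURE_ALL);
  vpMatrix L_z = s.interaction(vpFeatureTranslation::selectTz());

  std::cout << L << std::endl << L_z << std::endl;
  return 0;
}
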
Example #10
/*!
  Allocates and initializes the parameters depending on the video and the desired color type.
  Once the stream is opened, it is possible to get the video encoding framerate with getFramerate(),
  and the dimension of the images with getWidth() and getHeight().

  \param filename : Path to the video which has to be read.
  \param colortype : Desired color map used to open the video.
  The parameter can take two values : COLORED and GRAY_SCALED.

  \return It returns true if the parameters could be initialized. Otherwise it returns false.
*/
bool vpFFMPEG::openStream(const char *filename, vpFFMPEGColorType colortype)
{
  this->color_type = colortype;
  
  av_register_all();
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,0,0) // libavformat 52.84.0
  if (av_open_input_file (&pFormatCtx, filename, NULL, 0, NULL) != 0)
#else
  if (avformat_open_input (&pFormatCtx, filename, NULL, NULL) != 0) // libavformat 53.4.0
#endif
  {
    vpTRACE("Couldn't open file ");
    return false;
  }

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,21,0) // libavformat 53.21.0
  if (av_find_stream_info (pFormatCtx) < 0)
#else 
  if (avformat_find_stream_info (pFormatCtx, NULL) < 0)
#endif
      return false;
  
  videoStream = 0;
  bool found_codec = false;
  
  /*
  * Detect stream types
  */
  for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
  {
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51,0,0)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) // avutil 50.33.0
#else
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) // avutil 51.9.1
#endif
    {
      videoStream = i;
      //std::cout << "rate: " << pFormatCtx->streams[i]->r_frame_rate.num << " " << pFormatCtx->streams[i]->r_frame_rate.den << std::endl;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(55,12,0)
      framerate_stream =  pFormatCtx->streams[i]->r_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->r_frame_rate.den;
#else
      framerate_stream =  pFormatCtx->streams[i]->avg_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->avg_frame_rate.den;
#endif
      found_codec= true;
      break;
    }
  }

  if (found_codec)
  {
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    if (pCodec == NULL)
    {
      vpTRACE("unsupported codec");
      return false;		// Codec not found
    }
    
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
    if (avcodec_open (pCodecCtx, pCodec) < 0)
#else
    if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0)
#endif
    {
      vpTRACE("Could not open codec");
      return false;		// Could not open codec
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
    pFrame = avcodec_alloc_frame();
#else
    pFrame = av_frame_alloc(); // libavcodec 55.34.1
#endif

    if (color_type == vpFFMPEG::COLORED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameRGB=avcodec_alloc_frame();
#else
      pFrameRGB=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameRGB == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
    }
    
    else if (color_type == vpFFMPEG::GRAY_SCALED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameGRAY=avcodec_alloc_frame();
#else
      pFrameGRAY=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameGRAY == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_GRAY8,pCodecCtx->width,pCodecCtx->height);
    }  

    /*
     * Determine required buffer size and allocate buffer
     */
    width = pCodecCtx->width ;
    height = pCodecCtx->height ;
    buffer = (uint8_t *) malloc ((unsigned int)(sizeof (uint8_t)) * (unsigned int)numBytes);
  }
  else
  {
    vpTRACE("Didn't find a video stream");
    return false;
  }
  
  if (color_type == vpFFMPEG::COLORED)
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
  
  else if (color_type == vpFFMPEG::GRAY_SCALED)
    avpicture_fill((AVPicture *)pFrameGRAY, buffer, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
  
  streamWasOpen = true;

  return true;
}
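
A minimal read sketch. openStream(), getFramerate(), getWidth() and getHeight() appear above; initStream(), getFrame() and closeStream() are assumed from the rest of the vpFFMPEG interface, which is not part of this excerpt, and the file path is illustrative.

#include <iostream>
#include <visp/vpFFMPEG.h>
#include <visp/vpImage.h>
#include <visp/vpRGBa.h>

int main()
{
  vpFFMPEG ffmpeg;
  vpImage<vpRGBa> I;

  if (!ffmpeg.openStream("/tmp/video.mpg", vpFFMPEG::COLORED)) // illustrative path
    return -1;

  ffmpeg.initStream();                 // assumed: finishes the decoder initialization
  std::cout << "fps: "  << ffmpeg.getFramerate()
            << " size: " << ffmpeg.getWidth() << "x" << ffmpeg.getHeight() << std::endl;

  ffmpeg.getFrame(I, 0);               // assumed: decode frame 0 into I
  ffmpeg.closeStream();                // assumed: free the decoding context
  return 0;
}
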
Example #11
/*!

  Compute and return the interaction matrix \f$ L \f$ from
  a subset (\f$ \theta u_x, \theta u_y, \theta u_z\f$) of the possible
  \f$ \theta u \f$ features that represent the 3D rotation
  \f$^{c^*}R_c\f$ or \f$^{c}R_{c^*}\f$, with

  \f[ L = [ 0_3 \; L_{\theta u}] \f] 

  See the vpFeatureThetaU class description for the equations of
  \f$L_{\theta u}\f$.

  \param select : Selection of a subset of the possible \f$ \theta u \f$
  features. 
  - To compute the interaction matrix for all three \f$ \theta u \f$
    features use vpBasicFeature::FEATURE_ALL. In that case the dimension of the
    interaction matrix is \f$ [3 \times 6] \f$.
  - To compute the interaction matrix for only one of the \f$ \theta u \f$
    component features (\f$\theta u_x, \theta u_y, \theta u_z\f$) use one of the
    corresponding functions selectTUx(), selectTUy() or selectTUz(). In
    that case the returned interaction matrix has dimension \f$ [1 \times 6] \f$.

  \return The interaction matrix computed from the \f$ \theta u \f$
  features that represent either the rotation \f$^{c^*}R_c\f$ or the
  rotation \f$^{c}R_{c^*}\f$.

  The code below shows how to compute the interaction matrix
  associated to the visual feature \f$s = \theta u_x \f$. 

  \code
  vpRotationMatrix cdMc;

  // Creation of the current feature s
  vpFeatureThetaU s(vpFeatureThetaU::cdRc);
  s.buildFrom(cdMc);

  vpMatrix L_x = s.interaction( vpFeatureThetaU::selectTUx() );
  \endcode

  The code below shows how to compute the interaction matrix
  associated to the \f$s = (\theta u_x, \theta u_y) \f$
  subset visual feature:

  \code
  vpMatrix L_xy = s.interaction( vpFeatureThetaU::selectTUx() | vpFeatureThetaU::selectTUy() );
  \endcode

  L_xy is now a 2 by 6 matrix. The first line corresponds to
  the \f$ \theta u_x \f$ visual feature while the second one to the \f$
  \theta u_y \f$ visual feature.

  It is also possible to build the interaction matrix from all the \f$
  \theta u \f$ components by:

  \code
  vpMatrix L_xyz = s.interaction( vpBasicFeature::FEATURE_ALL );
  \endcode

  In that case, L_xyz is a 3 by 6 interaction matrix where the last
  line corresponds to the \f$ \theta u_z \f$ visual feature.

*/
vpMatrix
vpFeatureThetaU::interaction(const unsigned int select)
{

  vpMatrix L ;
  L.resize(0,6) ;

  if (deallocate == vpBasicFeature::user)
  {
    for (unsigned int i = 0; i < nbParameters; i++)
    {
      if (flags[i] == false)
      {
        switch(i){
        case 0:
          vpTRACE("Warning !!!  The interaction matrix is computed but Tu_x was not set yet");
        break;
        case 1:
          vpTRACE("Warning !!!  The interaction matrix is computed but Tu_y was not set yet");
        break;
        case 2:
          vpTRACE("Warning !!!  The interaction matrix is computed but Tu_z was not set yet");
        break;
        default:
          vpTRACE("Problem during the reading of the variable flags");
        }
      }
    }
    resetFlags();
  }

  // Lw computed using Lw = [theta/2 u]_x +/- (I + alpha [u]_x [u]_x)
  vpColVector u(3)  ;
  for (unsigned int i=0 ; i < 3 ; i++) {
    u[i] = s[i]/2.0 ; 
  }
  
  vpMatrix Lw(3,3) ;
  Lw = vpColVector::skew(u) ;  /* [theta/2  u]_x */

  vpMatrix U2(3,3) ;
  U2.eye() ;
  
  double  theta = sqrt(s.sumSquare()) ;
  if (theta >= 1e-6) {
    for (unsigned int i=0 ; i < 3 ; i++) 
      u[i] = s[i]/theta ;

    vpMatrix skew_u ;
    skew_u = vpColVector::skew(u) ; 
    U2 += (1-vpMath::sinc(theta)/vpMath::sqr(vpMath::sinc(theta/2.0)))*skew_u*skew_u ;
  }
 
  if (rotation == cdRc) {
    Lw += U2; 
  }
  else { 
    Lw -=  U2; 
  }

  //This version is a simplification
  if (vpFeatureThetaU::selectTUx() & select )
    {
      vpMatrix Lx(1,6) ;

      Lx[0][0] = 0 ;    Lx[0][1] = 0 ;    Lx[0][2] = 0 ;
      for (int i=0 ; i < 3 ; i++) Lx[0][i+3] = Lw[0][i] ;


      L = vpMatrix::stack(L,Lx) ;
    }

  if (vpFeatureThetaU::selectTUy() & select )
    {
      vpMatrix Ly(1,6) ;

      Ly[0][0] = 0 ;    Ly[0][1] = 0 ;    Ly[0][2] = 0 ;
      for (int i=0 ; i < 3 ; i++) Ly[0][i+3] = Lw[1][i] ;

      L = vpMatrix::stack(L,Ly) ;
    }

  if (vpFeatureThetaU::selectTUz() & select )
    {
      vpMatrix Lz(1,6) ;

      Lz[0][0] = 0 ;    Lz[0][1] = 0 ;    Lz[0][2] = 0 ;
      for (int i=0 ; i < 3 ; i++) Lz[0][i+3] = Lw[2][i] ;

      L = vpMatrix::stack(L,Lz) ;
    }

  return L ;
}
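
A sketch of a classical pose-based task that pairs this theta-u rotation feature with the vpFeatureTranslation feature shown earlier, following the vpServo pattern used in the other examples; the two poses are illustrative values.

#include <iostream>
#include <visp/vpFeatureThetaU.h>
#include <visp/vpFeatureTranslation.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpMath.h>
#include <visp/vpServo.h>

int main()
{
  // Current and desired camera-object poses (illustrative values)
  vpHomogeneousMatrix cMo (0.1, 0.2, 1.5, vpMath::rad(10), vpMath::rad(-5), vpMath::rad(20));
  vpHomogeneousMatrix cdMo(0.0, 0.0, 1.0, 0, 0, 0);
  vpHomogeneousMatrix cdMc = cdMo * cMo.inverse();

  // Current features built from cdMc; the default-constructed desired features are zero
  vpFeatureTranslation t(vpFeatureTranslation::cdMc), td(vpFeatureTranslation::cdMc);
  t.buildFrom(cdMc);

  vpRotationMatrix cdRc;
  cdMc.extract(cdRc);
  vpFeatureThetaU tu(vpFeatureThetaU::cdRc), tud(vpFeatureThetaU::cdRc);
  tu.buildFrom(cdRc);

  vpServo task;
  task.setServo(vpServo::EYEINHAND_CAMERA);
  task.setInteractionMatrixType(vpServo::CURRENT);
  task.setLambda(0.5);
  task.addFeature(t, td);
  task.addFeature(tu, tud);

  vpColVector v = task.computeControlLaw();  // 6x1 camera velocity
  std::cout << v.t() << std::endl;
  task.kill();
  return 0;
}
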
Example #12
bool vpFFMPEG::openEncoder(const char *filename, unsigned int w, unsigned int h, AVCodecID codec)
{
  av_register_all();

  /* find the video encoder requested by the caller */
  pCodec = avcodec_find_encoder(codec);
  if (pCodec == NULL) {
    fprintf(stderr, "codec not found\n");
    return false;
  }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,5,0) // libavcodec 53.5.0
  pCodecCtx = avcodec_alloc_context();
#else
  pCodecCtx = avcodec_alloc_context3(NULL);
#endif

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
  pFrame = avcodec_alloc_frame();
  pFrameRGB = avcodec_alloc_frame();
#else
  pFrame = av_frame_alloc(); // libavcodec 55.34.1
  pFrameRGB = av_frame_alloc(); // libavcodec 55.34.1
#endif

  /* put sample parameters */
  pCodecCtx->bit_rate = (int)bit_rate;
  /* resolution must be a multiple of two */
  pCodecCtx->width = (int)w;
  pCodecCtx->height = (int)h;
  this->width = (int)w;
  this->height = (int)h;
  /* frames per second */
  pCodecCtx->time_base.num = 1;
  pCodecCtx->time_base.den = framerate_encoder;
  pCodecCtx->gop_size = 10; /* emit one intra frame every ten frames */
  pCodecCtx->max_b_frames=1;
  pCodecCtx->pix_fmt = PIX_FMT_YUV420P;

  /* open it */
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
  if (avcodec_open (pCodecCtx, pCodec) < 0) {
#else
  if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0) {
#endif
    fprintf(stderr, "could not open codec\n");
    return false;
  }

  /* the codec gives us the frame size, in samples */

  f = fopen(filename, "wb");
  if (!f) {
    fprintf(stderr, "could not open %s\n", filename);
    return false;
  }

  outbuf_size = 100000;
  outbuf = new uint8_t[outbuf_size];

  numBytes = avpicture_get_size (PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height);
  picture_buf = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrame, picture_buf, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

  numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
  buffer = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

  img_convert_ctx= sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height,PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  
  encoderWasOpened = true;

  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<vpRGBa> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif
  fflush(stdout);
  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<unsigned char> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);  
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif

  fflush(stdout);
  return true;
}

/*!
  Ends the writing of the video and closes the file.

  \return It returns true if the file was closed without problem.
*/
bool vpFFMPEG::endWrite()
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  int ret = 1;
  while (ret != 0)
  {

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
    ret = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, NULL);
    fwrite(outbuf, 1, (size_t)out_size, f);
#else
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;
    int got_output;
    ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
    if (ret < 0) {
      std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
      return false;
    }
    if (got_output) {
      fwrite(pkt.data, 1, pkt.size, f);
      av_free_packet(&pkt);
    }
#endif
  }

  /*The end of a mpeg file*/
  outbuf[0] = 0x00;
  outbuf[1] = 0x00;
  outbuf[2] = 0x01;
  outbuf[3] = 0xb7;
  fwrite(outbuf, 1, 4, f);
  fclose(f);
  f = NULL;
  return true;
}
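
A minimal write sketch built only on openEncoder(), saveFrame() and endWrite() from above. AV_CODEC_ID_MPEG1VIDEO comes from libavcodec and matches the AVCodecID overload shown here; the bit rate and encoder frame rate members keep whatever defaults the class sets, and the output path is illustrative.

#include <visp/vpFFMPEG.h>
#include <visp/vpImage.h>
#include <visp/vpRGBa.h>

int main()
{
  vpFFMPEG ffmpeg;
  vpImage<vpRGBa> I(480, 640);   // height x width; both must be multiples of two

  if (!ffmpeg.openEncoder("/tmp/out.mpg", I.getWidth(), I.getHeight(), AV_CODEC_ID_MPEG1VIDEO))
    return -1;

  for (int frame = 0; frame < 100; frame++) {
    // ... draw or update I here ...
    ffmpeg.saveFrame(I);         // encode I as the next video frame
  }
  ffmpeg.endWrite();             // flush the encoder and close the file
  return 0;
}
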

/*!
  This method fills the frame bitmap from the vpImage bitmap.
*/
void vpFFMPEG::writeBitmap(vpImage<vpRGBa> &I)
{
  unsigned char* beginInput = (unsigned char*)I.bitmap;
  unsigned char* input = NULL;
  unsigned char* beginOutput = (unsigned char*)pFrameRGB->data[0];
  int widthStep = pFrameRGB->linesize[0];
  
  for(int i=0 ; i < height ; i++)
  {
    input = beginInput + 4 * i * width;
    unsigned char *output = beginOutput + i * widthStep;
    for(int j=0 ; j < width ; j++)
    {
      *(output++) = *(input);
      *(output++) = *(input+1);
      *(output++) = *(input+2);

      input+=4;
    }
  }
}
int
main()
{
  std::cout << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << " Test program for vpServo "  <<std::endl ;
  std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
  std::cout << " Simulation " << std::endl ;
  std::cout << " task : servo a point " << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << std::endl ;

  try{

#ifdef VISP_HAVE_PTHREAD
    pthread_mutex_lock( &mutexEndLoop );
#endif
    signal( SIGINT,&signalCtrC );

    vpRobotPtu46 robot ;
    {
      vpColVector q(2); q=0;
      robot.setRobotState(vpRobot::STATE_POSITION_CONTROL) ;
      robot.setPosition( vpRobot::ARTICULAR_FRAME,q );
    }

    vpImage<unsigned char> I ;

    vp1394Grabber g;

    g.open(I) ;

    try{
      g.acquire(I) ;
    }
    catch(...)
    {
      vpERROR_TRACE(" Error caught") ;
      return(-1) ;
    }


    vpDisplayX display(I,100,100,"testDisplayX.cpp ") ;
    vpTRACE(" ") ;

    try{
      vpDisplay::display(I) ;
      vpDisplay::flush(I) ;
    }
    catch(...)
    {
      vpERROR_TRACE(" Error caught") ;
      return(-1) ;
    }


    vpServo task ;

    vpDot2 dot ;

    try{
      vpERROR_TRACE("start dot.initTracking(I) ") ;
      int x,y;
      vpDisplay::getClick( I,y,x );
      dot.set_u( x ) ;
      dot.set_v( y ) ;
      vpDEBUG_TRACE(25,"Click!");
      //dot.initTracking(I) ;
      dot.track(I);
      vpERROR_TRACE("after dot.initTracking(I) ") ;
    }
    catch(...)
    {
      vpERROR_TRACE(" Error caught ") ;
      return(-1) ;
    }

    vpCameraParameters cam ;

    vpTRACE("sets the current position of the visual feature ") ;
    vpFeaturePoint p ;
    vpFeatureBuilder::create(p,cam, dot)  ;  //retrieve x,y and Z of the vpPoint structure

    p.set_Z(1) ;
    vpTRACE("sets the desired position of the visual feature ") ;
    vpFeaturePoint pd ;
    pd.buildFrom(0,0,1) ;

    vpTRACE("define the task") ;
    vpTRACE("\t we want an eye-in-hand control law") ;
    vpTRACE("\t articular velocity are computed") ;
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE) ;


    vpTRACE("Set the position of the camera in the end-effector frame ") ;
    vpHomogeneousMatrix cMe ;
    //  robot.get_cMe(cMe) ;

    vpVelocityTwistMatrix cVe ;
    robot.get_cVe(cVe) ;
    std::cout << cVe <<std::endl ;
    task.set_cVe(cVe) ;

    vpDisplay::getClick(I) ;
    vpTRACE("Set the Jacobian (expressed in the end-effector frame)") ;
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;


    vpTRACE("\t we want to see a point on a point..") ;
    std::cout << std::endl ;
    task.addFeature(p,pd) ;

    vpTRACE("\t set the gain") ;
    task.setLambda(0.1) ;


    vpTRACE("Display task information " ) ;
    task.print() ;


    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

    unsigned int iter=0 ;
    vpTRACE("\t loop") ;
#ifdef VISP_HAVE_PTHREAD
    while( 0 != pthread_mutex_trylock( &mutexEndLoop ) )
#else
    for ( ; ; )
#endif
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;

      g.acquire(I) ;
      vpDisplay::display(I) ;

      dot.track(I) ;

      //    vpDisplay::displayCross(I,(int)dot.I(), (int)dot.J(),
      //			   10,vpColor::green) ;


      vpFeatureBuilder::create(p,cam, dot);

      // get the jacobian
      robot.get_eJe(eJe) ;
      task.set_eJe(eJe) ;

      //  std::cout << (vpMatrix)cVe*eJe << std::endl ;

      vpColVector v ;
      v = task.computeControlLaw() ;

      vpServoDisplay::display(task,cam,I) ;
      std::cout << v.t() ;
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, v) ;
      vpDisplay::flush(I) ;

      vpTRACE("\t\t || s - s* || = %f ", ( task.getError() ).sumSquare()) ;
    }

    vpTRACE("Display task information " ) ;
    task.print() ;
    task.kill();

  } catch (...) { vpERROR_TRACE("Caught an unexpected exception..."); }
}
Example #14
//! Compute the interaction matrix from a subset of the possible features.
vpMatrix
vpFeatureEllipse::interaction(const unsigned int select)
{
  vpMatrix L ;

  L.resize(0,6) ;

  if (deallocate == vpBasicFeature::user)
  {
    for (unsigned int i = 0; i < nbParameters; i++)
    {
      if (flags[i] == false)
      {
        switch(i){
        case 0:
          vpTRACE("Warning !!!  The interaction matrix is computed but x was not set yet");
        break;
        case 1:
          vpTRACE("Warning !!!  The interaction matrix is computed but y was not set yet");
        break;
        case 2:
          vpTRACE("Warning !!!  The interaction matrix is computed but mu20 was not set yet");
        break;
        case 3:
          vpTRACE("Warning !!!  The interaction matrix is computed but mu11 was not set yet");
        break;
        case 4:
          vpTRACE("Warning !!!  The interaction matrix is computed but mu02 was not set yet");
        break;
        case 5:
          vpTRACE("Warning !!!  The interaction matrix is computed but A was not set yet");
        break;
        case 6:
          vpTRACE("Warning !!!  The interaction matrix is computed but B was not set yet");
        break;
        case 7:
          vpTRACE("Warning !!!  The interaction matrix is computed but C was not set yet");
        break;
        default:
          vpTRACE("Problem during the reading of the variable flags");
        }
      }
    }
    resetFlags();
  }

  double xc = s[0] ;
  double yc = s[1] ;
  double mu20 = s[2] ;
  double mu11 = s[3] ;
  double mu02 = s[4] ;

  //eq 39
  double Z = 1/(A*xc + B*yc + C) ;



  if (vpFeatureEllipse::selectX() & select )
  {
    vpMatrix H(1,6) ; H = 0;


    H[0][0] = -1/Z;
    H[0][1] = 0 ;
    H[0][2] = xc/Z + A*mu20 + B*mu11;
    H[0][3] = xc*yc + mu11;
    H[0][4] = -1-vpMath::sqr(xc)-mu20;
    H[0][5] = yc;


    L = vpMatrix::stackMatrices(L,H) ;
  }

  if (vpFeatureEllipse::selectY() & select )
  {
    vpMatrix H(1,6) ; H = 0;


    H[0][0] = 0 ;
    H[0][1] = -1/Z;
    H[0][2] = yc/Z + A*mu11 + B*mu02;
    H[0][3] = 1+vpMath::sqr(yc)+mu02;
    H[0][4] = -xc*yc - mu11;
    H[0][5] = -xc;

    L = vpMatrix::stackMatrices(L,H) ;
  }

  if (vpFeatureEllipse::selectMu20() & select )
  {
    vpMatrix H(1,6) ; H = 0;

    H[0][0] = -2*(A*mu20+B*mu11);
    H[0][1] = 0 ;
    H[0][2] = 2*((1/Z+A*xc)*mu20+B*xc*mu11) ;
    H[0][3] = 2*(yc*mu20+xc*mu11);
    H[0][4] = -4*mu20*xc;
    H[0][5] = 2*mu11;

    L = vpMatrix::stackMatrices(L,H) ;
  }

  if (vpFeatureEllipse::selectMu11() & select )
  {
    vpMatrix H(1,6) ; H = 0;

    H[0][0] = -A*mu11-B*mu02;
    H[0][1] = -A*mu20-B*mu11;
    H[0][2] = A*yc*mu20+(3/Z-C)*mu11+B*xc*mu02;
    H[0][3] = 3*yc*mu11+xc*mu02;
    H[0][4] = -yc*mu20-3*xc*mu11;
    H[0][5] = mu02-mu20;

    L = vpMatrix::stackMatrices(L,H) ;
  }

  if (vpFeatureEllipse::selectMu02() & select )
  {
    vpMatrix H(1,6) ; H = 0;

    H[0][0] = 0 ;
    H[0][1] = -2*(A*mu11+B*mu02);
    H[0][2] = 2*((1/Z+B*yc)*mu02+A*yc*mu11);
    H[0][3] = 4*yc*mu02;
    H[0][4] = -2*(yc*mu11 +xc*mu02) ;
    H[0][5] = -2*mu11 ;
    L = vpMatrix::stackMatrices(L,H) ;
  }


  return L ;
}
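
A sketch building the ellipse feature from a 3D circle projected with a known pose. It assumes the vpFeatureBuilder overload taking a vpCircle (as used in the ViSP circle-servoing examples); the pose and circle definition are illustrative.

#include <iostream>
#include <visp/vpCircle.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureEllipse.h>
#include <visp/vpHomogeneousMatrix.h>

int main()
{
  // Camera-object pose (illustrative)
  vpHomogeneousMatrix cMo(0, 0, 1, 0, 0, 0);

  // Circle of radius 0.1 m, centered at the origin, lying in the plane z = 0
  vpCircle circle;
  circle.setWorldCoordinates(0, 0, 1, 0, 0, 0, 0.1);
  circle.track(cMo);   // project it into the camera frame and the image plane

  // Ellipse feature (x, y, mu20, mu11, mu02) with the A, B, C plane parameters
  vpFeatureEllipse s;
  vpFeatureBuilder::create(s, circle);   // assumed overload, see note above

  vpMatrix L    = s.interaction(vpBasicFeature::FEATURE_ALL);        // 5x6
  vpMatrix L_20 = s.interaction(vpFeatureEllipse::selectMu20());     // 1x6

  std::cout << L << std::endl << L_20 << std::endl;
  return 0;
}
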
Example #15
int
main()
{
  // Log file creation in /tmp/$USERNAME/log.dat
  // This file contains by line:
  // - the 6 computed joint velocities (m/s, rad/s) to achieve the task
  // - the 6 measured joint velocities (m/s, rad/s)
  // - the 6 measured joint positions (m, rad)
  // - the 2 values of s - s*
  std::string username;
  // Get the user login name
  vpIoTools::getUserName(username);

  // Create a log filename to save velocities...
  std::string logdirname;
  logdirname ="/tmp/" + username;

  // Test if the output path exists. If not, try to create it
  if (vpIoTools::checkDirectory(logdirname) == false) {
    try {
      // Create the dirname
      vpIoTools::makeDirectory(logdirname);
    }
    catch (...) {
      std::cerr << std::endl
                << "ERROR:" << std::endl;
      std::cerr << "  Cannot create " << logdirname << std::endl;
      exit(-1);
    }
  }
  std::string logfilename;
  logfilename = logdirname + "/log.dat";

  // Open the log file name
  std::ofstream flog(logfilename.c_str());

  try {
    vpServo task ;

    vpImage<unsigned char> I ;

    vp1394TwoGrabber g;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I) ;

    g.acquire(I) ;

#ifdef VISP_HAVE_X11
    vpDisplayX display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I,100,100,"Current image") ;
#endif

    vpDisplay::display(I) ;
    vpDisplay::flush(I) ;
    // exit(1) ;

    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl ;
    std::cout << " Use of the Afma6 robot " << std::endl ;
    std::cout << " task : servo a point " << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;


    vpDot dot ;
    vpImagePoint cog;

    std::cout << "Click on a dot..." << std::endl;
    dot.initTracking(I) ;
    // Get the dot cog
    cog = dot.getCog();
    vpDisplay::displayCross(I, cog, 10, vpColor::blue) ;
    vpDisplay::flush(I);

    vpRobotAfma6 robot ;

    vpCameraParameters cam ;
    // Update camera parameters
    robot.getCameraParameters (cam, I);

    vpTRACE("sets the current position of the visual feature ") ;
    vpFeaturePoint p ;
    vpFeatureBuilder::create(p,cam, dot)  ;  //retrieve x,y and Z of the vpPoint structure

    p.set_Z(1) ;
    vpTRACE("sets the desired position of the visual feature ") ;
    vpFeaturePoint pd ;
    pd.buildFrom(0,0,1) ;

    vpTRACE("define the task") ;
    vpTRACE("\t we want an eye-in-hand control law") ;
    vpTRACE("\t articular velocity are computed") ;
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE) ;


    vpTRACE("Set the position of the camera in the end-effector frame ") ;
    vpHomogeneousMatrix cMe ;
    //  robot.get_cMe(cMe) ;

    vpVelocityTwistMatrix cVe ;
    robot.get_cVe(cVe) ;
    std::cout << cVe <<std::endl ;
    task.set_cVe(cVe) ;

    //    vpDisplay::getClick(I) ;
    vpTRACE("Set the Jacobian (expressed in the end-effector frame)") ;
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;


    vpTRACE("\t we want to see a point on a point..") ;
    std::cout << std::endl ;
    task.addFeature(p,pd) ;

    vpTRACE("\t set the gain") ;
    task.setLambda(0.8) ;

    vpTRACE("Display task information " ) ;
    task.print() ;

    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

    std::cout << "\nHit CTRL-C to stop the loop...\n" << std::flush;
    for ( ; ; ) {
      // Acquire a new image from the camera
      g.acquire(I) ;

      // Display this image
      vpDisplay::display(I) ;

      // Achieve the tracking of the dot in the image
      dot.track(I) ;

      // Get the dot cog
      cog = dot.getCog();

      // Display a green cross at the center of gravity position in the image
      vpDisplay::displayCross(I, cog, 10, vpColor::green) ;

      // Update the point feature from the dot location
      vpFeatureBuilder::create(p, cam, dot);

      // Get the jacobian of the robot
      robot.get_eJe(eJe) ;
      // Update this jacobian in the task structure. It will be used to compute
      // the velocity skew (as an articular velocity)
      // qdot = -lambda * L^+ * cVe * eJe * (s-s*)
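      // where L^+ is the pseudo-inverse of the interaction matrix of the
      // feature, cVe the twist transformation from the end-effector frame to
      // the camera frame and eJe the robot Jacobian expressed in the
      // end-effector frame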
      task.set_eJe(eJe) ;

      //  std::cout << (vpMatrix)cVe*eJe << std::endl ;

      vpColVector v ;
      // Compute the visual servoing skew vector
      v = task.computeControlLaw() ;

      // Display the current and desired feature points in the image display
      vpServoDisplay::display(task, cam, I) ;

      // Apply the computed joint velocities to the robot
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, v) ;

      // Save velocities applied to the robot in the log file
      // v[0], v[1], v[2] correspond to joint translation velocities in m/s
      // v[3], v[4], v[5] correspond to joint rotation velocities in rad/s
      flog << v[0] << " " << v[1] << " " << v[2] << " "
           << v[3] << " " << v[4] << " " << v[5] << " ";

      // Get the measured joint velocities of the robot
      vpColVector qvel;
      robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel);
      // Save measured joint velocities of the robot in the log file:
      // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation
      //   velocities in m/s
      // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation
      //   velocities in rad/s
      flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " "
           << qvel[3] << " " << qvel[4] << " " << qvel[5] << " ";

      // Get the measured joint positions of the robot
      vpColVector q;
      robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
      // Save measured joint positions of the robot in the log file
      // - q[0], q[1], q[2] correspond to measured joint translation
      //   positions in m
      // - q[3], q[4], q[5] correspond to measured joint rotation
      //   positions in rad
      flog << q[0] << " " << q[1] << " " << q[2] << " "
           << q[3] << " " << q[4] << " " << q[5] << " ";

      // Save feature error (s-s*) for the feature point. For this feature
      // point, we have 2 errors (along x and y axis).  This error is expressed
      // in meters in the camera frame
      flog << ( task.getError() ).t() << std::endl;
      vpDisplay::flush(I) ;

      //      vpTRACE("\t\t || s - s* || = %f ", ( task.getError() ).sumSquare()) ;
    }

    flog.close() ; // Close the log file

    vpTRACE("Display task information " ) ;
    task.print() ;
    task.kill();
    return 0;
  }
  catch (...)
  {
    flog.close() ; // Close the log file
    vpERROR_TRACE(" Test failed") ;
    return 0;
  }
}
Example #16
/*!
  Handle the first request in the queue.

  \warning This function doesn't run the request. If it handles a request that hasn't been run yet,
  the request's parameters will be replaced.

  \sa vpNetwork::handleRequests()

  \return The index of the request that has been handled.
*/
int vpNetwork::_handleFirstRequest()
{
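  // A well formed request is expected to be framed in currentMessageReceived as
  //   <beginning><request id><separator><param_1><param_sep>...<param_n><end>
  // where beginning, separator, param_sep and end are the delimiter strings of
  // the vpNetwork message protocol. The code below locates these markers,
  // matches the id against request_list and stores the extracted parameters in
  // the corresponding request.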
  size_t indStart = currentMessageReceived.find(beginning);
  size_t indSep = currentMessageReceived.find(separator);
  size_t indEnd = currentMessageReceived.find(end);
  
  if (indStart == std::string::npos && indSep == std::string::npos && indEnd == std::string::npos)
  {
    if(currentMessageReceived.size() != 0)
      currentMessageReceived.clear();
    
    if(verboseMode)
      vpTRACE("Incorrect message");
    
    return -1;
  }
  
  if(indStart == std::string::npos || indSep == std::string::npos || indEnd == std::string::npos)
    return -1;
  
  if(indEnd < indStart)
  {
    if(verboseMode)
      vpTRACE("Incorrect message");
    currentMessageReceived.erase((unsigned)indStart,indEnd+end.size());
    return -1;
  }
  
  size_t indStart2 = currentMessageReceived.find(beginning,indStart+1);
  if(indStart2 != std::string::npos && indStart2 < indEnd)
  {
    if(verboseMode)
      vpTRACE("Incorrect message");
    currentMessageReceived.erase((unsigned)indStart,(unsigned)indStart2);
    return -1;
  }
  
  size_t deb = indStart + beginning.size();
  std::string id = currentMessageReceived.substr((unsigned)deb, indSep - deb);
  
  deb = indSep+separator.size();
  std::string params = currentMessageReceived.substr((unsigned)deb, (unsigned)(indEnd - deb));
  
//   std::cout << "Handling : " << currentMessageReceived.substr(indStart, indEnd+end.size() - indStart) << std::endl;
   
  int indRequest = 0;
  bool hasBeenFound = false;
  for(unsigned int i = 0 ; i < request_list.size() ; i++)
  {
    if(id == request_list[i]->getId()){
        hasBeenFound = true;
        request_list[i]->clear();
        indRequest = (int)i;
        break;
    }
  }
  
  if(!hasBeenFound){
    //currentMessageReceived.erase(indStart,indEnd+end.size());
    if(verboseMode)
      vpTRACE("No request corresponds to the received message");
    return -1;
  }
  
  size_t indDebParam = indSep + separator.size();
  size_t indEndParam = currentMessageReceived.find(param_sep,indDebParam);
  
  std::string param;
  while(indEndParam != std::string::npos)
  {
    param = currentMessageReceived.substr((unsigned)indDebParam,(unsigned)(indEndParam - indDebParam));
    request_list[(unsigned)indRequest]->addParameter(param);
    indDebParam = indEndParam+param_sep.size();
    indEndParam = currentMessageReceived.find(param_sep,indDebParam);
  }
  
  param = currentMessageReceived.substr((unsigned)indDebParam, indEnd - indDebParam);
  request_list[(unsigned)indRequest]->addParameter(param);
  currentMessageReceived.erase(indStart,indEnd+end.size());
  
  return indRequest;
}
int
main()
{
  try
  {
    vpImage<unsigned char> I ;

    vp1394TwoGrabber g;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I) ;

    g.acquire(I) ;

#ifdef VISP_HAVE_X11
    vpDisplayX display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I,100,100,"Current image") ;
#endif

    vpDisplay::display(I) ;
    vpDisplay::flush(I) ;

    vpServo task ;

    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo a point " << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;


    int i ;
    const int nbline = 2 ;
    vpMeLine line[nbline] ;

    vpMe me ;
    me.setRange(20) ;
    me.setPointsToTrack(100) ;
    me.setThreshold(2000) ;
    me.setSampleStep(10);

    //Initialize the tracking of the two edges of the cylinder
    for (i=0 ; i < nbline ; i++)
    {
      line[i].setDisplay(vpMeSite::RANGE_RESULT) ;
      line[i].setMe(&me) ;

      line[i].initTracking(I) ;
      line[i].track(I) ;
    }

    vpRobotAfma6 robot ;
    //robot.move("zero.pos") ;

    vpCameraParameters cam ;
    // Update camera parameters
    robot.getCameraParameters (cam, I);

    vpTRACE("sets the current position of the visual feature ") ;
    vpFeatureLine p[nbline] ;
    for (i=0 ; i < nbline ; i++)
      vpFeatureBuilder::create(p[i],cam, line[i])  ;

    vpTRACE("sets the desired position of the visual feature ") ;
    vpCylinder cyld(0,1,0,0,0,0,0.04);
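    // Assuming the usual vpCylinder parameter order (axis direction, point on
    // the axis, radius): the cylinder axis is along the y axis of the object
    // frame, passes through the origin, and the radius is 0.04 m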

    vpHomogeneousMatrix cMo(0,0,0.5,0,0,vpMath::rad(0));

    cyld.project(cMo);

    vpFeatureLine pd[nbline] ;
    vpFeatureBuilder::create(pd[0],cyld,vpCylinder::line1);
    vpFeatureBuilder::create(pd[1],cyld,vpCylinder::line2);

    //These lines are needed to keep the conventions defined in vpMeLine (those in vpLine are less restrictive).
    //Another way to obtain the coordinates of the desired features is to learn them before executing the program.
    pd[0].setRhoTheta(-fabs(pd[0].getRho()),0);
    pd[1].setRhoTheta(-fabs(pd[1].getRho()),M_PI);

    vpTRACE("define the task") ;
    vpTRACE("\t we want an eye-in-hand control law") ;
    vpTRACE("\t robot is controlled in the camera frame") ;
    task.setServo(vpServo::EYEINHAND_CAMERA) ;
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);

    vpTRACE("\t we want to see a point on a point..") ;
    std::cout << std::endl ;
    for (i=0 ; i < nbline ; i++)
      task.addFeature(p[i],pd[i]) ;

    vpTRACE("\t set the gain") ;
    task.setLambda(0.3) ;


    vpTRACE("Display task information " ) ;
    task.print() ;


    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

    unsigned int iter=0 ;
    vpTRACE("\t loop") ;
    vpColVector v ;
    vpImage<vpRGBa> Ic ;
    double lambda_av =0.05;
    double alpha = 0.02;
    double beta =3;
    double erreur = 1;


    //First loop to reach the convergence position
    while(erreur > 0.00001)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;

      try {
        g.acquire(I) ;
        vpDisplay::display(I) ;

        //Track the two edges and update the features
        for (i=0 ; i < nbline ; i++)
	      {
		      line[i].track(I) ;
		      line[i].display(I, vpColor::red) ;

		      vpFeatureBuilder::create(p[i],cam,line[i]);

		      p[i].display(cam, I,  vpColor::red) ;
		      pd[i].display(cam, I,  vpColor::green) ;
	      }

        vpDisplay::flush(I) ;

        // Adaptive gain
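        // gain = alpha * exp(-beta * ||s - s*||^2) + lambda_av : close to
        // lambda_av when the error is large (keeps the velocities bounded) and
        // close to alpha + lambda_av near convergence (faster final approach)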
        double gain ;
        {
          if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
            gain = lambda_av ;
          else
          {
            gain = alpha * exp (-beta * ( task.getError() ).sumSquare() ) +  lambda_av ;
          }
        }
        task.setLambda(gain) ;

        v = task.computeControlLaw() ;

        if (iter==0)  vpDisplay::getClick(I) ;
      }
      catch(...)
	    {
	      v =0 ;
	      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
	      robot.stopMotion() ;
	      exit(1) ;
	    }

      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
      erreur = ( task.getError() ).sumSquare();
      vpTRACE("\t\t || s - s* || = %f ", ( task.getError() ).sumSquare()) ;
      iter++;
    }

    /**********************************************************************************************/

    //The second loop computes the control law while taking the secondary task into account.
    vpColVector e1(6) ; e1 = 0 ;
    vpColVector e2(6) ; e2 = 0 ;
    vpColVector proj_e1 ;
    vpColVector proj_e2 ;
    iter = 0;
    double rapport = 0;
    double vitesse = 0.02;
    unsigned int tempo = 1200;
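    // The secondary task follows a cyclic schedule over tempo iterations:
    //   [0,400)    translate along +x (e1),  [400,600)   translate along +y (e2),
    //   [600,1000) translate along -x,       [1000,1200) translate along -y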

    for ( ; ; )
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;

      try {
        g.acquire(I) ;
        vpDisplay::display(I) ;

        //Track the two edges and update the features
        for (i=0 ; i < nbline ; i++)
	      {
		      line[i].track(I) ;
		      line[i].display(I, vpColor::red) ;

		      vpFeatureBuilder::create(p[i],cam,line[i]);

		      p[i].display(cam, I,  vpColor::red) ;
		      pd[i].display(cam, I,  vpColor::green) ;
	      }

        vpDisplay::flush(I) ;

        v = task.computeControlLaw() ;

        //Compute the new control law corresponding to the secondary task
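        // task.secondaryTask(e) projects the desired secondary velocity e onto
        // the null space of the primary task, so adding it to v does not perturb
        // the regulation of the line features; the rapport scaling then
        // normalises the projected vector so that the selected component moves
        // at |vitesse| m/s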
        if ( iter%tempo < 400  /*&&  iter%tempo >= 0*/)
        {
          e2 = 0;
          e1[0] = fabs(vitesse)  ;
          proj_e1 = task.secondaryTask(e1);
          rapport = vitesse/proj_e1[0];
          proj_e1 *= rapport ;
          v += proj_e1 ;
          if ( iter == 199 ) iter+=200;  //This line is needed to make only a half turn during the first cycle
        }

        if ( iter%tempo < 600 &&  iter%tempo >= 400)
        {
          e1 = 0;
          e2[1] = fabs(vitesse)  ;
          proj_e2 = task.secondaryTask(e2);
          rapport = vitesse/proj_e2[1];
          proj_e2 *= rapport ;
          v += proj_e2 ;
        }

        if ( iter%tempo < 1000 &&  iter%tempo >= 600)
        {
          e2 = 0;
          e1[0] = -fabs(vitesse)  ;
          proj_e1 = task.secondaryTask(e1);
          rapport = -vitesse/proj_e1[0];
          proj_e1 *= rapport ;
          v += proj_e1 ;
        }

        if ( iter%tempo < 1200 &&  iter%tempo >= 1000)
        {
          e1 = 0;
          e2[1] = -fabs(vitesse)  ;
          proj_e2 = task.secondaryTask(e2);
          rapport = -vitesse/proj_e2[1];
          proj_e2 *= rapport ;
          v += proj_e2 ;
        }

        robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
      }
      catch(...)
	    {
	      v =0 ;
	      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
	      robot.stopMotion() ;
	      exit(1) ;
	    }

      vpTRACE("\t\t || s - s* || = %f ", ( task.getError() ).sumSquare()) ;
      iter++;
    }


    vpTRACE("Display task information " ) ;
    task.print() ;
    task.kill();
  }
  catch (...)
  {
    vpERROR_TRACE(" Test failed") ;
    return 0;
  }
}
Example #18
/*!
  Receives a message once (up to the maximum message size).
  This message may or may not represent an entire request; several calls to this
  function might be necessary to get the entire request.
  
  \warning Requests will be received but not decoded.
  
  \sa vpNetwork::receive()
  \sa vpNetwork::receiveRequestFrom()
  \sa vpNetwork::receiveRequest()
  \sa vpNetwork::receiveRequestOnceFrom()
  \sa vpNetwork::receiveAndDecodeRequest()
  \sa vpNetwork::receiveAndDecodeRequestFrom()
  \sa vpNetwork::receiveAndDecodeRequestOnce()
  \sa vpNetwork::receiveAndDecodeRequestOnceFrom()
  
  \return The number of bytes received, -1 if an error occurred.
*/
int vpNetwork::_receiveRequestOnce()
{
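  // Wait at most tv_sec/tv_usec for activity on one of the connected receptor
  // sockets (select), then read up to max_size_message bytes from the first
  // ready socket and append them to currentMessageReceived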
  if(receptor_list.size() == 0)
  {
    if(verboseMode)
      vpTRACE( "No Receptor!" );
    return -1;
  }
  
  tv.tv_sec = tv_sec;
  tv.tv_usec = tv_usec;
  
  FD_ZERO(&readFileDescriptor);        
  
  for(unsigned int i=0; i<receptor_list.size(); i++){ 
    if(i == 0)
      socketMax = receptor_list[i].socketFileDescriptorReceptor;
    
    FD_SET((unsigned)receptor_list[i].socketFileDescriptorReceptor,&readFileDescriptor); 
    if(socketMax < receptor_list[i].socketFileDescriptorReceptor) socketMax = receptor_list[i].socketFileDescriptorReceptor; 
  }

  int value = select((int)socketMax+1,&readFileDescriptor,NULL,NULL,&tv);
  int numbytes = 0;
  
  if(value == -1){
    if(verboseMode)
      vpERROR_TRACE( "Select error" );
    return -1;
  }
  else if(value == 0){
    //Timeout
    return 0;
  }
  else{
    for(unsigned int i=0; i<receptor_list.size(); i++){
      if(FD_ISSET((unsigned int)receptor_list[i].socketFileDescriptorReceptor,&readFileDescriptor)){
        char *buf = new char [max_size_message];
#ifdef UNIX
        numbytes=recv(receptor_list[i].socketFileDescriptorReceptor, buf, max_size_message, 0);
#else
        numbytes=recv((unsigned int)receptor_list[i].socketFileDescriptorReceptor, buf, (int)max_size_message, 0);
#endif
        
        
        if(numbytes <= 0)
        {
          std::cout << "Disconnected : " << inet_ntoa(receptor_list[i].receptorAddress.sin_addr) << std::endl;
          receptor_list.erase(receptor_list.begin()+(int)i);
          return numbytes;
        }
        else if(numbytes > 0){
          std::string returnVal(buf, (unsigned int)numbytes);
          currentMessageReceived.append(returnVal);
        }
        delete [] buf;
        break;
      }
    }
  }
  
  return numbytes;
}
Example #19
int
main()
{
  try
    {
      vpImage<unsigned char> I ;

      vp1394TwoGrabber g;
      g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
      g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
      g.open(I) ;

      g.acquire(I) ;


      vpDisplayX display(I,100,100,"testDisplayX.cpp ") ;
      vpTRACE(" ") ;

      vpDisplay::display(I) ;
      vpDisplay::flush(I);

      vpServo task ;

      vpRobotAfma6 robot ;
      //robot.move("zero.pos") ;

      vpCameraParameters cam ;
      // Update camera parameters
      robot.getCameraParameters (cam, I);


      std::cout << std::endl ;
      std::cout << "-------------------------------------------------------" << std::endl ;
      std::cout << " Test program for vpServo "  <<std::endl ;
      std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
      std::cout << " Simulation " << std::endl ;
      std::cout << " task : servo a line " << std::endl ;
      std::cout << "-------------------------------------------------------" << std::endl ;
      std::cout << std::endl ;

      const int nbline = 4 ;
      const int nbpoint = 4 ;


      vpTRACE("sets the desired position of the visual feature ") ;
      vpPoint pointd[nbpoint];  //position of the four corners
      vpPoint pointcd;  //position of the center of the square
      vpFeaturePoint pd;

      double L=0.05 ;
      pointd[0].setWorldCoordinates(L,-L, 0 ) ;
      pointd[1].setWorldCoordinates(L,L, 0 ) ;
      pointd[2].setWorldCoordinates(-L,L, 0 ) ;
      pointd[3].setWorldCoordinates(-L,-L, 0 ) ;

      //The coordinates in the object frame of the point used as a feature, i.e. the center of the square
      pointcd.setWorldCoordinates(0, 0, 0 ) ;

      //The desired homogeneous matrix.
      vpHomogeneousMatrix cMod(0,0,0.4,0,0,vpMath::rad(10));

      pointd[0].project(cMod);
      pointd[1].project(cMod);
      pointd[2].project(cMod);
      pointd[3].project(cMod);

      pointcd.project(cMod);

      vpFeatureBuilder::create(pd,pointcd);

      vpTRACE("Initialization of the tracking") ;
      vpMeLine line[nbline] ;
      vpPoint point[nbpoint];
      int i ;

      vpMe me ;
      me.setRange(10) ;
      me.setPointsToTrack(100) ;
      me.setThreshold(50000) ;
      me.setSampleStep(10);

      //Initialize the tracking. Define the four lines to track
      for (i=0 ; i < nbline ; i++)
      {
        line[i].setMe(&me) ;

        line[i].initTracking(I) ;
        line[i].track(I) ;
      }

      // Compute the position of the four corners. The goal is to
      // compute the pose
      vpImagePoint ip;
      for (i=0 ; i < nbline ; i++)
      {
        double x=0, y=0;

        if (!vpMeLine::intersection (line[i%nbline], line[(i+1)%nbline], ip))
        {
          exit(-1);
        }

        vpPixelMeterConversion::convertPoint(cam, ip, x, y)  ;

        point[i].set_x(x) ;
        point[i].set_y(y) ;
      }

      //Compute the pose cMo
      vpPose pose ;
      pose.clearPoint() ;
      vpHomogeneousMatrix cMo ;

      point[0].setWorldCoordinates(L,-L, 0 ) ;
      point[1].setWorldCoordinates(L,L, 0 ) ;
      point[2].setWorldCoordinates(-L,L, 0 ) ;
      point[3].setWorldCoordinates(-L,-L, 0 ) ;

      for (i=0 ; i < nbline ; i++)
      {
        pose.addPoint(point[i]) ; // and added to the pose computation point list
      }

      pose.computePose(vpPose::LAGRANGE, cMo) ;
      pose.computePose(vpPose::VIRTUAL_VS, cMo);
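      // LAGRANGE provides a linear initial estimate of cMo, which is then
      // refined by the non-linear virtual visual servoing (VIRTUAL_VS) method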


      vpTRACE("sets the current position of the visual feature ") ;

      //The first two features are the x and y coordinates of the square center in the image plane
      vpPoint pointc; //The current position of the center of the square
      double xc = (point[0].get_x()+point[2].get_x())/2;
      double yc = (point[0].get_y()+point[2].get_y())/2;
      pointc.set_x(xc);
      pointc.set_y(yc);
      vpFeaturePoint p;
      pointc.project(cMo);
      vpFeatureBuilder::create(p,pointc);

      //The second feature is the depth of the current square center relative to the depth of the desired square center.
      vpFeatureDepth logZ;
      logZ.buildFrom(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z()/pointcd.get_Z()));

      //The last three features are the theta-u rotation between the current pose and the desired pose.
      vpHomogeneousMatrix cdMc ;
      cdMc = cMod*cMo.inverse() ;
      vpFeatureThetaU tu(vpFeatureThetaU::cdRc);
      tu.buildFrom(cdMc) ;
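      // Together, (x, y), log(Z/Z*) and theta-u form six features controlling
      // the six camera degrees of freedom: a 2 1/2 D visual servoing scheme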

      vpTRACE("define the task") ;
      vpTRACE("\t we want an eye-in-hand control law") ;
      vpTRACE("\t robot is controlled in the camera frame") ;
      task.setServo(vpServo::EYEINHAND_CAMERA) ;
      task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE);

      vpTRACE("\t we want to see a point on a point..") ;
      std::cout << std::endl ;
      task.addFeature(p,pd) ;
      task.addFeature(logZ) ;
      task.addFeature(tu);


      vpTRACE("\t set the gain") ;
      task.setLambda(0.2) ;


      vpTRACE("Display task information " ) ;
      task.print() ;

      robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

      int iter=0 ;
      vpTRACE("\t loop") ;
      vpColVector v ;
      vpImage<vpRGBa> Ic ;
      double lambda_av =0.05;
      double alpha = 0.05 ;
      double beta =3 ;
      double erreur= 1;

      while(1)
	{
	  std::cout << "---------------------------------------------" << iter <<std::endl ;

	  try {
	    g.acquire(I) ;
	    vpDisplay::display(I) ;

	    pose.clearPoint() ;

	    //Track the lines and find the current position of the corners
	    for (i=0 ; i < nbline ; i++)
	    {
		      line[i].track(I) ;

		      line[i].display(I,vpColor::green);

	              double x=0, y=0;

	              if (!vpMeLine::intersection (line[i%nbline], line[(i+1)%nbline], ip))
	              {
	                 exit(-1);
	              }

		      vpPixelMeterConversion::convertPoint(cam, ip, x, y)  ;

	              point[i].set_x(x);
	              point[i].set_y(y);

		      pose.addPoint(point[i]) ;
	    }

	    //Compute the pose
	    pose.computePose(vpPose::VIRTUAL_VS, cMo) ;

	    //Update the two first features x and y (position of the square center)
            xc = (point[0].get_x()+point[2].get_x())/2;
            yc = (point[0].get_y()+point[2].get_y())/2;
            pointc.set_x(xc);
            pointc.set_y(yc);
            pointc.project(cMo);
            vpFeatureBuilder::create(p,pointc);
	    //Print the current and the desired position of the center of the square
	    //Print the desired position of the four corners
	    p.display(cam, I,  vpColor::green) ;
	    pd.display(cam, I,  vpColor::red) ;
	    for (i = 0; i < nbpoint; i++) pointd[i].display(I, cam, vpColor::red);

	    //Update the second feature
	    logZ.buildFrom(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z()/pointcd.get_Z()));

	    //Update the last three features
	    cdMc = cMod*cMo.inverse() ;
	    tu.buildFrom(cdMc) ;

	    //Adaptive gain
	    double gain ;
	    {
	      if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon()) gain = lambda_av ;
	      else
		    {
		      gain = alpha * exp (-beta * task.error.sumSquare() ) +  lambda_av ;
		    }
	    }

	    task.setLambda(gain) ;

	    v = task.computeControlLaw() ;

            vpDisplay::flush(I) ;
	    std::cout << v.sumSquare() <<std::endl  ;
	    if (iter==0)  vpDisplay::getClick(I) ;
	    if (v.sumSquare() > 0.5)
	      {
		      v =0 ;
		      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
		      robot.stopMotion() ;
		      vpDisplay::getClick(I) ;
	      }

	    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

	  }
	  catch(...)
	    {
	      v =0 ;
	      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
	      robot.stopMotion() ;
	      exit(1) ;
	    }

	  vpTRACE("\t\t || s - s* || = %f ", task.error.sumSquare()) ;
	  erreur = task.error.sumSquare();
	  iter++;
	}

      vpTRACE("Display task information " ) ;
      task.print() ;
      task.kill();
    }
  catch (...)
    {
      vpERROR_TRACE(" Test failed") ;
      return 0;
    }
}
Example #20
int
main()
{  
  vpTRACE("You should install Coin3D and/or GTK") ;
}
int
main()
{
  try
  {
    vpImage<unsigned char> I ;

    vp1394TwoGrabber g;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I) ;

    g.acquire(I) ;

#ifdef VISP_HAVE_X11
    vpDisplayX display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I,100,100,"Current image") ;
#endif

    vpDisplay::display(I) ;
    vpDisplay::flush(I);

    vpServo task ;


    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo a line " << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    int i ;
    const int nbline = 4 ;

    vpMeLine line[nbline] ;

    vpMe me ;
    me.setRange(10) ;
    me.setPointsToTrack(100) ;
    me.setThreshold(50000) ;
    me.setSampleStep(10);

    //Initialize the tracking. Define the four lines to track.
    for (i=0 ; i < nbline ; i++)
    {
      line[i].setDisplay(vpMeSite::RANGE_RESULT) ;
      line[i].setMe(&me) ;

      line[i].initTracking(I) ;
      line[i].track(I) ;
    }

    vpRobotAfma6 robot ;
    //robot.move("zero.pos") ;

    vpCameraParameters cam ;
    // Update camera parameters
    robot.getCameraParameters (cam, I);

    vpTRACE("sets the current position of the visual feature ") ;
    vpFeatureLine p[nbline] ;
    for (i=0 ; i < nbline ; i++)
      vpFeatureBuilder::create(p[i],cam, line[i])  ;

    vpTRACE("sets the desired position of the visual feature ") ;
    vpLine lined[nbline];
    lined[0].setWorldCoordinates(1,0,0,0.05,0,0,1,0);
    lined[1].setWorldCoordinates(0,1,0,0.05,0,0,1,0);
    lined[2].setWorldCoordinates(1,0,0,-0.05,0,0,1,0);
    lined[3].setWorldCoordinates(0,1,0,-0.05,0,0,1,0);

    vpHomogeneousMatrix cMo(0,0,0.5,0,0,vpMath::rad(0));

    lined[0].project(cMo);
    lined[1].project(cMo);
    lined[2].project(cMo);
    lined[3].project(cMo);

    //These lines are needed to keep the conventions defined in vpMeLine (those in vpLine are less restrictive).
    //Another way to obtain the coordinates of the desired features is to learn them before executing the program.
    lined[0].setRho(-fabs(lined[0].getRho()));
    lined[0].setTheta(0);
    lined[1].setRho(-fabs(lined[1].getRho()));
    lined[1].setTheta(M_PI/2);
    lined[2].setRho(-fabs(lined[2].getRho()));
    lined[2].setTheta(M_PI);
    lined[3].setRho(-fabs(lined[3].getRho()));
    lined[3].setTheta(-M_PI/2);

    vpFeatureLine pd[nbline] ;

    vpFeatureBuilder::create(pd[0],lined[0]);
    vpFeatureBuilder::create(pd[1],lined[1]);
    vpFeatureBuilder::create(pd[2],lined[2]);
    vpFeatureBuilder::create(pd[3],lined[3]);

    vpTRACE("define the task") ;
    vpTRACE("\t we want an eye-in-hand control law") ;
    vpTRACE("\t robot is controlled in the camera frame") ;
    task.setServo(vpServo::EYEINHAND_CAMERA) ;
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);

    vpTRACE("\t we want to see a point on a point..") ;
    std::cout << std::endl ;
    for (i=0 ; i < nbline ; i++)
      task.addFeature(p[i],pd[i]) ;

    vpTRACE("\t set the gain") ;
    task.setLambda(0.2) ;


    vpTRACE("Display task information " ) ;
    task.print() ;

    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

    unsigned int iter=0 ;
    vpTRACE("\t loop") ;
    vpColVector v ;
    vpImage<vpRGBa> Ic ;
    double lambda_av =0.05;
    double alpha = 0.05 ;
    double beta =3 ;

    for ( ; ; )
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;

      try {
        g.acquire(I) ;
        vpDisplay::display(I) ;

        //Track the lines and update the features
        for (i=0 ; i < nbline ; i++)
	      {
		      line[i].track(I) ;
		      line[i].display(I, vpColor::red) ;

		      vpFeatureBuilder::create(p[i],cam,line[i]);

		      p[i].display(cam, I,  vpColor::red) ;
		      pd[i].display(cam, I,  vpColor::green) ;
	      }

        double gain ;
        {
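          // Adaptive gain: approximately lambda_av while the error is large,
          // rising towards alpha + lambda_av as the error vanishes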
          if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
            gain = lambda_av ;
          else
          {
            gain = alpha * exp (-beta * ( task.getError() ).sumSquare() ) +  lambda_av ;
          }
        }

        task.setLambda(gain) ;

        v = task.computeControlLaw() ;

        vpDisplay::flush(I) ;

        if (iter==0)  vpDisplay::getClick(I) ;
        if (v.sumSquare() > 0.5)
	      {
		      v =0 ;
		      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
		      robot.stopMotion() ;
		      vpDisplay::getClick(I) ;
	      }

        robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      }
      catch(...)
	    {
	      v =0 ;
	      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
	      robot.stopMotion() ;
	      exit(1) ;
	    }

      vpTRACE("\t\t || s - s* || = %f ", ( task.getError() ).sumSquare()) ;
      iter++;
    }

    vpTRACE("Display task information " ) ;
    task.print() ;
    task.kill();
  }
  catch (...)
  {
    vpERROR_TRACE(" Test failed") ;
    return 0;
  }
}
Example #22
int
main(int argc, const char ** argv)
{
  // Read the command line options
  if (getOptions(argc, argv) == false) {
    exit (-1);
  }

  vpServo task ;
  vpRobotCamera robot ;

  std::cout << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << " Test program for vpServo "  <<std::endl ;
  std::cout << " Simulation " << std::endl ;
  std::cout << " task : servo a sphere " << std::endl ;
  std::cout << "-------------------------------------------------------" << std::endl ;
  std::cout << std::endl ;


  vpTRACE("sets the initial camera location " ) ;
  vpHomogeneousMatrix cMo ;
  cMo[0][3] = 0.1 ;
  cMo[1][3] = 0.2 ;
  cMo[2][3] = 2 ;
  robot.setPosition(cMo) ;

  vpHomogeneousMatrix cMod ;
  cMod[0][3] = 0 ;
  cMod[1][3] = 0 ;
  cMod[2][3] = 1 ;



  vpTRACE("sets the sphere coordinates in the world frame "  ) ;
  vpSphere sphere ;
  sphere.setWorldCoordinates(0,0,0,0.1) ;

  vpTRACE("sets the desired position of the visual feature ") ;
  vpFeatureEllipse pd ;
  sphere.track(cMod) ;
  vpFeatureBuilder::create(pd,sphere)  ;

  vpTRACE("project : computes  the sphere coordinates in the camera frame and its 2D coordinates"  ) ;

  vpTRACE("sets the current position of the visual feature ") ;
  vpFeatureEllipse p ;
  sphere.track(cMo) ;
  vpFeatureBuilder::create(p,sphere)  ;

  vpTRACE("define the task") ;
  vpTRACE("\t we want an eye-in-hand control law") ;
  vpTRACE("\t robot is controlled in the camera frame") ;
  task.setServo(vpServo::EYEINHAND_CAMERA) ;

  vpTRACE("\t we want to see a sphere on a sphere..") ;
  std::cout << std::endl ;
  task.addFeature(p,pd) ;

  vpTRACE("\t set the gain") ;
  task.setLambda(1) ;


  vpTRACE("Display task information " ) ;
  task.print() ;
  // exit(1) ;
  int iter=0 ;
  vpTRACE("\t loop") ;
  while(iter++<500)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      if (iter==1) vpTRACE("\t\t get the robot position ") ;
      robot.getPosition(cMo) ;
      if (iter==1) vpTRACE("\t\t new sphere position ") ;
      //retrieve x,y and Z of the vpSphere structure

      sphere.track(cMo) ;
      vpFeatureBuilder::create(p,sphere);

      if (iter==1) vpTRACE("\t\t compute the control law ") ;
      v = task.computeControlLaw() ;
      //  vpTRACE("computeControlLaw" ) ;
      std::cout << task.rankJ1 <<std::endl ;
      if (iter==1) vpTRACE("\t\t send the camera velocity to the controller ") ;
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      //  vpTRACE("\t\t || s - s* || ") ;
      //  std::cout << task.error.sumSquare() <<std::endl ; ;
    }

  vpTRACE("Display task information " ) ;
  task.print() ;
  task.kill();
}
int
main(int argc, const char ** argv)
{
  bool opt_click_allowed = true;
  bool opt_display = true;

  // Read the command line options
  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
    exit (-1);
  }

  // We open two displays, one for the internal camera view, the other one for
  // the external view, using either X11, GTK or GDI.
#if defined VISP_HAVE_X11
  vpDisplayX displayInt;
#elif defined VISP_HAVE_GDI
  vpDisplayGDI displayInt;
#elif defined VISP_HAVE_OPENCV
  vpDisplayOpenCV displayInt;
#endif

  // open a display for the visualization

  vpImage<unsigned char> Iint(480, 640, 255);

  if (opt_display) {
    displayInt.init(Iint,700,0, "Internal view") ;
  }

  int i;
  vpServo task;


  std::cout << std::endl ;
  std::cout << "----------------------------------------------" << std::endl ;
  std::cout << " Test program for vpServo "  <<std::endl ;
  std::cout << " Eye-in-hand task control, articular velocity are computed" 
	    << std::endl ;
  std::cout << " Simulation " << std::endl ;
  std::cout << " task : servo 4 points " << std::endl ;
  std::cout << "----------------------------------------------" << std::endl ;
  std::cout << std::endl ;


  vpTRACE("sets the initial camera location " ) ;
  vpHomogeneousMatrix cMo(-0.05,-0.05,0.7,
			  vpMath::rad(10),  vpMath::rad(10),  vpMath::rad(-30));


  vpTRACE("sets the point coordinates in the object frame "  ) ;
  vpPoint point[4] ;
  point[0].setWorldCoordinates(-0.045,-0.045,0) ;
  point[3].setWorldCoordinates(-0.045,0.045,0) ;
  point[2].setWorldCoordinates(0.045,0.045,0) ;
  point[1].setWorldCoordinates(0.045,-0.045,0) ;

  vpTRACE("project : computes  the point coordinates in the camera frame and its 2D coordinates"  ) ;
  for (i = 0 ; i < 4 ; i++)
    point[i].track(cMo) ;

  vpTRACE("sets the desired position of the point ") ;
  vpFeaturePoint p[4] ;
  for (i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(p[i],point[i])  ;  //retrieve x,y and Z of the vpPoint structure
  
  vpTRACE("sets the desired position of the feature point s*") ;
  vpFeaturePoint pd[4] ;
  
  //Desired pose
  vpHomogeneousMatrix cdMo(vpHomogeneousMatrix(0.0,0.0,0.8,vpMath::rad(0),vpMath::rad(0),vpMath::rad(0)));
  
  // Projection of the points
  for (int i = 0 ; i < 4 ; i++)
    point[i].track(cdMo);
  
  for (int i = 0 ; i < 4 ; i++)
    vpFeatureBuilder::create(pd[i], point[i]);

  vpTRACE("define the task") ;
  vpTRACE("\t we want an eye-in-hand control law") ;
  vpTRACE("\t articular velocity are computed") ;
  task.setServo(vpServo::EYEINHAND_CAMERA);
  task.setInteractionMatrixType(vpServo::DESIRED) ;

  vpTRACE("\t we want to see a point on a point..") ;
  for (i = 0 ; i < 4 ; i++)
    task.addFeature(p[i],pd[i]) ;

  vpTRACE("\t set the gain") ;
  task.setLambda(0.8) ;
  
  /*Declaration of the robot*/
  vpSimulatorViper850 robot(opt_display);
  
  /*Initialise the robot and especially the camera*/
  robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA,vpCameraParameters::perspectiveProjWithoutDistortion);
  robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
  
  /*Initialise the object for the display part*/
  robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
  
  /*Initialise the position of the object relative to the pose of the robot's camera*/
  robot.initialiseObjectRelativeToCamera(cMo);
  
  /*Set the desired position (for the display part)*/
  robot.setDesiredCameraPosition(cdMo);
  
  /*Get the internal robot's camera parameters*/
  vpCameraParameters cam;
  robot.getCameraParameters(cam,Iint);
  
  if (opt_display)
  { 
    //Get the internal view
    vpDisplay::display(Iint);
    robot.getInternalView(Iint);
    vpDisplay::flush(Iint);
  }


  vpTRACE("Display task information " ) ;
  task.print() ;

  int iter=0 ;
  vpTRACE("\t loop") ;
  while(iter++<500)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      //Get the Time at the beginning of the loop
      double t = vpTime::measureTimeMs();

      //Get the current pose of the camera
      cMo = robot.get_cMo();

      if (iter==1) {
	std::cout <<"Initial robot position with respect to the object frame:\n";
	cMo.print();
      }

      if (iter==1) vpTRACE("\t new point position ") ;
      for (i = 0 ; i < 4 ; i++)
      {
	point[i].track(cMo) ;
	//retrieve x,y and Z of the vpPoint structure
	vpFeatureBuilder::create(p[i],point[i])  ;
      }

      if (opt_display) 
      {
	/*Get the internal view and display it*/
	vpDisplay::display(Iint) ;
	robot.getInternalView(Iint);	  
	vpDisplay::flush(Iint);
      }
      
      if (opt_display && opt_click_allowed && iter == 1)
      {
        // suppressed for automated test
        vpTRACE("\n\nClick in the internal view window to continue...");
        vpDisplay::getClick(Iint) ;
      }

      if (iter==1) vpTRACE("\t\t compute the control law ") ;
      v = task.computeControlLaw() ;

      if (iter==1) vpTRACE("\t\t send the camera velocity to the controller ") ;
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      vpTRACE("\t\t || s - s* || ") ;
      std::cout << task.error.sumSquare() <<std::endl ;
      
      /* The main loop has a duration of 10 ms at minimum */
      vpTime::wait(t,10);
    }

  vpTRACE("Display task information " ) ;
  task.print() ;
  task.kill();


  std::cout <<"Final robot position with respect to the object frame:\n";
  cMo.print();

  if (opt_display && opt_click_allowed) 
  {
    // suppressed for automated test
    vpTRACE("\n\nClick in the internal view window to end...");
    vpDisplay::getClick(Iint) ;
  }
}
Example #24
int
main()
{  vpTRACE("You should install GTK") ;

}
int
main()
{
  // Log file creation in /tmp/$USERNAME/log.dat
  // This file contains by line:
  // - the 6 computed joint velocities (m/s, rad/s) to achieve the task
  // - the 6 measured joint velocities (m/s, rad/s)
  // - the 6 measured joint positions (m, rad)
  // - the 8 values of s - s*
  std::string username;
  // Get the user login name
  vpIoTools::getUserName(username);

  // Create a log filename to save velocities...
  std::string logdirname;
  logdirname ="/tmp/" + username;

  // Test if the output path exists. If not, try to create it
  if (vpIoTools::checkDirectory(logdirname) == false) {
    try {
      // Create the dirname
      vpIoTools::makeDirectory(logdirname);
    }
    catch (...) {
      std::cerr << std::endl
                << "ERROR:" << std::endl;
      std::cerr << "  Cannot create " << logdirname << std::endl;
      return(-1);
    }
  }
  std::string logfilename;
  logfilename = logdirname + "/log.dat";

  // Open the log file name
  std::ofstream flog(logfilename.c_str());

  try {
    vpRobotViper650 robot ;
    // Load the end-effector to camera frame transformation obtained
    // using a camera intrinsic model with distortion
    vpCameraParameters::vpCameraParametersProjType projModel =
        vpCameraParameters::perspectiveProjWithDistortion;
    robot.init(vpRobotViper650::TOOL_PTGREY_FLEA2_CAMERA, projModel);

    vpServo task ;

    vpImage<unsigned char> I ;
    int i ;

    bool reset = false;
    vp1394TwoGrabber g(reset);
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8);
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60);
    g.open(I) ;

    g.acquire(I) ;

#ifdef VISP_HAVE_X11
    vpDisplayX display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I,100,100,"Current image") ;
#endif

    vpDisplay::display(I) ;
    vpDisplay::flush(I) ;

    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl ;
    std::cout << " Use of the Afma6 robot " << std::endl ;
    std::cout << " task : servo 4 points on a square with dimention " << L << " meters" << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    vpDot2 dot[4] ;
    vpImagePoint cog;

    std::cout << "Click on the 4 dots clockwise starting from upper/left dot..."
              << std::endl;

    for (i=0 ; i < 4 ; i++) {
      dot[i].setGraphics(true) ;
      dot[i].initTracking(I) ;
      cog = dot[i].getCog();
      vpDisplay::displayCross(I, cog, 10, vpColor::blue) ;
      vpDisplay::flush(I);
    }

    vpCameraParameters cam ;

    // Update camera parameters
    robot.getCameraParameters (cam, I);

    std::cout << "Camera parameters: \n" << cam << std::endl;

    // Sets the current position of the visual feature
    vpFeaturePoint p[4] ;
    for (i=0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i], cam, dot[i]);  //retrieve x,y  of the vpFeaturePoint structure

    // Set the position of the square target in a frame which origin is
    // centered in the middle of the square
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-L, -L, 0) ;
    point[1].setWorldCoordinates( L, -L, 0) ;
    point[2].setWorldCoordinates( L,  L, 0) ;
    point[3].setWorldCoordinates(-L,  L, 0) ;

    // Initialise a desired pose to compute s*, the desired 2D point features
    vpHomogeneousMatrix cMo;
    vpTranslationVector cto(0, 0, 0.5); // tz = 0.5 meter
    vpRxyzVector cro(vpMath::rad(0), vpMath::rad(10), vpMath::rad(20));
    vpRotationMatrix cRo(cro); // Build the rotation matrix
    cMo.buildFrom(cto, cRo); // Build the homogeneous matrix

    // Sets the desired position of the 2D visual feature
    vpFeaturePoint pd[4] ;
    // Compute the desired position of the features from the desired pose
    for (int i=0; i < 4; i ++) {
      vpColVector cP, p ;
      point[i].changeFrame(cMo, cP) ;
      point[i].projection(cP, p) ;

      pd[i].set_x(p[0]) ;
      pd[i].set_y(p[1]) ;
      pd[i].set_Z(cP[2]);
    }

    // We want to see a point on a point
    for (i=0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // Set the proportional gain
    task.setLambda(0.3) ;

    // Display task information
    task.print() ;

    // Define the task
    // - we want an eye-in-hand control law
    // - articular velocity are computed
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE) ;
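    // With vpServo::CURRENT the interaction matrix is evaluated from the
    // current feature values and therefore changes at every iteration, unlike
    // vpServo::DESIRED where it is computed once from the desired features s*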
    task.print() ;

    vpVelocityTwistMatrix cVe ;
    robot.get_cVe(cVe) ;
    task.set_cVe(cVe) ;
    task.print() ;

    // Set the Jacobian (expressed in the end-effector frame)
    vpMatrix eJe ;
    robot.get_eJe(eJe) ;
    task.set_eJe(eJe) ;
    task.print() ;

    // Initialise the velocity control of the robot
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;

    std::cout << "\nHit CTRL-C to stop the loop...\n" << std::flush;
    for ( ; ; ) {
      // Acquire a new image from the camera
      g.acquire(I) ;

      // Display this image
      vpDisplay::display(I) ;

      try {
        // For each point...
        for (i=0 ; i < 4 ; i++) {
          // Achieve the tracking of the dot in the image
          dot[i].track(I) ;
          // Display a green cross at the center of gravity position in the
          // image
          cog = dot[i].getCog();
          vpDisplay::displayCross(I, cog, 10, vpColor::green) ;
        }
      }
      catch(...) {
        flog.close() ; // Close the log file
        vpTRACE("Error detected while tracking visual features") ;
        robot.stopMotion() ;
        return(1) ;
      }

      // During the servo, we compute the pose using the LOWE method. For the
      // initial pose used in the non-linear minimisation we use the pose
      // computed at the previous iteration.
      compute_pose(point, dot, 4, cam, cMo, cto, cro, false);

      for (i=0 ; i < 4 ; i++) {
        // Update the point feature from the dot location
        vpFeatureBuilder::create(p[i], cam, dot[i]);
        // Set the feature Z coordinate from the pose
        vpColVector cP;
        point[i].changeFrame(cMo, cP) ;

        p[i].set_Z(cP[2]);
      }

      // Get the jacobian of the robot
      robot.get_eJe(eJe) ;
      // Update this jacobian in the task structure. It will be used to compute
      // the velocity skew (as an articular velocity)
      // qdot = -lambda * L^+ * cVe * eJe * (s-s*)
      task.set_eJe(eJe) ;

      vpColVector v ;
      // Compute the visual servoing skew vector
      v = task.computeControlLaw() ;

      // Display the current and desired feature points in the image display
      vpServoDisplay::display(task,cam,I) ;

      // Apply the computed joint velocities to the robot
      robot.setVelocity(vpRobot::ARTICULAR_FRAME, v) ;

      // Save velocities applied to the robot in the log file
      // v[0], v[1], v[2] correspond to joint translation velocities in m/s
      // v[3], v[4], v[5] correspond to joint rotation velocities in rad/s
      flog << v[0] << " " << v[1] << " " << v[2] << " "
           << v[3] << " " << v[4] << " " << v[5] << " ";

      // Get the measured joint velocities of the robot
      vpColVector qvel;
      robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel);
      // Save measured joint velocities of the robot in the log file:
      // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation
      //   velocities in m/s
      // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation
      //   velocities in rad/s
      flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " "
           << qvel[3] << " " << qvel[4] << " " << qvel[5] << " ";

      // Get the measured joint positions of the robot
      vpColVector q;
      robot.getPosition(vpRobot::ARTICULAR_FRAME, q);
      // Save measured joint positions of the robot in the log file
      // - q[0], q[1], q[2] correspond to measured joint translation
      //   positions in m
      // - q[3], q[4], q[5] correspond to measured joint rotation
      //   positions in rad
      flog << q[0] << " " << q[1] << " " << q[2] << " "
           << q[3] << " " << q[4] << " " << q[5] << " ";

      // Save feature error (s-s*) for the 4 feature points. For each feature
      // point, we have 2 errors (along x and y axis).  This error is expressed
      // in meters in the camera frame
      flog << ( task.getError() ).t() << std::endl;

      // Flush the display
      vpDisplay::flush(I) ;

      //	vpTRACE("\t\t || s - s* || = %f ", ( task.getError() ).sumSquare()) ;
    }

    vpTRACE("Display task information " ) ;
    task.print() ;
    task.kill();
    flog.close() ; // Close the log file
    return 0;
  }
  catch (...)
  {
    flog.close() ; // Close the log file
    vpERROR_TRACE(" Test failed") ;
    return 0;
  }
}
Example #26
/*!
  \warning Not implemented yet.

  \sa init(), closeDisplay()
*/
void vpDisplayGTK::displayImage(const unsigned char * /* I */)
{
  vpTRACE(" not implemented ") ;
}
Example #27
int
main()
{  vpTRACE("You should install Coin3D and SoQT or SoWin or SoXt") ;

}
Example #28
/*!
  \warning Not implemented yet.
*/
void vpDisplayGTK::clearDisplay(const vpColor & /* color */)
{
  vpTRACE("Not implemented") ;
}
Example #29
int
main()
{
  vpTRACE("Sorry, for the moment, vpRingLight class works only on unix...");
  return 0;
}
Example #30
int
main()
{
  vpTRACE("GDI or GTK is not available...") ;
}