Example #1
void readConfigVar_PoseRxyz(const string &s, vpHomogeneousMatrix &M)
{
    // Read a 6-vector [tx ty tz rx ry rz] from the configuration variable s
    vpColVector v(6);
    vpIoTools::readConfigVar(s, v);
    vpTranslationVector t(v[0], v[1], v[2]);
    vpRxyzVector r(v[3], v[4], v[5]); // Rxyz Euler angles in radians
    vpRotationMatrix R(r);
    M.buildFrom(t, R);
}
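A minimal calling sketch for this helper follows. The configuration file name pose.cfg, the variable name cMo and the ViSP 2.x style include paths are illustrative assumptions; the key/value file has to be parsed with vpIoTools::loadConfigFile before the variable can be read.

#include <iostream>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpIoTools.h>

int main()
{
  // Parse the key/value configuration file before reading variables from it
  vpIoTools::loadConfigFile("pose.cfg");

  // Fill cMo from the 6-vector entry "cMo" (translation + Rxyz Euler angles)
  vpHomogeneousMatrix cMo;
  readConfigVar_PoseRxyz("cMo", cMo);

  std::cout << cMo << std::endl;
  return 0;
}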
Example #2
void convertGMPoseToVispHMat(const geometry_msgs::Pose &pose,
                             vpHomogeneousMatrix &HMatrix)
{
  // Convert the ROS quaternion to a theta-u rotation (angle * unit axis)
  tf::Quaternion q;
  tf::quaternionMsgToTF(pose.orientation, q);
  double angle = q.getAngle();

  HMatrix.buildFrom(pose.position.x,
                    pose.position.y,
                    pose.position.z,
                    angle * q.getAxis().x(),
                    angle * q.getAxis().y(),
                    angle * q.getAxis().z());
}
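The six-double overload of vpHomogeneousMatrix::buildFrom used here expects a theta-u rotation (rotation angle times unit axis), which is why the quaternion is expanded through getAngle() and getAxis(). A hedged usage sketch, assuming hand-built pose values and the usual ROS/ViSP headers:

#include <cmath>
#include <geometry_msgs/Pose.h>
#include <tf/transform_datatypes.h>
#include <visp/vpHomogeneousMatrix.h>

void example()
{
  geometry_msgs::Pose pose;
  pose.position.x = 0.1;
  pose.position.y = 0.0;
  pose.position.z = 0.5;
  // Quaternion for a 90 degree rotation about the z axis
  pose.orientation = tf::createQuaternionMsgFromYaw(M_PI / 2.0);

  vpHomogeneousMatrix cMo;
  convertGMPoseToVispHMat(pose, cMo); // last three buildFrom() arguments are theta-u
}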
Example #3
void convertTFtoVispHMat(const tf::StampedTransform &TFtransform,
                         vpHomogeneousMatrix &HMatrix)
{
  // Theta-u angle of the transform (the rotation is passed as angle * unit axis)
  double angle = TFtransform.getRotation().getAngle();

  HMatrix.buildFrom(TFtransform.getOrigin().x(),
                    TFtransform.getOrigin().y(),
                    TFtransform.getOrigin().z(),
                    angle * TFtransform.getRotation().getAxis().x(),
                    angle * TFtransform.getRotation().getAxis().y(),
                    angle * TFtransform.getRotation().getAxis().z());
}
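A hedged sketch of how this converter is typically fed from a tf::TransformListener; the frame names "camera" and "object" and the error handling are assumptions, not part of the original example.

#include <ros/ros.h>
#include <tf/transform_listener.h>
#include <visp/vpHomogeneousMatrix.h>

void lookupObjectPose(const tf::TransformListener &listener, vpHomogeneousMatrix &cMo)
{
  tf::StampedTransform transform;
  try {
    // Latest available transform of the object frame expressed in the camera frame
    listener.lookupTransform("camera", "object", ros::Time(0), transform);
    convertTFtoVispHMat(transform, cMo);
  }
  catch (const tf::TransformException &ex) {
    ROS_WARN("%s", ex.what());
  }
}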
/*!

  Compute the pose \e cMo from the 3D coordinates of the points \e point and
  their corresponding 2D coordinates \e dot. The pose is computed using the
  Lowe non-linear method.

  \param point : 3D coordinates of the points.

  \param dot : 2D coordinates of the points.

  \param ndot : Number of points or dots used for the pose estimation.

  \param cam : Intrinsic camera parameters.

  \param cMo : Homogeneous matrix in output describing the transformation
  between the camera and object frame.

  \param cto : Translation in output extracted from \e cMo.

  \param cro : Rotation in output extracted from \e cMo.

  \param init : Indicates whether an initial pose has to be estimated with the
  Lagrange and Dementhon linear methods.

*/
void compute_pose(vpPoint point[], vpDot2 dot[], int ndot,
                  vpCameraParameters cam,
                  vpHomogeneousMatrix &cMo,
                  vpTranslationVector &cto,
                  vpRxyzVector &cro, bool init)
{
  vpHomogeneousMatrix cMo_dementhon; // pose computed with Dementhon
  vpHomogeneousMatrix cMo_lagrange;  // pose computed with Lagrange
  vpRotationMatrix cRo;
  vpPose pose;
  vpImagePoint cog;
  for (int i = 0; i < ndot; i++) {
    double x = 0, y = 0;
    cog = dot[i].getCog();
    // Pixel to meter conversion of the dot center of gravity
    vpPixelMeterConversion::convertPoint(cam, cog, x, y);
    // Set the normalized coordinates obtained by perspective projection
    point[i].set_x(x);
    point[i].set_y(y);
    pose.addPoint(point[i]); // add the point to the pose estimator
  }

  if (init == true) {
    pose.computePose(vpPose::DEMENTHON, cMo_dementhon);
    // Residual expressed in meter for the Dementhon pose estimate
    double residual_dementhon = pose.computeResidual(cMo_dementhon);
    pose.computePose(vpPose::LAGRANGE, cMo_lagrange);
    double residual_lagrange = pose.computeResidual(cMo_lagrange);

    // Select the best linear estimate to initialize the Lowe pose computation
    if (residual_lagrange < residual_dementhon)
      cMo = cMo_lagrange;
    else
      cMo = cMo_dementhon;
  }
  else { // init == false: use the previous pose to initialize Lowe
    cRo.buildFrom(cro);
    cMo.buildFrom(cto, cRo);
  }
  pose.computePose(vpPose::LOWE, cMo);
  // Update the output translation and rotation from the refined pose
  cMo.extract(cto);
  cMo.extract(cRo);
  cro.buildFrom(cRo);
}
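A hedged sketch of the calling pattern this function expects: define the 3D model points once, track the blobs, call with init = true on the first image so Dementhon/Lagrange seed the pose, then call with init = false afterwards so the previous cto/cro initialize the Lowe refinement. The square-target geometry, camera parameters and tracking setup below are placeholders, not from the original code.

#include <visp/vpCameraParameters.h>
#include <visp/vpDot2.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpPoint.h>
#include <visp/vpRxyzVector.h>
#include <visp/vpTranslationVector.h>

void track_target(vpImage<unsigned char> &I)
{
  const int ndot = 4;

  // 3D model of a 10 cm square target, expressed in the object frame [m]
  vpPoint point[ndot];
  point[0].setWorldCoordinates(-0.05, -0.05, 0);
  point[1].setWorldCoordinates( 0.05, -0.05, 0);
  point[2].setWorldCoordinates( 0.05,  0.05, 0);
  point[3].setWorldCoordinates(-0.05,  0.05, 0);

  vpDot2 dot[ndot];
  for (int i = 0; i < ndot; i++)
    dot[i].initTracking(I); // click-based blob initialization (needs a display)

  vpCameraParameters cam(600, 600, 320, 240); // px, py, u0, v0 placeholders
  vpHomogeneousMatrix cMo;
  vpTranslationVector cto;
  vpRxyzVector cro;

  // First image: init = true, Dementhon/Lagrange provide the initial guess
  compute_pose(point, dot, ndot, cam, cMo, cto, cro, true);

  // For each following image (grab a new image here in a real loop):
  // init = false, the previous cto/cro seed the Lowe refinement
  for (int i = 0; i < ndot; i++)
    dot[i].track(I);
  compute_pose(point, dot, ndot, cam, cMo, cto, cro, false);
}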