Example #1
vpHomogeneousMatrix TJoint::getJointTransformation()
{
  vpRotationMatrix rm;
  vpTranslationVector tv(axis[0] * current_value, axis[1] * current_value, axis[2] * current_value);
  cerr << "getJointTransform: " << tv.t() << endl;
  return vpHomogeneousMatrix(tv, rm);
}
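The transform above corresponds to a prismatic joint: the rotation stays identity and the translation is the joint axis scaled by the current joint value. As a minimal sketch (not part of the original class, and assuming the TJoint type shown above), such per-joint transforms could be chained to obtain the pose of the last link:

// Hypothetical helper, assuming the TJoint class above; vpHomogeneousMatrix
// default-constructs to the identity transform.
vpHomogeneousMatrix chainPose(std::vector<TJoint> &joints)
{
  vpHomogeneousMatrix bMe; // base-to-end transform, starts as identity
  for (size_t i = 0; i < joints.size(); ++i)
    bMe = bMe * joints[i].getJointTransformation(); // compose in chain order
  return bMe;
}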
Example #2
  void
  TrackerViewer::callback
  (const sensor_msgs::ImageConstPtr& image,
   const sensor_msgs::CameraInfoConstPtr& info,
   const geometry_msgs::PoseWithCovarianceStamped::ConstPtr& trackingResult,
   const visp_tracker::MovingEdgeSites::ConstPtr& sites,
   const visp_tracker::KltPoints::ConstPtr& klt)
  {
    // Copy image.
    try
      {
        rosImageToVisp(image_, image);
      }
    catch(std::exception& e)
      {
        ROS_ERROR_STREAM("dropping frame: " << e.what());
      }

    // Copy the camera info, the moving-edge sites and the optional KLT points.
    info_ = info;
    sites_ = sites;
    klt_ = klt;

    // Copy cMo.
    cMo_ = vpHomogeneousMatrix();
    transformToVpHomogeneousMatrix(*cMo_, trackingResult->pose.pose);
  }
Example #3
vpHomogeneousMatrix Transform::getTransform() {
    KDL::Rotation vRek;
    vRek=KDL::Rotation::Quaternion(msg_.rotation.x, msg_.rotation.y, msg_.rotation.z, msg_.rotation.w);

    vpRotationMatrix vRe;
    // Both KDL::Rotation and vpRotationMatrix store a row-major 3x3 array of doubles
    memcpy(vRe.data, vRek.data, 9 * sizeof(double));
    vpTranslationVector vTe(msg_.translation.x, msg_.translation.y, msg_.translation.z);

    return vpHomogeneousMatrix(vTe, vRe);
}
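A sketch of the same conversion done purely with ViSP types, assuming a ViSP version that provides vpQuaternionVector and a vpHomogeneousMatrix constructor taking a translation and a quaternion:

vpHomogeneousMatrix Transform::getTransform() {
    // Alternative sketch: build the pose from the quaternion directly,
    // avoiding the intermediate KDL::Rotation and the raw memcpy.
    vpQuaternionVector q(msg_.rotation.x, msg_.rotation.y, msg_.rotation.z, msg_.rotation.w);
    vpTranslationVector vTe(msg_.translation.x, msg_.translation.y, msg_.translation.z);
    return vpHomogeneousMatrix(vTe, q);
}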
Example #4
void PerfectHandTOG::computeRollerTurnTOG(ObjectAction *oa, TOGPrimitive **togp, ObjectFrame **of, TaskFrame **tf)
{
  cerr << "Computing Roller Turn plan on object " << oa->object->getName() << " for PerfectHand" << endl;

  //Compute the facing vector
  vpColVector f = getFacingVector(oa->object);

  //Set preshape to cylindrical precision
  *togp = new TOGPrimitive(new PerfectHandLateral(), new PerfectHandOPhalanx());
  ((PerfectHandLateral*)(*togp)->preshape)->setClosing(60);
  ((PerfectHandOPhalanx*)(*togp)->frame)->setFinger(4);
  //Set object frame to the face whose normal matches the container's normal.
  //Y direction of the object frame must go in opposite direction to the 
  //facing vector. Object frame X axis is aligned with container's Y axis
  //when possible. However, for this object frame, rotation in Z is not defined.
  vpHomogeneousMatrix oMof;
  vpColVector n(3), o(3), a(3);
  o = -f;
  if (fabs(f[2]) == 1 || fabs(f[0]) == 1)
  {
    n = 0;
    n[1] = -1;
  }
  else
  {
    n = 0;
    n[2] = -f[1];
  }
  a = vpColVector::cross(n, o);

  oMof.setIdentity();
  for (int i = 0; i < 3; i++)
  {
    oMof[i][0] = n[i];
    oMof[i][1] = o[i];
    oMof[i][2] = a[i];
  }
  *of = new ObjectFrame(oa->object, oMof);

  vpColVector uv(6);
  uv = 0;
  uv[3] = f[0];
  uv[4] = f[1];
  uv[5] = f[2];
  //TODO: check turning direction and multiply uv accordingly
  *tf = new TaskFrame(oa->object, vpHomogeneousMatrix(0, 0, 0, 0, 0, 0), uv);
}
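Since the object frame above is assembled column by column from cross products, a quick orthonormality check can catch a degenerate facing vector. A hypothetical sanity check (isProperFrame is not part of the original code, and vpRotationMatrix::isARotationMatrix() is assumed to be available, as in recent ViSP versions):

// Hypothetical check: true if the rotation part of M has unit-length,
// mutually orthogonal columns, i.e. it is a proper rotation matrix.
bool isProperFrame(const vpHomogeneousMatrix &M)
{
  vpRotationMatrix R;
  M.extract(R);                  // extract the 3x3 rotation block
  return R.isARotationMatrix();
}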
Example #5
int test(double x,double y,double z,double alpha){
  //initial pose
  vpHomogeneousMatrix cMo(x,y,z,-vpMath::rad(0),vpMath::rad(0),alpha);
  //Desired pose
  vpHomogeneousMatrix cdMo(vpHomogeneousMatrix(0.0,0.0,1.0,vpMath::rad(0),vpMath::rad(0),-vpMath::rad(0)));

  //source and destination objects for moment manipulation
  vpMomentObject src(6);
  vpMomentObject dst(6);

  //init and run the simulation
  initScene(cMo, cdMo, src, dst); //initialize graphical scene (for interface)

  vpMatrix mat = execute(cMo, cdMo, src, dst);

  if(fabs(mat[0][0]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[0][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[0][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[1][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[1][1]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[1][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[2][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[2][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[2][2]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[2][5]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[3][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[3][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[3][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[3][5]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[4][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[4][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[4][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[4][5]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  if(fabs(mat[5][0]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[5][1]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[5][2]-(0)) > std::numeric_limits<double>::epsilon()*1e10) return -1;
  if(fabs(mat[5][5]-(-1)) > std::numeric_limits<double>::epsilon()*1e10) return -1;

  return 0;
}
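Every check in test() compares one entry of the interaction matrix against its expected value using the same loose tolerance. A small hypothetical helper (illustrative only, not part of the original test) that factors out the comparison:

// Hypothetical helper: true if a and b match up to the tolerance used in test().
bool nearlyEqual(double a, double b)
{
  return fabs(a - b) <= std::numeric_limits<double>::epsilon() * 1e10;
}
// Usage sketch: if (!nearlyEqual(mat[0][0], -1.0)) return -1;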
vpHomogeneousMatrix DoorHandleDetectionNode::createTFPlane(const vpColVector coeffs, const double x, const double y, const double z)
{
  vpColVector xp(3);
  vpColVector yp(3);
  vpColVector normal(3);
  vpRotationMatrix dRp;
  vpTranslationVector P0;
  vpHomogeneousMatrix dMp;
  geometry_msgs::Pose dMp_msg;
  tf::Transform transform;
  static tf::TransformBroadcaster br;

  //Create a normal to the plane from the coefficients
  normal[0] = -coeffs[0];
  normal[1] = -coeffs[1];
  normal[2] = -coeffs[2];

  normal.normalize();

  //Create an xp vector that satisfies the plane equation
  xp[0] = - (coeffs[1]*y + coeffs[2]*(z+0.05) + coeffs[3]) / (coeffs[0]) - x;
  xp[1] = 0;
  xp[2] = 0.05;
  xp.normalize();
  //Create a yp vector with the normal and xp
  yp = vpColVector::cross(normal,xp);

  //Create the Rotation Matrix
  dRp[0][0] = xp[0];
  dRp[1][0] = xp[1];
  dRp[2][0] = xp[2];
  dRp[0][1] = yp[0];
  dRp[1][1] = yp[1];
  dRp[2][1] = yp[2];
  dRp[0][2] = normal[0];
  dRp[1][2] = normal[1];
  dRp[2][2] = normal[2];

  transform.setOrigin( tf::Vector3(x, y, z) );

  //Calculate the z0 for the translation vector
  double z0 = -(coeffs[0]*x + coeffs[1]*y + coeffs[3])/(coeffs[2]);

  //Create the translation Vector
  P0 = vpTranslationVector(x, y, z0);

  //Create the homogeneous Matrix
  dMp = vpHomogeneousMatrix(P0, dRp);

  //Publish the tf of the plane
  dMp_msg = visp_bridge::toGeometryMsgsPose(dMp);
  tf::Quaternion q;
  q.setX(dMp_msg.orientation.x);
  q.setY(dMp_msg.orientation.y);
  q.setZ(dMp_msg.orientation.z);
  q.setW(dMp_msg.orientation.w);
  transform.setRotation(q);
  br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), m_parent_depth_tf, "tf_plane"));

  return dMp;
}
vpHomogeneousMatrix DoorHandleDetectionNode::createTFLine(const vpColVector direction_axis, vpColVector normal, const double x, const double y, const double z)
{
  vpRotationMatrix dRdh;
  vpHomogeneousMatrix dMdh;
  vpHomogeneousMatrix dMdh_border;
  geometry_msgs::Pose dMdh_msg_border;
  tf::Transform transformdh;
  static tf::TransformBroadcaster br;
  vpColVector direction_line(3);
  vpColVector centroid;
  vpColVector centroidBorder;

  centroid.stack(x);
  centroid.stack(y);
  centroid.stack(z);
  centroid.stack(1);

  //Normalize the direction axis
  direction_line[0]=direction_axis[0];
  direction_line[1]=direction_axis[1];
  direction_line[2]=direction_axis[2];
  direction_line.normalize();

  vpColVector y_dh(3);
  y_dh = vpColVector::cross(normal,direction_line);

  //Create the Rotation Matrix
  dRdh[0][0] = direction_line[0];
  dRdh[1][0] = direction_line[1];
  dRdh[2][0] = direction_line[2];
  dRdh[0][1] = y_dh[0];
  dRdh[1][1] = y_dh[1];
  dRdh[2][1] = y_dh[2];
  dRdh[0][2] = normal[0];
  dRdh[1][2] = normal[1];
  dRdh[2][2] = normal[2];

  //Create the pose of the handle with respect to the depth camera in the cog of the handle
  dMdh = vpHomogeneousMatrix(vpTranslationVector(x, y, z), dRdh);

  //Put the pose of the handle at its rotation axis instead of at its cog
  centroidBorder = dMdh.inverse() * centroid;
  centroidBorder[0] = centroidBorder[0] - (m_lenght_dh / 2) + 0.015;
  centroidBorder = dMdh * centroidBorder;

  //Create the pose of the handle with respect to the depth camera in the rotation axis of the handle
  dMdh_border = vpHomogeneousMatrix(vpTranslationVector(centroidBorder[0], centroidBorder[1], centroidBorder[2]), dRdh);
  dMdh_msg_border = visp_bridge::toGeometryMsgsPose(dMdh_border);

  //Publish the tf of the handle with respect to the depth camera
  transformdh.setOrigin( tf::Vector3(centroidBorder[0], centroidBorder[1], centroidBorder[2] ));

  tf::Quaternion qdh;
  qdh.setX(dMdh_msg_border.orientation.x);
  qdh.setY(dMdh_msg_border.orientation.y);
  qdh.setZ(dMdh_msg_border.orientation.z);
  qdh.setW(dMdh_msg_border.orientation.w);

  transformdh.setRotation(qdh);
  br.sendTransform(tf::StampedTransform(transformdh, ros::Time::now(), m_parent_depth_tf, "door_handle_tf"));

  return dMdh_border;
}
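Both createTFPlane() and createTFLine() fill their rotation matrices column by column from three direction vectors. A hypothetical helper capturing that shared pattern (rotationFromColumns is not part of the original node):

// Hypothetical helper: build a rotation whose columns are x, y and z.
// The caller must supply an orthonormal, right-handed triplet of 3-vectors.
vpRotationMatrix rotationFromColumns(const vpColVector &x, const vpColVector &y, const vpColVector &z)
{
  vpRotationMatrix R;
  for (unsigned int i = 0; i < 3; i++) {
    R[i][0] = x[i];
    R[i][1] = y[i];
    R[i][2] = z[i];
  }
  return R;
}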
int
main(int argc, const char ** argv)
{
  try {
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit (-1);
    }

    // We open a display for the internal camera view,
    // using either X11, GDI or OpenCV.
#if defined VISP_HAVE_X11
    vpDisplayX displayInt;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI displayInt;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV displayInt;
#endif

    vpImage<unsigned char> Iint(480, 640, 255);

    if (opt_display) {
      // open a display for the visualization
      displayInt.init(Iint,700,0, "Internal view") ;
    }

    vpServo task;

    std::cout << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo "  <<std::endl ;
    std::cout << " Eye-in-hand task control, articular velocity are computed"
              << std::endl ;
    std::cout << " Simulation " << std::endl ;
    std::cout << " task : servo 4 points " << std::endl ;
    std::cout << "----------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    // sets the initial camera location
    vpHomogeneousMatrix cMo(-0.05,-0.05,0.7,
                            vpMath::rad(10),  vpMath::rad(10),  vpMath::rad(-30));

    // sets the point coordinates in the object frame
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-0.045,-0.045,0) ;
    point[3].setWorldCoordinates(-0.045,0.045,0) ;
    point[2].setWorldCoordinates(0.045,0.045,0) ;
    point[1].setWorldCoordinates(0.045,-0.045,0) ;

    // computes the point coordinates in the camera frame and its 2D coordinates
    for (unsigned int i = 0 ; i < 4 ; i++)
      point[i].track(cMo) ;

    // sets the desired position of the point
    vpFeaturePoint p[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(p[i],point[i])  ;  //retrieve x,y and Z of the vpPoint structure

    // sets the desired position of the feature point s*
    vpFeaturePoint pd[4] ;

    // Desired pose
    vpHomogeneousMatrix cdMo(vpHomogeneousMatrix(0.0,0.0,0.8,vpMath::rad(0),vpMath::rad(0),vpMath::rad(0)));

    // Projection of the points
    for (unsigned int  i = 0 ; i < 4 ; i++)
      point[i].track(cdMo);

    for (unsigned int  i = 0 ; i < 4 ; i++)
      vpFeatureBuilder::create(pd[i], point[i]);

    // define the task
    // - we want an eye-in-hand control law
    // - the camera velocity is computed
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::DESIRED) ;

    // we want to see a point on a point
    for (unsigned int i = 0 ; i < 4 ; i++)
      task.addFeature(p[i],pd[i]) ;

    // set the gain
    task.setLambda(0.8) ;

    // Declaration of the robot
    vpSimulatorAfma6 robot(opt_display);

    // Initialise the robot and especially the camera
    robot.init(vpAfma6::TOOL_CCMOP, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);

    // Initialise the object for the display part
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);

    // Initialise the position of the object relative to the pose of the robot's camera
    robot.initialiseObjectRelativeToCamera(cMo);

    // Set the desired position (for the display part)
    robot.setDesiredCameraPosition(cdMo);

    // Get the internal robot's camera parameters
    vpCameraParameters cam;
    robot.getCameraParameters(cam,Iint);

    if (opt_display)
    {
      //Get the internal view
      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      vpDisplay::flush(Iint);
    }

    // Display task information
    task.print() ;

    unsigned int iter=0 ;
    vpTRACE("\t loop") ;
    while(iter++<500)
    {
      std::cout << "---------------------------------------------" << iter <<std::endl ;
      vpColVector v ;

      // Get the Time at the beginning of the loop
      double t = vpTime::measureTimeMs();

      // Get the current pose of the camera
      cMo = robot.get_cMo();

      if (iter==1) {
        std::cout <<"Initial robot position with respect to the object frame:\n";
        cMo.print();
      }

      // new point position
      for (unsigned int i = 0 ; i < 4 ; i++)
      {
        point[i].track(cMo) ;
        // retrieve x,y and Z of the vpPoint structure
        vpFeatureBuilder::create(p[i],point[i])  ;
      }

      if (opt_display)
      {
        // Get the internal view and display it
        vpDisplay::display(Iint) ;
        robot.getInternalView(Iint);
        vpDisplay::flush(Iint);
      }

      if (opt_display && opt_click_allowed && iter == 1)
      {
        // suppressed for automated tests
        std::cout << "Click in the internal view window to continue..." << std::endl;
        vpDisplay::getClick(Iint) ;
      }

      // compute the control law
      v = task.computeControlLaw() ;

      // send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      std::cout << "|| s - s* || " << ( task.getError() ).sumSquare() <<std::endl ;

      // The main loop has a duration of 10 ms at minimum
      vpTime::wait(t,10);
    }

    // Display task information
    task.print() ;
    task.kill();

    std::cout <<"Final robot position with respect to the object frame:\n";
    cMo.print();

    if (opt_display && opt_click_allowed)
    {
      // suppressed for automated tests
      std::cout << "Click in the internal view window to end..." << std::endl;
      vpDisplay::getClick(Iint) ;
    }
    return 0;
  }
  catch(const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    return 1;
  }
}
int main()
{
#if defined(VISP_HAVE_PTHREAD)
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    /*
    Top view of the world frame, the camera frame and the object frame

    world, also robot base frame :  --> w_y
                                    |
                                   \|/
                                    w_x

    object :
                     o_y
                  /|\
                   |
             o_x <--


    camera :
                     c_y
                  /|\
                   |
             c_x <--

    */
    vpHomogeneousMatrix wMo(vpTranslationVector(0.40, 0, -0.15),
                            vpRotationMatrix(vpRxyzVector(-M_PI, 0, M_PI/2.)));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpSimulatorViper850 robot(true);
    robot.setVerbose(true);

    // Enlarge the default joint limits
    vpColVector qmin = robot.getJointMin();
    vpColVector qmax = robot.getJointMax();
    qmin[0] = -vpMath::rad(180);
    qmax[0] =  vpMath::rad(180);
    qmax[1] =  vpMath::rad(0);
    qmax[2] =  vpMath::rad(270);
    qmin[4] = -vpMath::rad(180);
    qmax[4] =  vpMath::rad(180);

    robot.setJointLimit(qmin, qmax);

    std::cout << "Robot joint limits: " << std::endl;
    for (unsigned int i=0; i< qmin.size(); i ++)
      std::cout << "Joint " << i << ": min " << vpMath::deg(qmin[i]) << " max " << vpMath::deg(qmax[i]) << " (deg)" << std::endl;

    robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    robot.set_fMo(wMo);
    bool ret = true;
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
    ret =
#endif
        robot.initialiseCameraRelativeToObject(cMo);
    if (ret == false)
      return 0; // Not able to set the position
    robot.setDesiredCameraPosition(cdMo);
    // We modify the default external camera position
    robot.setExternalCameraPosition(vpHomogeneousMatrix(vpTranslationVector(-0.4, 0.4, 2),
                                                        vpRotationMatrix(vpRxyzVector(M_PI/2,0,0))));

    vpImage<unsigned char> Iint(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 700, 0, "Internal view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    // Modify the camera parameters to match those used in the other simulations
    robot.setCameraParameters(cam);

    bool start = true;
    //for ( ; ; )
    for (int iter =0; iter < 275; iter ++)
    {
      cMo = robot.get_cMo();

      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }

      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      if (!start) {
        display_trajectory(Iint, point, cMo, cam);
        vpDisplay::displayText(Iint, 40, 120, "Click to stop the servo...", vpColor::red);
      }
      vpDisplay::flush(Iint);

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      // A click to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      if (start) {
        start = false;
        v = 0;
        robot.setVelocity(vpRobot::CAMERA_FRAME, v);
        vpDisplay::displayText(Iint, 40, 120, "Click to start the servo...", vpColor::blue);
        vpDisplay::flush(Iint);
        //vpDisplay::getClick(Iint);
      }

      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}