vpHomogeneousMatrix DoorHandleDetectionNode::createTFPlane(const vpColVector coeffs, const double x, const double y, const double z)
{
  vpColVector xp(3);
  vpColVector yp(3);
  vpColVector normal(3);
  vpRotationMatrix dRp;
  vpTranslationVector P0;
  vpHomogeneousMatrix dMp;
  geometry_msgs::Pose dMp_msg;
  tf::Transform transform;
  static tf::TransformBroadcaster br;

  //Create a normal to the plane from the coefficients
  normal[0] = -coeffs[0];
  normal[1] = -coeffs[1];
  normal[2] = -coeffs[2];

  normal.normalize();

  //Create an xp vector lying in the plane: step 0.05 m along z and solve the plane equation for x
  xp[0] = - (coeffs[1]*y + coeffs[2]*(z+0.05) + coeffs[3]) / (coeffs[0]) - x;
  xp[1] = 0;
  xp[2] = 0.05;
  xp.normalize();
  //Create a yp vector orthogonal to both the normal and xp
  yp = vpColVector::cross(normal,xp);

  //Build the rotation matrix whose columns are (xp, yp, normal)
  dRp[0][0] = xp[0];
  dRp[1][0] = xp[1];
  dRp[2][0] = xp[2];
  dRp[0][1] = yp[0];
  dRp[1][1] = yp[1];
  dRp[2][1] = yp[2];
  dRp[0][2] = normal[0];
  dRp[1][2] = normal[1];
  dRp[2][2] = normal[2];

  transform.setOrigin( tf::Vector3(x, y, z) );

  //Compute z0 so that the point (x, y, z0) lies on the plane
  double z0 = -(coeffs[0]*x + coeffs[1]*y + coeffs[3])/(coeffs[2]);

  //Create the translation vector
  P0 = vpTranslationVector(x, y, z0);

  //Create the homogeneous matrix
  dMp = vpHomogeneousMatrix(P0, dRp);

  //Publish the tf of the plane
  dMp_msg = visp_bridge::toGeometryMsgsPose(dMp);
  tf::Quaternion q;
  q.setX(dMp_msg.orientation.x);
  q.setY(dMp_msg.orientation.y);
  q.setZ(dMp_msg.orientation.z);
  q.setW(dMp_msg.orientation.w);
  transform.setRotation(q);
  br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), m_parent_depth_tf, "tf_plane"));

  return dMp;
}
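For context, a minimal sketch of a plausible call site, assuming the plane coefficients (a, b, c, d) of ax + by + cz + d = 0 come from a RANSAC plane fit (e.g. pcl::SACSegmentation) and (x, y, z) is a point near the plane; every name below is illustrative, not from the original node:

  //Hypothetical call site (illustrative names)
  vpColVector coeffs(4);
  coeffs[0] = plane_coefficients->values[0]; // a
  coeffs[1] = plane_coefficients->values[1]; // b
  coeffs[2] = plane_coefficients->values[2]; // c
  coeffs[3] = plane_coefficients->values[3]; // d
  vpHomogeneousMatrix dMp = createTFPlane(coeffs, centroid_x, centroid_y, centroid_z);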
vpHomogeneousMatrix DoorHandleDetectionNode::createTFLine(const vpColVector direction_axis, vpColVector normal, const double x, const double y, const double z)
{
  vpRotationMatrix dRdh;
  vpHomogeneousMatrix dMdh;
  vpHomogeneousMatrix dMdh_border;
  geometry_msgs::Pose dMdh_msg_border;
  tf::Transform transformdh;
  static tf::TransformBroadcaster br;
  vpColVector direction_line(3);
  vpColVector centroid;
  vpColVector centroidBorder;

  centroid.stack(x);
  centroid.stack(y);
  centroid.stack(z);
  centroid.stack(1);

  //Normalize the direction axis
  direction_line[0]=direction_axis[0];
  direction_line[1]=direction_axis[1];
  direction_line[2]=direction_axis[2];
  direction_line.normalize();

  vpColVector y_dh(3);
  y_dh = vpColVector::cross(normal,direction_line);

  //Create the Rotation Matrix
  dRdh[0][0] = direction_line[0];
  dRdh[1][0] = direction_line[1];
  dRdh[2][0] = direction_line[2];
  dRdh[0][1] = y_dh[0];
  dRdh[1][1] = y_dh[1];
  dRdh[2][1] = y_dh[2];
  dRdh[0][2] = normal[0];
  dRdh[1][2] = normal[1];
  dRdh[2][2] = normal[2];

  //Create the pose of the handle with respect to the depth camera, at the handle's center of gravity (cog)
  dMdh = vpHomogeneousMatrix(vpTranslationVector(x, y, z), dRdh);

  //Move the pose origin from the handle's cog to its rotation axis
  centroidBorder = dMdh.inverse() * centroid;                         //centroid expressed in the handle frame
  centroidBorder[0] = centroidBorder[0] - (m_lenght_dh / 2) + 0.015;  //move half the handle length along local x (with a 1.5 cm margin)
  centroidBorder = dMdh * centroidBorder;                             //back to the depth camera frame

  //Create the pose of the handle with respect to the depth camera, at the handle's rotation axis
  dMdh_border = vpHomogeneousMatrix(vpTranslationVector(centroidBorder[0], centroidBorder[1], centroidBorder[2]), dRdh);
  dMdh_msg_border = visp_bridge::toGeometryMsgsPose(dMdh_border);

  //Publish the tf of the handle with respect to the depth camera
  transformdh.setOrigin( tf::Vector3(centroidBorder[0], centroidBorder[1], centroidBorder[2] ));

  tf::Quaternion qdh;
  qdh.setX(dMdh_msg_border.orientation.x);
  qdh.setY(dMdh_msg_border.orientation.y);
  qdh.setZ(dMdh_msg_border.orientation.z);
  qdh.setW(dMdh_msg_border.orientation.w);

  transformdh.setRotation(qdh);
  br.sendTransform(tf::StampedTransform(transformdh, ros::Time::now(), m_parent_depth_tf, "door_handle_tf"));

  return dMdh_border;
}
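The three commented lines above rely on a general frame-change trick: express a point in the object's own frame, offset it there, then map it back. A self-contained sketch of the same idea, with a hypothetical helper name:

//Sketch: shift a pose origin by an offset expressed in the object's own frame
vpHomogeneousMatrix shiftAlongLocalX(const vpHomogeneousMatrix &oMobj, double dx)
{
  vpColVector p(4);
  p[0] = dx; p[1] = 0; p[2] = 0; p[3] = 1; //offset point expressed in the object frame
  vpColVector p_o = oMobj * p;             //the same point in the reference frame
  vpHomogeneousMatrix oMshifted = oMobj;   //keep the rotation, move the origin
  oMshifted[0][3] = p_o[0];
  oMshifted[1][3] = p_o[1];
  oMshifted[2][3] = p_o[2];
  return oMshifted;
}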
Example #3
int main()
{
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    // Define the target as 4 points
    vpPoint point[4] ;
    point[0].setWorldCoordinates(-0.1,-0.1, 0);
    point[1].setWorldCoordinates( 0.1,-0.1, 0);
    point[2].setWorldCoordinates( 0.1, 0.1, 0);
    point[3].setWorldCoordinates(-0.1, 0.1, 0);

#if defined(VISP_HAVE_OGRE)
    // Grey-level image used as background texture.
    vpImage<unsigned char> background(480, 640, 255);

    // Parameters of our camera
    vpCameraParameters cam(840, 840, background.getWidth()/2, background.getHeight()/2);

    // Our object
    // A simulator with the camera parameters defined above,
    // and the background image size
    vpAROgre ogre;
    ogre.setShowConfigDialog(false);
    ogre.setCameraParameters(cam);
    ogre.addResource("./"); // Add the path to the Sphere.mesh resource
    ogre.init(background, false, true);

    // Create the scene that contains 4 spheres
    // Sphere.mesh contains a sphere with 1 meter radius
    std::vector<std::string> name(4);
    for (unsigned int i=0; i<4; i++) {
      std::ostringstream s; s << "Sphere" <<  i; name[i] = s.str();
      ogre.load(name[i], "Sphere.mesh");
      ogre.setScale(name[i], 0.02f, 0.02f, 0.02f); // Rescale the sphere to 2 cm radius
      // Set the position of each sphere in the object frame
      ogre.setPosition(name[i], vpTranslationVector(point[i].get_oX(), point[i].get_oY(), point[i].get_oZ()));
    }

    // Add an optional point light source
    Ogre::Light * light = ogre.getSceneManager()->createLight();
    light->setDiffuseColour(1, 1, 1); // scaled RGB values
    light->setSpecularColour(1, 1, 1); // scaled RGB values
    light->setPosition((Ogre::Real)cdMo[0][3], (Ogre::Real)cdMo[1][3], (Ogre::Real)(-cdMo[2][3]));
    light->setType(Ogre::Light::LT_POINT);
#endif

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpHomogeneousMatrix wMc, wMo;
    vpSimulatorCamera robot;
    robot.setSamplingTime(0.040);
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    for (unsigned int iter=0; iter < 150; iter ++) {
      robot.getPosition(wMc);
      cMo = wMc.inverse() * wMo;
      for (int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }
#if defined(VISP_HAVE_OGRE)
      // Update the scene from the new camera position
      ogre.display(background, cMo);
#endif
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);
      vpTime::wait( robot.getSamplingTime() * 1000);
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
  catch(...) {
    std::cout << "Catch an exception " << std::endl;
  }
}
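The servo loop above runs a fixed 150 iterations. A hedged variant stops as soon as the task error is small; the threshold below is illustrative:

      // Inside the loop, after computing the control law:
      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);
      if (task.getError().sumSquare() < 1e-6) // illustrative threshold
        break;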
Example #4
int main()
{
#if defined(VISP_HAVE_PTHREAD)
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    /*
    Top view of the world frame, the camera frame and the object frame

    world, also robot base frame :  --> w_y
                                    |
                                   \|/
                                      w_x

    object :
                     o_y
                  /|\
                   |
             o_x <--


    camera :
                     c_y
                  /|\
                   |
             c_x <--

    */
    vpHomogeneousMatrix wMo(vpTranslationVector(0.40, 0, -0.15),
                            vpRotationMatrix(vpRxyzVector(-M_PI, 0, M_PI/2.)));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpSimulatorViper850 robot(true);
    robot.setVerbose(true);

    // Enlarge the default joint limits
    vpColVector qmin = robot.getJointMin();
    vpColVector qmax = robot.getJointMax();
    qmin[0] = -vpMath::rad(180);
    qmax[0] =  vpMath::rad(180);
    qmax[1] =  vpMath::rad(0);
    qmax[2] =  vpMath::rad(270);
    qmin[4] = -vpMath::rad(180);
    qmax[4] =  vpMath::rad(180);

    robot.setJointLimit(qmin, qmax);

    std::cout << "Robot joint limits: " << std::endl;
    for (unsigned int i=0; i< qmin.size(); i ++)
      std::cout << "Joint " << i << ": min " << vpMath::deg(qmin[i]) << " max " << vpMath::deg(qmax[i]) << " (deg)" << std::endl;

    robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    robot.set_fMo(wMo);
    bool ret = true;
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
    ret =
#endif
        robot.initialiseCameraRelativeToObject(cMo);
    if (ret == false)
      return 0; // Not able to set the position
    robot.setDesiredCameraPosition(cdMo);
    // We modify the default external camera position
    robot.setExternalCameraPosition(vpHomogeneousMatrix(vpTranslationVector(-0.4, 0.4, 2),
                                                        vpRotationMatrix(vpRxyzVector(M_PI/2,0,0))));

    vpImage<unsigned char> Iint(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 700, 0, "Internal view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    // Modify the camera parameters to match those used in the other simulations
    robot.setCameraParameters(cam);

    bool start = true;
    //for ( ; ; )
    for (int iter =0; iter < 275; iter ++)
    {
      cMo = robot.get_cMo();

      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }

      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      if (!start) {
        display_trajectory(Iint, point, cMo, cam);
        vpDisplay::displayText(Iint, 40, 120, "Click to stop the servo...", vpColor::red);
      }
      vpDisplay::flush(Iint);

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      // A click to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      if (start) {
        start = false;
        v = 0;
        robot.setVelocity(vpRobot::CAMERA_FRAME, v);
        vpDisplay::displayText(Iint, 40, 120, "Click to start the servo...", vpColor::blue);
        vpDisplay::flush(Iint);
        //vpDisplay::getClick(Iint);
      }

      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
  }
#endif
}
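display_trajectory() called in the loop above is defined elsewhere in the original tutorial. A minimal sketch consistent with its call signature could look like this (trajectory-history bookkeeping omitted):

// Sketch: project the model points with the current pose and mark them in the image
void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam)
{
  for (unsigned int i = 0; i < point.size(); i++) {
    point[i].track(cMo); // update the normalized coordinates (x, y)
    vpImagePoint ip;
    vpMeterPixelConversion::convertPoint(cam, point[i].get_x(), point[i].get_y(), ip);
    vpDisplay::displayPoint(I, ip, vpColor::green);
  }
}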
Example #5
int main()
{
  try {
#if defined(VISP_HAVE_OGRE) 
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100)

    // Image to store the gathered data
    // Here we acquire a color image. The consequence will be that
    // the background texture used in Ogre renderer will be also in color.
    vpImage<vpRGBa> I;

    // Now we try to find an available framegrabber
#if defined(VISP_HAVE_V4L2)
    // Video for linux 2 grabber
    vpV4l2Grabber grabber;
    grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_DC1394)
    // libdc1394-2
    vp1394TwoGrabber grabber;
    grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    // OpenCV to gather images
    cv::VideoCapture grabber(0); // open the default camera
    if(!grabber.isOpened()) { // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    grabber >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    // Parameters of our camera
    double px = 565;
    double py = 565;
    double u0 = I.getWidth() / 2;
    double v0 = I.getHeight() / 2;
    vpCameraParameters cam(px,py,u0,v0);
    // The matrix with our pose
    // Defines the pose of the object in the camera frame
    vpHomogeneousMatrix cMo;

    // Our object
    // A simulator with the camera parameters defined above,
    // and a background image of the matching size
    vpAROgre ogre(cam, I.getWidth(), I.getHeight());
    // Initialisation
    // Here we load the requested plugins specified in the "plugins.cfg" file
    // and the resources specified in the "resources.cfg" file
    // These two files can be found respectively in the VISP_HAVE_OGRE_PLUGINS_PATH
    // and VISP_HAVE_OGRE_RESOURCES_PATH folders
    ogre.init(I);

    // Create a basic scene
    // -----------------------------------
    //          Loading things
    // -----------------------------------
    //  As you will see in section 5, our
    //  application knows locations where
    //  it can search for media.
    //  Here we use a mesh included in
    //  the installation files: a robot.
    // -----------------------------------
    // Here we load the "robot.mesh" model that is found thanks to the resources locations
    // specified in the "resources.cfg" file
    ogre.load("Robot", "robot.mesh");
    ogre.setPosition("Robot", vpTranslationVector(-0.3, 0.2, 0));
    ogre.setScale("Robot", 0.001f,0.001f,0.001f);
    ogre.setRotation("Robot", vpRotationMatrix(vpRxyzVector(M_PI, 0, 0)));

    cMo[2][3] = 1.; // Z = 1 meter

    std::cout << "cMo:\n" << cMo << std::endl;

    // Rendering loop, ends when Escape is pressed
    while(ogre.continueRendering()){
      // Acquire a new image
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394)
      grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      grabber >> frame;
      vpImageConvert::convert(frame, I);
#endif
      //Pose computation
      // ...
      // cMo updated
      // Display the robot at the position specified by cMo with vpAROgre
      ogre.display(I,cMo);
    }
#else
    std::cout << "You need an available framegrabber to run this example" << std::endl;
#endif
#else
    std::cout << "You need Ogre3D to run this example" << std::endl;
#endif
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
    return 1;
  }
  catch(...) {
    std::cout << "Catch an exception " << std::endl;
    return 1;
  }
}
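The pose computation is deliberately left as a placeholder in the rendering loop above. A hedged way to fill it in ViSP is vpPose over 2D-3D correspondences; the tracked normalized coordinates x[i], y[i] and the points vector below are assumed to exist:

    // Sketch: estimate cMo from known 3D model points and tracked image features
    vpPose pose;
    for (unsigned int i = 0; i < points.size(); i++) {
      points[i].set_x(x[i]); // normalized image coordinates, tracked elsewhere
      points[i].set_y(y[i]);
      pose.addPoint(points[i]);
    }
    pose.computePose(vpPose::DEMENTHON, cMo);  // coarse initialization
    pose.computePose(vpPose::VIRTUAL_VS, cMo); // non-linear refinement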
Example #6
/*!
  Get position of a SceneNode.
  \param name : Name of the SceneNode in the scene graph.
  \return The position of the node.
*/
vpTranslationVector vpAROgre::getPosition(const std::string &name) const
{
  Ogre::Vector3 translation = mSceneMgr->getSceneNode(name)->getPosition();
  return vpTranslationVector((Ogre::Real)translation[0], (Ogre::Real)translation[1], (Ogre::Real)translation[2]);
}
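Usage mirrors the setPosition() calls in the earlier examples, e.g.:

  vpTranslationVector t = ogre.getPosition("Robot"); // node name must exist in the scene
  std::cout << "Robot position: " << t.t() << std::endl;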