Example #1
int main(int argc, const char **argv)
{ 
  std::string env_ipath;
  std::string opt_ipath;
  std::string ipath;
  std::string opt_ppath;
  std::string dirname;
  std::string filename;
  bool opt_click_allowed = true;

  // Get the VISP_INPUT_IMAGE_PATH environment variable value
  char *ptenv = getenv("VISP_INPUT_IMAGE_PATH");
  if (ptenv != NULL)
    env_ipath = ptenv;

  // Set the default input path
  if (! env_ipath.empty())
    ipath = env_ipath;


  // Read the command line options
  if (getOptions(argc, argv, opt_ipath, opt_ppath, opt_click_allowed) == false) {
    exit (-1);
  }

  // Get the option values
  if (!opt_ipath.empty())
    ipath = opt_ipath;

  // Compare ipath and env_ipath. If they differ, we give precedence to
  // the input path coming from the command line option
  if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) {
    if (ipath != env_ipath) {
      std::cout << std::endl
                << "WARNING: " << std::endl;
      std::cout << "  Since -i <visp image path=" << ipath << "> "
                << "  is different from VISP_IMAGE_PATH=" << env_ipath << std::endl
                << "  we skip the environment variable." << std::endl;
    }
  }

  // Test if an input path is set
  if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty() ){
    usage(argv[0], NULL, ipath, opt_ppath);
    std::cerr << std::endl
              << "ERROR:" << std::endl;
    std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH "
              << std::endl
              << "  environment variable to specify the location of the " << std::endl
              << "  image path where test images are located." << std::endl
              << "  Use -p <personal image path> option if you want to "<<std::endl
              << "  use personal images." << std::endl
              << std::endl;

    exit(-1);
  }

  // Stream used to build the generic name of the image files. The images
  // are grey level images (unsigned char); their size is not defined yet,
  // it will be defined when they are read from the disk.
  std::ostringstream s;

  if (opt_ppath.empty()){
    // Set the path location of the image sequence
    dirname = ipath + vpIoTools::path("/ViSP-images/mire-2/");

    // Build the name of the image file

    s.setf(std::ios::right, std::ios::adjustfield);
    s << "image.%04d.pgm";
    filename = dirname + s.str();
  }
  else {
    filename = opt_ppath;
  }

  // We will read a sequence of images
  vpVideoReader grabber;
  grabber.setFirstFrameIndex(1);
  grabber.setFileName(filename.c_str());
  // Grey level image associated to a display in the initial pose computation
  vpImage<unsigned char> Idisplay;
  // Grey level image to track points
  vpImage<unsigned char> I;
  // RGBa image to get background
  vpImage<vpRGBa> IC;
  // Homogeneous matrix representing the pose of the object in the camera frame
  vpHomogeneousMatrix cmo;

  // Variables used for pose computation purposes
  vpPose mPose;
  vpDot2 md[4];
  vpImagePoint mcog[4];
  vpPoint mP[4];

  // Camera parameters obtained from calibration
  // Keep u0 and v0 as the center of the image
  vpCameraParameters mcam;

  // Read the PGM image named "filename" from the disk, and put the
  // bitmap into the image structure I. I is initialized to the
  // correct size.
  //
  // The reader may throw various exceptions if, for example, the file
  // does not exist or if the memory cannot be allocated.
  try{
    vpCTRACE << "Load: " << filename << std::endl;
    grabber.open(Idisplay);
    grabber.acquire(Idisplay);
    vpCameraParameters mcamTmp(592,570,grabber.getWidth()/2,grabber.getHeight()/2);
    // Compute the initial pose of the camera
    computeInitialPose(&mcamTmp, Idisplay, &mPose, md, mcog, &cmo, mP,
                       opt_click_allowed);
    // Close the framegrabber
    grabber.close();

    // Associate the grabber to the RGBa image
    grabber.open(IC);
    mcam.init(mcamTmp);
  }
  catch(...)
  {
    // An exception is thrown if the image could not be read; here this
    // results in the end of the program. Note that the reader has already
    // printed another error message to give more information about the
    // error.
    std::cerr << std::endl
              << "ERROR:" << std::endl;
    std::cerr << "  Cannot read " << filename << std::endl;
    std::cerr << "  Check your -i " << ipath << " option " << std::endl
              << "  or VISP_INPUT_IMAGE_PATH environment variable."
              << std::endl;
    exit(-1);
  }

  // Create a vpAROgre object with color background
  vpAROgre ogre(mcam, (unsigned int)grabber.getWidth(), (unsigned int)grabber.getHeight());
  // Initialize it
  ogre.init(IC);
  ogre.load("Robot", "robot.mesh");
  ogre.setScale("Robot", 0.001f,0.001f,0.001f);
  ogre.setRotation("Robot", vpRotationMatrix(vpRxyzVector(M_PI/2, -M_PI/2, 0)));

  try
  {
    // Rendering loop
    while(ogre.continueRendering() && !grabber.end()){
      // Acquire a frame
      grabber.acquire(IC);

      // Convert it to a grey level image for tracking purposes
      vpImageConvert::convert(IC,I);
			
      // Update pose calculation
      try{
        // clear the list of points used for pose computation
        mPose.clearPoint();
				
        // track the dots
        for (unsigned int i = 0; i < 4; i++)
        {
          // track the point
          md[i].track(I, mcog[i]);
          md[i].setGrayLevelPrecision(0.90);
          // pixel->meter conversion
          {
            double x = 0, y = 0;
            vpPixelMeterConversion::convertPoint(mcam, mcog[i], x, y);
            mP[i].set_x(x);
            mP[i].set_y(y);
          }

          // and add it to the pose computation point list
          mPose.addPoint(mP[i]);
        }
        // the pose structure has been updated

        // The pose is now updated using the virtual visual servoing approach:
        // Dementhon or Lagrange initialization is no longer necessary, the
        // pose estimated at the previous iteration is sufficient.
        mPose.computePose(vpPose::VIRTUAL_VS, cmo);
      }
      catch(...){
        vpERROR_TRACE("Error in tracking loop");
        return 1; // report the failure from main
      }

      // Display with ogre
      ogre.display(IC,cmo);

      // Wait so that the video does not go too fast
      vpTime::wait(15);
    }
    // Close the grabber
    grabber.close();
  }
  catch (Ogre::Exception& e)
  {
    std::cerr << "Exception:\n";
    std::cerr << e.getFullDescription().c_str() << "\n";
    return 1;
  }
  catch (...)
  {
    std::cerr << "Exception: " << "\n";
    return 1;
  }

  return EXIT_SUCCESS; 
}
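
The listing above relies on helpers that are not shown: getOptions() and usage() are the usual ViSP command-line scaffolding, and computeInitialPose() estimates the initial pose from the four dots. Below is a minimal sketch of what computeInitialPose() could look like, assuming a square target whose four dots have known 3D coordinates; the side length L and the interactive initialization by clicks are assumptions, not the original implementation.

#include <visp/vpDot2.h>
#include <visp/vpPixelMeterConversion.h>
#include <visp/vpPose.h>

// Hypothetical sketch: initialize the four dot trackers (by user clicks when
// allowed), build the 2D/3D correspondences and estimate an initial pose.
void computeInitialPose(vpCameraParameters *mcam, vpImage<unsigned char> &I,
                        vpPose *mPose, vpDot2 *md, vpImagePoint *mcog,
                        vpHomogeneousMatrix *cmo, vpPoint *mP,
                        const bool &opt_click_allowed)
{
  // 3D model of the target: four coplanar points on a square of side 2L.
  // L = 0.06 is a placeholder; use the real target dimensions.
  double L = 0.06;
  mP[0].setWorldCoordinates(-L, -L, 0);
  mP[1].setWorldCoordinates( L, -L, 0);
  mP[2].setWorldCoordinates( L,  L, 0);
  mP[3].setWorldCoordinates(-L,  L, 0);

  for (unsigned int i = 0; i < 4; i++) {
    // Interactive initialization: the user clicks on dot i. A non-interactive
    // variant would pass known image coordinates to initTracking() instead.
    if (opt_click_allowed)
      md[i].initTracking(I);
    md[i].track(I, mcog[i]);

    // Pixel to meter conversion of the dot center of gravity
    double x = 0, y = 0;
    vpPixelMeterConversion::convertPoint(*mcam, mcog[i], x, y);
    mP[i].set_x(x);
    mP[i].set_y(y);
    mPose->addPoint(mP[i]);
  }

  // Linear initialization (Lagrange) refined by virtual visual servoing
  mPose->computePose(vpPose::LAGRANGE, *cmo);
  mPose->computePose(vpPose::VIRTUAL_VS, *cmo);
}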

Example #2
int main()
{
#if defined(VISP_HAVE_PTHREAD)
  try {
    vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
    vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

    /*
    Top view of the world frame, the camera frame and the object frame

    world, also robot base frame :  --> w_y
                                    |
                                   \|/
                                      w_x

    object :
                     o_y
                  /|\
                   |
             o_x <--


    camera :
                     c_y
                  /|\
                   |
             c_x <--

    */
    vpHomogeneousMatrix wMo(vpTranslationVector(0.40, 0, -0.15),
                            vpRotationMatrix(vpRxyzVector(-M_PI, 0, M_PI/2.)));

    std::vector<vpPoint> point;
    point.push_back( vpPoint(-0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1,-0.1, 0) );
    point.push_back( vpPoint( 0.1, 0.1, 0) );
    point.push_back( vpPoint(-0.1, 0.1, 0) );

    vpServo task ;
    task.setServo(vpServo::EYEINHAND_CAMERA);
    task.setInteractionMatrixType(vpServo::CURRENT);
    task.setLambda(0.5);

    vpFeaturePoint p[4], pd[4] ;
    for (unsigned int i = 0 ; i < 4 ; i++) {
      point[i].track(cdMo);
      vpFeatureBuilder::create(pd[i], point[i]);
      point[i].track(cMo);
      vpFeatureBuilder::create(p[i], point[i]);
      task.addFeature(p[i], pd[i]);
    }

    vpSimulatorViper850 robot(true);
    robot.setVerbose(true);

    // Enlarge the default joint limits
    vpColVector qmin = robot.getJointMin();
    vpColVector qmax = robot.getJointMax();
    qmin[0] = -vpMath::rad(180);
    qmax[0] =  vpMath::rad(180);
    qmax[1] =  vpMath::rad(0);
    qmax[2] =  vpMath::rad(270);
    qmin[4] = -vpMath::rad(180);
    qmax[4] =  vpMath::rad(180);

    robot.setJointLimit(qmin, qmax);

    std::cout << "Robot joint limits: " << std::endl;
    for (unsigned int i=0; i< qmin.size(); i ++)
      std::cout << "Joint " << i << ": min " << vpMath::deg(qmin[i]) << " max " << vpMath::deg(qmax[i]) << " (deg)" << std::endl;

    robot.init(vpViper850::TOOL_PTGREY_FLEA2_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion);
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL);
    robot.initScene(vpWireFrameSimulator::PLATE, vpWireFrameSimulator::D_STANDARD);
    robot.set_fMo(wMo);
    bool ret = true;
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
    ret =
#endif
        robot.initialiseCameraRelativeToObject(cMo);
    if (ret == false)
      return 0; // Not able to set the position
    robot.setDesiredCameraPosition(cdMo);
    // We modify the default external camera position
    robot.setExternalCameraPosition(vpHomogeneousMatrix(vpTranslationVector(-0.4, 0.4, 2),
                                                        vpRotationMatrix(vpRxyzVector(M_PI/2,0,0))));

    vpImage<unsigned char> Iint(480, 640, 255);
#if defined(VISP_HAVE_X11)
    vpDisplayX displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI displayInt(Iint, 700, 0, "Internal view");
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV displayInt(Iint, 700, 0, "Internal view");
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif

    vpCameraParameters cam(840, 840, Iint.getWidth()/2, Iint.getHeight()/2);
    // Modify the camera parameters to match those used in the other simulations
    robot.setCameraParameters(cam);

    bool start = true;
    for (int iter = 0; iter < 275; iter++)
    {
      cMo = robot.get_cMo();

      for (unsigned int i = 0 ; i < 4 ; i++) {
        point[i].track(cMo);
        vpFeatureBuilder::create(p[i], point[i]);
      }

      vpDisplay::display(Iint);
      robot.getInternalView(Iint);
      if (!start) {
        display_trajectory(Iint, point, cMo, cam);
        vpDisplay::displayText(Iint, 40, 120, "Click to stop the servo...", vpColor::red);
      }
      vpDisplay::flush(Iint);

      vpColVector v = task.computeControlLaw();
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      // A click to exit
      if (vpDisplay::getClick(Iint, false))
        break;

      if (start) {
        start = false;
        v = 0;
        robot.setVelocity(vpRobot::CAMERA_FRAME, v);
        vpDisplay::displayText(Iint, 40, 120, "Click to start the servo...", vpColor::blue);
        vpDisplay::flush(Iint);
        //vpDisplay::getClick(Iint);
      }

      vpTime::wait(1000*robot.getSamplingTime());
    }
    task.kill();
  }
  catch(const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
  }
#endif
}
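
Example #2 calls a display_trajectory() helper that is not defined in the listing. A minimal sketch, modeled on the ViSP IBVS tutorials (the green trajectory color is an assumption): project each 3D point at the current pose, accumulate its image-plane position and draw the resulting trajectory.

#include <vector>
#include <visp/vpDisplay.h>
#include <visp/vpMeterPixelConversion.h>
#include <visp/vpPoint.h>

void display_trajectory(const vpImage<unsigned char> &I, std::vector<vpPoint> &point,
                        const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam)
{
  static std::vector<vpImagePoint> traj[4];
  vpImagePoint cog;
  for (unsigned int i = 0; i < 4; i++) {
    // Project the 3D point onto the image plane and convert to pixel coordinates
    point[i].project(cMo);
    vpMeterPixelConversion::convertPoint(cam, point[i].get_x(), point[i].get_y(), cog);
    traj[i].push_back(cog);
  }
  // Draw the trajectory accumulated so far for each of the four points
  for (unsigned int i = 0; i < 4; i++) {
    for (unsigned int j = 1; j < traj[i].size(); j++)
      vpDisplay::displayLine(I, traj[i][j - 1], traj[i][j], vpColor::green);
  }
}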
Example #3
int main()
{
  try {
#if defined(VISP_HAVE_OGRE) 
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100)

    // Image in which to store the acquired frames.
    // Here we acquire a color image; as a consequence, the background
    // texture used in the Ogre renderer will also be in color.
    vpImage<vpRGBa> I;

    // Now we try to find an available framegrabber
#if defined(VISP_HAVE_V4L2)
    // Video for linux 2 grabber
    vpV4l2Grabber grabber;
    grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_DC1394)
    // libdc1394-2
    vp1394TwoGrabber grabber;
    grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    // OpenCV to gather images
    cv::VideoCapture grabber(0); // open the default camera
    if(!grabber.isOpened()) { // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    grabber >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    // Parameters of our camera
    double px = 565;
    double py = 565;
    double u0 = I.getWidth() / 2;
    double v0 = I.getHeight() / 2;
    vpCameraParameters cam(px,py,u0,v0);
    // The matrix with our pose
    // Defines the pose of the object in the camera frame
    vpHomogeneousMatrix cMo;

    // Our augmented reality object: a simulator built with the camera
    // parameters defined above and a background image of the right size
    vpAROgre ogre(cam, I.getWidth(), I.getHeight());
    // Initialisation
    // Here we load the requested plugins specified in the "plugins.cfg" file
    // and the resources specified in the "resources.cfg" file
    // These two files can be found respectively in the VISP_HAVE_OGRE_PLUGINS_PATH
    // and VISP_HAVE_OGRE_RESOURCES_PATH folders
    ogre.init(I);

    // Create a basic scene
    // -----------------------------------
    //            Loading things
    // -----------------------------------
    //  As you will see in section 5, our
    //  application knows the locations where
    //  it can search for media.
    //  Here we use a mesh included in
    //  the installation files: a robot.
    // -----------------------------------
    // Here we load the "robot.mesh" model that is found thanks to the resources locations
    // specified in the "resources.cfg" file
    ogre.load("Robot", "robot.mesh");
    ogre.setPosition("Robot", vpTranslationVector(-0.3, 0.2, 0));
    ogre.setScale("Robot", 0.001f,0.001f,0.001f);
    ogre.setRotation("Robot", vpRotationMatrix(vpRxyzVector(M_PI, 0, 0)));

    cMo[2][3] = 1.; // Z = 1 meter

    std::cout << "cMo:\n" << cMo << std::endl;

    // Rendering loop, ended by pressing Escape
    while(ogre.continueRendering()){
      // Acquire a new image
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394)
      grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      grabber >> frame;
      vpImageConvert::convert(frame, I);
#endif
      // Pose computation (a possible implementation is sketched after this listing)
      // ...
      // cMo updated
      // Display the robot at the position specified by cMo with vpAROgre
      ogre.display(I,cMo);
    }
#else
    std::cout << "You need an available framegrabber to run this example" << std::endl;
#endif
#else
    std::cout << "You need Ogre3D to run this example" << std::endl;
#endif
    return 0;
  }
  catch(const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
    return 1;
  }
  catch(...) {
    std::cout << "Catch an exception " << std::endl;
    return 1;
  }
}
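
The pose computation step is deliberately left empty in Example #3 (the "// ..." in the rendering loop). One possible way to fill it, mirroring Example #1 (the helper name and its arguments are illustrative, not part of the original): track four blobs whose 3D coordinates are known and refine cMo by virtual visual servoing.

#include <visp/vpDot2.h>
#include <visp/vpImageConvert.h>
#include <visp/vpPixelMeterConversion.h>
#include <visp/vpPose.h>

// Hypothetical helper: update cMo from the current color frame. The dot
// trackers and the 3D points are assumed to be initialized beforehand,
// e.g. as in computeInitialPose() of Example #1.
void updatePose(const vpImage<vpRGBa> &IC, const vpCameraParameters &cam,
                vpDot2 dot[4], vpPoint P[4], vpPose &pose, vpHomogeneousMatrix &cMo)
{
  vpImage<unsigned char> I;
  vpImageConvert::convert(IC, I); // blob tracking works on a grey level image

  pose.clearPoint();
  for (unsigned int i = 0; i < 4; i++) {
    vpImagePoint cog;
    dot[i].track(I, cog);
    // Pixel to meter conversion of the tracked center of gravity
    double x = 0, y = 0;
    vpPixelMeterConversion::convertPoint(cam, cog, x, y);
    P[i].set_x(x);
    P[i].set_y(y);
    pose.addPoint(P[i]);
  }
  // The pose estimated at the previous iteration is a good initial guess
  pose.computePose(vpPose::VIRTUAL_VS, cMo);
}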