Example #1
0
int main(int argc, char** argv)
{
	// Touch the singletons first so they exist for the whole program run.
	Logger::getPtr();
	TimeManager::getPtr();

	// create subsystems (stack-allocated; destroyed in reverse order at exit):
	OgreSubsystem ogre(800,600,false);
	OISSubsystem ois;
	BulletSubsystem bull;
	GUISubsystem gui(800,600);
	ALSubsystem aSys;

	// Allocate the engine on the stack instead of with new/delete:
	// the original heap allocation leaked the Engine if init()/start()
	// threw before the delete. Stack lifetime gives the same
	// construction/destruction order (engine dies before the subsystems
	// it references) with no leak path.
	Engine eng;
	eng.addSubsystem(&aSys);
	eng.addSubsystem(&ogre);
	eng.addSubsystem(&ois);
	eng.addSubsystem(&bull);
	eng.addSubsystem(&gui);

	// initialize the engine
	eng.init();

	// add game state
	// NOTE(review): ownership of the PlayState appears to transfer to the
	// engine (original code never deleted it) — confirm addState() frees it.
	eng.addState(new PlayState());

	// start up the engine (blocks until the game loop ends)
	eng.start();

	return 0;
}
int main()
{
  try {
#if defined(VISP_HAVE_OGRE) 
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100)

    // Image to store gathered data
    // Here we acquire a grey level image. The consequence will be that
    // the background texture used in Ogre renderer will be also in grey
    // level.
    vpImage<unsigned char> I;

    // Now we try to find an available framegrabber.
    // Exactly one of the three branches below is compiled in, chosen by
    // the build configuration; each leaves `grabber` ready and `I` sized
    // from a first acquired frame.
#if defined(VISP_HAVE_V4L2)
    // Video for linux 2 grabber
    vpV4l2Grabber grabber;
    // Open frame grabber
    // Here we acquire an image from an available framegrabber to update
    // the image size
    grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_DC1394)
    // libdc1394-2
    vp1394TwoGrabber grabber;
    // Open frame grabber
    // Here we acquire an image from an available framegrabber to update
    // the image size
    grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    // OpenCV to gather images
    cv::VideoCapture grabber(0); // open the default camera
    if(!grabber.isOpened()) { // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    grabber >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    // Parameters of our camera: hard-coded focal lengths (px, py) and the
    // principal point (u0, v0) placed at the image centre.
    // NOTE(review): getWidth()/2 is integer division before the implicit
    // conversion to double — presumably intended (pixel-centre precision
    // is not needed here); confirm.
    double px = 565;
    double py = 565;
    double u0 = I.getWidth() / 2;
    double v0 = I.getHeight() / 2;
    vpCameraParameters cam(px,py,u0,v0);
    // The matrix with our pose (object frame expressed in the camera frame)
    vpHomogeneousMatrix cMo;
    cMo[2][3] = 0.5; // Z = 0.5 meter

    // Our object: the AR renderer, sized to match the grabbed images
    vpAROgreAdvanced ogre(cam, I.getWidth(), I.getHeight());
    // Initialisation
    ogre.init(I);

    // Rendering loop: runs until the Ogre window is closed / escape pressed
    while(ogre.continueRendering()){
      // Acquire a new image (same grabber branch as was compiled above)
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394)
      grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      grabber >> frame;
      vpImageConvert::convert(frame, I);
#endif
      // Pose computation
      // ...
      // cMo updated
      // Display with vpAROgre: draws I as background and the scene at cMo
      ogre.display(I, cMo);
    }
#else
    std::cout << "You need an available framegrabber to run this example" << std::endl;
#endif
#else
    std::cout << "You need Ogre3D to run this example" << std::endl;
#endif
    return 0;
  }
  catch(vpException &e) {
    // ViSP-specific failure: report it and exit with an error status
    std::cout << "Catch an exception: " << e << std::endl;
    return 1;
  }
  catch(...) {
    // Anything else (e.g. Ogre errors): still exit cleanly with an error status
    std::cout << "Catch an exception " << std::endl;
    return 1;
  }
}
Example #3
0
int main(int argc, const char **argv)
{
  std::string env_ipath;
  std::string opt_ipath;
  std::string ipath;
  std::string opt_ppath;
  std::string dirname;
  std::string filename;
  bool opt_click_allowed = true;

  // Get the VISP_INPUT_IMAGE_PATH environment variable value
  char *ptenv = getenv("VISP_INPUT_IMAGE_PATH");
  if (ptenv != NULL)
    env_ipath = ptenv;

  // Set the default input path
  if (! env_ipath.empty())
    ipath = env_ipath;


  // Read the command line options
  if (getOptions(argc, argv, opt_ipath, opt_ppath, opt_click_allowed) == false) {
    exit (-1);
  }

  // Get the option values: -i overrides the environment variable
  if (!opt_ipath.empty())
    ipath = opt_ipath;

  // Compare ipath and env_ipath. If they differ, we take into account
  // the input path comming from the command line option
  if (!opt_ipath.empty() && !env_ipath.empty() && opt_ppath.empty()) {
    if (ipath != env_ipath) {
      std::cout << std::endl
                << "WARNING: " << std::endl;
      std::cout << "  Since -i <visp image path=" << ipath << "> "
                << "  is different from VISP_IMAGE_PATH=" << env_ipath << std::endl
                << "  we skip the environment variable." << std::endl;
    }
  }

  // Test if an input path is set; without one we cannot locate the sequence
  if (opt_ipath.empty() && env_ipath.empty() && opt_ppath.empty() ){
    usage(argv[0], NULL, ipath, opt_ppath);
    std::cerr << std::endl
              << "ERROR:" << std::endl;
    std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH "
              << std::endl
              << "  environment variable to specify the location of the " << std::endl
              << "  image path where test images are located." << std::endl
              << "  Use -p <personal image path> option if you want to "<<std::endl
              << "  use personal images." << std::endl
              << std::endl;

    exit(-1);
  }

  // Build the printf-style pattern of the image sequence file names
  std::ostringstream s;

  if (opt_ppath.empty()){
    // Set the path location of the image sequence
    dirname = ipath + vpIoTools::path("/ViSP-images/mire-2/");

    // Build the name of the image file

    s.setf(std::ios::right, std::ios::adjustfield);
    s << "image.%04d.pgm";
    filename = dirname + s.str();
  }
  else {
    filename = opt_ppath;
  }

  // We will read a sequence of images
  vpVideoReader grabber;
  grabber.setFirstFrameIndex(1);
  grabber.setFileName(filename.c_str());
  // Grey level image associated to a display in the initial pose computation
  vpImage<unsigned char> Idisplay;
  // Grey level image to track points
  vpImage<unsigned char> I;
  // RGBa image to get background
  vpImage<vpRGBa> IC;
  // Homogeneous matrix representing the pose of the object in the camera
  // frame (the original comment mislabelled this as camera parameters)
  vpHomogeneousMatrix cmo;

  // Variables used for pose computation purposes
  vpPose mPose;
  vpDot2 md[4];
  vpImagePoint mcog[4];
  vpPoint mP[4];

  // CameraParameters we got from calibration
  // Keep u0 and v0 as center of the screen
  vpCameraParameters mcam;

  // Read the first image of the sequence from disk, compute the initial
  // pose from the 4 tracked dots, then re-open the grabber on the RGBa
  // image used as the Ogre background.
  //
  // The reader may throw if, for example, the file does not exist or
  // memory cannot be allocated.
  try{
    vpCTRACE << "Load: " << filename << std::endl;
    grabber.open(Idisplay);
    grabber.acquire(Idisplay);
    vpCameraParameters mcamTmp(592,570,grabber.getWidth()/2,grabber.getHeight()/2);
    // Compute the initial pose of the camera
    computeInitialPose(&mcamTmp, Idisplay, &mPose, md, mcog, &cmo, mP,
                       opt_click_allowed);
    // Close the framegrabber
    grabber.close();

    // Associate the grabber to the RGBa image
    grabber.open(IC);
    mcam.init(mcamTmp);
  }
  catch(...)
  {
    // an exception is thrown if an exception from readPGM has been caught
    // here this will result in the end of the program
    // Note that another error message has been printed from readPGM
    // to give more information about the error
    std::cerr << std::endl
              << "ERROR:" << std::endl;
    std::cerr << "  Cannot read " << filename << std::endl;
    std::cerr << "  Check your -i " << ipath << " option " << std::endl
              << "  or VISP_INPUT_IMAGE_PATH environment variable."
              << std::endl;
    exit(-1);
  }

  // Create a vpRAOgre object with color background
  vpAROgre ogre(mcam, (unsigned int)grabber.getWidth(), (unsigned int)grabber.getHeight());
  // Initialize it
  ogre.init(IC);
  ogre.load("Robot", "robot.mesh");
  ogre.setScale("Robot", 0.001f,0.001f,0.001f);
  ogre.setRotation("Robot", vpRotationMatrix(vpRxyzVector(M_PI/2, -M_PI/2, 0)));

  try
  {
    // Rendering loop: track the dots on each frame, update the pose, render
    while(ogre.continueRendering() && !grabber.end()){
      // Acquire a frame
      grabber.acquire(IC);

      // Convert it to a grey level image for tracking purpose
      vpImageConvert::convert(IC,I);

      // Update pose calculation
      try{
        // kill the point list
        mPose.clearPoint() ;

        // track the dot
        for (int i=0 ; i < 4 ; i++)
        {
          // track the point
          md[i].track(I, mcog[i]) ;
          md[i].setGrayLevelPrecision(0.90);
          // pixel->meter conversion
          {
            double x=0, y=0;
            vpPixelMeterConversion::convertPoint(mcam, mcog[i], x, y)  ;
            mP[i].set_x(x) ;
            mP[i].set_y(y) ;
          }

          // and added to the pose computation point list
          mPose.addPoint(mP[i]) ;
        }
        // the pose structure has been updated

        // the pose is now updated using the virtual visual servoing approach
        // Dementhon or lagrange is no longuer necessary, pose at the
        // previous iteration is sufficient
        mPose.computePose(vpPose::VIRTUAL_VS, cmo);
      }
      catch(...){
        vpERROR_TRACE("Error in tracking loop") ;
        // BUG FIX: the original "return false;" converted to 0, so main()
        // reported SUCCESS on a tracking failure. Report failure instead.
        return EXIT_FAILURE;
      }

      // Display with ogre
      ogre.display(IC,cmo);

      // Wait so that the video does not go too fast
      vpTime::wait(15);
    }
    // Close the grabber
    grabber.close();
  }
  catch (Ogre::Exception& e)
  {
    std::cerr << "Exception:\n";
    std::cerr << e.getFullDescription().c_str() << "\n";
    return 1;
  }
  catch (...)
  {
    std::cerr << "Exception: " << "\n";
    return 1;
  }

  return EXIT_SUCCESS;
}
Example #4
0
int main()
{
  try {
#if defined(VISP_HAVE_OGRE) 
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100)

	// Image to stock gathered data
    // Here we acquire a color image. The consequence will be that
    // the background texture used in Ogre renderer will be also in color.
    vpImage<vpRGBa> I;

    // Now we try to find an available framegrabber
#if defined(VISP_HAVE_V4L2)
    // Video for linux 2 grabber
    vpV4l2Grabber grabber;
	grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_DC1394)
    // libdc1394-2
    vp1394TwoGrabber grabber;
	grabber.open(I);
    grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    // OpenCV to gather images
    cv::VideoCapture grabber(0); // open the default camera
    if(!grabber.isOpened()) { // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    grabber >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    // Parameters of our camera
    double px = 565;
    double py = 565;
    double u0 = I.getWidth() / 2;
    double v0 = I.getHeight() / 2;
    vpCameraParameters cam(px,py,u0,v0);
    // The matrix with our pose
    // Defines the pose of the object in the camera frame
    vpHomogeneousMatrix cMo;

    // Our object
    // A simulator with the camera parameters defined above,
    // a grey level background image and of the good size
    vpAROgre ogre(cam, I.getWidth(), I.getHeight());
    // Initialisation
    // Here we load the requested plugins specified in the "plugins.cfg" file
    // and the resources specified in the "resources.cfg" file
    // These two files can be found respectively in ViSP_HAVE_OGRE_PLUGINS_PATH
    // and ViSP_HAVE_OGRE_RESOURCES_PATH folders
    ogre.init(I);

    // Create a basic scene
    // -----------------------------------
    //  	      Loading things
    // -----------------------------------
    //  As you will see in section 5, our
    //  application knows locations where
    //  it can search for medias.
    //  Here we use a mesh included in
    //  the installation files : a robot.
    // -----------------------------------
    // Here we load the "robot.mesh" model that is found thanks to the resources locations
    // specified in the "resources.cfg" file
    ogre.load("Robot", "robot.mesh");
    ogre.setPosition("Robot", vpTranslationVector(-0.3, 0.2, 0));
    ogre.setScale("Robot", 0.001f,0.001f,0.001f);
    ogre.setRotation("Robot", vpRotationMatrix(vpRxyzVector(M_PI, 0, 0)));

    cMo[2][3] = 1.; // Z = 1 meter

    std::cout << "cMo:\n" << cMo << std::endl;

    // Rendering loop, ended with on escape
    while(ogre.continueRendering()){
      // Acquire a new image
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394)
      grabber.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
	  grabber >> frame;
      vpImageConvert::convert(frame, I);
#endif
      //Pose computation
      // ...
      // cMo updated
      // Display the robot at the position specified by cMo with vpAROgre
      ogre.display(I,cMo);
    }
#else
    std::cout << "You need an available framegrabber to run this example" << std::endl;
#endif
#else
    std::cout << "You need Ogre3D to run this example" << std::endl;
#endif
    return 0;
  }
  catch(vpException e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return 1;
  }
  catch(...) {
    std::cout << "Catch an exception " << std::endl;
    return 1;
  }
}