Example #1
void
vpPolygon::updateBoundingBox()
{
  if(_corners.size() == 0){
    _bbox.setBottomRight(vpImagePoint(0, 0));
    _bbox.setTopLeft(vpImagePoint(0, 0));
    _goodPoly = false;
    return;
  }

  std::set<double> setI;
  std::set<double> setJ;
  for(unsigned int i=0; i<_corners.size(); ++i){
    setI.insert(_corners[i].get_i());
    setJ.insert(_corners[i].get_j());
  }
  vpImagePoint tl(*(setI.begin()), *(setJ.begin()));
  // the sets are sorted ascending, so rbegin() yields the largest i and j
  vpImagePoint br(*(setI.rbegin()), *(setJ.rbegin()));

  if(tl == br){
    _goodPoly = false;
  }
  _bbox.setTopLeft(tl);
  _bbox.setBottomRight(br);
}
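A minimal usage sketch (assuming ViSP's public vpPolygon/vpRect API; the corner values are made up). Constructing the polygon runs init(), which in turn calls the updateBoundingBox() shown above:

#include <iostream>
#include <vector>
#include <visp3/core/vpPolygon.h>

int main()
{
  std::vector<vpImagePoint> corners; // (i, j) = (row, column)
  corners.push_back(vpImagePoint(10, 20));
  corners.push_back(vpImagePoint(10, 80));
  corners.push_back(vpImagePoint(50, 50));
  vpPolygon poly(corners);

  vpRect bbox = poly.getBoundingBox();
  std::cout << "top-left: " << bbox.getTopLeft()
            << ", bottom-right: " << bbox.getBottomRight() << std::endl;
  return 0;
}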
Example #2
/*!
  Basic constructor.
  
  By default, it defines a triangle with the three 2D points of coordinates
  \f$ (0,0) \f$, \f$ (1,0) \f$ and \f$ (0,1) \f$.
*/
vpPolygon::vpPolygon()
{
  _goodPoly = true;
  std::vector<vpImagePoint> corners;
  corners.push_back(vpImagePoint(0,0));
  corners.push_back(vpImagePoint(1,0));
  corners.push_back(vpImagePoint(0,1));
  init(corners);
}
Example #3
int main(int argc,char** argv){
  CmdLine cmd(argc,argv);
  DEBUG_WRITE("Starting program");
  vpDisplay* disp = new vpDisplayX();

  vpVideoWriter writer;
  vpImage<vpRGBa> out_I,out_I_overlay;
  std::vector< vpImage<vpRGBa> > in_I;
  if(cmd.should_exit()) return 0; //exit if needed

  DEBUG_WRITE("initializing writer");
  writer.setFileName((cmd.get_output_dir() + std::string("/%08d.jpg")).c_str());
  writer.open(out_I);

  std::vector<boost::shared_ptr<VideoWithCaption> > videos = cmd.get_videos();
  unsigned int cursor_x=0,cursor_y=0;

  DEBUG_WRITE("initializing images");

  for(std::vector<boost::shared_ptr<VideoWithCaption> >::iterator i=videos.begin();i!=videos.end();i++){
    if(cmd.get_debug())
      std::cout << "initializing image from " << (*i)->caption << "(" << (*i)->path <<")"<< std::endl;
    (*i)->reader.acquire((*i)->I);
    (*i)->y=cursor_y;
    (*i)->x=cursor_x;
    cursor_x+=(*i)->I.getWidth();
    if(cmd.get_debug())
      std::cout << "configuring a new image at " << (*i)->x << "," << (*i)->y << std::endl;
  }

  out_I.resize(std::max(cursor_y,videos[0]->I.getHeight()),cursor_x);
  disp->init(out_I);
  if(cmd.get_debug())
    std::cout << "Output image size set to " << out_I.getWidth() << "x" << out_I.getHeight() << std::endl;
  bool last_frame_reached=false;
  while (!last_frame_reached) {
    vpDisplay::display(out_I);
    for(std::vector<boost::shared_ptr<VideoWithCaption> >::iterator i=videos.begin();i!=videos.end();i++){
      if((*i)->reader.getFrameIndex()-2>(*i)->reader.getLastFrameIndex()){
        last_frame_reached=true;
        break;
      }
      out_I.insert((*i)->I,vpImagePoint((*i)->y,(*i)->x));
      vpDisplay::displayCharString(out_I,vpImagePoint((*i)->y+CAPTION_OFFSET_X,(*i)->x+CAPTION_OFFSET_Y),(*i)->caption.c_str(),vpColor::blue);
      (*i)->reader.acquire((*i)->I);
    }
    vpDisplay::flush(out_I);
    vpDisplay::getImage(out_I,out_I_overlay);
    writer.saveFrame(out_I_overlay);
    writer.saveFrame(out_I_overlay);
    writer.saveFrame(out_I_overlay);
  }
  writer.close();
  delete disp;

  return 0;
}
Example #4
/*!
  Initialise the face to track. All the points in the map, representing all the
  points detected in the image, are parsed in order to extract the ids of the
  points that are indeed in the face.

  \param _tracker : ViSP OpenCV KLT tracker.
*/
void
vpMbtDistanceKltPoints::init(const vpKltOpencv& _tracker)
{
  // extract ids of the points in the face
  nbPointsInit = 0;
  nbPointsCur = 0;
  initPoints = std::map<int, vpImagePoint>();
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();
  std::vector<vpImagePoint> roi;
  polygon->getRoiClipped(cam, roi);

  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    _tracker.getFeature((int)i, id, x_tmp, y_tmp);

    bool add = false;

    if(useScanLine)
    {
      if((unsigned int)y_tmp <  hiddenface->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
         (unsigned int)x_tmp <  hiddenface->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
         hiddenface->getMbScanLineRenderer().getPrimitiveIDs()[(unsigned int)y_tmp][(unsigned int)x_tmp] == polygon->getIndex())
        add = true;
    }
    else if(vpPolygon::isInside(roi, y_tmp, x_tmp))
    {
      add = true;
    }

    if(add){
      initPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPointsInd[id] = (int)i;
      nbPointsInit++;
      nbPointsCur++;
    }
  }

  enoughPoints = (nbPointsCur >= minNbPoint);

  // initialisation of the value for the computation in SE3
  vpPlane plan(polygon->getPoint(0), polygon->getPoint(1), polygon->getPoint(2));

  d0 = plan.getD();
  N = plan.getNormal();

  N.normalize();
  N_cur = N;
  invd0 = 1.0 / d0;
}
Example #5
  void
  TrackerViewer::displayMovingEdgeSites()
  {
    if (!sites_)
      return;
    for (unsigned i = 0; i < sites_->moving_edge_sites.size(); ++i)
      {
	double x = sites_->moving_edge_sites[i].x;
	double y = sites_->moving_edge_sites[i].y;
	int suppress = sites_->moving_edge_sites[i].suppress;
	vpColor color = vpColor::black;

	switch(suppress)
	  {
	  case 0:
	    color = vpColor::green;
	    break;
	  case 1:
	    color = vpColor::blue;
	    break;
	  case 2:
	    color = vpColor::purple;
	    break;
	  case 4:
	    color = vpColor::red;
	    break;
	  default:
	    ROS_ERROR_THROTTLE(10, "bad suppress value");
	  }

	vpDisplay::displayCross(image_, vpImagePoint(x, y), 3, color, 1);
      }
  }
Example #6
PlanarDetector::PlanarDetector(vpImage<unsigned char> *Icurrent, vpImage<unsigned char> *Ireference) {
	Ireference_=Ireference;
	Icurrent_=Icurrent;

	//Select a part of the image by clicking on two points which define a rectangle
	corners_.push_back(vpImagePoint(0,0));
	corners_.push_back(vpImagePoint(Ireference->getRows()-1,Ireference->getCols()-1));

	//Build the reference points (and train the classifier).
	height = (unsigned int)(corners_[1].get_i() - corners_[0].get_i());
	width = (unsigned int)(corners_[1].get_j() - corners_[0].get_j());
	std::cerr << "Image size: " << Ireference->getRows() << " " << Ireference->getCols() << std::endl;
	std::cerr << "height and width of template: " << height << " " << width << std::endl;
	int num = planar.buildReference(*Ireference, corners_[0], height, width);
	std::cerr << num << std::endl;
}
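A hedged follow-up, assuming planar is ViSP's vpPlanarObjectDetector (the frame file name is made up): once the reference is built, later frames can be matched against it.

// Hypothetical usage after construction.
vpImage<unsigned char> Icur;
vpImageIo::read(Icur, "frame.pgm"); // made-up input frame
if (planar.matchPoint(Icur))
  std::cerr << "planar object found in the current frame" << std::endl;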
Example #7
/*!
  This method removes the outliers. A point is considered an outlier when its
  associated weight is below a given threshold (threshold_outlier).

  \param _w : Vector containing the weights of all the tracked points.
  \param threshold_outlier : Threshold specifying whether or not a point has to be deleted.
*/
void
vpMbtDistanceKltCylinder::removeOutliers(const vpColVector& _w, const double &threshold_outlier)
{
  std::map<int, vpImagePoint> tmp;
  std::map<int, int> tmp2;
  unsigned int nbSupp = 0;
  unsigned int k = 0;

  nbPointsCur = 0;
  std::map<int, vpImagePoint>::const_iterator iter = curPoints.begin();
  for( ; iter != curPoints.end(); iter++){
    if(_w[k] > threshold_outlier && _w[k+1] > threshold_outlier){
//     if(_w[k] > threshold_outlier || _w[k+1] > threshold_outlier){
      tmp[iter->first] = vpImagePoint(iter->second.get_i(), iter->second.get_j());
      tmp2[iter->first] = curPointsInd[iter->first];
      nbPointsCur++;
    }
    else{
      nbSupp++;
      initPoints.erase(iter->first);
    }

    k+=2;
  }

  if(nbSupp != 0){
    curPoints = std::map<int, vpImagePoint>();
    curPointsInd = std::map<int, int>();

    curPoints = tmp;
    curPointsInd = tmp2;
    enoughPoints = (nbPointsCur >= minNbPoint);
  }
}
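For context, a hedged sketch of the expected weight layout: each tracked point owns two consecutive entries of _w (one per coordinate), matching the k += 2 stride above. The object name and values are illustrative only.

// cylinder is a hypothetical vpMbtDistanceKltCylinder tracking nbPointsCur points.
vpColVector w(2 * nbPointsCur);
w = 1.0;                          // full confidence everywhere...
w[4] = w[5] = 0.1;                // ...except the third point, flagged by the robust estimator
cylinder.removeOutliers(w, 0.4);  // a point survives only if both of its weights exceed 0.4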
Example #8
/*!
   Detect a face in the image. When more than one face is detected, faces are sorted from largest to smallest.

   \param frame_gray : Input gray level image to process.
   \return true if one or more faces are found, false otherwise.

   The number of detected faces is returned by getNbObjects().
   If a face is found, the functions getBBox() and getCog() return information about its location.

   The largest face is always available through getBBox(0) or getCog(0).
 */
bool vpDetectorFace::detect(const cv::Mat &frame_gray)
{
  bool detected = false;
  m_message.clear();
  m_polygon.clear();
  m_nb_objects = 0;

  m_faces.clear();
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  m_face_cascade.detectMultiScale( frame_gray, m_faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
  m_face_cascade.detectMultiScale( frame_gray, m_faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

  m_nb_objects = m_faces.size();

  std::sort(m_faces.begin(), m_faces.end(), vpSortLargestFace);

  for( size_t i = 0; i < m_faces.size(); i++ ) { // the loop body naturally handles the empty case
    std::ostringstream message;
    message << "Face " << i;
    m_message.push_back( message.str() );

    detected = true;

    std::vector<vpImagePoint> polygon;
    double x = m_faces[i].tl().x;
    double y = m_faces[i].tl().y;
    double w = m_faces[i].size().width;
    double h = m_faces[i].size().height;

    polygon.push_back(vpImagePoint(y  , x  ));
    polygon.push_back(vpImagePoint(y+h, x  ));
    polygon.push_back(vpImagePoint(y+h, x+w));
    polygon.push_back(vpImagePoint(y  , x+w));

    m_polygon.push_back(polygon);
  }

  return detected;
}
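A minimal, hedged usage sketch (cascade file and image name are placeholders; assumes OpenCV 3): since faces are sorted by size, index 0 is always the largest one.

#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <visp3/detection/vpDetectorFace.h>

int main()
{
  vpDetectorFace detector;
  detector.setCascadeClassifierFile("haarcascade_frontalface_alt.xml"); // placeholder path
  cv::Mat frame_gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);   // placeholder image
  if (detector.detect(frame_gray)) {
    vpRect largest = detector.getBBox(0); // largest face comes first
    std::cout << detector.getNbObjects() << " face(s), largest at "
              << largest.getTopLeft() << std::endl;
  }
  return 0;
}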
Example #9
/*!
    Display the moving edge site with a color corresponding to its state.

    - If green : The vpMeSite is a good point.
    - If blue : The point is removed because of the vpMeSite tracking phase (contrast problem).
    - If purple : The point is removed because of the vpMeSite tracking phase (threshold problem).
    - If red : The point is removed because of the robust method in the virtual visual servoing (M-estimator problem).
    - If cyan : The point is removed because it is too close to another.
    - Yellow otherwise.

    \param I : The image.
    \param i : Pixel i of the site.
    \param j : Pixel j of the site.
    \param state : State of the site.
*/
void vpMeSite::display(const vpImage<unsigned char>& I, const double &i, const double &j,const vpMeSiteState &state)
{
  switch(state)
  {
    case NO_SUPPRESSION:
      vpDisplay::displayCross(I,vpImagePoint(i,j),3,vpColor::green,1);
      break;
      
    case CONSTRAST:
      vpDisplay::displayCross(I,vpImagePoint(i,j),3,vpColor::blue,1);
      break;
      
    case THRESHOLD:
      vpDisplay::displayCross(I,vpImagePoint(i,j),3,vpColor::purple,1);
      break;
      
    case M_ESTIMATOR:
      vpDisplay::displayCross(I,vpImagePoint(i,j),3,vpColor::red,1);
      break;
      
    case TOO_NEAR:
      vpDisplay::displayCross(I,vpImagePoint(i,j),3,vpColor::cyan,1);
      break; // without this break, the cyan cross was immediately overdrawn in yellow

    default:
      vpDisplay::displayCross(I,vpImagePoint(i,j),3,vpColor::yellow,1);
  }
}
Example #10
/*!
  Get the current list of KLT points and their ids.

  \warning Contrary to getKltPoints(), which returns a pointer to CvPoint2D32f,
  this function converts and copies the OpenCV KLT points into vpImagePoint objects.

  \return The list of KLT points and their ids, as tracked by vpKltOpencv.
*/
std::map<int, vpImagePoint> 
vpMbKltTracker::getKltImagePointsWithId() const
{
  std::map<int, vpImagePoint> kltPoints;
  for (unsigned int i = 0; i < static_cast<unsigned int>(tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    tracker.getFeature((int)i, id, x_tmp, y_tmp);
    kltPoints[id] = vpImagePoint(y_tmp, x_tmp);
  }
  
  return kltPoints;
}
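A hedged sketch of consuming the returned map (tracker is a hypothetical, already-running vpMbKltTracker):

std::map<int, vpImagePoint> pts = tracker.getKltImagePointsWithId();
for (std::map<int, vpImagePoint>::const_iterator it = pts.begin(); it != pts.end(); ++it)
  std::cout << "KLT feature " << it->first << " at " << it->second << std::endl;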
Example #11
/*!
  Function called by the constructor to initialize the window and the basic parameters.

  \exception vpException::dimensionError if the parameter exceeds the maximum
  number of graphs (4).

  \param nbGraph : The number of graphs in the window.
*/
void
vpPlot::initNbGraph (unsigned int nbGraph)
{
  if(nbGraph > 4){
    throw vpException(vpException::dimensionError, "Cannot create more than 4 graphs");
  }
  graphList = new vpPlotGraph[nbGraph];
  graphNbr = nbGraph;
  
  switch (nbGraph)
  {
    case 1 : 
      graphList[0].initSize(vpImagePoint(0,0), (unsigned int)(700*factorj),(unsigned int)(700*factori),margei,margej);
      break;
    case 2 :
      graphList[0].initSize(vpImagePoint(0,0),(unsigned int)(700*factorj),(unsigned int)(350*factori),margei,margej);
      graphList[1].initSize(vpImagePoint((unsigned int)(350*factori),0),(unsigned int)(700*factorj),(unsigned int)(350*factori),margei,margej);
      break;
    case 3 :
      graphList[0].initSize(vpImagePoint(0,0),(unsigned int)(350*factorj),(unsigned int)(350*factori),margei,margej);
      graphList[1].initSize(vpImagePoint(0,(unsigned int)(350*factorj)),(unsigned int)(350*factorj),(unsigned int)(350*factori),margei,margej);
      graphList[2].initSize(vpImagePoint((unsigned int)(350*factori),0),(unsigned int)(700*factorj),(unsigned int)(350*factori),margei,margej);
      break;
    case 4 :
      graphList[0].initSize(vpImagePoint(0,0),(unsigned int)(350*factorj),(unsigned int)(350*factori),margei,margej);
      graphList[1].initSize(vpImagePoint(0,(unsigned int)(350*factorj)),(unsigned int)(350*factorj),(unsigned int)(350*factori),margei,margej);
      graphList[2].initSize(vpImagePoint((unsigned int)(350*factori),0),(unsigned int)(350*factorj),(unsigned int)(350*factori),margei,margej);
      graphList[3].initSize(vpImagePoint((unsigned int)(350*factori),(unsigned int)(350*factorj)),(unsigned int)(350*factorj),(unsigned int)(350*factori),margei,margej);
      break;
  }
  
  for (unsigned int i = 0; i < graphNbr; i++)
  {
    strcpy(graphList[i].title, "");
    strcpy(graphList[i].unitx, "");
    strcpy(graphList[i].unity, "");
    strcpy(graphList[i].unitz, "");
  }
}
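The public-facing side of this layout logic, as a hedged sketch (assumes a display is available; the curve data is synthetic). A two-graph window uses the 700x350 split from case 2 above:

#include <cmath>
#include <visp3/gui/vpPlot.h>

int main()
{
  vpPlot plotter(2, 700, 700, 100, 100, "Two stacked graphs"); // nbGraph, height, width, x, y, title
  plotter.initGraph(0, 1); // one curve in graph 0
  plotter.initGraph(1, 1); // one curve in graph 1
  for (unsigned int t = 0; t < 100; t++) {
    plotter.plot(0, 0, (double)t, std::sin(0.1 * t));
    plotter.plot(1, 0, (double)t, std::cos(0.1 * t));
  }
  return 0;
}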
Example #12
/*!
  Get the current list of KLT points.

  \warning Contrary to getKltPoints(), which returns a pointer to CvPoint2D32f,
  this function converts and copies the OpenCV KLT points into vpImagePoint objects.

  \return The list of KLT points, as tracked by vpKltOpencv.
*/
std::vector<vpImagePoint> 
vpMbKltTracker::getKltImagePoints() const
{
  std::vector<vpImagePoint> kltPoints;
  for (unsigned int i = 0; i < static_cast<unsigned int>(tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    tracker.getFeature((int)i, id, x_tmp, y_tmp);
    kltPoints.push_back(vpImagePoint(y_tmp, x_tmp));
  }
  
  return kltPoints;
}
Example #13
static int qrcode_process(uint8_t* frame, int width, int height, int size)
{
	init_display(width, height);

	if (!initialized  || process_changed) {
		initialized = true;
		process_changed = false;
	}

	prepare_display(frame, width, height, size);

	if (qr_detector.detect(*grey_img)) {
		std::vector<vpImagePoint> pol = qr_detector.getPolygon(0);
		vpRect bbox = qr_detector.getBBox(0);
		vpDisplay::displayRectangle(*display_img, bbox, vpColor::green);
		vpDisplay::displayText(*display_img, (int)(bbox.getTop()-10), (int)bbox.getLeft(),
			"Message: \"" + qr_detector.getMessage(0) + "\"",
			vpColor::red);

		for (size_t j = 0; j < pol.size(); j++) {
			vpDisplay::displayCross(*display_img, pol[j], 14, vpColor::red, 3);
			std::ostringstream number;
			number << j;
			vpDisplay::displayText(*display_img, pol[j]+vpImagePoint(10,0),
				number.str(), vpColor::blue);
		}
		double x = 0;
		double y = 0;
		bbox.getCenter(x, y);

		std::string msg = qr_detector.getMessage(0);
		char cstr[SIZE_MESSAGE];
		strncpy(cstr, msg.c_str(), SIZE_MESSAGE);
		cstr[SIZE_MESSAGE-1] = '\0'; // strncpy does not null-terminate when msg fills the buffer

		jakopter_com_write_float(com_out, 0, (float)bbox.getSize());
		jakopter_com_write_float(com_out, 4, (float)x);
		jakopter_com_write_float(com_out, 8, (float)y);
		jakopter_com_write_int(com_out, 12, strlen(cstr)+1);
		jakopter_com_write_buf(com_out, 16, cstr, SIZE_MESSAGE);
	}

	int bat = jakopter_com_read_int(com_in, 0);
	std::ostringstream bat_str;
	bat_str << bat;
	vpDisplay::displayText(*display_img, (int)display_img->getHeight()-25, 10,
		"Battery : " + bat_str.str() + " %", vpColor::red);

	vpDisplay::flush(*display_img);
	return 0;
}
Example #14
/*!
  Function called by the constructor to initialize the window and the basic parameters.

  \param nbGraph : The number of graphs in the window.
*/
void
vpPlot::init (int nbGraph)
{
  graphList = new vpPlotGraph[nbGraph];
  graphNbr = nbGraph;
  
  switch (nbGraph)
  {
    case 1 : 
      graphList[0].initSize(vpImagePoint(0,0), (int)(700*factorj),(int)(700*factori),margei,margej);
      break;
    case 2 :
      graphList[0].initSize(vpImagePoint(0,0),(int)(700*factorj),(int)(350*factori),margei,margej);
      graphList[1].initSize(vpImagePoint((int)(350*factori),0),(int)(700*factorj),(int)(350*factori),margei,margej);
      break;
    case 3 :
      graphList[0].initSize(vpImagePoint(0,0),(int)(350*factorj),(int)(350*factori),margei,margej);
      graphList[1].initSize(vpImagePoint(0,(int)(350*factorj)),(int)(350*factorj),(int)(350*factori),margei,margej);
      graphList[2].initSize(vpImagePoint((int)(350*factori),0),(int)(700*factorj),(int)(350*factori),margei,margej);
      break;
    case 4 :
      graphList[0].initSize(vpImagePoint(0,0),(int)(350*factorj),(int)(350*factori),margei,margej);
      graphList[1].initSize(vpImagePoint(0,(int)(350*factorj)),(int)(350*factorj),(int)(350*factori),margei,margej);
      graphList[2].initSize(vpImagePoint((int)(350*factori),0),(int)(350*factorj),(int)(350*factori),margei,margej);
      graphList[3].initSize(vpImagePoint((int)(350*factori),(int)(350*factorj)),(int)(350*factorj),(int)(350*factori),margei,margej);
      break;
  }
  
  for (int i = 0; i < graphNbr; i++)
  {
    strcpy(graphList[i].title, "");
    strcpy(graphList[i].unitx, "");
    strcpy(graphList[i].unity, "");
    strcpy(graphList[i].unitz, "");
    graphList[i].textdispayed=false;
  }
}
Example #15
/*!
  Initialise the face to track. All the points in the map, representing all the
  points detected in the image, are parsed in order to extract the ids of the
  points that are indeed in the face.

  \param _tracker : ViSP OpenCV KLT tracker.
*/
void
vpMbtKltPolygon::init(const vpKltOpencv& _tracker)
{
  // extract ids of the points in the face
  nbPointsInit = 0;
  nbPointsCur = 0;
  initPoints = std::map<int, vpImagePoint>();
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();
  std::vector<vpImagePoint> roi;
  getRoiClipped(cam, roi);
  
  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    _tracker.getFeature((int)i, id, x_tmp, y_tmp);

    if(isInside(roi, y_tmp, x_tmp)){
      initPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPointsInd[id] = (int)i;
      nbPointsInit++;
      nbPointsCur++;
    }
  }

  // initialisation of the value for the computation in SE3
  vpPlane plan(p[0], p[1], p[2]);

  d0 = plan.getD();
  N = plan.getNormal(); 
  
  N.normalize();
  N_cur = N;
  invd0 = 1.0 / d0;
}
Example #16
bool crossesEastBorder(const vpImage<int> &I, bool checked[8], const vpImagePoint &point) {
  vpDirection direction;
  if ( !fromTo(point, vpImagePoint(point.get_i(), point.get_j() + 1), direction) ) {
    return false;
  }

  bool b = checked[(int) direction.m_direction];

  if (point.get_i() < 0 || point.get_j() < 0) {
    return false;
  }

  unsigned int i = (unsigned int) point.get_i();
  unsigned int j = (unsigned int) point.get_j();

  return I[i][j] != 0 && ( (unsigned int) point.get_j() == I.getWidth()-1 || b);
}
Example #17
/*!
  Update the _center attribute of the polygon using the corners.

  The i coordinate is computed using:

  \f[
  i = \frac{1}{6A} \sum_{k=0}^{n-1} (i_k + i_{k+1})(i_{k+1} j_k - j_{k+1} i_k)
  \f]

  where \f$ A \f$ is the polygon area and indices are taken modulo \f$ n \f$.
  The computation of the j coordinate is similar.
*/
void
vpPolygon::updateCenter()
{
  if(_corners.size() == 0){
    _center = vpImagePoint(0, 0);
    _goodPoly = false;
    return;
  }
  double i_tmp = 0;
  double j_tmp = 0;
#if 0
  for(unsigned int i=0; i<(_corners.size()-1); ++i){
    i_tmp += (_corners[i].get_i() + _corners[i+1].get_i()) *
             (_corners[i+1].get_i() * _corners[i].get_j() - _corners[i+1].get_j() * _corners[i].get_i());

    j_tmp += (_corners[i].get_j() + _corners[i+1].get_j()) *
             (_corners[i+1].get_i() * _corners[i].get_j() - _corners[i+1].get_j() * _corners[i].get_i());
  }
#else
  for(unsigned int i=0; i<_corners.size(); ++i){
    unsigned int i_p_1 = ( i+1 ) % _corners.size();
    i_tmp += (_corners[i].get_i() + _corners[i_p_1].get_i()) *
             (_corners[i_p_1].get_i() * _corners[i].get_j() 
	      - _corners[i_p_1].get_j() * _corners[i].get_i());

    j_tmp += (_corners[i].get_j() + _corners[i_p_1].get_j()) *
             (_corners[i_p_1].get_i() * _corners[i].get_j() 
	      - _corners[i_p_1].get_j() * _corners[i].get_i());
  }
#endif

  if(_area > 0){
    _center.set_i(fabs(i_tmp / (6 * _area)));
    _center.set_j(fabs(j_tmp / (6 * _area)));
  }else{
    _center = _corners[0];
    _goodPoly = false;
  }
}
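A quick sanity check of the centroid formula, as a hedged sketch: for the axis-aligned unit square the shoelace sums give an area of 1 and a center of (0.5, 0.5).

#include <iostream>
#include <vector>
#include <visp3/core/vpPolygon.h>

int main()
{
  std::vector<vpImagePoint> square;
  square.push_back(vpImagePoint(0, 0));
  square.push_back(vpImagePoint(0, 1));
  square.push_back(vpImagePoint(1, 1));
  square.push_back(vpImagePoint(1, 0));
  vpPolygon p(square);                     // init() calls updateCenter()
  std::cout << p.getCenter() << std::endl; // expected: (0.5, 0.5)
  return 0;
}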
Example #18
int main()
{
  try {
    vpImage<vpRGBa> I;
    vpImageIo::read(I, "monkey.ppm");

#if defined(VISP_HAVE_X11)
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif
    vpDisplay::setTitle(I, "Monkey");
    vpDisplay::display(I);

    vpDisplay::displayRectangle(I, vpImagePoint(90,90), 70, 90, vpColor::red, false, 2);
    vpDisplay::flush(I);

    vpImage<vpRGBa> O;
    vpDisplay::getImage(I, O);

    try {
      vpImageIo::write(I, "monkey-out.jpg");
      vpImageIo::write(O, "monkey-out-with-overlay.jpg");
    }
    catch(...) {
      std::cout << "Cannot write the image: unsupported format..." << std::endl;
    }

    vpDisplay::getClick(I);
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
}
Example #19
  void
  TrackerViewer::displayKltPoints()
  {
    if (!klt_)
      return;
    vpImagePoint pos;

    for (unsigned i = 0; i < klt_->klt_points_positions.size(); ++i)
    {
      double ii = klt_->klt_points_positions[i].i;
      double jj = klt_->klt_points_positions[i].j;
      int id = klt_->klt_points_positions[i].id;
      vpColor color = vpColor::red;

      vpDisplay::displayCross(image_, vpImagePoint(ii, jj), 15, color, 1);

      pos.set_i( vpMath::round( ii + 7 ) );
      pos.set_j( vpMath::round( jj + 7 ) );
      char ide[10];
      sprintf(ide, "%d", id);
      vpDisplay::displayCharString(image_, pos, ide, vpColor::red);
    }
  }
Example #20
/*!
  Compute the number of points in this instantiation of the tracker that
  correspond to the points of the cylinder.

  \param _tracker : The KLT tracker.
  \return The number of points that are tracked in this face and in this instantiation of the tracker.
*/
unsigned int
vpMbtDistanceKltCylinder::computeNbDetectedCurrent(const vpKltOpencv& _tracker)
{
  int id;
  float x, y;
  nbPointsCur = 0;
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();

  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i++){
    _tracker.getFeature((int)i, id, x, y);
    if(isTrackedFeature(id)){
      curPoints[id] = vpImagePoint(static_cast<double>(y),static_cast<double>(x));
      curPointsInd[id] = (int)i;
      nbPointsCur++;
    }
  }

  enoughPoints = (nbPointsCur >= minNbPoint);

  return nbPointsCur;
}
Example #21
bool vpFaceTrackerOkao::detect()
{
  m_message.clear();
  m_polygon.clear();
  m_nb_objects = 0;
  m_faces.clear();
  m_scores.clear();

  bool target_found = false;
  AL::ALValue result = m_mem_proxy.getData("FaceDetected");
  //-- Detect faces


  if (result.getSize() >=2)
  {
    //std::cout << "face" << std::endl;
    AL::ALValue info_face_array = result[1];
    target_found = true;
    double min_dist = m_image_width*m_image_height;
    unsigned int index_closest_cog = 0;
    vpImagePoint closest_cog;
    for (unsigned int i = 0; i < info_face_array.getSize()-1; i++ )
    {
      //Extract face info
      // Face Detected [1]/ First face [0]/ Shape Info [0]/ Alpha [1]
      float alpha = result[1][i][0][1];
      float beta = result[1][i][0][2];
      float sx = result[1][i][0][3];
      float sy = result[1][i][0][4];

      std::string name = result[1][i][1][2];
      float score = result[1][i][1][1];

      std::ostringstream message;
      if (score > 0.6)
        message << name;
      else
        message << "Unknown";

      m_message.push_back( message.str() );
      m_scores.push_back(score);
      // sizeX / sizeY are the face size in relation to the image
      float h = m_image_height * sx;
      float w = m_image_width * sy;

      // Center of face into the image
      float x = m_image_width / 2 - m_image_width * alpha;
      float y = m_image_height / 2 + m_image_height * beta;

      vpImagePoint cog(x,y);
      double dist = vpImagePoint::distance(m_previuos_cog,cog);

      if (dist< min_dist)
      {
        closest_cog = cog;
        index_closest_cog = i;
        min_dist = dist;
      }

      std::vector<vpImagePoint> polygon;
      double x_corner = x - h/2;
      double y_corner = y - w/2;

      polygon.push_back(vpImagePoint(y_corner  , x_corner  ));
      polygon.push_back(vpImagePoint(y_corner+w, x_corner  ));
      polygon.push_back(vpImagePoint(y_corner+w, x_corner+h));
      polygon.push_back(vpImagePoint(y_corner  , x_corner+h));

      m_polygon.push_back(polygon);
      m_nb_objects ++;

    }

    if (index_closest_cog !=0)
      std::swap(m_polygon[0], m_polygon[index_closest_cog]);
    m_previuos_cog = closest_cog;
  }

  return target_found;
}
Example #22
/*!

  \relates vpImagePoint

  Returns a vpImagePoint with coordinates divided by a scale factor.

  \code
#include <iostream>
#include <visp3/core/vpImagePoint.h>

int main()
{
  vpImagePoint ip(100, 200); // Create an image point with coordinates i=100, j=200
  std::cout << "ip: " << ip << std::endl; // coordinates (100, 200)
  std::cout << "ip/2: " << ip/2 << std::endl; // new coordinates (50, 100)

  return 0;
}
  \endcode
*/
VISP_EXPORT vpImagePoint operator/( const vpImagePoint &ip1, const double scale ) {
  return ( vpImagePoint(ip1.get_i()/scale, ip1.get_j()/scale));
}
Example #23
/*!

  \relates vpImagePoint

  Returns a vpImagePoint with an offset subtracted from the two coordinates.

  \code
#include <iostream>
#include <visp3/core/vpImagePoint.h>

int main()
{
  vpImagePoint ip(100, 200); // Create an image point with coordinates i=100, j=200
  std::cout << "ip: " << ip << std::endl; // coordinates (100, 200)
  std::cout << "ip-12.34: " << ip-12.34 << std::endl; // new coordinates (87.66, 187.66)

  return 0;
}
  \endcode
*/
VISP_EXPORT vpImagePoint operator-( const vpImagePoint &ip1, const double offset ) {
  return ( vpImagePoint(ip1.get_i()-offset, ip1.get_j()-offset));
}
Example #24
/*!

  \relates vpImagePoint

  Returns a vpImagePoint which is the difference between \f$ ip1 \f$ and \f$ ip2 \f$.

*/
VISP_EXPORT vpImagePoint operator-( const vpImagePoint &ip1, const vpImagePoint &ip2 ) {
  return ( vpImagePoint(ip1.get_i()-ip2.get_i(), ip1.get_j()-ip2.get_j()));
}
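Unlike its siblings, this overload ships without a usage snippet; here is a hedged one in the same style:

#include <iostream>
#include <visp3/core/vpImagePoint.h>

int main()
{
  vpImagePoint ip1(100, 200), ip2(10, 20);
  std::cout << "ip1-ip2: " << (ip1 - ip2) << std::endl; // coordinates (90, 180)
  return 0;
}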
Example #25
/*!

  \relates vpImagePoint

  Returns a vpImagePoint with an offset added to the two coordinates.

  \code
#include <iostream>
#include <visp3/core/vpImagePoint.h>

int main()
{
  vpImagePoint ip(100, 200); // Create an image point with coordinates i=100, j=200
  std::cout << "ip: " << ip << std::endl; // coordinates (100, 200)
  std::cout << "ip+10: " << ip+10 << std::endl; // new coordinates (110, 210)

  return 0;
}
  \endcode
*/
VISP_EXPORT vpImagePoint operator+( const vpImagePoint &ip1, const int offset ) {
  return ( vpImagePoint(ip1.get_i()+offset, ip1.get_j()+offset));
}
Example #26
int main(int argc, const char** argv)
{
  //! [Macro defined]
#if (defined(VISP_HAVE_ZBAR) || defined(VISP_HAVE_DMTX)) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  //! [Macro defined]
  try {
    vpImage<unsigned char> I;
    vpImageIo::read(I, "bar-code.pgm");

#ifdef VISP_HAVE_X11
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#endif

    //! [Create base detector]
    vpDetectorBase *detector = NULL;
    //! [Create base detector]

#if (defined(VISP_HAVE_ZBAR) && defined(VISP_HAVE_DMTX))
    int opt_barcode = 0; // 0=QRCode, 1=DataMatrix

    for (int i=0; i<argc; i++) {
      if (std::string(argv[i]) == "--code-type")
        opt_barcode = atoi(argv[i+1]);
      else if (std::string(argv[i]) == "--help") {
        std::cout << "Usage: " << argv[0]
                  << " [--code-type <0 for QRcode | 1 for DataMatrix>] [--help]"
                  << std::endl;
        return 0;
      }
    }
    //! [Create detector]
    if (opt_barcode == 0)
      detector = new vpDetectorQRCode;
    else
      detector = new vpDetectorDataMatrixCode;
    //! [Create detector]
#elif defined(VISP_HAVE_ZBAR)
    detector = new vpDetectorQRCode;
    (void)argc;
    (void)argv;
#elif defined(VISP_HAVE_DMTX)
    detector = new vpDetectorDataMatrixCode;
    (void)argc;
    (void)argv;
#endif

    vpDisplay::display(I);

    //! [Detection]
    bool status = detector->detect(I);
    //! [Detection]
    std::ostringstream legend;
    legend << detector->getNbObjects() << " bar code detected";
    vpDisplay::displayText(I, (int)I.getHeight()-30, 10, legend.str(), vpColor::red);

    //! [Parse detected codes]
    if (status) {
      for(size_t i=0; i < detector->getNbObjects(); i++) {
        //! [Parse detected codes]
        //! [Get location]
        std::vector<vpImagePoint> p = detector->getPolygon(i);
        vpRect bbox = detector->getBBox(i);
        //! [Get location]
        vpDisplay::displayRectangle(I, bbox, vpColor::green);
        //! [Get message]
        vpDisplay::displayText(I, (int)(bbox.getTop()-10), (int)bbox.getLeft(),
                               "Message: \"" + detector->getMessage(i) + "\"",
                               vpColor::red);
        //! [Get message]
        for(size_t j=0; j < p.size(); j++) {
          vpDisplay::displayCross(I, p[j], 14, vpColor::red, 3);
          std::ostringstream number;
          number << j;
          vpDisplay::displayText(I, p[j]+vpImagePoint(10,0), number.str(), vpColor::blue);
        }
      }

      vpDisplay::displayText(I, (int)I.getHeight()-15, 10, "A click to quit...", vpColor::red);
      vpDisplay::flush(I);
      vpDisplay::getClick(I);
    }
    delete detector;
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#else
  (void)argc;
  (void)argv;
#endif
}
Example #27
bool getVelocityVector_Point(visual_servo_control::request_servo_velocity_vector::Request &req, visual_servo_control::request_servo_velocity_vector::Response &res)
{
	// This part of the function is locked while it is still calculating the next
	// velocity vector for the servo application; the service returns false while busy.
	ROS_INFO("Service called (getVelocityVector_Point)");
	std::string request_message = req.request_message.c_str();
	if(!program_lock)
	{
	     ROS_INFO("Program not locked, starting getVelocityVector_Point");
		program_lock = true;
          try {
               // If the servo system is not yet initialized, then initialize global variables.
               // The control message is compared to the incoming request message.
               std::string control_message = "initialize";
               if(!servo_initialized && camera_parameters_loaded && request_message == control_message)
               {       
                    ROS_INFO("Initializing servo. Camera are parameters loaded. Service request message was : %s", control_message.c_str());

                    int cog_x = req.pepper_blob_center_of_gravity_x;
                    int cog_y = req.pepper_blob_center_of_gravity_y;

                         // Check whether a center of gravity was found.
                         if(cog_x > -1 && cog_y > -1)
                         {
                              // Create point from found coordinates in image.
                              vpImagePoint cog_image_coordinate = vpImagePoint (cog_x, cog_y);
            
                              // Set intrinsic camera parameters, since we will have to convert image pixel 
                              // coordinates to visual features expressed in meters.
                              // vpCameraParameters(px,py,u0,v0) where:
                              //   -px = horizontal pixel size. 
                              //   -py = vertical pixel size.
                              //   -u0 = X-Coordinate of image center.
                              //   -v0 = Y-Coordinate of image center.
                              // px and py are the ratios between the focal length and the pixel size on the CCD,
                              // so, say, a 6 mm lens with a 10 micrometer pixel size gives you px = 6e-3/1e-5 = 600.
                              // In our case (F/Sx) : 0.00546213886287705 / 1.09363999535642e-05 = 500.
                              vpCameraParameters camera_params(500, 500, 240, 320);
                              //ROS_INFO("Real image center coordinates in image (x,y) : %f %f", camera_parameters[8].D(), camera_parameters[9].D() );

                              // Create a vpFeaturePoint using a vpImagePoint and the parameters of the camera.
                              // The vpDot contains only the pixel coordinates of the point in an image. 
                              // Thus this method uses the camera parameters to compute the meter coordinates 
                              // in x and y in the image plane.
                              vpFeatureBuilder::create(p_cog, camera_params, cog_image_coordinate);   

                              // It is not possible to compute the depth of the point Z (meter) in the camera 
                              // frame in the vpFeatureBuilder above.  
                              // This coordinate is needed in vpFeaturePoint to compute the interaction matrix. 
                              p_cog.set_Z(0.40);  

                              // Sets the desired position (x,y,z) of the visual feature.
                              pd_cog.buildFrom(0,0,0.40);

                              // Define the task: eye-in-hand. 
                              // The robot is hence controlled in the camera frame.
                              task.setServo(vpServo::EYEINHAND_CAMERA);

                              // The result should be a point on a point.
                              task.addFeature(p_cog,pd_cog);

                              // Set the gain of the servo task: rate of change.
                              task.setLambda(0.99);

                              // Display task information.
                              task.print();

                              // The servo loop will start at iteration zero.
                              iteration = 0;
                              servo_initialized = true;
                              res.initialized = true;
                              res.found_vector = false;
                              ROS_INFO("Servo initialized.");
                         }
                         else
                         {
                              ROS_INFO("Servo not initialized : center of gravity not found in image.");                         
                              servo_initialized = false;
                              res.initialized = false;
                              res.found_vector = false;
                         }
               }  
               else
               {
                    // When initialized, perform servo loop.
                    std::string control_message = "cycle";
                    if(servo_initialized && request_message == control_message)
                    {
                         ROS_INFO("Performing servo loop iteration : %d", iteration);

                         int cog_x = req.pepper_blob_center_of_gravity_x;
                         int cog_y = req.pepper_blob_center_of_gravity_y;
                      
                              // Check whether a center of gravity was found.
                              if(cog_x > -1 && cog_y > -1)
                              {
                                   // Create point from found coordinates in image.
                                   vpImagePoint cog_image_coordinate = vpImagePoint (cog_x, cog_y);
              
                                   // Set intrinsic camera parameters, since we will have to convert image pixel 
                                   // coordinates to visual features expressed in meters.
                                   // vpCameraParameters(px,py,u0,v0) where:
                                   //   -px = horizontal pixel size. 
                                   //   -py = vertical pixel size.
                                   //   -u0 = X-Coordinate of image center.
                                   //   -v0 = Y-Coordinate of image center.
                                   vpCameraParameters camera_params(500, 500, 240, 320);

                                   // Create a vpFeaturePoint using a vpImagePoint and the parameters of the camera.
                                   // The vpDot contains only the pixel coordinates of the point in an image. 
                                   // Thus this method uses the camera parameters to compute the meter coordinates 
                                   // in x and y in the image plane.
                                   vpFeatureBuilder::create(p_cog, camera_params, cog_image_coordinate);   

                                   // It is not possible to compute the depth of the point Z (meter) in the camera 
                                   // frame in the vpFeatureBuilder above.  
                                   // This coordinate is needed in vpFeaturePoint to compute the interaction matrix. 
                                   p_cog.set_Z(0.40);      

                                   // Compute the visual servoing velocity vector.
                                   vpColVector v = task.computeControlLaw();

                                   // Get feature error (s-s*) for the feature point. For this feature
                                   // point, we have 2 errors (along x and y axis).
                                   // This error is expressed in meters in the camera frame
                                   vpColVector error = task.getError(); 
                                   res.error_y = error[0];
                                   res.error_x = error[1];
                                   ROS_INFO("Feature error (x,y): (%0.5f , %0.5f)", res.error_x, res.error_y);  

                                   // Fill and send velocity vector as response to this service.
                                   std::vector<double> velocity_vector(6);
                                   velocity_vector[0] = v[0];
                                   velocity_vector[1] = v[1];
                                   velocity_vector[2] = v[2];
                                   velocity_vector[3] = v[3];
                                   velocity_vector[4] = v[4];
                                   velocity_vector[5] = v[5];
                                   
                                   // Send the velocity vector as a response.
                                   res.servo_velocity_vector     = velocity_vector;
                                   res.found_vector              = true;
                                   res.initialized               = true; 
		                         ROS_INFO("Velocity vector has been sent: %f %f %f %f %f %f", velocity_vector[0], velocity_vector[1], velocity_vector[2], velocity_vector[3], velocity_vector[4], velocity_vector[5]);

                                   // End this servo iteration.  
                                   iteration++;
                                   ROS_INFO("Servo iteration performed, velocity vector send.");
                              }
                              else
                              {
                                   ROS_INFO("Servo iteration failed : could not find center of gravity. Retry.");                                                       
                              
                                   // Fill and send empty velocity vector as response to this service.
                                   std::vector<double> velocity_vector(6);
                                   res.servo_velocity_vector = velocity_vector;
                                   res.found_vector = false; 
                                   res.initialized = true;
                              }
                         }
                         else
                         {
                               if(!servo_initialized)
                               {
                                   ROS_INFO("Servo function not yet initialized.");
                                   res.found_vector = false;
                                   res.initialized = false; 
                               }
                               if(request_message != control_message )
                               {
                                   ROS_INFO("Wrong request message, use: %s", control_message.c_str() );
                                   res.found_vector = false;
                                   res.initialized = false; 
                               }
                         }
               }       
          }
          catch(const vpException &e)
          {
               ROS_ERROR("Could not calculate velocity vector in getVelocityVector_Point");
               res.found_vector = false;
               res.initialized = false; 
          }     
     	program_lock = false;
		return true;
     }
	else
	{
		ROS_INFO("Visual servo calculation currently already executing and service (getVelocityVector_Point) is therefore locked.");
		return false;
	}

}
Example #28
/*!
   Unary function used to transform a cv::KeyPoint to a vpImagePoint.
   \param keypoint : KeyPoint to convert.

   \return A vpImagePoint with the 2D coordinates corresponding to the location of the KeyPoint.
 */
vpImagePoint vpConvert::keyPointToVpImagePoint(const cv::KeyPoint &keypoint) {
    return vpImagePoint(keypoint.pt.y, keypoint.pt.x);
}
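A hedged sketch highlighting the deliberate axis swap (OpenCV's pt is (x, y) while vpImagePoint stores (i, j) = (row, column)); the keypoint values are made up:

cv::KeyPoint kp(120.5f, 64.0f, 7.0f); // x, y, size
vpImagePoint ip = vpConvert::keyPointToVpImagePoint(kp);
// ip.get_i() == 64.0 (row) and ip.get_j() == 120.5 (column)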
Example #29
int main(int argc, const char** argv)
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020100) && (defined(VISP_HAVE_ZBAR) || defined(VISP_HAVE_DMTX))
  int opt_device = 0;
  int opt_barcode = 0; // 0=QRCode, 1=DataMatrix

  for (int i=0; i<argc; i++) {
    if (std::string(argv[i]) == "--device")
      opt_device = atoi(argv[i+1]);
    else if (std::string(argv[i]) == "--code-type")
      opt_barcode = atoi(argv[i+1]);
    else if (std::string(argv[i]) == "--help") {
      std::cout << "Usage: " << argv[0]
                << " [--device <camera number>] [--code-type <0 for QRcode | 1 for DataMatrix>] [--help]"
                << std::endl;
      return 0;
    }
  }
  std::cout << "Use device: " << opt_device << std::endl;

  try {
    vpImage<unsigned char> I; // for gray images

    //! [Construct grabber]
#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    g.setDevice(device.str());
    g.setScale(1);
    g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    cv::VideoCapture cap(opt_device); // open the default camera
    if(!cap.isOpened()) { // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    cap >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif
    //! [Construct grabber]

#if defined(VISP_HAVE_X11)
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#endif
    vpDisplay::setTitle(I, "ViSP viewer");

    vpDetectorBase *detector = NULL;
#if (defined(VISP_HAVE_ZBAR) && defined(VISP_HAVE_DMTX))
    if (opt_barcode == 0)
      detector = new vpDetectorQRCode;
    else
      detector = new vpDetectorDataMatrixCode;
#elif defined(VISP_HAVE_ZBAR)
    detector = new vpDetectorQRCode;
    (void)opt_barcode;
#elif defined(VISP_HAVE_DMTX)
    detector = new vpDetectorDataMatrixCode;
    (void)opt_barcode;
#endif

    for(;;) {
      //! [Acquisition]
#if defined(VISP_HAVE_V4L2)
      g.acquire(I);
#else
      cap >> frame; // get a new frame from camera
      vpImageConvert::convert(frame, I);
#endif
      //! [Acquisition]
      vpDisplay::display(I);

      bool status = detector->detect(I);
      std::ostringstream legend;
      legend << detector->getNbObjects() << " bar code detected";
      vpDisplay::displayText(I, 10, 10, legend.str(), vpColor::red);

      if (status) {
        for(size_t i=0; i < detector->getNbObjects(); i++) {
          std::vector<vpImagePoint> p = detector->getPolygon(i);
          vpRect bbox = detector->getBBox(i);
          vpDisplay::displayRectangle(I, bbox, vpColor::green);
          vpDisplay::displayText(I, bbox.getTop()-20, bbox.getLeft(), "Message: \"" + detector->getMessage(i) + "\"", vpColor::red);
          for(size_t j=0; j < p.size(); j++) {
            vpDisplay::displayCross(I, p[j], 14, vpColor::red, 3);
            std::ostringstream number;
            number << j;
            vpDisplay::displayText(I, p[j]+vpImagePoint(10,0), number.str(), vpColor::blue);
          }
        }
      }

      vpDisplay::displayText(I, (int)I.getHeight()-25, 10, "Click to quit...", vpColor::red);
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false)) // a click to exit
        break;
    }
    delete detector;
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#else
  (void)argc;
  (void)argv;
#endif
}
Example #30
/*!
   Unary function to convert a cv::Point2d to a vpImagePoint.
   \param point : Point to convert.

   \return A vpImagePoint with the 2D coordinates stored in cv::Point2d.
 */
vpImagePoint vpConvert::point2dToVpImagePoint(const cv::Point2d &point) {
    return vpImagePoint(point.y, point.x);
}
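The same hedged check for the cv::Point2d overload, with made-up values:

cv::Point2d pt(120.5, 64.0); // x, y
vpImagePoint ip = vpConvert::point2dToVpImagePoint(pt);
// ip.get_i() == 64.0 (row) and ip.get_j() == 120.5 (column): the swap mirrors keyPointToVpImagePoint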