Example #1
/*!
  Initialise the face to track. All the points in the map, representing all the
  points detected in the image, are parsed in order to extract the ids of the
  points that are indeed in the face.

  \param _tracker : ViSP OpenCV KLT Tracker.
*/
void
vpMbtDistanceKltPoints::init(const vpKltOpencv& _tracker)
{
  // extract ids of the points in the face
  nbPointsInit = 0;
  nbPointsCur = 0;
  initPoints = std::map<int, vpImagePoint>();
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();
  std::vector<vpImagePoint> roi;
  polygon->getRoiClipped(cam, roi);

  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    _tracker.getFeature((int)i, id, x_tmp, y_tmp);

    bool add = false;

    if(useScanLine)
    {
      if((unsigned int)y_tmp <  hiddenface->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
         (unsigned int)x_tmp <  hiddenface->getMbScanLineRenderer().getPrimitiveIDs().getWidth() &&
         hiddenface->getMbScanLineRenderer().getPrimitiveIDs()[(unsigned int)y_tmp][(unsigned int)x_tmp] == polygon->getIndex())
        add = true;
    }
    else if(vpPolygon::isInside(roi, y_tmp, x_tmp))
    {
      add = true;
    }

    if(add){
      initPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPointsInd[id] = (int)i;
      nbPointsInit++;
      nbPointsCur++;
    }
  }

  if(nbPointsCur >= minNbPoint) enoughPoints = true;
  else enoughPoints = false;

  // initialisation of the value for the computation in SE3
  vpPlane plan(polygon->getPoint(0), polygon->getPoint(1), polygon->getPoint(2));

  d0 = plan.getD();
  N = plan.getNormal();

  N.normalize();
  N_cur = N;
  invd0 = 1.0 / d0;
}
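
The plane parameters cached at the end of init() (d0, N and invd0) are what later relate the current KLT points of the face to the initial ones. As a hedged aside (not part of the listed source), the depth of any point lying on that plane can be recovered directly from them, using the vpPlane convention A*X + B*Y + C*Z + D = 0:

/*
  Hedged sketch, not ViSP code: recover the depth Z of a point on the face's
  plane from its normalized image coordinates (x, y). Back-projecting the
  point as (x*Z, y*Z, Z) into the plane equation gives
  Z = -d0 / (N[0]*x + N[1]*y + N[2]).
*/
double depthOnFacePlane(const vpColVector &N, double d0, double x, double y)
{
  return -d0 / (N[0] * x + N[1] * y + N[2]);
}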
Example #2
/*!
  Set the new values of the KLT tracker.

  \param t : KLT tracker containing the new values.
*/
void
vpMbKltTracker::setKltOpencv(const vpKltOpencv& t){
  tracker.setMaxFeatures(t.getMaxFeatures());
  tracker.setWindowSize(t.getWindowSize());
  tracker.setQuality(t.getQuality());
  tracker.setMinDistance(t.getMinDistance());
  tracker.setHarrisFreeParameter(t.getHarrisFreeParameter());
  tracker.setBlockSize(t.getBlockSize());
  tracker.setPyramidLevels(t.getPyramidLevels());
}
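
A hedged usage sketch for the setter above, using only the vpKltOpencv setters it already calls (the numeric values are purely illustrative):

  vpKltOpencv klt;
  klt.setMaxFeatures(300);
  klt.setWindowSize(5);
  klt.setQuality(0.01);
  klt.setMinDistance(5);
  klt.setHarrisFreeParameter(0.01);
  klt.setBlockSize(3);
  klt.setPyramidLevels(3);

  vpMbKltTracker mb_tracker;
  mb_tracker.setKltOpencv(klt); // copies the seven parameters shown above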
/*!
  Compute the number of points in this instantiation of the tracker that
  correspond to the points of the cylinder.

  \param _tracker : The KLT tracker.
  \return The number of points that are tracked on this cylinder and in this instantiation of the tracker.
*/
unsigned int
vpMbtDistanceKltCylinder::computeNbDetectedCurrent(const vpKltOpencv& _tracker)
{
  int id;
  float x, y;
  nbPointsCur = 0;
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();

  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i++){
    _tracker.getFeature((int)i, id, x, y);
    if(isTrackedFeature(id)){
      curPoints[id] = vpImagePoint(static_cast<double>(y),static_cast<double>(x));
      curPointsInd[id] = (int)i;
      nbPointsCur++;
    }
  }

  if(nbPointsCur >= minNbPoint) enoughPoints = true;
  else enoughPoints = false;

  return nbPointsCur;
}
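
The helper isTrackedFeature(id) used above is not part of this listing; a minimal sketch of what such a lookup amounts to, assuming it simply checks the initPoints map filled at initialization (a hypothetical reconstruction, not the actual ViSP body):

/*
  Hedged sketch: a feature is still relevant for this cylinder if its id was
  selected at initialisation, i.e. if it appears in the initPoints map.
*/
bool
vpMbtDistanceKltCylinder::isTrackedFeature(const int id)
{
  return initPoints.find(id) != initPoints.end();
}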
Example #4
/*!
  Initialise the face to track. All the points in the map, representing all the
  points detected in the image, are parsed in order to extract the ids of the
  points that are indeed in the face.

  \param _tracker : ViSP OpenCV KLT Tracker.
*/
void
vpMbtKltPolygon::init(const vpKltOpencv& _tracker)
{
  // extract ids of the points in the face
  nbPointsInit = 0;
  nbPointsCur = 0;
  initPoints = std::map<int, vpImagePoint>();
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();
  std::vector<vpImagePoint> roi;
  getRoiClipped(cam, roi);
  
  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    _tracker.getFeature((int)i, id, x_tmp, y_tmp);

    if(isInside(roi, y_tmp, x_tmp)){
      initPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPoints[id] = vpImagePoint(y_tmp, x_tmp);
      curPointsInd[id] = (int)i;
      nbPointsInit++;
      nbPointsCur++;
    }
  }

  // initialisation of the value for the computation in SE3
  vpPlane plan(p[0], p[1], p[2]);

  d0 = plan.getD();
  N = plan.getNormal(); 
  
  N.normalize();
  N_cur = N;
  invd0 = 1.0 / d0;
}
/*!
  Initialise the cylinder to track. All the points in the map, representing all the
  points detected in the image, are parsed in order to extract the ids of the
  points that indeed lie on the cylinder.

  \param _tracker : ViSP OpenCV KLT Tracker.
  \param cMo : Pose of the object in the camera frame at initialization.
*/
void
vpMbtDistanceKltCylinder::init(const vpKltOpencv& _tracker, const vpHomogeneousMatrix &cMo)
{
  c0Mo = cMo;
  cylinder.changeFrame(cMo);

  // extract ids of the points in the face
  nbPointsInit = 0;
  nbPointsCur = 0;
  initPoints = std::map<int, vpImagePoint>();
  initPoints3D = std::map<int, vpPoint>();
  curPoints = std::map<int, vpImagePoint>();
  curPointsInd = std::map<int, int>();

  for (unsigned int i = 0; i < static_cast<unsigned int>(_tracker.getNbFeatures()); i ++){
    int id;
    float x_tmp, y_tmp;
    _tracker.getFeature((int)i, id, x_tmp, y_tmp);

    bool add = false;

    if(useScanLine)
    {
      if((unsigned int)y_tmp <  hiddenface->getMbScanLineRenderer().getPrimitiveIDs().getHeight() &&
         (unsigned int)x_tmp <  hiddenface->getMbScanLineRenderer().getPrimitiveIDs().getWidth())
      {
        for(unsigned int kc = 0 ; kc < listIndicesCylinderBBox.size() ; kc++)
          if(hiddenface->getMbScanLineRenderer().getPrimitiveIDs()[(unsigned int)y_tmp][(unsigned int)x_tmp] == listIndicesCylinderBBox[kc])
          {
            add = true;
            break;
          }
      }
    }
    else
    {
      std::vector<vpImagePoint> roi;
      for(unsigned int kc = 0 ; kc < listIndicesCylinderBBox.size() ; kc++)
      {
        hiddenface->getPolygon()[(size_t) listIndicesCylinderBBox[kc]]->getRoiClipped(cam, roi);
        if(vpPolygon::isInside(roi, y_tmp, x_tmp))
        {
          add = true;
          break;
        }
        roi.clear();
      }
    }

    if(add){

      double xm=0, ym=0;
      vpPixelMeterConversion::convertPoint(cam, x_tmp, y_tmp, xm, ym);
      double Z = computeZ(xm,ym);
      if(!vpMath::isNaN(Z)){
        initPoints[id] = vpImagePoint(y_tmp, x_tmp);
        curPoints[id] = vpImagePoint(y_tmp, x_tmp);
        curPointsInd[id] = (int)i;
        nbPointsInit++;
        nbPointsCur++;


        vpPoint p;
        p.setWorldCoordinates(xm * Z, ym * Z, Z);
        initPoints3D[id] = p;
        //std::cout << "Computed Z for : " << xm << "," << ym << " : " << computeZ(xm,ym) << std::endl;
      }
    }
  }

  if(nbPointsCur >= minNbPoint) enoughPoints = true;
  else enoughPoints = false;

  //std::cout << "Nb detected points in cylinder : " << nbPointsCur << std::endl;
}
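
For reference, the 3D point stored in initPoints3D above is obtained by back-projecting the normalized coordinates returned by vpPixelMeterConversion::convertPoint with the depth computed along the viewing ray. A hedged restatement of that single step (the helper name is hypothetical):

/*
  Hedged sketch, not ViSP code: given normalized image coordinates (xm, ym)
  and the depth Z returned by computeZ(), the corresponding 3D point in the
  camera frame at initialisation is (xm*Z, ym*Z, Z).
*/
vpPoint backProjectOnCylinder(double xm, double ym, double Z)
{
  vpPoint p;
  p.setWorldCoordinates(xm * Z, ym * Z, Z);
  return p;
}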