Example #1
/*!

  Initialize a point feature with polar coordinates
  \f$(\rho,\theta)\f$ using the coordinates of the point
  \f$(x,y,Z)\f$, where \f$(x,y)\f$ correspond to the perspective
  projection of the point in the image plane and \f$Z\f$ the 3D depth
  of the point in the camera frame. The values of \f$(x,y,Z)\f$ are
  expressed in meters.

  This function is intended to introduce noise into the conversion from
  cartesian to polar coordinates. The cartesian \f$(x,y)\f$ coordinates
  are first converted into pixel coordinates in the image using the
  \e goodCam camera parameters. Then the pixel coordinates of the point
  are converted back into cartesian coordinates \f$(x^{'},y^{'})\f$ using
  the noisy camera parameters \e wrongCam. From these new coordinates
  in the image plane, the polar coordinates are computed by:

  \f[\rho = \sqrt{x^2+y^2}  \hbox{,}\; \; \theta = \arctan \frac{y}{x}\f]

  \param s : Visual feature \f$(\rho,\theta)\f$ and \f$Z\f$ to initialize.

  \param goodCam : Accurate camera parameters, used to convert the
  cartesian coordinates of the point \e p in the image plane into pixel
  coordinates.

  \param wrongCam : Noisy camera parameters, used to convert the pixel
  coordinates of the point back into cartesian coordinates in the image
  plane.

  \param p : A point with \f$(x,y)\f$ cartesian coordinates in the
  image plane corresponding to the camera perspective projection, and
  with 3D depth \f$Z\f$.
*/
void
vpFeatureBuilder::create(vpFeaturePointPolar &s,
			 const vpCameraParameters &goodCam,
			 const vpCameraParameters &wrongCam,
			 const vpPoint &p)
{
  try {
    double x = p.get_x();
    double y = p.get_y();

    s.set_Z( p.get_Z() );

    double u=0, v=0;
    vpMeterPixelConversion::convertPoint(goodCam, x, y, u, v);
    vpPixelMeterConversion::convertPoint(wrongCam, u, v, x, y);

    double rho   = sqrt(x*x + y*y);
    double theta = atan2(y, x);

    s.set_rho(rho) ;
    s.set_theta(theta) ;
  }
  catch(...) {
    vpERROR_TRACE("Error caught") ;
    throw ;
  }
}
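A minimal usage sketch (hypothetical parameter values; the pose cMo is assumed to be estimated elsewhere):

vpCameraParameters goodCam(800.0, 800.0, 320.0, 240.0);   // px, py, u0, v0
vpCameraParameters wrongCam(820.0, 780.0, 325.0, 235.0);  // perturbed parameters
vpHomogeneousMatrix cMo;                                  // camera pose
vpPoint p;
p.setWorldCoordinates(0.1, 0.2, 0.5);  // 3D coordinates in the object frame
p.project(cMo);                        // updates (x, y) and Z in the camera frame
vpFeaturePointPolar s;
vpFeatureBuilder::create(s, goodCam, wrongCam, p);
std::cout << "rho: " << s.get_rho() << ", theta: " << s.get_theta() << std::endl;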
Example #2
/*!
  Create a vpColVector from a projected point. The resulting vector is
  \f$ v = (p_x X + u_0 Z, \; p_y Y + v_0 Z, \; Z)^T \f$, so that
  \f$ v[0]/v[2] \f$ and \f$ v[1]/v[2] \f$ are the pixel coordinates of the point.

  \param p : Point to project.
  \param v : Resulting vector.
  \param K : Camera parameters.
*/
void
vpMbScanLine::createVectorFromPoint(const vpPoint &p, vpColVector &v, const vpCameraParameters &K)
{
    v = vpColVector(3);

    v[0] = p.get_X() * K.get_px() + K.get_u0() * p.get_Z();
    v[1] = p.get_Y() * K.get_py() + K.get_v0() * p.get_Z();
    v[2] = p.get_Z();
}
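The same vector can be cross-checked by hand; a sketch with hypothetical values (K stands for a vpCameraParameters instance):

vpCameraParameters K(800.0, 800.0, 320.0, 240.0);  // px, py, u0, v0
vpPoint p;
p.set_X(0.1); p.set_Y(0.2); p.set_Z(0.5);  // coordinates in the camera frame
double u = (p.get_X() * K.get_px() + K.get_u0() * p.get_Z()) / p.get_Z();  // 480
double v = (p.get_Y() * K.get_py() + K.get_v0() * p.get_Z()) / p.get_Z();  // 560
// (u, v) matches vpMeterPixelConversion::convertPoint(K, X/Z, Y/Z, u, v)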
Example #3
/*!
  Linearly interpolate between two vpPoints (camera-frame coordinates).

  \param a : First point.
  \param b : Second point.
  \param alpha : Interpolation factor; 0 returns \e a, 1 returns \e b.

  \return Interpolated vpPoint.
*/
vpPoint
vpMbScanLine::mix(const vpPoint &a, const vpPoint &b, double alpha)
{
  vpPoint res;
  res.set_X(a.get_X() + ( b.get_X() - a.get_X() ) * alpha);
  res.set_Y(a.get_Y() + ( b.get_Y() - a.get_Y() ) * alpha);
  res.set_Z(a.get_Z() + ( b.get_Z() - a.get_Z() ) * alpha);

  return res;
}
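The interpolation is the affine blend res = a + alpha * (b - a). A standalone sketch with hypothetical values (written without the class, since the helper may not be publicly accessible):

vpPoint a, b, res;
a.set_X(0.0); a.set_Y(0.0); a.set_Z(1.0);
b.set_X(2.0); b.set_Y(4.0); b.set_Z(3.0);
double alpha = 0.25;
res.set_X(a.get_X() + (b.get_X() - a.get_X()) * alpha);  // 0.5
res.set_Y(a.get_Y() + (b.get_Y() - a.get_Y()) * alpha);  // 1.0
res.set_Z(a.get_Z() + (b.get_Z() - a.get_Z()) * alpha);  // 1.5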
Example #4
/*!
  Initialize a vpFeatureVanishingPoint from a vpPoint.
  The vpFeatureVanishingPoint is initialized from the \f$(x,y)\f$ coordinates of the point in the image plane.
  All the parameters are given in meters.

  \param s : Visual feature to initialize.

  \param t : The vpPoint used to create the vpFeatureVanishingPoint.
*/
void
vpFeatureBuilder::create(vpFeatureVanishingPoint &s, const vpPoint &t)
{
  try
  {
    s.set_x( t.get_x()) ;
    s.set_y( t.get_y()) ;
  }
  catch(...)
  {
    vpERROR_TRACE("Cannot create vanishing point feature") ;
    throw ;
  }
}
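A minimal usage sketch (hypothetical image-plane coordinates, in meters):

vpPoint t;
t.set_x(0.05);   // x coordinate in the image plane
t.set_y(-0.02);  // y coordinate in the image plane
vpFeatureVanishingPoint s;
vpFeatureBuilder::create(s, t);
// s.get_x() == 0.05, s.get_y() == -0.02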
Example #5
/*!
  Build a vpMbtDistanceSphere from its center, with coordinates expressed in
  the object frame, and its radius.

  \param _p1 : Center of the sphere.
  \param r : Radius of the sphere.
*/
void
vpMbtDistanceSphere::buildFrom(const vpPoint &_p1, const double r)
{
    sphere = new vpSphere ;
    p1 = new vpPoint ;

    // Get the center of the sphere
    *p1 = _p1;

    // Get the radius
    radius = r;

    // Build our sphere
    sphere->setWorldCoordinates(_p1.get_oX(), _p1.get_oY(), _p1.get_oZ(), r);
}
Example #6
/*!
  From the coordinates of a point in image plane b and the homography between
  images a and b, compute the coordinates of the point in image plane a.

  \param b_P : 2D coordinates of the point in image plane b.

  \return A point with homogeneous 2D coordinates in image plane a; divide by
  \e w to obtain cartesian coordinates.
*/
vpPoint vpHomography::operator*(const vpPoint& b_P) const
{
  vpPoint a_P ;
  vpColVector v(3),v1(3) ;

  v[0] = b_P.get_x() ;
  v[1] = b_P.get_y() ;
  v[2] = b_P.get_w() ;

  v1[0] = (*this)[0][0]*v[0] + (*this)[0][1]*v[1]+ (*this)[0][2]*v[2] ;
  v1[1] = (*this)[1][0]*v[0] + (*this)[1][1]*v[1]+ (*this)[1][2]*v[2] ;
  v1[2] = (*this)[2][0]*v[0] + (*this)[2][1]*v[1]+ (*this)[2][2]*v[2] ;

  //  v1 = M*v ;
  a_P.set_x(v1[0]) ;
  a_P.set_y(v1[1]) ;
  a_P.set_w(v1[2]) ;

  return a_P ;
}
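A usage sketch; the homography is default-constructed here only as a placeholder and would normally be estimated from point correspondences:

vpHomography aHb;  // assumed estimated elsewhere
vpPoint b_P;
b_P.set_x(0.1); b_P.set_y(0.2); b_P.set_w(1.0);
vpPoint a_P = aHb * b_P;
// The result is homogeneous: divide by w to recover cartesian coordinates.
double xa = a_P.get_x() / a_P.get_w();
double ya = a_P.get_y() / a_P.get_w();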
Example #7
/*!
  Create a vpMbScanLineEdge from two points while ordering them.

  \param a : First point of the line.
  \param b : Second point of the line.

  \return Resulting vpMbScanLineEdge.
*/
vpMbScanLine::vpMbScanLineEdge
vpMbScanLine::makeMbScanLineEdge(const vpPoint &a, const vpPoint &b)
{
  vpColVector _a(3);
  vpColVector _b(3);

  // Quantize the coordinates: multiplying by 1e8 then by 1e-6 scales by 100,
  // so after std::ceil() the values are expressed in hundredths of a unit.
  // This makes the edge key robust to tiny floating-point differences.
  _a[0] = std::ceil((a.get_X() * 1e8) * 1e-6);
  _a[1] = std::ceil((a.get_Y() * 1e8) * 1e-6);
  _a[2] = std::ceil((a.get_Z() * 1e8) * 1e-6);

  _b[0] = std::ceil((b.get_X() * 1e8) * 1e-6);
  _b[1] = std::ceil((b.get_Y() * 1e8) * 1e-6);
  _b[2] = std::ceil((b.get_Z() * 1e8) * 1e-6);

  // Lexicographic comparison of _a and _b, so that edges (a,b) and (b,a)
  // produce the same ordered key.
  bool b_comp = false;
  for (unsigned int i = 0; i < 3; ++i) {
    if (_a[i] < _b[i]) {
      b_comp = true;
      break;
    }
    else if (_a[i] > _b[i])
      break;
  }

  if (b_comp)
    return std::make_pair(_a, _b);

  return std::make_pair(_b, _a);
}
Example #8
/*!

  Initialize a point feature with polar coordinates
  \f$(\rho,\theta)\f$ using the coordinates of the point
  \f$(x,y,Z)\f$, where \f$(x,y)\f$ correspond to the perspective
  projection of the point in the image plane and \f$Z\f$ the 3D depth
  of the point in the camera frame. The values of \f$(x,y,Z)\f$ are
  expressed in meters. From the coordinates in the image plane, the
  polar coordinates are computed by:

  \f[\rho = \sqrt{x^2+y^2}  \hbox{,}\; \; \theta = \arctan \frac{y}{x}\f]

  \param s : Visual feature \f$(\rho,\theta)\f$ and \f$Z\f$ to initialize.

  \param p : A point with \f$(x,y)\f$ cartesian coordinates in the
  image plane corresponding to the camera perspective projection, and
  with 3D depth \f$Z\f$.
*/
void
vpFeatureBuilder::create(vpFeaturePointPolar &s, const vpPoint &p)
{
  try {

    double x = p.get_x();
    double y = p.get_y();

    double rho   = sqrt(x*x + y*y);
    double theta = atan2(y, x);

    s.set_rho(rho) ;
    s.set_theta(theta) ;

    s.set_Z( p.get_Z() )  ;


    if (s.get_Z() < 0) {
      vpERROR_TRACE("Point is behind the camera") ;
      std::cout << "Z = " << s.get_Z() << std::endl ;

      throw(vpFeatureException(vpFeatureException::badInitializationError,
                               "Point is behind the camera")) ;
    }

    if (fabs(s.get_Z()) < 1e-6) {
      vpERROR_TRACE("Point Z coordinate is null") ;
      std::cout << "Z = " << s.get_Z() << std::endl ;

      throw(vpFeatureException(vpFeatureException::badInitializationError,
                               "Point Z coordinate is null")) ;
    }

  }
  catch(...) {
    vpERROR_TRACE("Error caught") ;
    throw ;
  }
}
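A minimal usage sketch (hypothetical point and pose):

vpHomogeneousMatrix cMo;               // camera pose, assumed estimated elsewhere
vpPoint p;
p.setWorldCoordinates(0.1, 0.2, 0.5);  // 3D coordinates in the object frame
p.project(cMo);                        // updates (x, y) and Z
vpFeaturePointPolar s;
vpFeatureBuilder::create(s, p);        // throws if the point is behind the camera or Z is null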
Example #9
void 
kltFbTracker::genFeaturesForTrackOnFace(std::vector<vpPoint>& features, int faceID, int numOfPtsPerFace)
{
  const vpPoint &vp1 = pyg[faceID].p[0],
                &vp2 = pyg[faceID].p[1],
                &vp3 = pyg[faceID].p[2];  // p[3] is not used by the combination below

  float step = 1.0f / numOfPtsPerFace;

  // Sample the interior of the face while avoiding its edges. The weights
  // (alpha + beta - 1) + (1 - alpha) + (1 - beta) = 1, so each sample is an
  // affine combination of the corners vp2, vp1 and vp3.
  for (int i = 1; i < numOfPtsPerFace; i++) {
    for (int j = 1; j < numOfPtsPerFace; j++) {
      vpPoint fp;
      float alpha = i * step;
      float beta  = j * step;
      fp.set_oX((alpha + beta - 1) * vp2.get_oX() + (1 - alpha) * vp1.get_oX() + (1 - beta) * vp3.get_oX());
      fp.set_oY((alpha + beta - 1) * vp2.get_oY() + (1 - alpha) * vp1.get_oY() + (1 - beta) * vp3.get_oY());
      fp.set_oZ((alpha + beta - 1) * vp2.get_oZ() + (1 - alpha) * vp1.get_oZ() + (1 - beta) * vp3.get_oZ());

      features.push_back(fp);
    }
  }
}
Example #10
File: vpConvert.cpp  Project: yochju/visp
/*!
   Unary function to convert the 3D coordinates in the object frame to a cv::Point3d.
   \param point : Point to convert.

   \return A cv::Point3d with the 3D coordinates stored in vpPoint in the object frame.
 */
cv::Point3d vpConvert::vpObjectPointToPoint3d(const vpPoint &point) {
    return cv::Point3d(point.get_oX(), point.get_oY(), point.get_oZ());
}
Example #11
File: vpConvert.cpp  Project: yochju/visp
/*!
   Unary function to convert the 3D coordinates in the object frame to a cv::Point3f.
   \param point : Point to convert.

   \return A cv::Point3f with the 3D coordinates stored in vpPoint in the object frame.
 */
cv::Point3f vpConvert::vpObjectPointToPoint3f(const vpPoint &point) {
    return cv::Point3f((float) point.get_oX(), (float) point.get_oY(), (float) point.get_oZ());
}
Example #12
File: vpConvert.cpp  Project: yochju/visp
/*!
   Unary function to convert the 3D coordinates in the camera frame to a cv::Point3d.
   \param point : Point to convert.

   \return A cv::Point3d with the 3D coordinates stored in vpPoint in the camera frame.
 */
cv::Point3d vpConvert::vpCamPointToPoint3d(const vpPoint &point) {
    return cv::Point3d(point.get_X(), point.get_Y(), point.get_Z());
}
Example #13
File: vpConvert.cpp  Project: yochju/visp
/*!
   Unary function to convert the 3D coordinates in the camera frame to a cv::Point3f.
   \param point : Point to convert.

   \return A cv::Point3f with the 3D coordinates stored in vpPoint in the camera frame.
 */
cv::Point3f vpConvert::vpCamPointToPoint3f(const vpPoint &point) {
    return cv::Point3f((float) point.get_X(), (float) point.get_Y(), (float) point.get_Z());
}
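Since these converters are unary functions, they compose directly with std::transform; a sketch assuming an already filled std::vector<vpPoint>:

std::vector<vpPoint> visp_points;  // assumed filled elsewhere
std::vector<cv::Point3d> object_points(visp_points.size());
std::transform(visp_points.begin(), visp_points.end(), object_points.begin(),
               vpConvert::vpObjectPointToPoint3d);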
Example #14
void
vpMbtDistanceKltCylinder::buildFrom(const vpPoint &p1, const vpPoint &p2, const double &r)
{
  p1Ext = p1;
  p2Ext = p2;

  vpColVector ABC(3);
  vpColVector V1(3);
  vpColVector V2(3);

  V1[0] = p1.get_oX();
  V1[1] = p1.get_oY();
  V1[2] = p1.get_oZ();
  V2[0] = p2.get_oX();
  V2[1] = p2.get_oY();
  V2[2] = p2.get_oZ();

  // Get the axis of the cylinder
  ABC = V1-V2;

  // Build our extremity circles
  circle1.setWorldCoordinates(ABC[0],ABC[1],ABC[2],p1.get_oX(),p1.get_oY(),p1.get_oZ(),r);
  circle2.setWorldCoordinates(ABC[0],ABC[1],ABC[2],p2.get_oX(),p2.get_oY(),p2.get_oZ(),r);

  // Build our cylinder
  cylinder.setWorldCoordinates(ABC[0],ABC[1],ABC[2],(p1.get_oX()+p2.get_oX())/2.0,(p1.get_oY()+p2.get_oY())/2.0,(p1.get_oZ()+p2.get_oZ())/2.0,r);
}
Example #15
/*!
  Build a 3D plane from 3 points and store it in \e plane.

  \param P : The first point defining the plane.
  \param Q : The second point defining the plane.
  \param R : The third point defining the plane.
  \param plane : The vpPlane instance used to store the computed plane equation.
*/
void
buildPlane(vpPoint &P, vpPoint &Q, vpPoint &R, vpPlane &plane)
{
  vpColVector a(3);
  vpColVector b(3);
  vpColVector n(3);
  //Calculate vector corresponding to PQ
  a[0]=P.get_oX()-Q.get_oX();
  a[1]=P.get_oY()-Q.get_oY();
  a[2]=P.get_oZ()-Q.get_oZ();

  //Calculate vector corresponding to PR
  b[0]=P.get_oX()-R.get_oX();
  b[1]=P.get_oY()-R.get_oY();
  b[2]=P.get_oZ()-R.get_oZ();

  //Calculate normal vector to plane PQ x PR
  n=vpColVector::cross(a,b);

  //The equation of the plane is A*X + B*Y + C*Z + D = 0, with (A,B,C) = n:
  double A = n[0];
  double B = n[1];
  double C = n[2];
  double D = -(A*P.get_oX() + B*P.get_oY() + C*P.get_oZ());

  double norm =  sqrt(A*A+B*B+C*C) ;
  plane.setA(A/norm) ;
  plane.setB(B/norm) ;
  plane.setC(C/norm) ;
  plane.setD(D/norm) ;
}
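A usage sketch with three hypothetical points spanning the Z=0 plane:

vpPoint P, Q, R;
P.setWorldCoordinates(0, 0, 0);
Q.setWorldCoordinates(1, 0, 0);
R.setWorldCoordinates(0, 1, 0);
vpPlane plane;
buildPlane(P, Q, R, plane);
// The normalized equation is 0*X + 0*Y + 1*Z + 0 = 0:
// plane.getA() == 0, plane.getB() == 0, plane.getC() == 1, plane.getD() == 0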
Example #16
/*!
  Compute the Euclidean distance between two vpPoints (in the camera frame).

  \param a : First point.
  \param b : Second point.

  \return Resulting distance.
*/
double
vpMbScanLine::norm(const vpPoint &a, const vpPoint &b)
{
  return sqrt(vpMath::sqr(a.get_X()-b.get_X()) + vpMath::sqr(a.get_Y() - b.get_Y()) + vpMath::sqr(a.get_Z() - b.get_Z()));
}
Example #17
File: vpPolygon3D.cpp  Project: tswang/visp
/*!
  Get the clipped points according to the field-of-view plane given by its normal.

  \param p1 : First extremity of the line.
  \param p2 : Second extremity of the line.
  \param p1Clipped : Resulting p1.
  \param p2Clipped : Resulting p2.
  \param p1ClippedInfo : Resulting clipping flag for p1.
  \param p2ClippedInfo : Resulting clipping flag for p2.
  \param normal : Normal of the FOV plane used for the clipping.
  \param flag : Flag specifying the clipping to apply when calling this function.

  \return False if the line is entirely clipped away, true otherwise (its
  extremities may have been clipped).
*/
bool
vpPolygon3D::getClippedPointsFovGeneric(const vpPoint &p1, const vpPoint &p2,
                                vpPoint &p1Clipped, vpPoint &p2Clipped, 
                                unsigned int &p1ClippedInfo, unsigned int &p2ClippedInfo,
                                const vpColVector &normal, const unsigned int &flag)
{    
  vpRowVector p1Vec(3);
  p1Vec[0] = p1.get_X(); p1Vec[1] = p1.get_Y(); p1Vec[2] = p1.get_Z();
  p1Vec = p1Vec.normalize();
  
  vpRowVector p2Vec(3);
  p2Vec[0] = p2.get_X(); p2Vec[1] = p2.get_Y(); p2Vec[2] = p2.get_Z();
  p2Vec = p2Vec.normalize();
  
  if((clippingFlag & flag) == flag){
    double beta1 = acos( p1Vec * normal );
    double beta2 = acos( p2Vec * normal );

//    std::cout << beta1 << " && " << beta2 << std::endl;

    //    if(!(beta1 < M_PI / 2.0 && beta2 < M_PI / 2.0))
    if(beta1 < M_PI / 2.0 && beta2 < M_PI / 2.0)
      return false;
    else if (beta1 < M_PI / 2.0 || beta2 < M_PI / 2.0){
      vpPoint pClipped;
      double t = -(normal[0] * p1.get_X() + normal[1] * p1.get_Y() + normal[2] * p1.get_Z());
      t = t / ( normal[0] * (p2.get_X() - p1.get_X()) + normal[1] * (p2.get_Y() - p1.get_Y()) + normal[2] * (p2.get_Z() - p1.get_Z()) );
      
      pClipped.set_X((p2.get_X() - p1.get_X())*t + p1.get_X());
      pClipped.set_Y((p2.get_Y() - p1.get_Y())*t + p1.get_Y());
      pClipped.set_Z((p2.get_Z() - p1.get_Z())*t + p1.get_Z());
      
      if(beta1 < M_PI / 2.0){
        p1ClippedInfo = p1ClippedInfo | flag;
        p1Clipped = pClipped;
      }
      else{
        p2ClippedInfo = p2ClippedInfo | flag;
        p2Clipped = pClipped;
      }
    }
  }
  
  return true;
}
Example #18
File: vpPolygon3D.cpp  Project: tswang/visp
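/*!
  Get the clipped points according to a near or far clipping plane placed at
  \e distance along the Z axis.

  \param p1 : First extremity of the line.
  \param p2 : Second extremity of the line.
  \param p1Clipped : Resulting p1.
  \param p2Clipped : Resulting p2.
  \param p1ClippedInfo : Resulting clipping flag for p1.
  \param p2ClippedInfo : Resulting clipping flag for p2.
  \param flag : Clipping mode, either vpPolygon3D::NEAR_CLIPPING or vpPolygon3D::FAR_CLIPPING.
  \param distance : Z distance of the clipping plane.

  \return False if the line lies entirely on the clipped side of the plane, true otherwise.
*/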
bool
vpPolygon3D::getClippedPointsDistance(const vpPoint &p1, const vpPoint &p2,
                               vpPoint &p1Clipped, vpPoint &p2Clipped,
                               unsigned int &p1ClippedInfo, unsigned int &p2ClippedInfo,
                               const unsigned int &flag, const double &distance)
{
    // Since p1 and p1Clipped can be the same object, as well as p2 and p2Clipped,
    // we introduce two temporary points to avoid a valgrind
    // "Source and destination overlap in memcpy" error.
    vpPoint p1Clipped_, p2Clipped_;
    p1Clipped_ = p1;
    p2Clipped_ = p2;

    p1Clipped = p1Clipped_;
    p2Clipped = p2Clipped_;


    bool test1 = (p1Clipped.get_Z() < distance && p2Clipped.get_Z() < distance);
    if(flag == vpPolygon3D::FAR_CLIPPING)
        test1 = (p1Clipped.get_Z() > distance && p2Clipped.get_Z() > distance);

    bool test2 = (p1Clipped.get_Z() < distance || p2Clipped.get_Z() < distance);
    if(flag == vpPolygon3D::FAR_CLIPPING)
        test2 = (p1Clipped.get_Z() > distance || p2Clipped.get_Z() > distance);

    bool test3 = (p1Clipped.get_Z() < distance);
    if(flag == vpPolygon3D::FAR_CLIPPING)
        test3 = (p1Clipped.get_Z() > distance);

    if(test1)
      return false;

    else if(test2){
      vpPoint pClippedNear;
      double t;
      t = (p2Clipped.get_Z() - p1Clipped.get_Z());
      t = (distance - p1Clipped.get_Z()) / t;

      pClippedNear.set_X((p2Clipped.get_X() - p1Clipped.get_X())*t + p1Clipped.get_X());
      pClippedNear.set_Y((p2Clipped.get_Y() - p1Clipped.get_Y())*t + p1Clipped.get_Y());
      pClippedNear.set_Z(distance);

      if(test3){
        p1Clipped = pClippedNear;
        if(flag == vpPolygon3D::FAR_CLIPPING)
            p1ClippedInfo = p1ClippedInfo | vpPolygon3D::FAR_CLIPPING;
        else
            p1ClippedInfo = p1ClippedInfo | vpPolygon3D::NEAR_CLIPPING;
      }
      else{
        p2Clipped = pClippedNear;
        if(flag == vpPolygon3D::FAR_CLIPPING)
            p2ClippedInfo = p2ClippedInfo | vpPolygon3D::FAR_CLIPPING;
        else
            p2ClippedInfo = p2ClippedInfo | vpPolygon3D::NEAR_CLIPPING;
      }
    }

    return true;
}
Example #19
/*!
\brief Compute the camera pose from the image of a rectangle and the
intrinsic parameters; the length along the x axis is known, but the
proportions of the rectangle are unknown.

This method is taken from "Markerless Tracking using Planar Structures
in the Scene" by Gilles Simon. The idea is to compute the homography H
mapping the image points of the rectangle to the coordinates
(0,0), (1,0), (1,1/s), (0,1/s) (the rectangle lies in the Z=0 plane).
If K is the intrinsic parameter matrix, we have s = ||K^{-1}h1|| / ||K^{-1}h2||;
s gives the proportions of the rectangle.

\param p1,p2,p3,p4 : The images of the corners of the rectangle
(respectively the images of (0,0), (lx,0), (lx,lx/s) and (0,lx/s)) (input)
\param cam : The camera used (input)
\param lx : The rectangle size along the x axis (input)
\param cMo : The camera pose (output)
\return The rectangle size along the y axis, lx/s.
*/
double
vpPose::poseFromRectangle(vpPoint &p1,vpPoint &p2,
                          vpPoint &p3,vpPoint &p4,
                          double lx, vpCameraParameters & cam,
                          vpHomogeneousMatrix & cMo)
{

  double rectx[4];
  double recty[4];
  rectx[0] = 0; recty[0] = 0;
  rectx[1] = 1; recty[1] = 0;
  rectx[2] = 1; recty[2] = 1;
  rectx[3] = 0; recty[3] = 1;

  double irectx[4];
  double irecty[4];
  irectx[0] = p1.get_x(); irecty[0] = p1.get_y();
  irectx[1] = p2.get_x(); irecty[1] = p2.get_y();
  irectx[2] = p3.get_x(); irecty[2] = p3.get_y();
  irectx[3] = p4.get_x(); irecty[3] = p4.get_y();

  // Compute the homography
  vpMatrix H(3,3);
  vpHomography hom;

  //  vpHomography::HartleyDLT(4,rectx,recty,irectx,irecty,hom);
  vpHomography::HLM(4,rectx,recty,irectx,irecty,1,hom);
  for (unsigned int i=0 ; i < 3 ; i++)
    for(unsigned int j=0 ; j < 3 ; j++)
      H[i][j] = hom[i][j] ;
  // Compute s = ||K^{-1}h1|| / ||K^{-1}h2|| = ratio (length on x axis / length on y axis)
  vpColVector kh1(3);
  vpColVector kh2(3);
  vpMatrix K(3,3);
  K = cam.get_K();
  // The corner coordinates returned by get_x()/get_y() are already normalized,
  // so the intrinsic matrix is replaced by the identity here.
  K.setIdentity();
  vpMatrix Kinv = K.pseudoInverse();

  vpMatrix KinvH = Kinv*H;
  kh1 = KinvH.column(1);
  kh2 = KinvH.column(2);


  double s= sqrt(kh1.sumSquare())/sqrt(kh2.sumSquare());


  vpMatrix D(3,3);
  D.setIdentity();
  D[1][1]=1/s;
  vpMatrix cHo=H*D;

  // Compute the rotation and the translation
  //  PoseFromRectangle(p1,p2,p3,p4,1/s,lx,cam,cMo );
  p1.setWorldCoordinates(0,0,0) ;
  p2.setWorldCoordinates(lx,0,0) ;
  p3.setWorldCoordinates(lx,lx/s,0) ;
  p4.setWorldCoordinates(0,lx/s,0) ;


  vpPose P ;
  P.addPoint(p1) ;
  P.addPoint(p2) ;
  P.addPoint(p3) ;
  P.addPoint(p4) ;


  P.computePose(vpPose::DEMENTHON_LOWE,cMo) ;
  return lx/s ;

}
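A usage sketch (hypothetical normalized corner coordinates; in practice the set_x()/set_y() values would come from image measurements converted with vpPixelMeterConversion):

vpPoint p1, p2, p3, p4;
p1.set_x(-0.10); p1.set_y(-0.10);
p2.set_x( 0.10); p2.set_y(-0.10);
p3.set_x( 0.10); p3.set_y( 0.05);
p4.set_x(-0.10); p4.set_y( 0.05);
vpCameraParameters cam(800.0, 800.0, 320.0, 240.0);
vpHomogeneousMatrix cMo;
vpPose pose;
double ly = pose.poseFromRectangle(p1, p2, p3, p4, 0.20, cam, cMo);  // lx = 0.20 m
// ly is the estimated size along the y axis; cMo now holds the camera pose.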
Example #20
/*!
  Test the visibility of a line. The result is a subsampling of the given line
  into all of its visible parts.

  \param a : First point of the line.
  \param b : Second point of the line.
  \param lines : List of lines corresponding to the visible parts of the given line.
  \param displayResults : True if the results have to be displayed, false otherwise.
*/
void
vpMbScanLine::queryLineVisibility(vpPoint a,
                         vpPoint b,
                         std::vector<std::pair<vpPoint, vpPoint> > &lines, const bool &displayResults)
{
  vpColVector _a, _b;
  createVectorFromPoint(a, _a, K);
  createVectorFromPoint(b, _b, K);

  double x0 = _a[0] / _a[2];
  double y0 = _a[1] / _a[2];
  double z0 = _a[2];
  double x1 = _b[0] / _b[2];
  double y1 = _b[1] / _b[2];
  double z1 = _b[2];

  vpMbScanLineEdge edge = makeMbScanLineEdge(a, b);
  lines.clear();

  if(displayResults){
#if defined(VISP_HAVE_X11) && defined(DEBUG_DISP)
    double i1(0.0), j1(0.0), i2(0.0), j2(0.0);
    a.project();
    b.project();
    vpMeterPixelConversion::convertPoint(K,a.get_x(), a.get_y(),j1,i1);
    vpMeterPixelConversion::convertPoint(K,b.get_x(), b.get_y(),j2,i2);

    vpDisplay::displayLine(linedebugImg,i1,j1,i2,j2,vpColor::yellow,3);
#endif
  }

  if (!visibility_samples.count(edge))
      return;

  // Assume the biggest difference between the two points is along the X-axis
  double *v0(&x0), *w0(&z0);
  double *v1(&x1), *w1(&z1);
  unsigned int size(w);

  if (std::fabs(y0 - y1) > std::fabs(x0 - x1)) // Test if the biggest difference is on the Y-axis
  {
    v0 = &y0;
    v1 = &y1;
    size = h;
  }

  if (*v0 > *v1)
  {
      std::swap(v0, v1);
      std::swap(w0, w1);
      std::swap(a, b);
  }

  //if (*v0 >= size - 1 || *v1 < 0 || *v1 == *v0)
  if (*v0 >= size - 1 || *v1 < 0 || std::fabs(*v1 - *v0) <= std::numeric_limits<double>::epsilon())
      return;

  const int _v0 = std::max(0, int(std::ceil(*v0)));
  const int _v1 = std::min<int>((int)(size - 1), (int)(std::ceil(*v1) - 1));

  const std::set<int> &visible_samples = visibility_samples[edge];
  int last = _v0;
  vpPoint line_start;
  vpPoint line_end;
  bool b_line_started = false;
  for(std::set<int>::const_iterator it = visible_samples.begin() ; it != visible_samples.end() ; ++it)
  {
      const int v = *it;
      const double alpha = getAlpha(v, (*v0) * (*w0), (*w0), (*v1) * (*w1), (*w1));
      const vpPoint p = mix(a, b, alpha);
      if (last + 1 != v)
      {
          if(b_line_started)
            lines.push_back(std::make_pair(line_start, line_end));
          b_line_started = false;
      }
      if (v == _v0)
      {
          line_start = a;
          line_end = p;
          b_line_started = true;
      }
      else if (v == _v1)
      {
          line_end = b;
          if (!b_line_started)
              line_start = p;
          b_line_started = true;
      }
      else
      {
          line_end = p;
          if (!b_line_started)
              line_start = p;
          b_line_started = true;
      }
      last = v;
  }
  if (b_line_started)
      lines.push_back(std::make_pair(line_start, line_end));

  if(displayResults){
#if defined(VISP_HAVE_X11) && defined(DEBUG_DISP)
    double i1(0.0), j1(0.0), i2(0.0), j2(0.0);
    for(unsigned int i = 0 ; i < lines.size() ; i++){
      lines[i].first.project();
      lines[i].second.project();
      vpMeterPixelConversion::convertPoint(K,lines[i].first.get_x(), lines[i].first.get_y(),j1,i1);
      vpMeterPixelConversion::convertPoint(K,lines[i].second.get_x(), lines[i].second.get_y(),j2,i2);

      vpDisplay::displayLine(linedebugImg,i1,j1,i2,j2,vpColor::red,3);
    }
    vpDisplay::flush(linedebugImg);
#endif
  }
}