Code example #1
File: vpMbScanLine.cpp Project: tswang/visp
/*!
  Create the vpColVector holding the projection of a point in homogeneous pixel coordinates.

  \param p : Point to project.
  \param v : Resulting vector.
  \param K : Camera parameters.
*/
void
vpMbScanLine::createVectorFromPoint(const vpPoint &p, vpColVector &v, const vpCameraParameters &K)
{
    v = vpColVector(3);

    v[0] = p.get_X() * K.get_px() + K.get_u0() * p.get_Z();
    v[1] = p.get_Y() * K.get_py() + K.get_v0() * p.get_Z();
    v[2] = p.get_Z();
}
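In effect v = K · (X, Y, Z)^T with K = [px 0 u0; 0 py v0; 0 0 1], so dividing by v[2] recovers the usual perspective projection u = u0 + px·X/Z, v = v0 + py·Y/Z. A minimal standalone sketch of the same computation (the intrinsics and point coordinates below are illustrative, not from the original code; the header locations assume an older ViSP 2.x layout):

#include <visp/vpCameraParameters.h>
#include <visp/vpColVector.h>
#include <visp/vpPoint.h>

int main()
{
  vpCameraParameters K(800.0, 800.0, 320.0, 240.0); // px, py, u0, v0
  vpPoint p;
  p.set_X(0.1); p.set_Y(-0.05); p.set_Z(1.0);       // camera-frame coordinates

  vpColVector v(3);
  v[0] = p.get_X() * K.get_px() + K.get_u0() * p.get_Z();
  v[1] = p.get_Y() * K.get_py() + K.get_v0() * p.get_Z();
  v[2] = p.get_Z();

  double u_pix = v[0] / v[2]; // 320 + 800 * 0.1  = 400 (pixel column)
  double v_pix = v[1] / v[2]; // 240 - 800 * 0.05 = 200 (pixel row)
  (void)u_pix; (void)v_pix;
  return 0;
}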
Code example #2
File: vpCalibration.cpp Project: DaikiMaekawa/visp
/*!
  Compute and return the standard deviation, expressed in pixels,
  of the reprojection error for the given pose and camera intrinsic parameters, using the model without distortion.
  \param cMo_est : the matrix that defines the pose to be tested.
  \param camera : camera intrinsic parameters to be tested.
  \return The standard deviation, per point, of the error in pixels.
*/
double vpCalibration::computeStdDeviation(const vpHomogeneousMatrix& cMo_est,
                                          const vpCameraParameters& camera)
{
  double residual_ = 0 ;

  std::list<double>::const_iterator it_LoX = LoX.begin();
  std::list<double>::const_iterator it_LoY = LoY.begin();
  std::list<double>::const_iterator it_LoZ = LoZ.begin();
  std::list<vpImagePoint>::const_iterator it_Lip = Lip.begin();

  double u0 = camera.get_u0() ;
  double v0 = camera.get_v0() ;
  double px = camera.get_px() ;
  double py = camera.get_py() ;
  vpImagePoint ip;

  for (unsigned int i =0 ; i < npt ; i++)
  {
    double oX = *it_LoX;
    double oY = *it_LoY;
    double oZ = *it_LoZ;

    double cX = oX*cMo_est[0][0]+oY*cMo_est[0][1]+oZ*cMo_est[0][2] + cMo_est[0][3];
    double cY = oX*cMo_est[1][0]+oY*cMo_est[1][1]+oZ*cMo_est[1][2] + cMo_est[1][3];
    double cZ = oX*cMo_est[2][0]+oY*cMo_est[2][1]+oZ*cMo_est[2][2] + cMo_est[2][3];

    double x = cX/cZ ;
    double y = cY/cZ ;

    ip = *it_Lip;
    double u = ip.get_u() ;
    double v = ip.get_v() ;

    double xp = u0 + x*px;
    double yp = v0 + y*py;

    residual_ += (vpMath::sqr(xp-u) + vpMath::sqr(yp-v))  ;

    ++it_LoX;
    ++it_LoY;
    ++it_LoZ;
    ++it_Lip;
  }
  this->residual = residual_ ;
  return sqrt(residual_/npt) ;
}
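Each model point (oX, oY, oZ) is moved into the camera frame with cMo_est, projected through the pinhole model xp = u0 + x·px, yp = v0 + y·py, and compared to the measured image point; the returned value is the root-mean-square reprojection error in pixels. A hedged usage sketch (the point data and intrinsics below are made up):

vpCalibration calib;
vpImagePoint ip(240.0, 320.0);                      // measured corner (i, j)
calib.addPoint(0.0, 0.0, 0.0, ip);                  // (oX, oY, oZ) in the object frame
// ... add the remaining model/image point pairs the same way ...
vpCameraParameters cam(800.0, 800.0, 320.0, 240.0); // px, py, u0, v0
vpHomogeneousMatrix cMo;                            // pose to evaluate
double rms = calib.computeStdDeviation(cMo, cam);   // RMS error in pixels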
Code example #3
File: vpSimulator.cpp Project: tswang/visp
void
vpSimulator::setInternalCameraParameters(vpCameraParameters &_cam)
{
  internalCameraParameters = _cam ;


  float px = (float)_cam.get_px();
  float py = (float)_cam.get_py();
  float v  = internal_height/(2.f*py);

  internalCamera->ref() ;
  internalCamera->heightAngle = 2*atan(v);
  internalCamera->aspectRatio=(internal_width/internal_height)*(px/py);
  internalCamera->nearDistance = 0.001f ;

  internalCamera->farDistance = 1000;
  internalCamera->unrefNoDelete() ;
}
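The heightAngle assignment follows directly from the pinhole model: a vertical focal length of py pixels and an image of internal_height pixels subtend a vertical field of view of 2·atan(internal_height / (2·py)), and the aspectRatio term (width/height)·(px/py) corrects for non-square pixels. For instance, internal_height = 480 and py = 800 give a vertical field of view of 2·atan(0.3) ≈ 33.4 degrees.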
Code example #4
File: vpSimulator.cpp Project: tswang/visp
void
vpSimulator::setExternalCameraParameters(vpCameraParameters &_cam)
{
//   SoPerspectiveCamera *camera ;
//   camera  = (SoPerspectiveCamera *)this->externalView->getCamera() ;
  externalCameraParameters = _cam ;

  float px = (float)_cam.get_px();
  float py = (float)_cam.get_py();
  float v  = external_height/(2*py);

  externalCamera->ref() ;
  externalCamera->heightAngle = 2*atan(v);
  externalCamera->aspectRatio=(external_width/external_height)*(px/py);
  externalCamera->nearDistance = 0.001f ;
  externalCamera->farDistance = 1000;
  externalCamera->unrefNoDelete() ;

}
Code example #5
File: vpCircle.cpp Project: tswang/visp
/*!
  Computes the coordinates of the point corresponding to the intersection
  between a circle and a line.

  \warning This function assumes changeFrame() and projection() have already been called.

  \sa changeFrame(), projection()

  \param circle : Circle to consider for the intersection.
  \param cam : Camera parameters that have to be used for the intersection computation.
  \param rho : The rho parameter of the line.
  \param theta : The theta parameter of the line.
  \param i : resulting i-coordinate of the intersection point.
  \param j : resulting j-coordinate of the intersection point.
*/
void
vpCircle::computeIntersectionPoint(const vpCircle &circle, const vpCameraParameters &cam, const double &rho, const double &theta, double &i, double &j)
{
   // This was taken from the code of art-v1. (from the artCylinder class)
   double px = cam.get_px() ;
   double py = cam.get_py() ;
   double u0 = cam.get_u0() ;
   double v0 = cam.get_v0() ;

   double mu11 = circle.p[3];
   double mu02 = circle.p[4];
   double mu20 = circle.p[2];
   double Xg = u0 + circle.p[0]*px;
   double Yg = v0 + circle.p[1]*py;

   // Find Intersection between line and ellipse in the image.

   // Optimised calculation for X
   double stheta = sin(theta);
   double ctheta = cos(theta);
   double sctheta = stheta*ctheta;
   double m11yg = mu11*Yg;
   double ctheta2 = vpMath::sqr(ctheta);
   double m02xg = mu02*Xg;
   double m11stheta = mu11*stheta;
   j = (mu11*Xg*sctheta - mu20*Yg*sctheta + mu20*rho*ctheta
        - m11yg + m11yg*ctheta2 + m02xg - m02xg*ctheta2 + m11stheta*rho)
       / (mu20*ctheta2 + 2.0*m11stheta*ctheta + mu02 - mu02*ctheta2);
   // Optimised calculation for Y
   double rhom02 = rho*mu02;
   double sctheta2 = stheta*ctheta2;
   double ctheta3 = ctheta2*ctheta;
   i = -(-rho*mu11*stheta*ctheta - rhom02 + rhom02*ctheta2
         + mu11*Xg*sctheta2 - mu20*Yg*sctheta2 - ctheta*mu11*Yg
         + ctheta3*mu11*Yg + ctheta*mu02*Xg - ctheta3*mu02*Xg)
       / (mu20*ctheta2 + 2.0*mu11*stheta*ctheta + mu02 - mu02*ctheta2)
       / stheta;
}
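Here circle.p holds the projected ellipse in normalized coordinates: its center (p[0], p[1]) and its centered moments mu20, mu11, mu02 (p[2], p[3], p[4]). Once the center is converted to pixel coordinates (Xg, Yg), the closed-form expressions for j and i result from substituting the line's (rho, theta) parametrization into the ellipse equation defined by those moments; the shared denominator mu20·cos²θ + 2·mu11·sinθ·cosθ + mu02·sin²θ is that quadratic form evaluated along the line direction.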
Code example #6
File: camera.cpp Project: jkammerl/visp_bridge
  sensor_msgs::CameraInfo toSensorMsgsCameraInfo(vpCameraParameters& cam_info, unsigned int cam_image_width, unsigned int cam_image_height ){
      sensor_msgs::CameraInfo ret;

      std::vector<double> D(5);
      D[0]=cam_info.get_kdu();
      D[1] = D[2] = D[3] = D[4] = 0.;
      ret.D = D;
      ret.P.assign(0.);
      ret.K.assign(0.);
      ret.R.assign(0.);

      ret.R[0] = 1.;
      ret.R[1 * 3 + 1] = 1.;
      ret.R[2 * 3 + 2] = 1.;

      ret.P[0 * 4 + 0] = cam_info.get_px();
      ret.P[1 * 4 + 1] = cam_info.get_py();
      ret.P[0 * 4 + 2] = cam_info.get_u0();
      ret.P[1 * 4 + 2] = cam_info.get_v0();
      ret.P[2 * 4 + 2] = 1;


      ret.K[0 * 3 + 0] = cam_info.get_px();
      ret.K[1 * 3 + 1] = cam_info.get_py();
      ret.K[0 * 3 + 2] = cam_info.get_u0();
      ret.K[1 * 3 + 2] = cam_info.get_v0();
      ret.K[2 * 3 + 2] = 1;

      ret.distortion_model = "plumb_bob";
      ret.binning_x = 0;
      ret.binning_y = 0;
      ret.width = cam_image_width;
      ret.height = cam_image_height;

      return ret;
  }
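A hedged usage sketch (the intrinsics and image size below are illustrative, and the call assumes code that already links against visp_bridge and sees the function in the current namespace):

vpCameraParameters cam;
cam.initPersProjWithDistortion(800.0, 800.0, 320.0, 240.0, 0.01, -0.01); // px, py, u0, v0, kud, kdu
sensor_msgs::CameraInfo info = toSensorMsgsCameraInfo(cam, 640, 480);
// info.K and info.P now hold the pinhole matrix; info.D[0] carries ViSP's
// kdu coefficient in the k1 slot of the plumb_bob model, the others being zero.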
Code example #7
/*!
\brief Computes the camera pose from the image of a rectangle and
the intrinsic parameters; the length along the x axis is known but the
proportions of the rectangle are unknown.

This method is taken from "Markerless Tracking using Planar Structures
in the Scene" by Gilles Simon. The idea is to compute the homography H
giving the image point of the rectangle by associating them with the
coordinates (0,0)(1,0)(1,1/s)(0,1/s) (the rectangle is on the Z=0 plane).
If K is the intrinsic parameter matrix, we have s = ||Kh1|| / ||Kh2||. s
gives us the proportion of the rectangle

\param p1,p2,p3,p4: the image of the corners of the rectangle
(respectively the image of  (0,0),(lx,0),(lx,lx/s) and (0,lx/s)) (input)
\param cam: the camera used (input)
\param lx: the rectangle size on the x axis (input)
\param cMo: the camera pose (output)
\return The estimated size of the rectangle along the y axis, i.e. lx/s.
*/
double
vpPose::poseFromRectangle(vpPoint &p1,vpPoint &p2,
                          vpPoint &p3,vpPoint &p4,
                          double lx, vpCameraParameters & cam,
                          vpHomogeneousMatrix & cMo)
{

  double rectx[4] = {0, 1, 1, 0};
  double recty[4] = {0, 0, 1, 1};
  double irectx[4] = {p1.get_x(), p2.get_x(), p3.get_x(), p4.get_x()};
  double irecty[4] = {p1.get_y(), p2.get_y(), p3.get_y(), p4.get_y()};

  // Compute the homography
  vpMatrix H(3,3);
  vpHomography hom;

  //  vpHomography::HartleyDLT(4,rectx,recty,irectx,irecty,hom);
  vpHomography::HLM(4,rectx,recty,irectx,irecty,1,hom);
  for (unsigned int i=0 ; i < 3 ; i++)
    for(unsigned int j=0 ; j < 3 ; j++)
      H[i][j] = hom[i][j] ;
  // Compute s = ||Kh1|| / ||Kh2|| = ratio (length on x axis / length on y axis)
  vpColVector kh1(3);
  vpColVector kh2(3);
  vpMatrix K(3,3);
  K = cam.get_K();
  // The corner coordinates are already normalized (meter) coordinates,
  // so the intrinsic matrix reduces to the identity here.
  K.setIdentity();
  vpMatrix Kinv = K.pseudoInverse();

  vpMatrix KinvH =Kinv*H;
  kh1=KinvH.column(1);
  kh2=KinvH.column(2);


  double s= sqrt(kh1.sumSquare())/sqrt(kh2.sumSquare());


  vpMatrix D(3,3);
  D.setIdentity();
  D[1][1]=1/s;
  vpMatrix cHo=H*D;

  // Compute the rotation and the translation
  //  PoseFromRectangle(p1,p2,p3,p4,1/s,lx,cam,cMo );
  p1.setWorldCoordinates(0,0,0) ;
  p2.setWorldCoordinates(lx,0,0) ;
  p3.setWorldCoordinates(lx,lx/s,0) ;
  p4.setWorldCoordinates(0,lx/s,0) ;


  vpPose P ;
  P.addPoint(p1) ;
  P.addPoint(p2) ;
  P.addPoint(p3) ;
  P.addPoint(p4) ;


  P.computePose(vpPose::DEMENTHON_LOWE,cMo) ;
  return lx/s ;

}
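A hedged usage sketch (the corners are assumed to already carry normalized image coordinates, e.g. obtained with vpPixelMeterConversion::convertPoint and stored with set_x()/set_y(); the values below are made up):

vpPoint p1, p2, p3, p4;
p1.set_x(-0.10); p1.set_y(-0.10); // normalized coordinates of the corner images
p2.set_x( 0.10); p2.set_y(-0.10);
p3.set_x( 0.10); p3.set_y( 0.05);
p4.set_x(-0.10); p4.set_y( 0.05);
vpCameraParameters cam;
vpHomogeneousMatrix cMo;
vpPose pose;
double ly = pose.poseFromRectangle(p1, p2, p3, p4, 0.10, cam, cMo); // lx = 0.10 m
// cMo now holds the camera pose; ly is the recovered length on the y axis, lx/s.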
Code example #8
File: vpCalibration.cpp Project: DaikiMaekawa/visp
/*!
  Compute the multi-images calibration according to the desired method using many poses.

  \param method : Method used to estimate the camera parameters.
  \param table_cal : Vector of vpCalibration.
  \param cam_est : Estimated intrinsic camera parameters.
  \param globalReprojectionError : Global reprojection error or global residual.
  \param verbose : Set to true to display information about the residual at each
  iteration of the algorithm.

  \return 0 if the computation succeeded.
*/
int vpCalibration::computeCalibrationMulti(vpCalibrationMethodType method,
                                           std::vector<vpCalibration> &table_cal,
                                           vpCameraParameters& cam_est,
                                           double &globalReprojectionError,
                                           bool verbose)
{
  try{
    unsigned int nbPose = (unsigned int) table_cal.size();
    for(unsigned int i=0;i<nbPose;i++){
      if(table_cal[i].get_npt()>3)
        table_cal[i].computePose(cam_est,table_cal[i].cMo);
    }
    switch (method) {
    case CALIB_LAGRANGE : {
      if(nbPose > 1){
        std::cout << "this calibration method is not available in" << std::endl
                  << "vpCalibration::computeCalibrationMulti()" << std::endl;
        return -1 ;
      }
      else {
        table_cal[0].calibLagrange(cam_est,table_cal[0].cMo);
        table_cal[0].cam = cam_est ;
        table_cal[0].cam_dist = cam_est ;
        table_cal[0].cMo_dist = table_cal[0].cMo ;
      }
      break;
    }
    case CALIB_LAGRANGE_VIRTUAL_VS :
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST : {
      if(nbPose > 1){
        std::cout << "this calibration method is not available in" << std::endl
                  << "vpCalibration::computeCalibrationMulti()" << std::endl
                  << "with several images." << std::endl;
        return -1 ;
      }
      else {
        table_cal[0].calibLagrange(cam_est,table_cal[0].cMo);
        table_cal[0].cam = cam_est ;
        table_cal[0].cam_dist = cam_est ;
        table_cal[0].cMo_dist = table_cal[0].cMo ;
      }
      calibVVSMulti(table_cal, cam_est, globalReprojectionError, verbose);
      break ;
    }
    case CALIB_VIRTUAL_VS:
    case CALIB_VIRTUAL_VS_DIST: {
      calibVVSMulti(table_cal, cam_est, globalReprojectionError, verbose);
      break ;
    }
    }
    //Print camera parameters
    if(verbose){
      //       std::cout << "Camera parameters without distortion :" << std::endl;
      cam_est.printParameters();
    }

    switch (method)
    {
    case CALIB_LAGRANGE :
    case CALIB_LAGRANGE_VIRTUAL_VS :
    case CALIB_VIRTUAL_VS:
      verbose = false ;
      break;
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST :
    case CALIB_VIRTUAL_VS_DIST:
      {
        if(verbose)
          std::cout << "Compute camera parameters with distortion"<<std::endl;

        calibVVSWithDistortionMulti(table_cal, cam_est, globalReprojectionError, verbose);
      }
      break ;
    }
    //Print camera parameters
    if(verbose){
      //       std::cout << "Camera parameters without distortion :" << std::endl;
      table_cal[0].cam.printParameters();
      //       std::cout << "Camera parameters with distortion:" << std::endl;
      cam_est.printParameters();
      std::cout<<std::endl;
    }
    return 0 ;
  }
  catch(...){ throw; }
}
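A hedged usage sketch (one vpCalibration per image, each assumed to have been filled with addPoint() beforehand; the image count and initial guess are made up):

std::vector<vpCalibration> table_cal(4);                // four calibration images
// ... fill each table_cal[i] with addPoint(oX, oY, oZ, ip) calls ...
vpCameraParameters cam_est(600.0, 600.0, 320.0, 240.0); // initial guess
double error = 0.;
if (vpCalibration::computeCalibrationMulti(vpCalibration::CALIB_VIRTUAL_VS,
                                           table_cal, cam_est, error, true) == 0)
  cam_est.printParameters(); // refined intrinsics; error holds the global residual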
Code example #9
File: vpCalibration.cpp Project: DaikiMaekawa/visp
/*!
  Compute the calibration according to the desired method using one pose.

  \param method : Method that will be used to estimate the parameters.
  \param cMo_est : estimated homogeneous matrix that defines the pose.
  \param cam_est : estimated intrinsic camera parameters.
  \param verbose : set to true to display information about the residual at each
  iteration of the algorithm.

  \return 0 if the calibration computation succeeded.
*/
int vpCalibration::computeCalibration(vpCalibrationMethodType method,
                                      vpHomogeneousMatrix &cMo_est,
                                      vpCameraParameters &cam_est,
                                      bool verbose)
{
  try{
    computePose(cam_est,cMo_est);
    switch (method)
    {
    case CALIB_LAGRANGE :
    case CALIB_LAGRANGE_VIRTUAL_VS :
      {
        calibLagrange(cam_est, cMo_est);
      }
      break;
    case CALIB_VIRTUAL_VS:
    case CALIB_VIRTUAL_VS_DIST:
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST:
    default:
      break;
    }

    switch (method)
    {
    case CALIB_VIRTUAL_VS:
    case CALIB_VIRTUAL_VS_DIST:
    case CALIB_LAGRANGE_VIRTUAL_VS:
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST:
      {
        if (verbose){std::cout << "start calibration without distortion"<< std::endl;}
        calibVVS(cam_est, cMo_est, verbose);
      }
      break ;
    case CALIB_LAGRANGE:
    default:
      break;
    }
    this->cMo = cMo_est;
    this->cMo_dist = cMo_est;

    //Print camera parameters
    if(verbose){
      //       std::cout << "Camera parameters without distortion :" << std::endl;
      cam_est.printParameters();
    }

    this->cam = cam_est;

    switch (method)
    {
    case CALIB_VIRTUAL_VS_DIST:
    case CALIB_LAGRANGE_VIRTUAL_VS_DIST:
      {
        if (verbose){std::cout << "start calibration with distortion"<< std::endl;}
        calibVVSWithDistortion(cam_est, cMo_est, verbose);
      }
      break ;
    case CALIB_LAGRANGE:
    case CALIB_VIRTUAL_VS:
    case CALIB_LAGRANGE_VIRTUAL_VS:
    default:
      break;
    }
    //Print camera parameters
    if(verbose){
      //       std::cout << "Camera parameters without distortion :" << std::endl;
      this->cam.printParameters();
      //       std::cout << "Camera parameters with distortion :" << std::endl;
      cam_est.printParameters();
    }

    this->cam_dist = cam_est ;

    this->cMo_dist = cMo_est;
    return 0 ;
  }
  catch(...){
    throw;
  }
}
Code example #10
File: vpCalibration.cpp Project: DaikiMaekawa/visp
/*!
  Compute and return the standard deviation, expressed in pixels,
  of the reprojection error for the given pose and camera intrinsic parameters, using the models with distortion.
  \param cMo_est : the matrix that defines the pose to be tested.
  \param camera : camera intrinsic parameters to be tested.
  \return The standard deviation, per point, of the error in pixels.
*/
double vpCalibration::computeStdDeviation_dist(const vpHomogeneousMatrix& cMo_est,
                                               const vpCameraParameters& camera)
{
  double residual_ = 0 ;

  std::list<double>::const_iterator it_LoX = LoX.begin();
  std::list<double>::const_iterator it_LoY = LoY.begin();
  std::list<double>::const_iterator it_LoZ = LoZ.begin();
  std::list<vpImagePoint>::const_iterator it_Lip = Lip.begin();

  double u0 = camera.get_u0() ;
  double v0 = camera.get_v0() ;
  double px = camera.get_px() ;
  double py = camera.get_py() ;
  double kud = camera.get_kud() ;
  double kdu = camera.get_kdu() ;

  double inv_px = 1/px;
  double inv_py = 1/py;
  vpImagePoint ip;

  for (unsigned int i =0 ; i < npt ; i++)
  {
    double oX = *it_LoX;
    double oY = *it_LoY;
    double oZ = *it_LoZ;

    double cX = oX*cMo_est[0][0]+oY*cMo_est[0][1]+oZ*cMo_est[0][2] + cMo_est[0][3];
    double cY = oX*cMo_est[1][0]+oY*cMo_est[1][1]+oZ*cMo_est[1][2] + cMo_est[1][3];
    double cZ = oX*cMo_est[2][0]+oY*cMo_est[2][1]+oZ*cMo_est[2][2] + cMo_est[2][3];

    double x = cX/cZ ;
    double y = cY/cZ ;

    ip = *it_Lip;
    double u = ip.get_u() ;
    double v = ip.get_v() ;

    double r2ud = 1+kud*(vpMath::sqr(x)+vpMath::sqr(y)) ;

    double xp = u0 + x*px*r2ud;
    double yp = v0 + y*py*r2ud;

    residual_ += (vpMath::sqr(xp-u) + vpMath::sqr(yp-v))  ;

    double r2du = (vpMath::sqr((u-u0)*inv_px)+vpMath::sqr((v-v0)*inv_py)) ;

    xp = u0 + x*px - kdu*(u-u0)*r2du;
    yp = v0 + y*py - kdu*(v-v0)*r2du;

    residual_ += (vpMath::sqr(xp-u) + vpMath::sqr(yp-v))  ;
    ++it_LoX;
    ++it_LoY;
    ++it_LoZ;
    ++it_Lip;
  }
  residual_ /=2;

  this->residual_dist = residual_;
  return sqrt(residual_/npt) ;
}
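The loop evaluates both distortion directions and adds both squared errors per point, which is why the accumulated total is halved at the end: first the meter-to-pixel model xp = u0 + x·px·(1 + kud·r²) with r² = x² + y², then the pixel-to-meter model that removes the kdu·r2du term from the measured pixel coordinates. The returned value is therefore the per-point RMS error averaged over the two models.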
Code example #11
File: vpMomentObject.cpp Project: s-trinh/visp
/*!
 * Manikandan. B
 * Photometric moments v2
 * Intended to be used by 'vpMomentObject's of type DENSE_FULL_OBJECT
 * @param image                   : Grayscale image
 * @param cam                     : Camera parameters
 * @param bg_type                 : White/Black background surrounding the image
 * @param normalize_with_pix_size : If this flag is set, the computed moments are normalized w.r.t. the pixel size
 *                                  available from the camera parameters
 */
void vpMomentObject::fromImage(const vpImage<unsigned char>& image, const vpCameraParameters& cam,
    vpCameraImgBckGrndType bg_type, bool normalize_with_pix_size)
{
  std::vector<double> cache(order*order,0.);
  values.assign(order*order,0);

  // (x,y) - Pixel coordinates converted to normalized (meter) coordinates
  double x=0;
  double y=0;
  //for indexing into cache[] and values[]
  unsigned int idx = 0;
  unsigned int kidx = 0;

  double intensity = 0;

  //double Imax = static_cast<double>(image.getMaxValue());

  double iscale = 1.0;
  if (flg_normalize_intensity) {                                            // This makes the image a probability density function
    double Imax = 255.;                                                     // To check the effect of gray level change. ISR Coimbra
    iscale = 1.0/Imax;
  }

  if (bg_type == vpMomentObject::WHITE) {
      /////////// WHITE BACKGROUND ///////////
      for(unsigned int j=0;j<image.getRows();j++){
          for(unsigned int i=0;i<image.getCols();i++){
              x = 0;
              y = 0;
              intensity = (double)(image[j][i])*iscale;
              double intensity_white = 1. - intensity;

              vpPixelMeterConversion::convertPoint(cam,i,j,x,y);
              cacheValues(cache,x,y, intensity_white);			// Modify 'cache' which has x^p*y^q to x^p*y^q*(1 - I(x,y))

              // Copy to "values"
              for(unsigned int k=0;k<order;k++){
                  kidx = k*order;
                  for(unsigned int l=0;l<order-k;l++){
                      idx = kidx+l;
                      values[idx]+= cache[idx];
                  }
              }
          }
      }
  }
  else {
      /////////// BLACK BACKGROUND	///////////
      for(unsigned int j=0;j<image.getRows();j++){
          for(unsigned int i=0;i<image.getCols();i++){
              x = 0;
              y = 0;
              intensity = (double)(image[j][i])*iscale;
              vpPixelMeterConversion::convertPoint(cam,i,j,x,y);

              // Cache values for fast moment calculation
              cacheValues(cache,x,y, intensity);					// Modify 'cache' which has x^p*y^q to x^p*y^q*I(x,y)

              // Copy to moments array 'values'
              for(unsigned int k=0;k<order;k++){
                  kidx = k*order;
                  for(unsigned int l=0;l<order-k;l++){
                      idx = kidx+l;
                      values[idx]+= cache[idx];
                  }
              }

          }
      }
  }

  if (normalize_with_pix_size){
      // Normalisation equivalent to sampling interval/pixel size delX x delY
      double norm_factor = 1./(cam.get_px()*cam.get_py());
      for (std::vector<double>::iterator it = values.begin(); it!=values.end(); ++it) {
          *it = (*it) * norm_factor;
      }
  }
}
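A hedged usage sketch (the image source, moment order, and intrinsics below are illustrative):

vpImage<unsigned char> I(480, 640);
// ... fill I with a grayscale frame ...
vpCameraParameters cam(800.0, 800.0, 320.0, 240.0);
vpMomentObject obj(3);                              // moments up to order 3
obj.setType(vpMomentObject::DENSE_FULL_OBJECT);
obj.fromImage(I, cam, vpMomentObject::BLACK, true); // black background, normalized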
Code example #12
File: vpMomentObject.cpp Project: s-trinh/visp
void vpMomentObject::fromImage(const vpImage<unsigned char>& image, unsigned char threshold, const vpCameraParameters& cam){
#ifdef VISP_HAVE_OPENMP
  #pragma omp parallel shared(threshold)
  {
    std::vector<double> curvals(order*order);
    curvals.assign(order*order,0.);

    #pragma omp for nowait//automatically organize loop counter between threads
    for(int i=0;i<(int)image.getCols();i++){
      for(int j=0;j<(int)image.getRows();j++){
        unsigned int i_ = static_cast<unsigned int>(i);
        unsigned int j_ = static_cast<unsigned int>(j);
        if(image[j_][i_]>threshold){
          double x=0;
          double y=0;
          vpPixelMeterConversion::convertPoint(cam,i_,j_,x,y);

          double yval=1.;
          for(unsigned int k=0;k<order;k++){
            double xval=1.;
            for(unsigned int l=0;l<order-k;l++){
              curvals[(k*order+l)]+=(xval*yval);
              xval*=x;
            }
            yval*=y;
          }
        }
      }
    }

    #pragma omp master //only set this variable in master thread
    {
      values.assign(order*order, 0.);
    }

    #pragma omp barrier
    for(unsigned int k=0;k<order;k++){
      for(unsigned int l=0;l<order-k;l++){
        #pragma omp atomic
        values[k*order+l]+= curvals[k*order+l];
      }
    }

  }
#else
    std::vector<double> cache(order*order,0.);
    values.assign(order*order,0);
    for(unsigned int i=0;i<image.getCols();i++){
        for(unsigned int j=0;j<image.getRows();j++){
            if(image[j][i]>threshold){
                double x=0;
                double y=0;
                vpPixelMeterConversion::convertPoint(cam,i,j,x,y);
                cacheValues(cache,x,y);
                for(unsigned int k=0;k<order;k++){
                    for(unsigned int l=0;l<order-k;l++){
                        values[k*order+l]+=cache[k*order+l];
                    }
                }
            }
        }
    }
#endif

    //Normalisation equivalent to sampling interval/pixel size delX x delY
    double norm_factor = 1./(cam.get_px()*cam.get_py());
    for (std::vector<double>::iterator it = values.begin(); it!=values.end(); ++it) {
        *it = (*it) * norm_factor;
    }
}
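Design note on the OpenMP branch: each thread accumulates moments for its share of the columns into a private curvals buffer, the master thread alone resets the shared values array, and only after the barrier are the per-thread buffers merged under #pragma omp atomic. This keeps the hot per-pixel loop free of synchronization and confines contention to the final O(order²) additions.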
Code example #13
File: vpPolygon3D.cpp Project: tswang/visp
/*!
  Compute the region of interest in the image according to the used clipping.

  \warning If the FOV clipping is used, camera normals have to be precomputed.
  
  \param cam : camera parameters used to compute the field of view.
*/
void
vpPolygon3D::computePolygonClipped(const vpCameraParameters &cam)
{
  polyClipped.clear();
  std::vector<vpColVector> fovNormals;
  std::vector<std::pair<vpPoint,unsigned int> > polyClippedTemp;
  std::vector<std::pair<vpPoint,unsigned int> > polyClippedTemp2;

  if(cam.isFovComputed() && clippingFlag > 3)
    fovNormals = cam.getFovNormals();

  for(unsigned int i = 0 ; i < nbpt ; i++){
      p[i%nbpt].projection();
      polyClippedTemp.push_back(std::make_pair(p[i%nbpt],vpPolygon3D::NO_CLIPPING));
  }

  if(clippingFlag != vpPolygon3D::NO_CLIPPING)
  for(unsigned int i = 1 ; i < 64 ; i=i*2)
  {
      if(((clippingFlag & i) == i) || ((clippingFlag > vpPolygon3D::FAR_CLIPPING) && (i==1)))
      {
      for(unsigned int j = 0 ; j < polyClippedTemp.size() ; j++)
      {
          vpPoint p1Clipped = polyClippedTemp[j].first;
          vpPoint p2Clipped = polyClippedTemp[(j+1)%polyClippedTemp.size()].first;

          unsigned int p2ClippedInfoBefore = polyClippedTemp[(j+1)%polyClippedTemp.size()].second;
          unsigned int p1ClippedInfo = polyClippedTemp[j].second;
          unsigned int p2ClippedInfo = polyClippedTemp[(j+1)%polyClippedTemp.size()].second;

          bool problem = true;

          switch(i){
          case 1:
            problem = !(vpPolygon3D::getClippedPointsDistance(p1Clipped, p2Clipped, p1Clipped, p2Clipped, p1ClippedInfo, p2ClippedInfo,
                                                               i, distNearClip));
            break;
          case 2:
            problem = !(vpPolygon3D::getClippedPointsDistance(p1Clipped, p2Clipped, p1Clipped, p2Clipped, p1ClippedInfo, p2ClippedInfo,
                                                               i, distFarClip));
            break;
          case 4:
            problem = !(vpPolygon3D::getClippedPointsFovGeneric(p1Clipped, p2Clipped, p1Clipped, p2Clipped, p1ClippedInfo, p2ClippedInfo,
                                                        fovNormals[0], vpPolygon3D::LEFT_CLIPPING));
            break;
          case 8:
            problem = !(vpPolygon3D::getClippedPointsFovGeneric(p1Clipped, p2Clipped, p1Clipped, p2Clipped, p1ClippedInfo, p2ClippedInfo,
                                                        fovNormals[1], vpPolygon3D::RIGHT_CLIPPING));
            break;
          case 16:
            problem = !(vpPolygon3D::getClippedPointsFovGeneric(p1Clipped, p2Clipped, p1Clipped, p2Clipped, p1ClippedInfo, p2ClippedInfo,
                                                        fovNormals[2], vpPolygon3D::UP_CLIPPING));
            break;
          case 32:
            problem = !(vpPolygon3D::getClippedPointsFovGeneric(p1Clipped, p2Clipped, p1Clipped, p2Clipped, p1ClippedInfo, p2ClippedInfo,
                                                        fovNormals[3], vpPolygon3D::DOWN_CLIPPING));
            break;
          }

          if(!problem)
          {
            p1Clipped.projection();
            polyClippedTemp2.push_back(std::make_pair(p1Clipped, p1ClippedInfo));

            if(p2ClippedInfo != p2ClippedInfoBefore)
            {
              p2Clipped.projection();
              polyClippedTemp2.push_back(std::make_pair(p2Clipped, p2ClippedInfo));
            }

            if(nbpt == 2){
              if(p2ClippedInfo == p2ClippedInfoBefore)
              {
                p2Clipped.projection();
                polyClippedTemp2.push_back(std::make_pair(p2Clipped, p2ClippedInfo));
              }
              break;
            }
          }
      }

      polyClippedTemp = polyClippedTemp2;
      polyClippedTemp2.clear();
      }
  }

  polyClipped = polyClippedTemp;
}