/*!
  Create a vpColVector of a projected point.

  \param p : Point to project.
  \param v : Resulting vector.
  \param K : Camera parameters.
*/
void vpMbScanLine::createVectorFromPoint(const vpPoint &p, vpColVector &v, const vpCameraParameters &K)
{
  v = vpColVector(3);

  v[0] = p.get_X() * K.get_px() + K.get_u0() * p.get_Z();
  v[1] = p.get_Y() * K.get_py() + K.get_v0() * p.get_Z();
  v[2] = p.get_Z();
}
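// Example (illustrative, not part of ViSP): a minimal sketch showing that the
// homogeneous vector built above encodes the pixel position of the projected
// point, since u = v[0]/v[2] and v = v[1]/v[2]. The same result can be obtained
// through the public vpMeterPixelConversion API; the intrinsic values below are
// arbitrary placeholders and the includes assume the ViSP 3 header layout.
#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpMeterPixelConversion.h>
#include <visp3/core/vpPoint.h>

void examplePointProjection()
{
  vpCameraParameters K(600., 600., 320., 240.); // px, py, u0, v0
  vpPoint p;
  p.set_X(0.1);
  p.set_Y(0.05);
  p.set_Z(1.0); // point expressed in the camera frame

  double u = 0., v = 0.;
  vpMeterPixelConversion::convertPoint(K, p.get_X() / p.get_Z(), p.get_Y() / p.get_Z(), u, v);
  // (u, v) matches (v[0]/v[2], v[1]/v[2]) computed by createVectorFromPoint()
}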
sensor_msgs::CameraInfo toSensorMsgsCameraInfo(vpCameraParameters &cam_info, unsigned int cam_image_width,
                                               unsigned int cam_image_height)
{
  sensor_msgs::CameraInfo ret;

  // Distortion: only the first plumb_bob coefficient is filled, from ViSP's
  // kdu parameter; the remaining coefficients are set to zero.
  std::vector<double> D(5);
  D[0] = cam_info.get_kdu();
  D[1] = D[2] = D[3] = D[4] = 0.;
  ret.D = D;

  ret.P.assign(0.);
  ret.K.assign(0.);
  ret.R.assign(0.);

  // Rectification matrix R: identity
  ret.R[0 * 3 + 0] = 1.;
  ret.R[1 * 3 + 1] = 1.;
  ret.R[2 * 3 + 2] = 1.;

  // Projection matrix P (3x4)
  ret.P[0 * 4 + 0] = cam_info.get_px();
  ret.P[1 * 4 + 1] = cam_info.get_py();
  ret.P[0 * 4 + 2] = cam_info.get_u0();
  ret.P[1 * 4 + 2] = cam_info.get_v0();
  ret.P[2 * 4 + 2] = 1.;

  // Intrinsic matrix K (3x3)
  ret.K[0 * 3 + 0] = cam_info.get_px();
  ret.K[1 * 3 + 1] = cam_info.get_py();
  ret.K[0 * 3 + 2] = cam_info.get_u0();
  ret.K[1 * 3 + 2] = cam_info.get_v0();
  ret.K[2 * 3 + 2] = 1.;

  ret.distortion_model = "plumb_bob";
  ret.binning_x = 0;
  ret.binning_y = 0;
  ret.width = cam_image_width;
  ret.height = cam_image_height;

  return ret;
}
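// Usage sketch (assumes a ROS build environment where this conversion helper
// is available; the intrinsic and distortion values are placeholders):
// converting ViSP intrinsics into a sensor_msgs::CameraInfo ready to be
// published on a camera_info topic.
#include <sensor_msgs/CameraInfo.h>
#include <visp/vpCameraParameters.h>

void exampleCameraInfoConversion()
{
  vpCameraParameters cam;
  cam.initPersProjWithDistortion(600., 600., 320., 240., -0.2, 0.2); // px, py, u0, v0, kud, kdu

  sensor_msgs::CameraInfo info = toSensorMsgsCameraInfo(cam, 640, 480);
  // info.K, info.P, info.D and info.distortion_model are now filled
}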
/*!
  Compute and return the standard deviation, expressed in pixels, of the
  reprojection error for a given pose matrix and camera intrinsic parameters,
  using the model without distortion.

  \param cMo_est : The homogeneous matrix that defines the pose to be tested.
  \param camera : Camera intrinsic parameters to be tested.
  \return The standard deviation per point of the reprojection error, in pixels.
*/
double vpCalibration::computeStdDeviation(const vpHomogeneousMatrix &cMo_est, const vpCameraParameters &camera)
{
  double residual_ = 0;

  std::list<double>::const_iterator it_LoX = LoX.begin();
  std::list<double>::const_iterator it_LoY = LoY.begin();
  std::list<double>::const_iterator it_LoZ = LoZ.begin();
  std::list<vpImagePoint>::const_iterator it_Lip = Lip.begin();

  double u0 = camera.get_u0();
  double v0 = camera.get_v0();
  double px = camera.get_px();
  double py = camera.get_py();
  vpImagePoint ip;

  for (unsigned int i = 0; i < npt; i++) {
    double oX = *it_LoX;
    double oY = *it_LoY;
    double oZ = *it_LoZ;

    // Transform the 3D point from the object frame to the camera frame
    double cX = oX * cMo_est[0][0] + oY * cMo_est[0][1] + oZ * cMo_est[0][2] + cMo_est[0][3];
    double cY = oX * cMo_est[1][0] + oY * cMo_est[1][1] + oZ * cMo_est[1][2] + cMo_est[1][3];
    double cZ = oX * cMo_est[2][0] + oY * cMo_est[2][1] + oZ * cMo_est[2][2] + cMo_est[2][3];

    // Perspective projection onto the normalized image plane
    double x = cX / cZ;
    double y = cY / cZ;

    ip = *it_Lip;
    double u = ip.get_u();
    double v = ip.get_v();

    // Meter to pixel conversion, then accumulate the squared error
    double xp = u0 + x * px;
    double yp = v0 + y * py;

    residual_ += (vpMath::sqr(xp - u) + vpMath::sqr(yp - v));

    ++it_LoX;
    ++it_LoY;
    ++it_LoZ;
    ++it_Lip;
  }
  this->residual = residual_;
  return sqrt(residual_ / npt);
}
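// Usage sketch (hypothetical wrapper, assuming the calibration object already
// holds 3D/2D correspondences added with vpCalibration::addPoint()): the
// returned standard deviation gives a quick reprojection-quality measure for
// a candidate pose and set of intrinsics.
#include <visp3/vision/vpCalibration.h>

double exampleReprojectionStdDev(vpCalibration &calib, const vpHomogeneousMatrix &cMo,
                                 const vpCameraParameters &cam)
{
  return calib.computeStdDeviation(cMo, cam); // standard deviation in pixels
}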
void vpSimulator::setInternalCameraParameters(vpCameraParameters &_cam)
{
  internalCameraParameters = _cam;

  float px = (float)_cam.get_px();
  float py = (float)_cam.get_py();
  float v = internal_height / (2.f * py);

  internalCamera->ref();
  internalCamera->heightAngle = 2 * atan(v);
  internalCamera->aspectRatio = (internal_width / internal_height) * (px / py);
  internalCamera->nearDistance = 0.001f;
  internalCamera->farDistance = 1000;
  internalCamera->unrefNoDelete();
}
void vpSimulator::setExternalCameraParameters(vpCameraParameters &_cam)
{
  externalCameraParameters = _cam;

  float px = (float)_cam.get_px();
  float py = (float)_cam.get_py();
  float v = external_height / (2.f * py);

  externalCamera->ref();
  externalCamera->heightAngle = 2 * atan(v);
  externalCamera->aspectRatio = (external_width / external_height) * (px / py);
  externalCamera->nearDistance = 0.001f;
  externalCamera->farDistance = 1000;
  externalCamera->unrefNoDelete();
}
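// Usage sketch (illustrative values; assumes the simulator viewers have
// already been initialized so that the internal Coin cameras exist): the
// intrinsics should be consistent with the simulator window size, since the
// vertical field of view is derived from py and the window height.
void exampleSimulatorIntrinsics(vpSimulator &simu)
{
  vpCameraParameters cam(600., 600., 320., 240.); // px, py, u0, v0
  simu.setInternalCameraParameters(cam);
  simu.setExternalCameraParameters(cam);
}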
/*!
  Computes the coordinates of the point corresponding to the intersection
  between a circle and a line.

  \warning This function assumes that changeFrame() and projection() have
  already been called.

  \sa changeFrame(), projection()

  \param circle : Circle to consider for the intersection.
  \param cam : Camera parameters that have to be used for the intersection computation.
  \param rho : The rho parameter of the line.
  \param theta : The theta parameter of the line.
  \param i : Resulting i-coordinate of the intersection point.
  \param j : Resulting j-coordinate of the intersection point.
*/
void vpCircle::computeIntersectionPoint(const vpCircle &circle, const vpCameraParameters &cam, const double &rho,
                                        const double &theta, double &i, double &j)
{
  // This was taken from the code of art-v1 (from the artCylinder class).
  double px = cam.get_px();
  double py = cam.get_py();
  double u0 = cam.get_u0();
  double v0 = cam.get_v0();

  double mu11 = circle.p[3];
  double mu02 = circle.p[4];
  double mu20 = circle.p[2];

  // Center of gravity of the projected ellipse, in pixels
  double Xg = u0 + circle.p[0] * px;
  double Yg = v0 + circle.p[1] * py;

  // Find the intersection between the line and the ellipse in the image.
  // Optimized calculation for X
  double stheta = sin(theta);
  double ctheta = cos(theta);
  double sctheta = stheta * ctheta;
  double m11yg = mu11 * Yg;
  double ctheta2 = vpMath::sqr(ctheta);
  double m02xg = mu02 * Xg;
  double m11stheta = mu11 * stheta;
  j = ((mu11 * Xg * sctheta - mu20 * Yg * sctheta + mu20 * rho * ctheta - m11yg + m11yg * ctheta2 + m02xg -
        m02xg * ctheta2 + m11stheta * rho) /
       (mu20 * ctheta2 + 2.0 * m11stheta * ctheta + mu02 - mu02 * ctheta2));

  // Optimized calculation for Y
  double rhom02 = rho * mu02;
  double sctheta2 = stheta * ctheta2;
  double ctheta3 = ctheta2 * ctheta;
  i = (-(-rho * mu11 * stheta * ctheta - rhom02 + rhom02 * ctheta2 + mu11 * Xg * sctheta2 - mu20 * Yg * sctheta2 -
         ctheta * mu11 * Yg + ctheta3 * mu11 * Yg + ctheta * mu02 * Xg - ctheta3 * mu02 * Xg) /
       (mu20 * ctheta2 + 2.0 * mu11 * stheta * ctheta + mu02 - mu02 * ctheta2) / stheta);
}
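// Usage sketch (hypothetical circle and line parameters): changeFrame() and
// projection() must be called first so that the ellipse parameters circle.p
// read by computeIntersectionPoint() are up to date.
#include <visp3/core/vpCircle.h>

void exampleCircleLineIntersection(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam)
{
  vpCircle circle;
  // Circle of radius 0.1 m lying in the plane Z = 0 of the object frame,
  // centered at the object-frame origin
  circle.setWorldCoordinates(0., 0., 1., 0., 0., 0., 0.1);
  circle.changeFrame(cMo);
  circle.projection();

  double rho = 100., theta = 0.3; // line parameters in the image
  double i = 0., j = 0.;
  vpCircle::computeIntersectionPoint(circle, cam, rho, theta, i, j);
  // (i, j) now holds the intersection point coordinates
}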
/*!
  Compute and return the standard deviation, expressed in pixels, of the
  reprojection error for a given pose matrix and camera intrinsic parameters,
  using the model with distortion.

  \param cMo_est : The homogeneous matrix that defines the pose to be tested.
  \param camera : Camera intrinsic parameters to be tested.
  \return The standard deviation per point of the reprojection error, in pixels.
*/
double vpCalibration::computeStdDeviation_dist(const vpHomogeneousMatrix &cMo_est, const vpCameraParameters &camera)
{
  double residual_ = 0;

  std::list<double>::const_iterator it_LoX = LoX.begin();
  std::list<double>::const_iterator it_LoY = LoY.begin();
  std::list<double>::const_iterator it_LoZ = LoZ.begin();
  std::list<vpImagePoint>::const_iterator it_Lip = Lip.begin();

  double u0 = camera.get_u0();
  double v0 = camera.get_v0();
  double px = camera.get_px();
  double py = camera.get_py();
  double kud = camera.get_kud();
  double kdu = camera.get_kdu();

  double inv_px = 1 / px;
  double inv_py = 1 / py;
  vpImagePoint ip;

  for (unsigned int i = 0; i < npt; i++) {
    double oX = *it_LoX;
    double oY = *it_LoY;
    double oZ = *it_LoZ;

    // Transform the 3D point from the object frame to the camera frame
    double cX = oX * cMo_est[0][0] + oY * cMo_est[0][1] + oZ * cMo_est[0][2] + cMo_est[0][3];
    double cY = oX * cMo_est[1][0] + oY * cMo_est[1][1] + oZ * cMo_est[1][2] + cMo_est[1][3];
    double cZ = oX * cMo_est[2][0] + oY * cMo_est[2][1] + oZ * cMo_est[2][2] + cMo_est[2][3];

    double x = cX / cZ;
    double y = cY / cZ;

    ip = *it_Lip;
    double u = ip.get_u();
    double v = ip.get_v();

    // Residual with the undistorted-to-distorted model (kud)
    double r2ud = 1 + kud * (vpMath::sqr(x) + vpMath::sqr(y));
    double xp = u0 + x * px * r2ud;
    double yp = v0 + y * py * r2ud;

    residual_ += (vpMath::sqr(xp - u) + vpMath::sqr(yp - v));

    // Residual with the distorted-to-undistorted model (kdu)
    double r2du = (vpMath::sqr((u - u0) * inv_px) + vpMath::sqr((v - v0) * inv_py));
    xp = u0 + x * px - kdu * (u - u0) * r2du;
    yp = v0 + y * py - kdu * (v - v0) * r2du;

    residual_ += (vpMath::sqr(xp - u) + vpMath::sqr(yp - v));

    ++it_LoX;
    ++it_LoY;
    ++it_LoZ;
    ++it_Lip;
  }
  residual_ /= 2; // average over the two distortion models
  this->residual_dist = residual_;
  return sqrt(residual_ / npt);
}
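// Usage sketch (illustrative wrapper, not ViSP API): comparing this value with
// the one returned by computeStdDeviation() indicates how much the distortion
// model matters for a given data set.
double exampleReprojectionStdDevDist(vpCalibration &calib, const vpHomogeneousMatrix &cMo,
                                     const vpCameraParameters &cam_dist)
{
  return calib.computeStdDeviation_dist(cMo, cam_dist); // standard deviation in pixels
}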
/*!
 * Manikandan. B
 * Photometric moments v2
 * Intended to be used by vpMomentObject's of type DENSE_FULL_OBJECT.
 * @param image : Grayscale image.
 * @param cam : Camera parameters.
 * @param bg_type : White/black background surrounding the object in the image.
 * @param normalize_with_pix_size : If this flag is set, the moments are
 * normalized after calculation with respect to the pixel size available from
 * the camera parameters.
 */
void vpMomentObject::fromImage(const vpImage<unsigned char> &image, const vpCameraParameters &cam,
                               vpCameraImgBckGrndType bg_type, bool normalize_with_pix_size)
{
  std::vector<double> cache(order * order, 0.);
  values.assign(order * order, 0);

  // (x,y): pixel coordinates expressed in meters
  double x = 0;
  double y = 0;

  // For indexing into cache[] and values[]
  unsigned int idx = 0;
  unsigned int kidx = 0;

  double intensity = 0;

  // double Imax = static_cast<double>(image.getMaxValue());
  double iscale = 1.0;
  if (flg_normalize_intensity) {
    // This makes the image a probability density function
    double Imax = 255.; // To check the effect of gray level change. ISR Coimbra
    iscale = 1.0 / Imax;
  }

  if (bg_type == vpMomentObject::WHITE) {
    /////////// WHITE BACKGROUND ///////////
    for (unsigned int j = 0; j < image.getRows(); j++) {
      for (unsigned int i = 0; i < image.getCols(); i++) {
        x = 0;
        y = 0;
        intensity = (double)(image[j][i]) * iscale;
        double intensity_white = 1. - intensity;

        vpPixelMeterConversion::convertPoint(cam, i, j, x, y);
        // Fill 'cache' with x^p * y^q * (1 - I(x,y))
        cacheValues(cache, x, y, intensity_white);

        // Accumulate into 'values'
        for (unsigned int k = 0; k < order; k++) {
          kidx = k * order;
          for (unsigned int l = 0; l < order - k; l++) {
            idx = kidx + l;
            values[idx] += cache[idx];
          }
        }
      }
    }
  }
  else {
    /////////// BLACK BACKGROUND ///////////
    for (unsigned int j = 0; j < image.getRows(); j++) {
      for (unsigned int i = 0; i < image.getCols(); i++) {
        x = 0;
        y = 0;
        intensity = (double)(image[j][i]) * iscale;
        vpPixelMeterConversion::convertPoint(cam, i, j, x, y);

        // Fill 'cache' with x^p * y^q * I(x,y) for fast moment calculation
        cacheValues(cache, x, y, intensity);

        // Accumulate into the moments array 'values'
        for (unsigned int k = 0; k < order; k++) {
          kidx = k * order;
          for (unsigned int l = 0; l < order - k; l++) {
            idx = kidx + l;
            values[idx] += cache[idx];
          }
        }
      }
    }
  }

  if (normalize_with_pix_size) {
    // Normalization equivalent to sampling interval/pixel size (delX x delY)
    double norm_factor = 1. / (cam.get_px() * cam.get_py());
    for (std::vector<double>::iterator it = values.begin(); it != values.end(); ++it) {
      *it = (*it) * norm_factor;
    }
  }
}
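// Usage sketch (illustrative parameter values): computing dense photometric
// moments up to order 3 for a dark object on a white background.
#include <visp3/core/vpImage.h>
#include <visp3/core/vpMomentObject.h>

void examplePhotometricMoments(const vpImage<unsigned char> &I, const vpCameraParameters &cam)
{
  vpMomentObject obj(3);                          // moments up to order 3
  obj.setType(vpMomentObject::DENSE_FULL_OBJECT); // photometric (dense) object
  obj.fromImage(I, cam, vpMomentObject::WHITE, true);
  // obj.get() now returns the vector of computed moment values
}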
void vpMomentObject::fromImage(const vpImage<unsigned char> &image, unsigned char threshold,
                               const vpCameraParameters &cam)
{
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel shared(threshold)
  {
    std::vector<double> curvals(order * order);
    curvals.assign(order * order, 0.);

#pragma omp for nowait // automatically distribute the loop counter between threads
    for (int i = 0; i < (int)image.getCols(); i++) {
      for (int j = 0; j < (int)image.getRows(); j++) {
        unsigned int i_ = static_cast<unsigned int>(i);
        unsigned int j_ = static_cast<unsigned int>(j);
        if (image[j_][i_] > threshold) {
          double x = 0;
          double y = 0;
          vpPixelMeterConversion::convertPoint(cam, i_, j_, x, y);

          double yval = 1.;
          for (unsigned int k = 0; k < order; k++) {
            double xval = 1.;
            for (unsigned int l = 0; l < order - k; l++) {
              curvals[(k * order + l)] += (xval * yval);
              xval *= x;
            }
            yval *= y;
          }
        }
      }
    }

#pragma omp master // only reset the shared array in the master thread
    {
      values.assign(order * order, 0.);
    }
#pragma omp barrier

    // Reduce the per-thread accumulators into 'values'
    for (unsigned int k = 0; k < order; k++) {
      for (unsigned int l = 0; l < order - k; l++) {
#pragma omp atomic
        values[k * order + l] += curvals[k * order + l];
      }
    }
  }
#else
  std::vector<double> cache(order * order, 0.);
  values.assign(order * order, 0);
  for (unsigned int i = 0; i < image.getCols(); i++) {
    for (unsigned int j = 0; j < image.getRows(); j++) {
      if (image[j][i] > threshold) {
        double x = 0;
        double y = 0;
        vpPixelMeterConversion::convertPoint(cam, i, j, x, y);
        cacheValues(cache, x, y);
        for (unsigned int k = 0; k < order; k++) {
          for (unsigned int l = 0; l < order - k; l++) {
            values[k * order + l] += cache[k * order + l];
          }
        }
      }
    }
  }
#endif

  // Normalization equivalent to sampling interval/pixel size (delX x delY)
  double norm_factor = 1. / (cam.get_px() * cam.get_py());
  for (std::vector<double>::iterator it = values.begin(); it != values.end(); ++it) {
    *it = (*it) * norm_factor;
  }
}
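// Usage sketch: the thresholded overload treats the image as a binarized dense
// object, accumulating x^p * y^q over every pixel brighter than the threshold;
// the threshold value below is a placeholder.
void exampleBinaryMoments(const vpImage<unsigned char> &I, const vpCameraParameters &cam)
{
  vpMomentObject obj(3);
  obj.setType(vpMomentObject::DENSE_FULL_OBJECT);
  obj.fromImage(I, 128, cam); // pixels with intensity > 128 belong to the object
}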