//通过引用,修改rotation和scale,使shape1和shape2差距最小 void SimilarityTransform(const Mat_<double>& shape1, const Mat_<double>& shape2, Mat_<double>& rotation, double& scale) { rotation = Mat::zeros(2, 2, CV_64FC1); scale = 0; // center the data double center_x_1 = 0; double center_y_1 = 0; double center_x_2 = 0; double center_y_2 = 0; for (int i = 0; i < shape1.rows; i++) { center_x_1 += shape1(i, 0); center_y_1 += shape1(i, 1); center_x_2 += shape2(i, 0); center_y_2 += shape2(i, 1); } center_x_1 /= shape1.rows; center_y_1 /= shape1.rows; center_x_2 /= shape2.rows; center_y_2 /= shape2.rows; Mat_<double> temp1 = shape1.clone(); Mat_<double> temp2 = shape2.clone(); for (int i = 0; i < shape1.rows; i++) { temp1(i, 0) -= center_x_1; temp1(i, 1) -= center_y_1; temp2(i, 0) -= center_x_2; temp2(i, 1) -= center_y_2; } //至此,temp1(与shape1形状相同)和temp2(与shape2形状相同)已经移到了以(0,0)为原点的坐标系 Mat_<double> covariance1, covariance2; //covariance = 协方差 Mat_<double> mean1, mean2; // calculate covariance matrix calcCovarMatrix(temp1, covariance1, mean1, CV_COVAR_COLS); //输出covariance1为temp1的协方差,mean1为temp1的均值 calcCovarMatrix(temp2, covariance2, mean2, CV_COVAR_COLS); double s1 = sqrt(norm(covariance1)); //norm用来计算covariance1的L2范数 double s2 = sqrt(norm(covariance2)); scale = s1 / s2; temp1 = 1.0 / s1 * temp1; temp2 = 1.0 / s2 * temp2; // 至此,通过缩放,temp1和temp2的差距最小(周叔说,这是最小二乘法) double num = 0; double den = 0; for (int i = 0; i < shape1.rows; i++) { num = num + temp1(i, 1) * temp2(i, 0) - temp1(i, 0) * temp2(i, 1); den = den + temp1(i, 0) * temp2(i, 0) + temp1(i, 1) * temp2(i, 1); } double norm = sqrt(num * num + den * den); double sin_theta = num / norm; double cos_theta = den / norm; rotation(0, 0) = cos_theta; rotation(0, 1) = -sin_theta; rotation(1, 0) = sin_theta; rotation(1, 1) = cos_theta; }
void SimilarityTransform(const Mat_<double>& shape1, const Mat_<double>& shape2, Mat_<double>& rotation,double& scale){ rotation = Mat::zeros(2,2,CV_64FC1); scale = 0; // center the data double center_x_1 = 0; double center_y_1 = 0; double center_x_2 = 0; double center_y_2 = 0; for(int i = 0;i < shape1.rows;i++){ center_x_1 += shape1(i,0); center_y_1 += shape1(i,1); center_x_2 += shape2(i,0); center_y_2 += shape2(i,1); } center_x_1 /= shape1.rows; center_y_1 /= shape1.rows; center_x_2 /= shape2.rows; center_y_2 /= shape2.rows; Mat_<double> temp1 = shape1.clone(); Mat_<double> temp2 = shape2.clone(); for(int i = 0;i < shape1.rows;i++){ temp1(i,0) -= center_x_1; temp1(i,1) -= center_y_1; temp2(i,0) -= center_x_2; temp2(i,1) -= center_y_2; } Mat_<double> covariance1, covariance2; Mat_<double> mean1,mean2; // calculate covariance matrix calcCovarMatrix(temp1,covariance1,mean1,CV_COVAR_COLS); calcCovarMatrix(temp2,covariance2,mean2,CV_COVAR_COLS); double s1 = sqrt(norm(covariance1)); double s2 = sqrt(norm(covariance2)); scale = s1 / s2; temp1 = 1.0 / s1 * temp1; temp2 = 1.0 / s2 * temp2; double num = 0; double den = 0; for(int i = 0;i < shape1.rows;i++){ num = num + temp1(i,1) * temp2(i,0) - temp1(i,0) * temp2(i,1); den = den + temp1(i,0) * temp2(i,0) + temp1(i,1) * temp2(i,1); } double norm = sqrt(num*num + den*den); double sin_theta = num / norm; double cos_theta = den / norm; rotation(0,0) = cos_theta; rotation(0,1) = -sin_theta; rotation(1,0) = sin_theta; rotation(1,1) = cos_theta; }
void similarityTransform(const Mat_<double> &fromShape, const Mat_<double> &toShape, Mat_<double> &rotation, double &scale) { rotation = Mat::zeros(2, 2, CV_64FC1); scale = 0; assert(fromShape.rows == toShape.rows); int rows = fromShape.rows; double fcx = 0, fcy = 0, tcx = 0, tcy = 0; for (int i = 0; i < rows; i++) { fcx += fromShape(i, 0); fcy += fromShape(i, 1); tcx += toShape(i, 0); tcy += toShape(i, 1); } fcx /= rows; fcy /= rows; tcx /= rows; tcy /= rows; Mat_<double> ftmp = fromShape.clone(); Mat_<double> ttmp = toShape.clone(); for (int i = 0; i < rows; i++) { ftmp(i, 0) -= fcx; ftmp(i, 1) -= fcy; ttmp(i, 0) -= tcx; ttmp(i, 1) -= tcy; } Mat_<double> fcov, tcov; Mat_<double> fmean, tmean; calcCovarMatrix(ftmp, fcov, fmean, CV_COVAR_COLS); calcCovarMatrix(ttmp, tcov, tmean, CV_COVAR_COLS); double fsize = sqrtf(norm(fcov)); double tsize = sqrtf(norm(tcov)); scale = tsize / fsize; ftmp /= fsize; ttmp /= tsize; double num = 0, den = 0; // an efficient way to calculate rotation, using cross and dot production for (int i = 0; i < rows; i++) { num += ftmp(i, 1)*ttmp(i, 0) - ftmp(i, 0)*ttmp(i, 1); den += ftmp(i, 0)*ttmp(i, 0) + ftmp(i, 1)*ttmp(i, 1); } double norm = sqrtf(num*num + den*den); // theta is clock-wise rotation angle(fromShape -> toShape) double sinTheta = num / norm; double cosTheta = den / norm; rotation(0, 0) = cosTheta; rotation(0, 1) = sinTheta; rotation(1, 0) = -sinTheta; rotation(1, 1) = cosTheta; }
void SimilarityTransform(const Mat_<float>& shape1, const Mat_<float>& shape2, Mat_<float>& rotation,float& scale){ rotation = Mat::zeros(2,2,CV_32FC1); scale = 0; // center the data float center_x_1 = 0; float center_y_1 = 0; float center_x_2 = 0; float center_y_2 = 0; for(int i = 0;i < shape1.rows;i++){ center_x_1 += shape1(i,0); center_y_1 += shape1(i,1); center_x_2 += shape2(i,0); center_y_2 += shape2(i,1); } center_x_1 /= shape1.rows; center_y_1 /= shape1.rows; center_x_2 /= shape2.rows; center_y_2 /= shape2.rows; Mat_<float> temp1 = shape1.clone(); Mat_<float> temp2 = shape2.clone(); for(int i = 0;i < shape1.rows;i++){ temp1(i,0) -= center_x_1; temp1(i,1) -= center_y_1; temp2(i,0) -= center_x_2; temp2(i,1) -= center_y_2; } Mat_<float> covariance1, covariance2; Mat_<float> mean1,mean2; // calculate covariance matrix calcCovarMatrix(temp1,covariance1,mean1,CV_COVAR_SCALE|CV_COVAR_ROWS|CV_COVAR_NORMAL,CV_32F); calcCovarMatrix(temp2,covariance2,mean2,CV_COVAR_SCALE|CV_COVAR_ROWS|CV_COVAR_NORMAL,CV_32F); //cout<<covariance1<<endl; //cout<<covariance2<<endl; float s1 = sqrt(norm(covariance1)); float s2 = sqrt(norm(covariance2)); scale = s1 / s2; temp1 = 1.0 / s1 * temp1; temp2 = 1.0 / s2 * temp2; float num = 0; float den = 0; for(int i = 0;i < shape1.rows;i++){ num = num + temp1(i,1) * temp2(i,0) - temp1(i,0) * temp2(i,1); den = den + temp1(i,0) * temp2(i,0) + temp1(i,1) * temp2(i,1); } float norm = sqrt(num*num + den*den); float sin_theta = num / norm; float cos_theta = den / norm; rotation(0,0) = cos_theta; rotation(0,1) = -sin_theta; rotation(1,0) = sin_theta; rotation(1,1) = cos_theta; }
// Eulerian Video Magnification: spatial Laplacian pyramid + temporal *ideal*
// band-pass amplification.
//
// NOTE(review): the temporal ideal filter body is empty (TODO below), so
// after the first frame this currently reconstructs from whatever is stored
// in 'filtered' (seeded with the first frame's pyramid) -- confirm intent.
//
// Parameters (EVM convention): alpha = magnification factor, lambda_c =
// spatial wavelength cutoff, fl/fh = temporal band edges, samplingRate =
// video frame rate, chromAttenuation = damping for the two chroma channels.
void Evm::amplify_spatial_lpyr_temporal_ideal(const cv::Mat &src,
                                              std::vector<cv::Mat_<cv::Vec3f> > &lapPyr,
                                              cv::Mat &dst,
                                              float alpha, float lambda_c, float fl,
                                              float fh, float samplingRate, float chromAttenuation)
{
    // Work in NTSC (YIQ) space so chroma can be attenuated separately later.
    Mat_<Vec3f> s = src.clone();
    rgb2ntsc(src, s);

    buildLaplacianPyramid(s, lapPyr);

    if (is_not_first_frame) {
        //temporal ideal
        for (int i=0; i<lapPyr.size(); i++) {
            //TODO:implement temporal ideal filter
            //
        }
        //amplify
        // amplifyByAlpha(src, alpha, lambda_c);
    }
    else {
        // First frame: seed the temporal filter state with this pyramid.
        lowpass1 = lapPyr;
        lowpass2 = lapPyr;
        filtered = lapPyr;
        is_not_first_frame = true;
    }

    // Add the (chroma-attenuated) filtered signal back onto the frame,
    // then convert the result from NTSC back to RGB.
    dst = s + reconstructImgFromLapPyramid(filtered).mul(Vec3f(1,chromAttenuation,chromAttenuation));
    s = dst.clone();
    ntsc2rgb(s, s);
    dst = s.clone();
}
// Eulerian Video Magnification: spatial Laplacian pyramid + temporal IIR
// band-pass amplification. The band-pass is the difference of two
// first-order IIR low-pass filters with coefficients r1 and r2.
// alpha = magnification factor, lambda_c = spatial wavelength cutoff,
// chromAttenuation = damping applied to the two chroma channels.
void Evm::amplify_spatial_lpyr_temporal_iir(const Mat& src, vector<Mat_<Vec3f> >& lapPyr, Mat& dst, float alpha, float lambda_c, float r1, float r2, float chromAttenuation)
{
    // Convert to NTSC (YIQ) so chroma can be attenuated independently.
    Mat_<Vec3f> s = src.clone();
    rgb2ntsc(src, s);

    buildLaplacianPyramid(s, lapPyr);

    if (is_not_first_frame) {
        //temporal iir: update both low-pass states per pyramid level;
        // their difference is the band-passed signal for that level.
        for (int i=0; i<lapPyr.size(); i++) {
            Mat temp1 = (1-r1)*lowpass1[i] + r1*lapPyr[i];
            Mat temp2 = (1-r2)*lowpass2[i] + r2*lapPyr[i];
            lowpass1[i] = temp1;
            lowpass2[i] = temp2;
            filtered[i] = temp1 - temp2;
        }

        //amplify
        // NOTE(review): passes the original RGB 'src' rather than the NTSC
        // image 's' -- presumably amplifyByAlpha only uses its geometry;
        // confirm against its definition.
        amplifyByAlpha(src, alpha, lambda_c);
    }
    else {
        // first frame: initialise the temporal filter state
        lowpass1 = lapPyr;
        lowpass2 = lapPyr;
        filtered = lapPyr;
        is_not_first_frame = true;
    }

    // Recombine: add the chroma-attenuated band-passed signal back onto the
    // frame, then convert from NTSC back to RGB.
    dst = s + reconstructImgFromLapPyramid(filtered).mul(Vec3f(1,chromAttenuation,chromAttenuation));
    s = dst.clone();
    ntsc2rgb(s, s);
    dst = s.clone();
}
// Compute and display the log-magnitude DFT spectrum of 'src'.
// The image is zero-padded to an optimal DFT size of at least twice its
// original dimensions, transformed, DC-centered via dftshift(), log-scaled
// and shown in the "img_dft" window. Blocks on waitKey(0).
//
// Note: 'src' is taken by value and copyMakeBorder reallocates it, so the
// caller's matrix is untouched.
//
// Fix: removed the unused locals wx/wy/cx/cy (computed but never read).
void fft2(Mat_<float> src)
{
    // Pad to an optimal DFT size (products of small primes are fastest).
    int x = getOptimalDFTSize(2 * src.rows);
    int y = getOptimalDFTSize(2 * src.cols);
    copyMakeBorder(src, src, 0, (x - src.rows), 0, (y - src.cols), BORDER_CONSTANT, Scalar::all(0));

    //--------------------------------//
    //         DFT - performing       //
    // Pack the real image with a zero imaginary plane into one
    // 2-channel matrix, then take the forward complex DFT.
    cv::Mat_<float> imgs[] = {src.clone(), Mat::zeros(src.size(), CV_32F)};
    cv::Mat_<cv::Vec2f> img_dft;
    merge(imgs, 2, img_dft);
    dft(img_dft, img_dft);
    split(img_dft, imgs);

    // Convert (re, im) to magnitude/phase; only magnitude is displayed.
    cv::Mat_<float> magnitude, phase;
    cartToPolar(imgs[0], imgs[1], magnitude, phase);

    dftshift(magnitude);            // move the DC term to the image center
    magnitude = magnitude + 1.0f;   // avoid log(0)
    log(magnitude, magnitude);      // compress dynamic range for display
    normalize(magnitude, magnitude, 0, 1, CV_MINMAX);

    namedWindow("img_dft", WINDOW_NORMAL);
    imshow("img_dft", magnitude);
    waitKey(0);
    cout << "out" << endl;
}
// Pick only the more stable/rigid points under changes of expression void extract_rigid_points(Mat_<double>& source_points, Mat_<double>& destination_points) { if(source_points.rows == 68) { Mat_<double> tmp_source = source_points.clone(); source_points = Mat_<double>(); // Push back the rigid points (some face outline, eyes, and nose) source_points.push_back(tmp_source.row(0)); source_points.push_back(tmp_source.row(2)); source_points.push_back(tmp_source.row(14)); source_points.push_back(tmp_source.row(16)); source_points.push_back(tmp_source.row(36)); source_points.push_back(tmp_source.row(39)); source_points.push_back(tmp_source.row(43)); source_points.push_back(tmp_source.row(38)); source_points.push_back(tmp_source.row(42)); source_points.push_back(tmp_source.row(45)); source_points.push_back(tmp_source.row(31)); source_points.push_back(tmp_source.row(33)); source_points.push_back(tmp_source.row(35)); Mat_<double> tmp_dest = destination_points.clone(); destination_points = Mat_<double>(); // Push back the rigid points destination_points.push_back(tmp_dest.row(0)); destination_points.push_back(tmp_dest.row(2)); destination_points.push_back(tmp_dest.row(14)); destination_points.push_back(tmp_dest.row(16)); destination_points.push_back(tmp_dest.row(36)); destination_points.push_back(tmp_dest.row(39)); destination_points.push_back(tmp_dest.row(43)); destination_points.push_back(tmp_dest.row(38)); destination_points.push_back(tmp_dest.row(42)); destination_points.push_back(tmp_dest.row(45)); destination_points.push_back(tmp_dest.row(31)); destination_points.push_back(tmp_dest.row(33)); destination_points.push_back(tmp_dest.row(35)); } }
void calculate_tangent(const Mat& input_image, const Mat_<double>& tangent, const Mat_<int>& boundary, const Mat_<int>& region_id,int current_region_id, Mat_<double>& tangent){ int width = input_image.cols; int height = input_image.rows; Mat_<int> visited(height,width); Mat_<int> current_boundary(width*height,2); current_boundary = boundary.clone(); int current_boundary_size = current_boundary.rows(); while(true){ // find new boundary after thinning for(int i = 0;i < current_boundary_size;i++){ int x = current_boundary(i,0); int y = current_boundary(i,1); Mat_<int> new_boundary(width*height,2); int new_boundary_size = 0; for(int j = -1;j < 2;j++){ for(int k = -1;k < 2;k++){ int new_x = x + j; int new_y = y + k; if(new_x < 0 || new_y < 0 || new_x >= width || new_y >= height){ continue; } if(region_id(new_y,new_x) != current_region_id){ continue; } if(visited(new_y,new_x) == 1){ continue; } new_boundary(new_boundary_size,0) = new_x; new_boundary(new_boundary_size,1) = new_y; new_boundary_size = new_boundary_size + 1; visited(new_y,new_x) = 1; tangent(new_y,new_x) = tangent(y,x); } } } // if no boundary points found, exit if(new_boundary_size == 0){ break; } } }
// Return a new matrix where every element is (value - frame(row, col)).
//
// Rewritten to use OpenCV's element-wise scalar-minus-matrix expression:
// the original looped column-major (cache-hostile) and paid the per-pixel
// overhead of at<float>(Point) for every element.
Mat_<float> MotionDetection::SubstractAllPixels(float value, Mat_<float> frame)
{
    // Element-wise: result(i, j) = value - frame(i, j); 'frame' is untouched.
    Mat_<float> result = value - frame;
    return result;
}
// Return a copy of 'frame' in which every pixel inside 'rect' is scaled by
// 'value'; pixels outside the rectangle are left unchanged.
Mat_<float> MotionDetection::Mat_MultiplyFloatInRect(Mat_<float> frame, float value, Rect rect)
{
    Mat_<float> result = frame.clone();

    // Walk the rectangle row by row. Mat_::operator()(row, col) addresses
    // the same element as at<float>(Point(col, row)) in the original.
    for (int row = rect.y; row < rect.y + rect.height; ++row)
    {
        for (int col = rect.x; col < rect.x + rect.width; ++col)
        {
            result(row, col) = frame(row, col) * value;
        }
    }

    return result;
}
//============================================================================= // cropping from the source image to the destination image using the shape in s, used to determine if shape fitting converged successfully void PAW::Warp(const Mat& image_to_warp, Mat& destination_image, const Mat_<double>& landmarks_to_warp) { // set the current shape source_landmarks = landmarks_to_warp.clone(); // prepare the mapping coefficients using the current shape this->CalcCoeff(); // Do the actual mapping computation (where to warp from) this->WarpRegion(map_x, map_y); // Do the actual warp (with bi-linear interpolation) remap(image_to_warp, destination_image, map_x, map_y, CV_INTER_LINEAR); }
// Apply a Butterworth high-pass filter to 'src' in the frequency domain,
// display the input and the filtered result, and write the result to a
// hard-coded debug path.
//
// NOTE(review): 'output' is passed by value; normalize() may reallocate it
// and the final 'output = croppedOutput' rebinds only the local header, so
// the caller most likely never sees the result -- confirm whether this
// parameter should be a reference.
void applyFilter(Mat_<float> src, Mat_<float> output)
{
    int wxOrig = src.cols;
    int wyOrig = src.rows;

    // Zero-pad to an optimal DFT size at least twice the original extent.
    int m = cv::getOptimalDFTSize(2*wyOrig);
    int n = cv::getOptimalDFTSize(2*wxOrig);
    copyMakeBorder(src, src, 0, m - wyOrig, 0, n - wxOrig, cv::BORDER_CONSTANT, cv::Scalar::all(0));

    // Padded size and its center (used to build the filter).
    const int wx = src.cols, wy = src.rows;
    const int cx = wx/2, cy = wy/2;

    std::cout << wxOrig << " " << wyOrig << std::endl;
    std::cout << wx << " " << wy << std::endl;
    std::cout << cx << " " << cy << std::endl;

    // Pack the real image with a zero imaginary plane; forward complex DFT.
    cv::Mat_<float> imgs[] = {src.clone(), cv::Mat_<float>::zeros(wy, wx)};
    cv::Mat_<cv::Vec2f> img_dft;
    cv::merge(imgs, 2, img_dft);
    cv::dft(img_dft, img_dft);
    dftshift(img_dft);   // center DC so the centered filter lines up

    cout << "helo " << endl;

    // Butterworth high-pass: cutoff 3000, order 4, sized to the padded image.
    cv::Mat hpf = BHPF(3000, 4, wy, wx, cx, cy);

    // Per-element complex multiply of filter and spectrum.
    // NOTE(review): DFT_ROWS treats each row as an independent spectrum --
    // verify this flag is intended rather than a plain 2-D multiply.
    cv::mulSpectrums(hpf, img_dft, img_dft, cv::DFT_ROWS);

    dftshift(img_dft);            // undo the shift before inverting
    cv::idft(img_dft, img_dft);   //the result is a 2 channel image
    split(img_dft, imgs);
    normalize(imgs[0], output, 0, 1, CV_MINMAX);

    // Crop the padding away to recover the original geometry.
    cv::Mat_<float> croppedOutput(output, cv::Rect(0, 0, wxOrig, wyOrig));
    output = croppedOutput;

    namedWindow("High-pass filtered input", WINDOW_NORMAL);
    namedWindow("Input", WINDOW_NORMAL);
    cv::imshow("Input", src);
    cv::imshow("High-pass filtered input", croppedOutput);
    imwrite("/home/student/ROVI/VisMand1/build-vis_man_1-Desktop-Debug/output/out.jpg", croppedOutput);
    cout << "lol" << endl;
    cv::waitKey(0);
}
/**
 * @author      JIA Pei
 * @version     2010-05-20
 * @brief       Basic AAM Fitting, for dynamic image sequence
 * @param       iImg        Input - image to be fitted
 * @param       ioShape     Input and Output - the fitted shape
 * @param       oImg        Output - the fitted image
 * @param       epoch       Input - the iteration epoch
 * @return      the fitting time in milliseconds
 */
float VO_FittingAAMBasic::VO_BasicAAMFitting(const Mat& iImg, VO_Shape& ioShape, Mat& oImg, unsigned int epoch)
{
    // Take the caller's shape as the starting estimate and start the timer.
    this->m_VOFittingShape.clone(ioShape);
    double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMBasic);
    this->m_iIteration = 0;

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOAAMBasic->VO_CalcAllParams4AnyShapeWithConstrain(
        this->m_VOFittingShape,
        this->m_MatModelAlignedShapeParam,
        this->m_fScale,
        this->m_vRotateAngles,
        this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    // Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
                                                this->m_ImageProcessing,
                                                this->m_vTriangle2D,
                                                this->m_vPointWarpInfo,
                                                this->m_VOFittingTexture );
    // estimate the texture model parameters
    this->m_VOAAMBasic->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);

    // Calculate m_MatCurrentC from the shape and texture parameters.
    this->m_VOAAMBasic->VO_SParamTParamProjectToCParam( this->m_MatModelAlignedShapeParam,
                                                        this->m_MatModelNormalizedTextureParam,
                                                        this->m_MatCurrentC );
    // Set m_MatCurrentT, m_MatDeltaT, m_MatEstimatedT, m_MatDeltaC, m_MatEstimatedC, etc.
    this->m_MatCurrentT = Mat_<float>::zeros(this->m_MatCurrentT.size());
    this->m_MatDeltaT = Mat_<float>::zeros(this->m_MatDeltaT.size());
    this->m_MatEstimatedT = Mat_<float>::zeros(this->m_MatEstimatedT.size());
    this->m_MatDeltaC = Mat_<float>::zeros(this->m_MatDeltaC.size());
    this->m_MatEstimatedC = Mat_<float>::zeros(this->m_MatEstimatedC.size());

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // explained by JIA Pei. 2010-05-20
    // For the first round, this->m_VOFittingShape should not change after calling "VO_CParamTParam2FittingShape"
    // But this is not the case. why?
    // Before calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
    // a) assigning m_VOTemplateAlignedShape
    // b) align to the real-size face using detected eyes and mouth
    // c) constrain the shape within the image
    // d) constrain the shape parameters and calculate those rigid transform parameters
    // cout << this->m_VOFittingShape << endl;
    //////////////////////////////////////////////////////////////////////////////////////////////////////

    // Estimate m_VOFittingShape and m_VOFittingTexture
    this->VO_CParamTParam2FittingShape( this->m_MatCurrentC,
                                        this->m_MatCurrentT,
                                        this->m_VOModelNormalizedTexture,
                                        this->m_VOFittingShape,
                                        this->m_fScale,
                                        this->m_vRotateAngles,
                                        this->m_MatCenterOfGravity );
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);  // Remember to call ConstrainShapeInImage() whenever you update m_VOFittingShape

    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // When calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
    // a) c parameters to reconstruct shape parameters
    // b) shape parameters to reconstruct shape
    // c) align to the real-size face by global shape normalization
    // cout << this->m_VOFittingShape << endl;
    //////////////////////////////////////////////////////////////////////////////////////////////////////

    // Initial error image: both the running error and the "best so far".
    this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                                             this->m_VOFittingShape,
                                                             this->m_VOModelNormalizedTexture,
                                                             this->m_VOTextureError);

    // Main iteration loop: alternately damp the appearance (c) and pose (t)
    // parameters, keeping any update that lowers the texture error.
    do
    {
        // Snapshot of the current rigid-transform state used by both damping
        // passes below.
        float estScale = this->m_fScale;
        vector<float> estRotateAngles = this->m_vRotateAngles;
        Mat_<float> estCOG = this->m_MatCenterOfGravity.clone();
        bool cBetter = false;
        bool poseBetter = false;

        /**First shape parameters, c parameters. refer to equation (9.3)
        * Cootes "Statistical Model of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRc, -1, Mat(), 0.0, this->m_MatDeltaC, GEMM_2_T);

        // damp -- C
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c prediction
            cv::scaleAdd(this->m_MatDeltaC, k_values[i], this->m_MatCurrentC, this->m_MatEstimatedC);
            // make sure m_MatEstimatedC are constrained
            this->m_VOAAMBasic->VO_AppearanceParameterConstraint(this->m_MatEstimatedC);
            this->VO_CParamTParam2FittingShape( this->m_MatEstimatedC,
                                                this->m_MatCurrentT,
                                                this->m_VOModelNormalizedTexture,
                                                this->m_VOEstimatedShape,
                                                estScale,
                                                estRotateAngles,
                                                estCOG);
            // Skip candidates that would leave the image.
            if ( !VO_ShapeModel::VO_IsShapeInsideImage(this->m_VOEstimatedShape, this->m_ImageProcessing) )
                continue;
            else
                this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                                    this->m_VOEstimatedShape,
                                                    this->m_VOModelNormalizedTexture,
                                                    this->m_VOEstimatedTextureError);

            // Accept the first damped step that improves the error.
            if (this->m_E < this->m_E_previous)
            {
                this->m_MatEstimatedC.copyTo(this->m_MatCurrentC);
                this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
                this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
                this->m_E_previous = this->m_E;
                cBetter = true;
                this->m_fScale = estScale;
                this->m_vRotateAngles = estRotateAngles;
                this->m_MatCenterOfGravity = estCOG.clone();
                break;
            }
        }

        /** Second pose, t parameters. refer to equation (9.3)
        * Cootes "Statistical Model of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRt, -1, Mat(), 0, this->m_MatDeltaT, GEMM_2_T);

        // damp -- T
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c/pose prediction
            cv::scaleAdd(this->m_MatDeltaT, k_values[i], this->m_MatCurrentT, this->m_MatEstimatedT);
            this->VO_CParamTParam2FittingShape( this->m_MatCurrentC,
                                                this->m_MatEstimatedT,
                                                this->m_VOModelNormalizedTexture,
                                                this->m_VOEstimatedShape,
                                                estScale,
                                                estRotateAngles,
                                                estCOG);
            // Skip candidates that would leave the image.
            if ( !VO_ShapeModel::VO_IsShapeInsideImage(this->m_VOEstimatedShape, this->m_ImageProcessing) )
                continue;
            else
                this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                                    this->m_VOEstimatedShape,
                                                    this->m_VOModelNormalizedTexture,
                                                    this->m_VOEstimatedTextureError);

            if (this->m_E < this->m_E_previous)
            {
                // Since m_fScale, m_vRotateAngles and m_MatCenterOfGravity have been updated,
                // m_MatCurrentT should be assigned to 0 now!
                this->m_MatCurrentT = Mat_<float>::zeros(this->m_MatCurrentT.size());
                // this->m_MatEstimatedT.copyTo(this->m_MatCurrentT);
                this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
                this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
                this->m_E_previous = this->m_E;
                poseBetter = true;
                this->m_fScale = estScale;
                this->m_vRotateAngles = estRotateAngles;
                this->m_MatCenterOfGravity = estCOG.clone();
                break;
            }
        }

        // Stop as soon as neither damping pass improved the fit.
        if( cBetter || poseBetter)
        {
            ioShape.clone(this->m_VOFittingShape);
        }
        else
            break;

        ++this->m_iIteration;

    }while( ( fabs(this->m_E) > FLT_EPSILON ) && (this->m_iIteration < epoch)/* && (cv::norm(this->m_MatDeltaC) > FLT_EPSILON) */ );

    // Report elapsed time in milliseconds and draw the fitted mesh.
    t = ((double)cvGetTickCount() - t )/ (cvGetTickFrequency()*1000.);
    cout << "Basic fitting time cost: " << t << " millisec" << endl;
    this->m_fFittingTime = t;

    VO_Fitting2DSM::VO_DrawMesh(ioShape, this->m_VOAAMBasic, oImg);

    return t;
}
// Phase-one initialisation for the two-phase simplex method.
// Builds the index sets of nonbasic (N) and basic (B) variables plus the
// variable->row lookup (indexToRow). If the most negative right-hand side is
// already nonnegative the initial basis is feasible and we return early.
// Otherwise an auxiliary variable x_0 is introduced (objective: maximise
// -x_0), one pivot makes the tableau feasible, the auxiliary problem is
// solved with inner_simplex(), and the original objective 'c' is restored in
// terms of the final basis. Returns 0 on success or SOLVELP_UNFEASIBLE.
//
// NOTE(review): 'c' is the 1xN objective row, 'b' the constraint tableau with
// the RHS in its last column, and 'v' the objective's constant term --
// inferred from usage here; confirm against the solver's conventions.
static int initialize_simplex(Mat_<double>& c, Mat_<double>& b,double& v,vector<int>& N,vector<int>& B,vector<unsigned int>& indexToRow){
    // Nonbasic variables start as 0..c.cols-1.
    N.resize(c.cols);
    N[0]=0;
    for (std::vector<int>::iterator it = N.begin()+1 ; it != N.end(); ++it){
        *it=it[-1]+1;
    }
    // Basic variables continue the numbering: N.size()..N.size()+b.rows-1.
    B.resize(b.rows);
    B[0]=(int)N.size();
    for (std::vector<int>::iterator it = B.begin()+1 ; it != B.end(); ++it){
        *it=it[-1]+1;
    }
    // indexToRow maps a variable index to its position (column for nonbasic,
    // row offset past N.size() for basic).
    indexToRow.resize(c.cols+b.rows);
    indexToRow[0]=0;
    for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
        *it=it[-1]+1;
    }
    v=0;

    // Find the row k with the most negative right-hand side.
    int k=0;
    {
        double min=DBL_MAX;
        for(int i=0;i<b.rows;i++){
            if(b(i,b.cols-1)<min){
                min=b(i,b.cols-1);
                k=i;
            }
        }
    }

    if(b(k,b.cols-1)>=0){
        // All RHS entries nonnegative: the basic solution is already
        // feasible. Drop the placeholder x_0 slot and shift the lookup.
        N.erase(N.begin());
        for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
            --(*it);
        }
        return 0;
    }

    // Auxiliary problem: objective becomes -x_0; x_0 enters every constraint
    // with coefficient -1 (stored in column 0).
    Mat_<double> old_c=c.clone();
    c=0;
    c(0,0)=-1;
    for(int i=0;i<b.rows;i++){
        b(i,0)=-1;
    }

    print_simplex_state(c,b,v,N,B);

    dprintf(("\tWE MAKE PIVOT\n"));
    // Pivot x_0 into the basis on the most-violated row: feasibility restored.
    pivot(c,b,v,N,B,k,0,indexToRow);

    print_simplex_state(c,b,v,N,B);

    // Solve the auxiliary LP to optimality.
    inner_simplex(c,b,v,N,B,indexToRow);

    dprintf(("\tAFTER INNER_SIMPLEX\n"));
    print_simplex_state(c,b,v,N,B);

    unsigned int nsize = (unsigned int)N.size();
    if(indexToRow[0]>=nsize){
        // x_0 ended up basic. If its value is positive the original problem
        // is infeasible; otherwise pivot it out (degenerate, value 0).
        int iterator_offset=indexToRow[0]-nsize;
        if(b(iterator_offset,b.cols-1)>0){
            return SOLVELP_UNFEASIBLE;
        }
        pivot(c,b,v,N,B,iterator_offset,0,indexToRow);
    }

    // Move x_0 to the front of the nonbasic set so it can be dropped.
    vector<int>::iterator iterator;
    {
        int iterator_offset=indexToRow[0];
        iterator=N.begin()+iterator_offset;
        std::iter_swap(iterator,N.begin());
        SWAP(int,indexToRow[*iterator],indexToRow[0]);
        swap_columns(c,iterator_offset,0);
        swap_columns(b,iterator_offset,0);
    }

    dprintf(("after swaps\n"));
    print_simplex_state(c,b,v,N,B);

    // Re-express the ORIGINAL objective in terms of the final basis.
    //start from 1, because we ignore x_0
    c=0;
    v=0;
    for(int I=1;I<old_c.cols;I++){
        if(indexToRow[I]<nsize){
            // Variable I is nonbasic: its coefficient carries over directly.
            dprintf(("I=%d from nonbasic\n",I));
            int iterator_offset=indexToRow[I];
            c(0,iterator_offset)+=old_c(0,I);
            print_matrix(c);
        }else{
            // Variable I is basic: substitute its row expression into c and
            // fold its RHS contribution into the constant term v.
            dprintf(("I=%d from basic\n",I));
            int iterator_offset=indexToRow[I]-nsize;
            c-=old_c(0,I)*b.row(iterator_offset).colRange(0,b.cols-1);
            v+=old_c(0,I)*b(iterator_offset,b.cols-1);
            print_matrix(c);
        }
    }

    dprintf(("after restore\n"));
    print_simplex_state(c,b,v,N,B);

    // Finally drop the x_0 slot and shift the lookup accordingly.
    N.erase(N.begin());
    for (std::vector<unsigned int>::iterator it = indexToRow.begin()+1 ; it != indexToRow.end(); ++it){
        --(*it);
    }
    return 0;
}
// Estimate the camera pose (3x3 rotation + translation, returned as a 3x4
// [R|t] matrix) from correspondences between coplanar world points
// (z assumed 0, Nx3) and their image projections (Nx2), via a linear
// homography-style least-squares system solved with SVD.
Mat_<double> estimateRotTransl(
    Mat_<double> const worldPts,
    Mat_<double> const imagePts)
{
    assert(imagePts.cols == 2);
    assert(worldPts.cols == 3);
    assert(imagePts.rows == worldPts.rows);
    // TODO verify all worldPts have z=0

    // See "pose estimation" section in the paper.

    // Set up linear system of equations: two rows per correspondence, nine
    // unknowns (the first two rotation columns and the translation).
    int const n = imagePts.rows;
    Mat_<double> F(2 * n, 9);
    for(int i = 0; i < n; i++)
    {
        // Row for the x-coordinate constraint.
        F(2 * i, 0) = worldPts(i, 0);
        F(2 * i, 1) = 0;
        F(2 * i, 2) = -worldPts(i, 0) * imagePts(i, 0);
        F(2 * i, 3) = worldPts(i, 1);
        F(2 * i, 4) = 0;
        F(2 * i, 5) = -worldPts(i, 1) * imagePts(i, 0);
        F(2 * i, 6) = 1;
        F(2 * i, 7) = 0;
        F(2 * i, 8) = -imagePts(i, 0);
        // Row for the y-coordinate constraint.
        F(2 * i + 1, 0) = 0;
        F(2 * i + 1, 1) = worldPts(i, 0);
        F(2 * i + 1, 2) = -worldPts(i, 0) * imagePts(i, 1);
        F(2 * i + 1, 3) = 0;
        F(2 * i + 1, 4) = worldPts(i, 1);
        F(2 * i + 1, 5) = -worldPts(i, 1) * imagePts(i, 1);
        F(2 * i + 1, 6) = 0;
        F(2 * i + 1, 7) = 1;
        F(2 * i + 1, 8) = -imagePts(i, 1);
    }

    // Find least-squares estimate of rotation + translation: the solution is
    // the right-singular vector for the smallest singular value.
    SVD svd(F);
    Mat_<double> rrp = svd.vt.row(8);
    rrp = rrp.clone().reshape(0, 3).t(); // columns: r1, r2, t (up to scale)
    if(rrp(2, 2) < 0) {
        rrp *= -1; // make sure depth is positive
    }
    // cout << "rrp: " << rrp << endl;

    // Fix the homogeneous scale using the average length of the two
    // rotation columns (which should be unit vectors).
    Mat_<double> transl = \
        2 * rrp.col(2) / (norm(rrp.col(0)) + norm(rrp.col(1)));
    // cout << "transl: " << transl << endl;

    // Project the two estimated rotation columns onto the nearest true
    // rotation matrix via SVD orthogonalisation.
    Mat_<double> rot = Mat_<double>::zeros(3, 3);
    rrp.col(0).copyTo(rot.col(0));
    rrp.col(1).copyTo(rot.col(1));
    SVD svd2(rot);
    rot = svd2.u * svd2.vt;
    if(determinant(rot) < 0) {
        rot.col(2) *= -1; // make sure it's a valid rotation matrix
    }
    if(abs(determinant(rot) - 1) > 1e-10) {
        cerr << "Warning: rotation matrix has determinant " \
            << determinant(rot) << " where expected 1." << endl;
    }
    // cout << "rot: " << rot << endl;

    // Pack [R | t] into the 3x4 result.
    Mat_<double> rotTransl(3, 4);
    rot.col(0).copyTo(rotTransl.col(0));
    rot.col(1).copyTo(rotTransl.col(1));
    rot.col(2).copyTo(rotTransl.col(2));
    transl.copyTo(rotTransl.col(3));
    return rotTransl;
}
// Batch face-landmark detection driver: for each input image (optionally
// paired with a depth image), initialise the CLM model either from a Haar
// face detection or from user-supplied poses, optionally trying several
// head-orientation hypotheses and keeping the most likely fit, then write
// the landmarks and/or annotated images to the requested outputs.
int main (int argc, char **argv)
{
    //Convert arguments to more convenient vector form
    vector<string> arguments = get_arguments(argc, argv);

    // Some initial parameters that can be overriden from command line
    vector<string> files, dFiles, outimages, outfeaturess;

    // these can be overriden using -cx, -cy, -fx, -fy, -dimx, -dimy flags
    float fx = 500, fy = 500, cx = 0, cy = 0;
    int dimx = 0, dimy = 0;

    // initial translation, rotation and scale (can be specified by the user)
    vector<Vec6d> initial_poses;

    bool useCLMTracker = true;

    // Get camera parameters
    CLMWrapper::get_camera_params(fx, fy, cx, cy, dimx, dimy, arguments);
    CLMWrapper::get_image_input_output_params(files, dFiles, outfeaturess, outimages, initial_poses, arguments);
    CLMWrapper::CLMParameters clmParams(arguments);

    // The modules that are being used for tracking
    CLMTracker::TrackerCLM clmModel;
    cout << "Loading the model" << endl;
    clmModel.Read(clmParams.defaultModelLoc, clmParams.override_pdm_loc);
    cout << "Model loaded" << endl;

    // Haar cascade used for the initial face detection.
    PoseDetectorHaar::PoseDetectorHaarParameters haarParams;
    haarParams.ClassifierLocation = "classifiers/haarcascade_frontalface_alt2.xml";
    CascadeClassifier classifier(haarParams.ClassifierLocation);

    bool visualise = true;
    //clmParams.multi_view = true;

    // Do some image loading
    for(size_t i = 0; i < files.size(); i++)
    {
        string file = files.at(i);

        // Loading image
        Mat img = imread(file, -1);

        // Loading depth file if exists
        Mat dTemp;
        Mat_<float> dImg;
        if(dFiles.size() > 0)
        {
            string dFile = dFiles.at(i);
            dTemp = imread(dFile, -1);
            dTemp.convertTo(dImg, CV_32F);
        }

        // Optional resize to the user-requested dimensions.
        if(dimx != 0)
        {
            cv::resize(img.clone(), img, cv::Size(dimx, dimy));
            if(!dImg.empty())
            {
                cv::resize(dImg.clone(), dImg, cv::Size(dimx, dimy));
            }
        }

        bool trackingInitialised = false;

        // Making sure the image is in uchar grayscale
        Mat_<uchar> gray;
        convert_to_grayscale(img, gray);

        // Face detection initialisation
        Vec6d poseEstimateHaar;
        Matx66d poseEstimateHaarUncertainty;

        vector<Vec6d> poseEstimatesInitialiser;
        vector<Matx66d> covariancesInitialiser;
        vector<Rect> faceRegions;

        bool initSuccess = false;

        // if no pose defined we just use OpenCV
        if(initial_poses.empty())
        {
            initSuccess = PoseDetectorHaar::InitialisePosesHaar(gray, dImg, poseEstimatesInitialiser, covariancesInitialiser, faceRegions, classifier, fx, fy, cx, cy, haarParams);

            if(initSuccess)
            {
                if(poseEstimatesInitialiser.size() > 1)
                {
                    cout << "Multiple faces detected" << endl;
                }
            }

            if(initSuccess)
            {
                // perform landmark detection for every face detected
                for(int r=0; r < faceRegions.size(); ++r)
                {
                    if(!clmParams.multi_view)
                    {
                        // Single frontal hypothesis.
                        Vec6d pose(0.0);
                        bool success = CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, faceRegions[r], fx, fy, cx, cy, clmParams);
                    }
                    else
                    {
                        vector<Vec3d> orientations;

                        // Have multiple hypotheses, check which one is best and keep it
                        // (frontal plus ~30 degrees of yaw/pitch either way).
                        orientations.push_back(Vec3d(0,0,0));
                        orientations.push_back(Vec3d(0,0.5236,0));
                        orientations.push_back(Vec3d(0,-0.5236,0));
                        orientations.push_back(Vec3d(0.5236,0,0));
                        orientations.push_back(Vec3d(-0.5236,0,0));

                        double best_lhood;
                        Mat best_shape;

                        for (size_t p = 0; p<orientations.size(); ++p)
                        {
                            Vec6d pose;
                            pose(1) = orientations[p][0];
                            pose(2) = orientations[p][1];
                            pose(3) = orientations[p][2];

                            CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, faceRegions[r], fx, fy, cx, cy, clmParams);

                            // Keep the hypothesis with the highest likelihood.
                            double curr_lhood = clmModel._clm._likelihood;
                            if(p==0 || curr_lhood > best_lhood)
                            {
                                best_lhood = curr_lhood;
                                best_shape = clmModel._shape.clone();
                            }
                        }
                        clmModel._shape = best_shape;
                        clmModel._clm._likelihood = best_lhood;
                    }

                    // Writing out the detected landmarks
                    if(!outfeaturess.empty())
                    {
                        char name[100];
                        sprintf(name, "_det_%d", r);

                        boost::filesystem::path slash("/");
                        std::string preferredSlash = slash.make_preferred().string();

                        // append detection number to the output file name
                        boost::filesystem::path out_feat_path(outfeaturess.at(i));
                        boost::filesystem::path dir = out_feat_path.parent_path();
                        boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
                        boost::filesystem::path ext = out_feat_path.extension();
                        string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
                        write_out_landmarks(outfeatures, clmModel);
                    }

                    // displaying detected stuff
                    Mat disp;
                    create_display_image(img, disp, clmModel);

                    if(visualise)
                    {
                        // For debug purposes show final likelihood
                        //std::ostringstream s;
                        //s << clmModel._clm._likelihood;
                        //string lhoodSt("Lhood:");
                        //lhoodSt += s.str();
                        //cv::putText(disp, lhoodSt, cv::Point(10,20), CV_FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255,0,0));
                        imshow("colour", disp);
                        cv::waitKey(10);
                    }

                    if(!outimages.empty())
                    {
                        string outimage = outimages.at(i);
                        if(!outimage.empty())
                        {
                            char name[100];
                            sprintf(name, "_det_%d", r);

                            boost::filesystem::path slash("/");
                            std::string preferredSlash = slash.make_preferred().string();

                            // append detection number to the output image name
                            boost::filesystem::path out_feat_path(outimage);
                            boost::filesystem::path dir = out_feat_path.parent_path();
                            boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
                            boost::filesystem::path ext = out_feat_path.extension();
                            outimage = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
                            imwrite(outimage, disp);
                        }
                    }
                }
            }
            else
            {
                cout << "No faces detected" << endl;
                continue;
            }
        }
        else
        {
            initSuccess = true;

            // if pose defined we don't need the conventional initialisation
            if(!clmParams.multi_view)
            {
                clmModel._clm._pglobl = Mat(initial_poses[i]);
                Vec6d pose = CLMWrapper::GetPoseCLM(clmModel, dImg, fx, fy, cx, cy, clmParams);
                bool success = CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, Rect(), fx, fy, cx, cy, clmParams);
            }
            else
            {
                vector<Vec3d> orientations;

                // Have multiple hypotheses, check which one is best and keep it
                orientations.push_back(Vec3d(0,0,0));
                orientations.push_back(Vec3d(0,0.5236,0));
                orientations.push_back(Vec3d(0,-0.5236,0));
                orientations.push_back(Vec3d(0.5236,0,0));
                orientations.push_back(Vec3d(-0.5236,0,0));

                double best_lhood;
                Mat best_shape;

                for (size_t p = 0; p<orientations.size(); ++p)
                {
                    // Start from the user pose, override only the rotation.
                    clmModel._clm._pglobl = Mat(initial_poses[i]);

                    clmModel._clm._pglobl.at<double>(1) = orientations[p][0];
                    clmModel._clm._pglobl.at<double>(2) = orientations[p][1];
                    clmModel._clm._pglobl.at<double>(3) = orientations[p][2];

                    Vec6d pose = CLMWrapper::GetPoseCLM(clmModel, dImg, fx, fy, cx, cy, clmParams);

                    CLMWrapper::InitialiseCLM(gray, dImg, clmModel, pose, Rect(), fx, fy, cx, cy, clmParams);

                    // Keep the hypothesis with the highest likelihood.
                    double curr_lhood = clmModel._clm._likelihood;
                    if(p==0 || curr_lhood > best_lhood)
                    {
                        best_lhood = curr_lhood;
                        best_shape = clmModel._shape.clone();
                    }
                }
                clmModel._shape = best_shape;
            }

            // Writing out the detected landmarks
            if(!outfeaturess.empty())
            {
                string outfeatures = outfeaturess.at(i);
                write_out_landmarks(outfeatures, clmModel);
            }

            // displaying detected stuff
            Mat disp;
            create_display_image(img, disp, clmModel);

            if(visualise)
            {
                imshow("colour", disp);
                cv::waitKey(5);
            }

            if(!outimages.empty())
            {
                string outimage = outimages.at(i);
                if(!outimage.empty())
                {
                    imwrite(outimage, disp);
                }
            }
        }

        // Reset the parameters if more images are coming
        if(files.size() > 0)
        {
            clmModel._clm._plocal.setTo(0);
        }
    }
    return 0;
}
// Bad-argument regression test for the point-projection C API
// (cvProjectPoints2-style call wrapped by C_Caller): a fully valid argument
// set is assembled in `caller`; each scenario then copies it into
// `bad_caller`, corrupts exactly one argument, and checks via run_test_case()
// that the call fails with the expected OpenCV error code.
void run(int /* start_from */ )
{
    // All-zero CvMat header, used as a generically malformed matrix argument.
    CvMat zeros;
    memset(&zeros, 0, sizeof(zeros));

    C_Caller caller, bad_caller;
    CvMat objectPoints_c, r_vec_c, t_vec_c, A_c, distCoeffs_c, imagePoints_c,
          dpdr_c, dpdt_c, dpdf_c, dpdc_c, dpdk_c;

    const int n = 10; // number of test points

    // Valid inputs; each CvMat header below aliases the C++ Mat's storage.
    Mat imagePoints_cpp(1, n, CV_32FC2);
    imagePoints_c = imagePoints_cpp;

    Mat objectPoints_cpp(1, n, CV_32FC3);
    randu(objectPoints_cpp, Scalar::all(1), Scalar::all(10));
    objectPoints_c = objectPoints_cpp;

    // Zero translation and identity rotation (as a Rodrigues vector).
    Mat t_vec_cpp(Mat::zeros(1, 3, CV_32F));
    t_vec_c = t_vec_cpp;
    Mat r_vec_cpp;
    Rodrigues(Mat::eye(3, 3, CV_32F), r_vec_cpp);
    r_vec_c = r_vec_cpp;

    // Camera matrix and distortion coefficients come from the enclosing
    // test object (camMat / distCoeffs members).
    Mat A_cpp = camMat.clone();
    A_c = A_cpp;
    Mat distCoeffs_cpp = distCoeffs.clone();
    distCoeffs_c = distCoeffs_cpp;

    // Jacobian outputs: one x-row and one y-row per point, hence 2*n rows.
    Mat dpdr_cpp(2*n, 3, CV_32F);
    dpdr_c = dpdr_cpp;
    Mat dpdt_cpp(2*n, 3, CV_32F);
    dpdt_c = dpdt_cpp;
    Mat dpdf_cpp(2*n, 2, CV_32F);
    dpdf_c = dpdf_cpp;
    Mat dpdc_cpp(2*n, 2, CV_32F);
    dpdc_c = dpdc_cpp;
    Mat dpdk_cpp(2*n, 4, CV_32F);
    dpdk_c = dpdk_cpp;

    // The reference, fully valid argument set.
    caller.aspectRatio = 1.0;
    caller.objectPoints = &objectPoints_c;
    caller.r_vec = &r_vec_c;
    caller.t_vec = &t_vec_c;
    caller.A = &A_c;
    caller.distCoeffs = &distCoeffs_c;
    caller.imagePoints = &imagePoints_c;
    caller.dpdr = &dpdr_c;
    caller.dpdt = &dpdt_c;
    caller.dpdf = &dpdf_c;
    caller.dpdc = &dpdc_c;
    caller.dpdk = &dpdk_c;

    /********************/
    int errors = 0;

    // NULL pointers for the mandatory arguments.
    bad_caller = caller;
    bad_caller.objectPoints = 0;
    errors += run_test_case( CV_StsBadArg, "Zero objectPoints", bad_caller );

    bad_caller = caller;
    bad_caller.r_vec = 0;
    errors += run_test_case( CV_StsBadArg, "Zero r_vec", bad_caller );

    bad_caller = caller;
    bad_caller.t_vec = 0;
    errors += run_test_case( CV_StsBadArg, "Zero t_vec", bad_caller );

    bad_caller = caller;
    bad_caller.A = 0;
    errors += run_test_case( CV_StsBadArg, "Zero camMat", bad_caller );

    bad_caller = caller;
    bad_caller.imagePoints = 0;
    errors += run_test_case( CV_StsBadArg, "Zero imagePoints", bad_caller );

    /****************************/
    // Rotation vector: wrong depth (CV_32S), wrong size (2x2),
    // wrong channel count (2 channels).
    Mat bad_r_vec_cpp1(r_vec_cpp.size(), CV_32S);
    CvMat bad_r_vec_c1 = bad_r_vec_cpp1;
    Mat bad_r_vec_cpp2(2, 2, CV_32F);
    CvMat bad_r_vec_c2 = bad_r_vec_cpp2;
    Mat bad_r_vec_cpp3(r_vec_cpp.size(), CV_32FC2);
    CvMat bad_r_vec_c3 = bad_r_vec_cpp3;

    bad_caller = caller;
    bad_caller.r_vec = &bad_r_vec_c1;
    errors += run_test_case( CV_StsBadArg, "Bad rvec format", bad_caller );

    bad_caller = caller;
    bad_caller.r_vec = &bad_r_vec_c2;
    errors += run_test_case( CV_StsBadArg, "Bad rvec format", bad_caller );

    bad_caller = caller;
    bad_caller.r_vec = &bad_r_vec_c3;
    errors += run_test_case( CV_StsBadArg, "Bad rvec format", bad_caller );

    /****************************/
    // Translation vector: wrong depth, wrong size, wrong channel count.
    Mat bad_t_vec_cpp1(t_vec_cpp.size(), CV_32S);
    CvMat bad_t_vec_c1 = bad_t_vec_cpp1;
    Mat bad_t_vec_cpp2(2, 2, CV_32F);
    CvMat bad_t_vec_c2 = bad_t_vec_cpp2;
    Mat bad_t_vec_cpp3(1, 1, CV_32FC2);
    CvMat bad_t_vec_c3 = bad_t_vec_cpp3;

    bad_caller = caller;
    bad_caller.t_vec = &bad_t_vec_c1;
    errors += run_test_case( CV_StsBadArg, "Bad tvec format", bad_caller );

    bad_caller = caller;
    bad_caller.t_vec = &bad_t_vec_c2;
    errors += run_test_case( CV_StsBadArg, "Bad tvec format", bad_caller );

    bad_caller = caller;
    bad_caller.t_vec = &bad_t_vec_c3;
    errors += run_test_case( CV_StsBadArg, "Bad tvec format", bad_caller );

    /****************************/
    // Camera matrix: wrong depth, wrong size.
    Mat bad_A_cpp1(A_cpp.size(), CV_32S);
    CvMat bad_A_c1 = bad_A_cpp1;
    Mat bad_A_cpp2(2, 2, CV_32F);
    CvMat bad_A_c2 = bad_A_cpp2;

    bad_caller = caller;
    bad_caller.A = &bad_A_c1;
    errors += run_test_case( CV_StsBadArg, "Bad A format", bad_caller );

    bad_caller = caller;
    bad_caller.A = &bad_A_c2;
    errors += run_test_case( CV_StsBadArg, "Bad A format", bad_caller );

    /****************************/
    // Distortion coefficients: zeroed header, wrong depth, wrong size,
    // unsupported coefficient count (7).
    Mat bad_distCoeffs_cpp1(distCoeffs_cpp.size(), CV_32S);
    CvMat bad_distCoeffs_c1 = bad_distCoeffs_cpp1;
    Mat bad_distCoeffs_cpp2(2, 2, CV_32F);
    CvMat bad_distCoeffs_c2 = bad_distCoeffs_cpp2;
    Mat bad_distCoeffs_cpp3(1, 7, CV_32F);
    CvMat bad_distCoeffs_c3 = bad_distCoeffs_cpp3;

    bad_caller = caller;
    bad_caller.distCoeffs = &zeros;
    errors += run_test_case( CV_StsBadArg, "Bad distCoeffs format", bad_caller );

    bad_caller = caller;
    bad_caller.distCoeffs = &bad_distCoeffs_c1;
    errors += run_test_case( CV_StsBadArg, "Bad distCoeffs format", bad_caller );

    bad_caller = caller;
    bad_caller.distCoeffs = &bad_distCoeffs_c2;
    errors += run_test_case( CV_StsBadArg, "Bad distCoeffs format", bad_caller );

    bad_caller = caller;
    bad_caller.distCoeffs = &bad_distCoeffs_c3;
    errors += run_test_case( CV_StsBadArg, "Bad distCoeffs format", bad_caller );

    /****************************/
    // dp/dr Jacobian: zeroed header, wrong depth, wrong row count, wrong
    // column count.
    // NOTE(review): the bad sizes are built from dpdr_cpp.cols (== 3) rather
    // than .rows — the matrices are still wrongly sized, which is all the
    // test needs, but confirm against upstream whether .rows was intended.
    Mat bad_dpdr_cpp1(dpdr_cpp.size(), CV_32S);
    CvMat bad_dpdr_c1 = bad_dpdr_cpp1;
    Mat bad_dpdr_cpp2(dpdr_cpp.cols+1, 3, CV_32F);
    CvMat bad_dpdr_c2 = bad_dpdr_cpp2;
    Mat bad_dpdr_cpp3(dpdr_cpp.cols, 7, CV_32F);
    CvMat bad_dpdr_c3 = bad_dpdr_cpp3;

    bad_caller = caller;
    bad_caller.dpdr = &zeros;
    errors += run_test_case( CV_StsBadArg, "Bad dpdr format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdr = &bad_dpdr_c1;
    errors += run_test_case( CV_StsBadArg, "Bad dpdr format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdr = &bad_dpdr_c2;
    errors += run_test_case( CV_StsBadArg, "Bad dpdr format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdr = &bad_dpdr_c3;
    errors += run_test_case( CV_StsBadArg, "Bad dpdr format", bad_caller );

    /****************************/
    // dp/dt Jacobian: same shape requirements as dp/dr, so the bad dp/dr
    // matrices are reused.
    bad_caller = caller;
    bad_caller.dpdt = &zeros;
    errors += run_test_case( CV_StsBadArg, "Bad dpdt format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdt = &bad_dpdr_c1;
    errors += run_test_case( CV_StsBadArg, "Bad dpdt format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdt = &bad_dpdr_c2;
    errors += run_test_case( CV_StsBadArg, "Bad dpdt format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdt = &bad_dpdr_c3;
    errors += run_test_case( CV_StsBadArg, "Bad dpdt format", bad_caller );

    /****************************/
    // dp/df Jacobian (2 columns): one dedicated wrong-row-count matrix,
    // plus reuse of the bad dp/dr matrices.
    Mat bad_dpdf_cpp2(dpdr_cpp.cols+1, 2, CV_32F);
    CvMat bad_dpdf_c2 = bad_dpdf_cpp2;

    bad_caller = caller;
    bad_caller.dpdf = &zeros;
    errors += run_test_case( CV_StsBadArg, "Bad dpdf format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdf = &bad_dpdr_c1;
    errors += run_test_case( CV_StsBadArg, "Bad dpdf format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdf = &bad_dpdf_c2;
    errors += run_test_case( CV_StsBadArg, "Bad dpdf format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdf = &bad_dpdr_c3;
    errors += run_test_case( CV_StsBadArg, "Bad dpdf format", bad_caller );

    /****************************/
    // dp/dc Jacobian (same 2-column layout as dp/df).
    bad_caller = caller;
    bad_caller.dpdc = &zeros;
    errors += run_test_case( CV_StsBadArg, "Bad dpdc format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdc = &bad_dpdr_c1;
    errors += run_test_case( CV_StsBadArg, "Bad dpdc format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdc = &bad_dpdf_c2;
    errors += run_test_case( CV_StsBadArg, "Bad dpdc format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdc = &bad_dpdr_c3;
    errors += run_test_case( CV_StsBadArg, "Bad dpdc format", bad_caller );

    /****************************/
    // dp/dk Jacobian, plus the dpdk-requested-without-distCoeffs case,
    // which must raise a null-pointer error rather than a bad-arg error.
    bad_caller = caller;
    bad_caller.dpdk = &zeros;
    errors += run_test_case( CV_StsBadArg, "Bad dpdk format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdk = &bad_dpdr_c1;
    errors += run_test_case( CV_StsBadArg, "Bad dpdk format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdk = &bad_dpdf_c2;
    errors += run_test_case( CV_StsBadArg, "Bad dpdk format", bad_caller );

    bad_caller = caller;
    bad_caller.dpdk = &bad_dpdr_c3;
    errors += run_test_case( CV_StsBadArg, "Bad dpdk format", bad_caller );

    bad_caller = caller;
    bad_caller.distCoeffs = 0;
    errors += run_test_case( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not", bad_caller );

    // Report the aggregate result to the test system.
    if (errors)
        ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
    else
        ts->set_failed_test_info(cvtest::TS::OK);
}
int main(int argc, char **argv) { try { ParseArgs(argc, argv); int num_cameras = static_cast<int>(imgs.size()); if (num_cameras < 1) throw runtime_error("Need at least one camera"); // Find features cout << "Finding features...\n"; Ptr<detail::FeaturesFinder> features_finder = features_finder_creator->Create(); for (int i = 0; i < num_cameras; ++i) { int64 t = getTickCount(); cout << "Finding features in '" << img_names[i] << "'... "; Ptr<detail::ImageFeatures> features = new detail::ImageFeatures(); (*features_finder)(imgs[i], *features); features_collection[i] = features; cout << "#features = " << features_collection.find(i)->second->keypoints.size() << ", time = " << (getTickCount() - t) / getTickFrequency() << " sec\n"; } // Match all pairs cout << "Matching pairs... "; MatchesCollection matches_collection; Ptr<detail::FeaturesMatcher> matcher = features_matcher_creator.Create(); FeaturesCollection::iterator from_iter = features_collection.begin(); FeaturesCollection::iterator from_next_iter = from_iter; ++from_next_iter; FeaturesCollection::iterator to_iter; for (; from_next_iter != features_collection.end(); from_iter = from_next_iter++) { for (to_iter = from_next_iter; to_iter != features_collection.end(); ++to_iter) { cout << "(" << from_iter->first << "->" << to_iter->first << ") "; detail::MatchesInfo mi; (*matcher)(*(from_iter->second), *(to_iter->second), mi); matches_collection[make_pair(from_iter->first, to_iter->first)] = new vector<DMatch>(mi.matches); } } cout << endl; // Estimate homographies HomographiesP2 Hs; HomographiesP2 good_Hs; vector<Mat> Hs_from_0; RelativeConfidences rel_confs; Mat keypoints1, keypoints2; cout << "Estimating Hs...\n"; for (int from = 0; from < num_cameras - 1; ++from) { for (int to = from + 1; to < num_cameras; ++to) { const vector<DMatch> &matches = *(matches_collection.find(make_pair(from, to))->second); cout << "Estimating H between '" << img_names[from] << "' and '" << img_names[to] << "'... 
#matches = " << matches.size(); if (static_cast<int>(matches.size()) < min_num_matches) { cout << ", not enough matches\n"; continue; } ExtractMatchedKeypoints(*(features_collection.find(from)->second), *(features_collection.find(to)->second), matches, keypoints1, keypoints2); vector<uchar> inliers_mask; Mat_<double> H = findHomography(keypoints1.reshape(2), keypoints2.reshape(2), inliers_mask, RANSAC, H_est_thresh); if (H.empty()) { cout << ", can't estimate H\n"; continue; } Ptr<vector<DMatch> > inliers = new vector<DMatch>(); for (size_t i = 0; i < matches.size(); ++i) if (inliers_mask[i]) inliers->push_back(matches[i]); cout << ", #inliers = " << inliers->size(); double rms_err = 0; for (size_t i = 0; i < matches.size(); ++i) { const Point2d &kp1 = keypoints1.at<Point2d>(0, i); const Point2d &kp2 = keypoints2.at<Point2d>(0, i); double x = H(0, 0) * kp1.x + H(0, 1) * kp1.y + H(0, 2); double y = H(1, 0) * kp1.x + H(1, 1) * kp1.y + H(1, 2); double z = H(2, 0) * kp1.x + H(2, 1) * kp1.y + H(2, 2); x /= z; y /= z; rms_err += (kp2.x - x) * (kp2.x - x) + (kp2.y - y) * (kp2.y - y); } rms_err = sqrt(rms_err / matches.size()); cout << ", RMS err = " << rms_err; // See "Automatic Panoramic Image Stitching using Invariant Features" // by Matthew Brown and David G. 
Lowe, IJCV 2007 for the explanation double confidence = inliers->size() / (8 + 0.3 * matches.size()) - 1; rel_confs[make_pair(from, to)] = confidence; cout << ", conf = " << confidence; cout << endl; Hs[make_pair(from, to)] = H; matches_collection[make_pair(from, to)] = inliers; if (confidence > 0) good_Hs[make_pair(from, to)] = H; if (from == 0) Hs_from_0.push_back(H); } } // Linear calibration if (K_init.empty()) { cout << "Linear calibrating...\n"; if (lin_est_skew) K_init = CalibRotationalCameraLinear(good_Hs); else K_init = CalibRotationalCameraLinearNoSkew(good_Hs); cout << "K_init =\n" << K_init << endl; } // Non-linear refinement cout << "Refining camera...\n"; if (Hs_from_0.size() != num_cameras - 1) { stringstream msg; msg << "Refinement requires Hs between first and all other images, " << "but only " << Hs_from_0.size() << " were/was found"; throw runtime_error(msg.str()); } map<int, Mat> Rs; Rs[0] = Mat::eye(3, 3, CV_64F); for (int i = 1; i < num_cameras; ++i) Rs[i] = K_init.inv() * Hs_from_0[i - 1] * K_init; Mat_<double> K_refined = K_init.clone(); if (refine_skew) RefineRigidCamera(K_refined, Rs, features_collection, matches_collection); else { K_refined(0, 1) = 0; RefineRigidCamera(K_refined, Rs, features_collection, matches_collection, REFINE_FLAG_K_ALL & ~REFINE_FLAG_K_SKEW); } cout << "K_refined =\n" << K_refined << endl; cout << "SUMMARY\n"; cout << "K_init =\n" << K_init << endl; cout << "K_refined =\n" << K_refined << endl; } catch (const exception &e) { cout << "Error: " << e.what() << endl; } return 0; }