/**
 * @brief Estimate the pattern pose (rvec/tvec) from already-matched 3D-2D
 *        correspondences using RANSAC-based PnP.
 *
 * Thin delegating wrapper around cv::solvePnPRansac; all parameters are
 * forwarded unchanged.
 *
 * @return Always true — kept for signature consistency with the overload that
 *         first has to locate the pattern (which can genuinely fail).
 */
bool CustomPattern::findRtRANSAC(InputArray objectPoints, InputArray imagePoints, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, int iterationsCount, float reprojectionError, int minInliersCount, OutputArray inliers, int flags)
{
    solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, iterationsCount, reprojectionError, minInliersCount, inliers, flags);
    return true; // for consistency with the other methods
}
/**
 * @brief Estimate the rigid transform between two RGB-D frames.
 *
 * Pipeline: keypoint extraction -> descriptor matching -> depth back-projection
 * to build 3D-2D correspondences -> RANSAC PnP -> conversion to an Eigen
 * isometry. A bundle-adjustment pass is invoked at the end for its side
 * effects; the PnP result T is what gets returned.
 *
 * Fixes vs. the previous version: the raw `((double*)mat.data)[i]` casts
 * (which silently assume a contiguous CV_64F layout) are replaced with
 * Mat::at<double>(), and leftover debug std::cout prints are removed.
 *
 * @param rgb1,rgb2     color images of the two frames
 * @param depth1,depth2 corresponding depth maps
 * @param C             camera intrinsics (fx, fy, cx, cy)
 * @return camera motion from frame 1 to frame 2 as an Eigen::Isometry3d
 */
Eigen::Isometry3d transformEstimation(const Mat& rgb1, const Mat& rgb2,
                                      const Mat& depth1, const Mat& depth2,
                                      const CAMERA_INTRINSIC_PARAMETERS& C)
{
    // 1. Detect keypoints and compute descriptors in both frames.
    vector<KeyPoint> keyPts1, keyPts2;
    Mat descriptors1, descriptors2;
    extractKeypointsAndDescripotrs(rgb1, rgb2, keyPts1, keyPts2, descriptors1, descriptors2);

    // 2. Match descriptors between the two frames.
    vector<DMatch> matches;
    descriptorsMatch(rgb1, rgb2, keyPts1, keyPts2, descriptors1, descriptors2, matches);

    // 3. Use the depth maps to lift matched keypoints into 3D (objectPoints)
    //    paired with their 2D observations (points) for PnP.
    vector<Point2f> points;
    vector<Point3f> objectPoints;
    vector<Eigen::Vector2d> imagePoints1, imagePoints2;
    getObjectPointsAndImagePoints(depth1, depth2, keyPts1, keyPts2, matches,
                                  points, objectPoints, imagePoints1, imagePoints2, C);

    // 4. RANSAC PnP with the intrinsic matrix K built from C.
    double camera_matrix_data[3][3] = {
        {C.fx, 0,    C.cx},
        {0,    C.fy, C.cy},
        {0,    0,    1}
    };
    Mat cameraMatrix(3, 3, CV_64F, camera_matrix_data);
    Mat translation, rotation;
    solvePnPRansac(objectPoints, points, cameraMatrix, Mat(), rotation, translation,
                   false, 100, 1.0, 20);

    // 5. Rodrigues vector -> 3x3 rotation matrix, then copy rotation and
    //    translation into Eigen via at<double>() (layout-safe element access).
    Mat rot;
    Rodrigues(rotation, rot);
    Eigen::Matrix3d r;
    for (int row = 0; row < 3; ++row)
        for (int col = 0; col < 3; ++col)
            r(row, col) = rot.at<double>(row, col);
    Eigen::Vector3d t(translation.at<double>(0),
                      translation.at<double>(1),
                      translation.at<double>(2));

    Eigen::Isometry3d T = Eigen::Isometry3d::Identity();
    T.rotate(r);
    T.pretranslate(t);

    // 6. Refinement pass; note its result is not folded back into T here.
    BundleAdjustmentOptimization(objectPoints, imagePoints1, imagePoints2);
    return T;
}
bool CustomPattern::findRtRANSAC(InputArray image, InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, int iterationsCount, float reprojectionError, int minInliersCount, OutputArray inliers, int flags) { vector<Point2f> imagePoints; vector<Point3f> objectPoints; if (!findPattern(image, imagePoints, objectPoints)) return false; solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, iterationsCount, reprojectionError, minInliersCount, inliers, flags); return true; }
/**
 * @brief Estimate the relative pose between two frames via RANSAC PnP.
 *
 * For each feature match, the f1 keypoint is lifted to a 3D vertex using f1's
 * depth map (matches with depth 0, i.e. no measurement, are skipped) and
 * paired with the matched 2D keypoint of f2.
 *
 * Fixes vs. the previous version: the emptiness guard used bitwise '|' where
 * logical '||' was intended, and the loop counter mixed signed int with
 * match.size(); both are corrected without changing behavior.
 *
 * @param cam   camera intrinsics (fx, fy, cx, cy)
 * @param match feature matches from f1 (query) to f2 (train)
 * @param f1    first frame (keypoints + depth map)
 * @param f2    second frame (keypoints)
 * @return pnp result with r, t and the inlier count;
 *         result.inliers == -1 signals "no usable correspondences".
 */
pnp sfm(camera &cam, vector<DMatch> &match, frame &f1, frame &f2)
{
    vector<Point3f> pts_obj; // 3D points in f1's camera frame
    vector<Point2f> pts_img; // matching 2D observations in f2
    pnp result;

    for (size_t i = 0; i < match.size(); ++i)
    {
        Point2f p = f1.kps[match[i].queryIdx].pt;
        // Depth of the f1 keypoint; zero means no depth reading -> skip.
        unsigned short z = f1.depth.ptr<unsigned short>(int(p.y))[int(p.x)];
        if (z == 0)
            continue;
        Point3f px(p.x, p.y, z);
        Point3f pd = map2vertex(cam, px);
        pts_obj.push_back(pd);
        pts_img.push_back(Point2f(f2.kps[match[i].trainIdx].pt));
    }

    // It's very important for further computing PnP using solvePnPRansac(),
    // or it will crash because of invalid input to solvePnPRansac().
    // (Both vectors grow in lockstep, but check both defensively.)
    if (pts_obj.empty() || pts_img.empty())
    {
        result.inliers = -1;
        return result;
    }

    // Camera intrinsic parameters K.
    double p_data[3][3] = {
        {cam.fx, 0,      cam.cx},
        {0,      cam.fy, cam.cy},
        {0,      0,      1},
    };
    Mat p(3, 3, CV_64F, p_data);

    Mat r, t, inliers;
    solvePnPRansac(pts_obj, pts_img, p, Mat(), r, t, false, 100, 1.0, 100, inliers);

    result.r = r;
    result.t = t;
    result.inliers = inliers.rows;
    return result;
}
// Performance test for the GPU solvePnPRansac implementation, parameterized
// over every available CUDA device. Framework macros (PERF_TEST_P,
// SIMPLE_TEST_CYCLE, SANITY_CHECK) drive timing and result validation.
PERF_TEST_P(DevInfo, solvePnPRansac, testing::ValuesIn(devices()))
{
    DeviceInfo devInfo = GetParam();

    // Bind all subsequent GPU work to the device under test.
    setDevice(devInfo.deviceID());

    // 10000 random 3D object points and their 2D image observations,
    // filled by the WARMUP_RNG declaration below.
    Mat object(1, 10000, CV_32FC3);
    Mat image(1, 10000, CV_32FC2);

    declare.in(object, image, WARMUP_RNG);

    Mat rvec, tvec;

    // Allow up to 3 seconds per measured cycle.
    declare.time(3.0);

    SIMPLE_TEST_CYCLE()
    {
        // All-ones camera matrix and zero distortion: synthetic inputs only —
        // this measures throughput, not estimation quality.
        solvePnPRansac(object, image, Mat::ones(3, 3, CV_32FC1), Mat(1, 8, CV_32F, Scalar::all(0)), rvec, tvec);
    }

    SANITY_CHECK(rvec);
    SANITY_CHECK(tvec);
}
/**
 * @brief Incremental SfM step: find matches common to three consecutive
 *        images, triangulate them from the two previous camera poses, and
 *        recover the newest camera's pose (R[add], t[add]) via RANSAC PnP.
 *
 * Index convention (as used by the visible code): `add` is the index of the
 * newest image; good_matches[add-2] links images add-2 -> add-1 and
 * good_matches[add-1] links images add-1 -> add.
 *
 * In/out side effects: R[add], t[add] are written; R/t[add-1], R/t[add-2] are
 * converted to CV_32F in place; entries are erased from points_1[add-1] and
 * points_2[add-1]; mask3D[add] is updated at matching positions.
 *
 * NOTE(review): relies on globals projMatr0, camIntrinsic and dist that are
 * defined elsewhere in this file — verify they are initialized before the
 * first call.
 */
void PnP(const std::vector< cv::DMatch > good_matches[],const int add,const std::vector<cv::KeyPoint> keypoints[], cv::Mat R[], cv::Mat t[],std::vector< cv::Point2f > points_1[], std::vector< cv::Point2f > points_2[],std::vector< cv::Point2f > mask3D[],cv::Mat& img_matches){
    // indicator[trainIdx in image add-1] = index of that match in
    // good_matches[add-2]; -1 marks "no match ends at this keypoint".
    std::vector<int> indicator(5,-1);
    std::vector<cv::Point2f> points1,points2,points3; // image 1 & 2
    // write in indicator for last 2 images
    for(int i = 0; i < good_matches[add-2].size();i++){
        if(good_matches[add-2][i].trainIdx>=indicator.size()){
            // grow on demand so any trainIdx can be recorded
            indicator.resize(good_matches[add-2][i].trainIdx+1,-1);
            indicator[good_matches[add-2][i].trainIdx] = i;
        }
        else{
            indicator[good_matches[add-2][i].trainIdx] = i;
        }
    }
    // images 2 & 3
    std::cout<< "number of matches between "<<add+1<<" pic and last pic: "<<points_1[add-1].size()<<std::endl; // how many matches initially
    // assign points1, points2, points3; find common matches and update mask3d and points_2
    for(int i = 0; i < good_matches[add-1].size();i++){
        if(good_matches[add-1][i].queryIdx<indicator.size()){
            int ind = good_matches[add-1][i].queryIdx;
            // a match chain add-2 -> add-1 -> add exists for this keypoint
            if(indicator[ind]!=-1){
                // 2D location of the track in image add-2
                cv::Point2f temppoint = keypoints[add-2][good_matches[add-2][indicator[ind]].queryIdx].pt ;
                points1.push_back(temppoint);
                // same track in image add-1; look it up in the pairwise lists
                temppoint =keypoints[add-1][good_matches[add-2][indicator[ind]].trainIdx].pt;
                std::vector<cv::Point2f>::iterator p = std::find(points_1[add-1].begin(), points_1[add-1].end(), temppoint);
                std::vector<cv::Point2f>::iterator _p = std::find(mask3D[add-1].begin(), mask3D[add-1].end(), temppoint);
                points2.push_back(temppoint);
                // same track in image add
                temppoint = keypoints[add][good_matches[add-1][i].trainIdx].pt;
                std::vector<cv::Point2f>::iterator t = std::find(points_2[add-1].begin(), points_2[add-1].end(), temppoint);
                points3.push_back(temppoint);
                // update mask3d info 3rd cam
                if(_p!=mask3D[add-1].end()){
                    int nPosition = distance (mask3D[add-1].begin(), _p);
                    mask3D[add][nPosition] = temppoint;
                }
                // delete common matches across 3 pics from points_1 and points_2
                // (erase the f1 entry by iterator and the parallel f2 entry by
                // the same position — order matters: compute nPosition first)
                if(p!=points_1[add-1].end()){
                    int nPosition = distance (points_1[add-1].begin(), p);
                    points_1[add-1].erase(p);
                    points_2[add-1].erase(points_2[add-1].begin()+nPosition);
                }
                //if(t!=points_2[add-1].end()){
                //    points_2[add-1].erase(t);
                //}
            }
        }
    }
    /* for (int j=0; j<points_1[add-1].size();j++){
           circle(img_matches, points_1[add-1][j], 10, cv::Scalar(0,0,255), 3);
       }
       cv::namedWindow("Matches", CV_WINDOW_NORMAL);
       cv::imshow("Matches", img_matches);
       cv::waitKey(); */
    std::cout<<"number of common matches found across 3 pics: "<<points1.size()<<std::endl;
    std::cout<<"number of matches between "<<add+1<<" pic and last pic after deduction: "<<points_1[add-1].size()<<" "<<points_2[add-1].size()<<std::endl;
    // reconstruct 3d points of common matches from last pics for pnp
    cv::Mat projMatr1(3,4,CV_32F);
    if(add==2){
        // first incremental step: image add-2 is the reference camera
        projMatr1=projMatr0;
    }
    else{
        cv::Mat Rt1(3,4,CV_32F); // Rt = [R | t]
        R[add-2].convertTo(R[add-2],CV_32F);
        t[add-2].convertTo(t[add-2],CV_32F);
        hconcat(R[add-2], t[add-2], Rt1); // Rt concatenate
        projMatr1 = camIntrinsic * Rt1;
    }
    // projection matrix of camera add-1
    cv::Mat projMatr2(3,4,CV_32F);
    cv::Mat Rt(3,4,CV_32F); // Rt = [R | t]
    R[add-1].convertTo(R[add-1],CV_32F);
    t[add-1].convertTo(t[add-1],CV_32F);
    hconcat(R[add-1], t[add-1], Rt); // Rt concatenate
    projMatr2 = camIntrinsic * Rt;
    // std::cout<<Rt<<std::endl;
    cv::Mat points4Dtemp=cv::Mat_<float>(4,points1.size());
    /* cv::Mat mask1,mask2;
       cv::findEssentialMat(points1, points2, camIntrinsic, cv::RANSAC, 0.999, 1.0, mask1);
       std::vector< cv::Point2f > points1x, points2x, points3x;
       for(int i=0;i<points1.size();i++){
           if (mask1.at<uchar>(i,0) != 0){
               points1x.push_back(points1[i]);
               points2x.push_back(points2[i]);
               points3x.push_back(points3[i]);
           }
       }
       cv::findEssentialMat(points2x, points3x, camIntrinsic, cv::RANSAC, 0.999, 1.0, mask2);
       std::vector< cv::Point2f > points1xx, points2xx, points3xx;
       for(int i=0;i<points1x.size();i++){
           if (mask2.at<uchar>(i,0) != 0){
               points1xx.push_back(points1x[i]);
               points2xx.push_back(points2x[i]);
               points3xx.push_back(points3x[i]);
           }
       }
       std::cout<<"number of common matches after ransac: "<<points1xx.size()<<std::endl; */
    cv::triangulatePoints(projMatr1, projMatr2, points1, points2, points4Dtemp);
    // dehomogenize: divide by the 4th coordinate
    std::vector<cv::Point3f> points3Dtemp;
    for (int i=0; i < points1.size(); i++) {
        float x = points4Dtemp.at<float>(3,i);
        cv::Point3f p;
        p.x = points4Dtemp.at<float>(0,i) / x;
        p.y = points4Dtemp.at<float>(1,i) / x;
        p.z = points4Dtemp.at<float>(2,i) / x;
        points3Dtemp.push_back(p);
    }
    //std::cout<<points3Dtemp.rows<<" "<<std::endl;
    //std::cout<<"6666666666666666666666666666666"<<std::endl;
    // PnP to get R and t of current pic
    // NOTE(review): in this positional call the 8th/9th/10th arguments are
    // iterationsCount=3, reprojectionError=5, confidence=0.99 — only 3 RANSAC
    // iterations looks suspiciously low; confirm against the OpenCV version
    // in use and the intended parameter order.
    cv::Mat Rv;
    cv::Mat inlier;
    solvePnPRansac(points3Dtemp,points3,camIntrinsic,dist,Rv,t[add],false,3,5,0.99,inlier,CV_ITERATIVE);
    int n_inl = countNonZero(inlier);
    std::cout<<"number of inliers in PnP: "<<n_inl<<std::endl;
    // convert the Rodrigues vector to the rotation matrix for camera `add`
    Rodrigues(Rv,R[add]);
    //std::cout<<"5555555555555555555555555555555"<<std::endl;
    // std::cout<<"R after pnp: "<<R[add]<<std::endl;
    // std::cout<<"t after pnp: "<<t[add-1]<<std::endl;
}