void alignFrames(Mat *colorA, Mat *colorB, Mat *depthA, Mat *depthB, Transformation *inverse) {
    // Prepare the matcher
    Mat grayA, grayB;
    cvtColor(*colorA, grayA, CV_RGB2GRAY);
    cvtColor(*colorB, grayB, CV_RGB2GRAY);
    RobustMatcher rmatcher;
    Ptr<FeatureDetector> pfd = new SurfFeatureDetector(10);
    rmatcher.setFeatureDetector(pfd);

    // Match the two images
    vector<DMatch> matches;
    vector<KeyPoint> keypointsA, keypointsB;
    Mat fundemental = rmatcher.match(grayA, grayB, matches, keypointsA, keypointsB);

    Mat img_matches;
    drawMatches(grayA, keypointsA, grayB, keypointsB, matches, img_matches,
                Scalar::all(-1), Scalar::all(-1), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    //-- Show detected matches
    imshow("Good Matches", img_matches);

    vector<Point2f> selPoints1, selPoints2;
    KeyPoint::convert(keypointsA, selPoints1);
    KeyPoint::convert(keypointsB, selPoints2);

    vector<Point3f *> setA, setB;
    for (size_t i = 0; i < matches.size(); i++) {
        // Reassemble the 16-bit depth value packed into the first two
        // 8-bit channels of the depth image, then convert to metres (/100)
        Point2f point2D = selPoints1.at(matches.at(i).queryIdx);
        int bit0 = depthA->at<Vec3b>(point2D.y, point2D.x)[0];
        int bit1 = depthA->at<Vec3b>(point2D.y, point2D.x)[1];
        double depth = (bit0 | bit1 << 8);
        Point3f *point3D = new Point3f(point2D.x, point2D.y, depth / 100);
        setA.push_back(point3D);

        point2D = selPoints2.at(matches.at(i).trainIdx);
        bit0 = depthB->at<Vec3b>(point2D.y, point2D.x)[0];
        bit1 = depthB->at<Vec3b>(point2D.y, point2D.x)[1];
        depth = (bit0 | bit1 << 8);
        Point3f *point3DB = new Point3f(point2D.x, point2D.y, depth / 100);
        setB.push_back(point3DB);
    }

    Transformation result(false);
    HornMethod hornMethod;
    hornMethod.getTransformation(setA, setB, &result);
    inverse->invert(&result);

    // Clean vectors setA and setB (the points were heap-allocated above)
    for (size_t i = 0; i < setA.size(); i++) {
        delete setA[i];
        delete setB[i];
    }
}
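// A minimal driver sketch for alignFrames(), assuming the colour frames are
// ordinary 8-bit 3-channel images and the depth frames pack a 16-bit depth
// value into the first two channels of an 8-bit 3-channel image, as the
// bit-unpacking above expects. The file names and the Transformation
// constructor usage are illustrative assumptions, not part of the original.
int alignFramesExample() {
    Mat colorA = imread("frameA_color.png", IMREAD_COLOR);
    Mat colorB = imread("frameB_color.png", IMREAD_COLOR);
    Mat depthA = imread("frameA_depth.png", IMREAD_COLOR);
    Mat depthB = imread("frameB_depth.png", IMREAD_COLOR);
    if (colorA.empty() || colorB.empty() || depthA.empty() || depthB.empty())
        return -1;
    Transformation inverse(false); // receives the inverted Horn-method transform
    alignFrames(&colorA, &colorB, &depthA, &depthB, &inverse);
    waitKey(0); // keep the "Good Matches" window open
    return 0;
}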
bool recognize(const cv::Mat &in, cv::Rect roi) {
    bool result = false;
    cv::Mat image1 = cv::imread("data/sim.png", 0);
    if (image1.empty())
        return false;

    // Prepare the matcher
    RobustMatcher rmatcher;
    rmatcher.setConfidenceLevel(0.98);
    rmatcher.setMinDistanceToEpipolar(1.0);
    rmatcher.setRatio(0.65f);
    cv::Ptr<cv::FeatureDetector> pfd = new cv::SurfFeatureDetector(10);
    rmatcher.setFeatureDetector(pfd);

    cv::Mat face = in(roi);

    // Match the two images
    std::vector<cv::DMatch> matches;
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    rmatcher.match(image1, face, matches, keypoints1, keypoints2);

    // Draw the matches
    cv::Mat imageMatches;
    cv::drawMatches(image1, keypoints1,           // 1st image and its keypoints
                    face, keypoints2,             // 2nd image and its keypoints
                    matches,                      // the matches
                    imageMatches,                 // the image produced
                    cv::Scalar(255, 255, 255));   // color of the lines

    if (rmatcher.getRecognition()) {
        result = true;
    }
    return result;
}
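// A hedged usage sketch for recognize(): in practice the ROI would come from
// a face detector; here a fixed rectangle stands in for it. "data/sim.png" is
// the reference image hard-coded above; the input frame path and ROI are
// illustrative assumptions.
void recognizeExample() {
    cv::Mat frame = cv::imread("data/frame.png", 0); // hypothetical input frame
    if (frame.empty())
        return;
    cv::Rect roi(100, 100, 128, 128); // stand-in for a detected face region
    if (recognize(frame, roi))
        std::cout << "Reference face recognized in ROI" << std::endl;
}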
/** Main program **/
int main() {
    help();

    // Load a mesh given the *.ply file path
    mesh.load(ply_read_path);

    // Set parameters
    int numKeyPoints = 10000;

    // Instantiate robust matcher: detector, extractor, matcher
    RobustMatcher rmatcher;
    Ptr<FeatureDetector> detector = ORB::create(numKeyPoints);
    rmatcher.setFeatureDetector(detector);

    /** GROUND TRUTH OF THE FIRST IMAGE **/

    // Create & open window
    namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO);

    // Set up the mouse events
    setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0);

    // Open the image to register
    Mat img_in = imread(img_path, IMREAD_COLOR);
    Mat img_vis = img_in.clone();

    if (!img_in.data) {
        cout << "Could not open or find the image" << endl;
        return -1;
    }

    // Set the number of points to register
    int num_registrations = n;
    registration.setNumMax(num_registrations);

    cout << "Click the box corners ..." << endl;
    cout << "Waiting ..." << endl;

    // Loop until all the points are registered
    while (waitKey(30) < 0) {
        // Refresh debug image
        img_vis = img_in.clone();

        // Current registered points
        vector<Point2f> list_points2d = registration.get_points2d();
        vector<Point3f> list_points3d = registration.get_points3d();

        // Draw current registered points
        drawPoints(img_vis, list_points2d, list_points3d, red);

        // If the registration is not finished, draw which 3D point has to be
        // registered next; if it is finished, break out of the loop.
        if (!end_registration) {
            // Draw debug text
            int n_regist = registration.getNumRegist();
            int n_vertex = pts[n_regist];
            Point3f current_point3d = mesh.getVertex(n_vertex - 1);

            drawQuestion(img_vis, current_point3d, green);
            drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
        } else {
            // Draw debug text
            drawText(img_vis, "END REGISTRATION", green);
            drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), green);
            break;
        }

        // Show the image
        imshow("MODEL REGISTRATION", img_vis);
    }

    /** COMPUTE CAMERA POSE **/

    cout << "COMPUTING POSE ..." << endl;

    // The list of registered points
    vector<Point2f> list_points2d = registration.get_points2d();
    vector<Point3f> list_points3d = registration.get_points3d();

    // Estimate pose given the registered points
    bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE);
    if (is_correspondence) {
        cout << "Correspondence found" << endl;

        // Compute all the 2D points of the mesh to verify the algorithm, and draw them
        vector<Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
        draw2DPoints(img_vis, list_points2d_mesh, green);
    } else {
        cout << "Correspondence not found" << endl << endl;
    }

    // Show the image
    imshow("MODEL REGISTRATION", img_vis);

    // Show the image until a key is pressed
    waitKey(0);

    /** COMPUTE 3D of the image keypoints **/

    // Containers for keypoints and descriptors of the model
    vector<KeyPoint> keypoints_model;
    Mat descriptors;

    // Compute keypoints and descriptors
    rmatcher.computeKeyPoints(img_in, keypoints_model);
    rmatcher.computeDescriptors(img_in, keypoints_model, descriptors);

    // Check whether each keypoint lies on the surface of the registered object
    // and, if so, add it to the model
    for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
        Point2f point2d(keypoints_model[i].pt);
        Point3f point3d;
        bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
        if (on_surface) {
            model.add_correspondence(point2d, point3d);
            model.add_descriptor(descriptors.row(i));
            model.add_keypoint(keypoints_model[i]);
        } else {
            model.add_outlier(point2d);
        }
    }

    // Save the model into a *.yaml file
    model.save(write_path);

    // Output image
    img_vis = img_in.clone();

    // The 2D points of the model
    vector<Point2f> list_points_in = model.get_points2d_in();
    vector<Point2f> list_points_out = model.get_points2d_out();

    // Draw some debug text
    string num = IntToString((int)list_points_in.size());
    string text = "There are " + num + " inliers";
    drawText(img_vis, text, green);

    num = IntToString((int)list_points_out.size());
    text = "There are " + num + " outliers";
    drawText2(img_vis, text, red);

    // Draw the object mesh
    drawObjectMesh(img_vis, &mesh, &pnp_registration, blue);

    // Draw the found keypoints, coloured by whether they lie on the surface
    draw2DPoints(img_vis, list_points_in, green);
    draw2DPoints(img_vis, list_points_out, red);

    // Show the image
    imshow("MODEL REGISTRATION", img_vis);

    // Wait until a key is pressed
    waitKey(0);

    // Close and destroy window
    destroyWindow("MODEL REGISTRATION");

    cout << "GOODBYE" << endl;
    return 0;
}
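// For reference, a minimal sketch of the cv::solvePnP call that a method like
// pnp_registration.estimatePose() presumably wraps. The intrinsics here are
// illustrative assumptions (the real values would come from camera
// calibration), and the helper name is hypothetical.
bool estimatePoseSketch(const vector<Point3f> &pts3d, const vector<Point2f> &pts2d) {
    double f = 55, sx = 22.3, sy = 14.9, w = 640, h = 480; // assumed intrinsics
    Mat A = (Mat_<double>(3, 3) << w * f / sx, 0,          w / 2,
                                   0,          h * f / sy, h / 2,
                                   0,          0,          1);
    Mat distCoeffs = Mat::zeros(4, 1, CV_64FC1); // assume no lens distortion
    Mat rvec, tvec;
    // SOLVEPNP_ITERATIVE is the flag used above; it needs >= 4 correspondences
    return solvePnP(pts3d, pts2d, A, distCoeffs, rvec, tvec, false, SOLVEPNP_ITERATIVE);
}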
int main(int argc, char **argv) {
    // Read input images
    if (argc < 5) {
        std::cout << "Usage: " << argv[0] << " image1 image2 depth1 depth2\n";
        return 0;
    }
    char* str1 = argv[1];
    char* str2 = argv[2];
    char* str3 = argv[3];
    char* str4 = argv[4];
    cv::Mat image1 = cv::imread(str1, 0);
    cv::Mat image2 = cv::imread(str2, 0);
    cv::Mat imageD1 = cv::imread(str3, CV_LOAD_IMAGE_ANYDEPTH);
    cv::Mat imageD2 = cv::imread(str4, CV_LOAD_IMAGE_ANYDEPTH);
    // cv::Mat image1 = cv::imread("/home/tyu/ubuntu_opencv/image/1.png", 0);
    // cv::Mat image2 = cv::imread("/home/tyu/ubuntu_opencv/image/2.png", 0);
    // cv::Mat imageD1 = cv::imread("/home/tyu/ubuntu_opencv/image/1d.png", CV_LOAD_IMAGE_ANYDEPTH);
    // cv::Mat imageD2 = cv::imread("/home/tyu/ubuntu_opencv/image/2d.png", CV_LOAD_IMAGE_ANYDEPTH);
    if (!image1.data || !image2.data) {
        std::cout << "Cannot load the image data!\n";
        return 0;
    }

    // Display the images
    // cv::namedWindow("Right Image");
    // cv::imshow("Right Image", image1);
    // cv::namedWindow("Left Image");
    // cv::imshow("Left Image", image2);

    // Prepare the matcher
    RobustMatcher rmatcher;
    rmatcher.setConfidenceLevel(0.98);
    rmatcher.setMinDistanceToEpipolar(1.0);
    rmatcher.setRatio(0.65f);
    cv::Ptr<cv::FeatureDetector> pfd = new cv::SurfFeatureDetector(15000);
    rmatcher.setFeatureDetector(pfd);

    // Match the two images
    std::vector<cv::DMatch> matches;
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    cv::Mat fundemental = rmatcher.match(image1, image2, matches, keypoints1, keypoints2);
    std::cout << fundemental << std::endl;
    std::cout << keypoints1.size() << std::endl;

    // Draw the matches
    // cv::Mat imageMatches;
    /*
    cv::drawMatches(image1, keypoints1,      // 1st image and its keypoints
                    image2, keypoints2,      // 2nd image and its keypoints
                    matches,                 // the matches
                    imageMatches,            // the image produced
                    cv::Scalar(1, 255, 255)); // color of the lines
    */
    // std::cout << "imageMatches's size is: " << imageMatches.size() << std::endl;

    ///// Draw lines /////
    // line(image1, cv::Point(0, 255.3), cv::Point(640, 255.3), cv::Scalar(0, 0, 0));

    // cv::namedWindow("Matches");
    // cv::imshow("Matches", imageMatches);

    // Convert matched keypoints into 3D points using the depth images
    // std::vector<cv::Point2f> points1, points2;
    std::vector<cv::Point3f> points_xyz1, points_xyz2;
    compute3dPosition(imageD1, imageD2, keypoints1, keypoints2, points_xyz1, points_xyz2);
    std::cout << points_xyz1.size() << std::endl;
    std::cout << points_xyz2.size() << std::endl;

    cv::Mat p1 = cv::Mat(points_xyz1).t();
    cv::Mat p2 = cv::Mat(points_xyz2).t();
    cv::Mat mat;
    // mat = p1;
    // std::cout << mat << std::endl;

    int iterations = 0;
    double distance = 100.0;
    double Error = 0.0;
    // Note: this loop never executes as written, since iterations starts at 0
    // and the condition is iterations < 0.
    while (iterations < 0) {
        cv::Mat np1, np2, np1_, np2_;
        int num = 0;
        for (int i = 0; i < points_xyz1.size(); i++) {
            num++;
            cv::Mat temp = p1.col(i).clone();
            // temp.copyTo(np1.col(i));
            np1.push_back(temp);
            temp = p2.col(i).clone();
            np2.push_back(temp);
        }
        iterations++;
    }

    /*
    for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
        // Get the position of the left keypoint
        float xpoint1 = keypoints1[it->queryIdx].pt.x;
        float ypoint1 = keypoints1[it->queryIdx].pt.y;
        double z = imageD1.at<ushort>(ypoint1, xpoint1);
        // Get the position of the right keypoint
        float xpoint2 = keypoints2[it->trainIdx].pt.x;
        float ypoint2 = keypoints2[it->trainIdx].pt.y;
        double z1 = imageD2.at<ushort>(ypoint2, xpoint2);
        if (z != 0 && z1 != 0) {
            points_xyz1.push_back(cv::Point3f(xpoint1, ypoint1, z / 5000));
            points_xyz2.push_back(cv::Point3f(xpoint2, ypoint2, z1 / 5000));
            points1.push_back(cv::Point2f(xpoint1, ypoint1));
            points2.push_back(cv::Point2f(xpoint2, ypoint2));
            // cv::circle(image1, cv::Point(xpoint1, ypoint1), 5, cv::Scalar(1, 1, 255), 3);
            // cv::circle(image2, cv::Point(xpoint2, ypoint2), 5, cv::Scalar(1, 1, 255), 3);
        } else
            continue;
    }
    */

    std::ofstream p1out("p1.txt"), p2out("p2.txt");
    if (!p1out || !p2out)
        std::cout << "Cannot open the file!" << std::endl;
    // out << "fundemental = " << std::endl << std::endl;
    // out << fundemental << std::endl << std::endl;

    // Write the p1 locations (true locations) to p1.txt
    for (int i = 0; i < points_xyz1.size(); i++)
        p1out << points_xyz1[i].x << " " << points_xyz1[i].y << " " << points_xyz1[i].z << std::endl;
    // Write the p2 locations to p2.txt
    for (int i = 0; i < points_xyz2.size(); i++)
        p2out << points_xyz2[i].x << " " << points_xyz2[i].y << " " << points_xyz2[i].z << std::endl;
    // out << "p1 = " << std::endl << std::endl << points_xyz1 << std::endl;
    // out << "p2 = " << std::endl << std::endl << points_xyz2 << std::endl;
    // out.close();
    p1out.close();
    p2out.close();

    /////////// centroid ///////////
    // std::cout << "The centroid is:" << std::endl << centroid(points_xyz1) << std::endl;
    // std::cout << "The distance between each point and the centroid:" << std::endl
    //           << centroid_dist(points_xyz1, centroid(points_xyz1)) << std::endl;

    /*
    // Draw the epipolar lines
    std::vector<cv::Vec3f> lines1;
    cv::computeCorrespondEpilines(cv::Mat(points1), 1, fundemental, lines1);
    for (std::vector<cv::Vec3f>::const_iterator it = lines1.begin(); it != lines1.end(); ++it) {
        cv::line(image2, cv::Point(0, -(*it)[2] / (*it)[1]),
                 cv::Point(image2.cols, -((*it)[2] + (*it)[0] * image2.cols) / (*it)[1]),
                 cv::Scalar(255, 255, 255));
    }
    std::vector<cv::Vec3f> lines2;
    cv::computeCorrespondEpilines(cv::Mat(points2), 2, fundemental, lines2);
    for (std::vector<cv::Vec3f>::const_iterator it = lines2.begin(); it != lines2.end(); ++it) {
        cv::line(image1, cv::Point(0, -(*it)[2] / (*it)[1]),
                 cv::Point(image1.cols, -((*it)[2] + (*it)[0] * image1.cols) / (*it)[1]),
                 cv::Scalar(255, 255, 255));
    }

    // Display the images with epipolar lines
    cv::namedWindow("Right Image Epilines (RANSAC)");
    cv::imshow("Right Image Epilines (RANSAC)", image1);
    cv::namedWindow("Left Image Epilines (RANSAC)");
    cv::imshow("Left Image Epilines (RANSAC)", image2);
    cv::waitKey();
    */

    return 0;
}
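// compute3dPosition() is called above but not shown in this listing. This is a
// hedged sketch of what it plausibly does, reconstructed from the commented-out
// loop above: read each keypoint's 16-bit depth, drop pairs where either depth
// is zero, and scale raw depth by 1/5000 (the TUM RGB-D dataset convention).
// It assumes keypoints1[i] and keypoints2[i] are already match-aligned, which
// the six-argument call site suggests but does not prove.
void compute3dPosition(const cv::Mat &imageD1, const cv::Mat &imageD2,
                       const std::vector<cv::KeyPoint> &keypoints1,
                       const std::vector<cv::KeyPoint> &keypoints2,
                       std::vector<cv::Point3f> &points_xyz1,
                       std::vector<cv::Point3f> &points_xyz2) {
    size_t n = std::min(keypoints1.size(), keypoints2.size());
    for (size_t i = 0; i < n; ++i) {
        float x1 = keypoints1[i].pt.x, y1 = keypoints1[i].pt.y;
        float x2 = keypoints2[i].pt.x, y2 = keypoints2[i].pt.y;
        double z1 = imageD1.at<ushort>(y1, x1); // 16-bit depth lookup
        double z2 = imageD2.at<ushort>(y2, x2);
        if (z1 == 0 || z2 == 0) continue;       // skip pairs with missing depth
        points_xyz1.push_back(cv::Point3f(x1, y1, z1 / 5000));
        points_xyz2.push_back(cv::Point3f(x2, y2, z2 / 5000));
    }
}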
// Compute two-view PnP
void ComputePnP(const char* folder_name, int start_idx, int total_num, int jump_num) {
    int img_idx = start_idx;
    int img_num = total_num;
    int img_num2 = jump_num;
    char imgname[64];
    char pcdname[64];
    char imgname2[64];
    char pcdname2[64];

    ofstream out_pose, out_pose2;
    out_pose.open("RT.txt");
    out_pose2.open("invRT.txt");

    cv::Mat colorImage_1(KINECT_IMAGE_HEGIHT, KINECT_IMAGE_WIDTH, CV_8UC4);
    cv::Mat pointCloud_XYZforRGB_1(KINECT_IMAGE_HEGIHT, KINECT_IMAGE_WIDTH, CV_32FC3, cv::Scalar::all(0));
    cv::Mat colorImage_2(KINECT_IMAGE_HEGIHT, KINECT_IMAGE_WIDTH, CV_8UC4);
    cv::Mat pointCloud_XYZforRGB_2(KINECT_IMAGE_HEGIHT, KINECT_IMAGE_WIDTH, CV_32FC3, cv::Scalar::all(0));
    cv::Mat img1, img2;

    ///////////////////// PCL objects //////////////////////////
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1f(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2f(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::visualization::PCLVisualizer vislm;

    //////////////////// Find matched points between two RGB images ////////////////////////
    ifstream cam_in("CamPara.txt");
    cam_in >> CamIntrinsic[0][0] >> CamIntrinsic[0][1] >> CamIntrinsic[0][2]
           >> CamIntrinsic[1][0] >> CamIntrinsic[1][1] >> CamIntrinsic[1][2]
           >> CamIntrinsic[2][0] >> CamIntrinsic[2][1] >> CamIntrinsic[2][2];
    cam_in.close();
    const cv::Mat Camera_Matrix(3, 3, CV_64F, CamIntrinsic);
    const cv::Mat disCoef(1, 5, CV_64F, DisCoef);

    cv::Mat img_Matches;
    int numKeyPoints = 400;
    int numKeyPoints2 = 400;

    RobustMatcher rMatcher;
    cv::Ptr<cv::FeatureDetector> detector = new cv::FastFeatureDetector(0);
    cv::Ptr<cv::FeatureDetector> detector2 = new cv::FastFeatureDetector(0);
    cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor();
    cv::Ptr<cv::DescriptorMatcher> Matcher = new cv::BFMatcher(cv::NORM_HAMMING);
    rMatcher.setFeatureDetector(detector);
    rMatcher.setDescriptorExtractor(extractor);
    rMatcher.setDescriptorMatcher(Matcher);

    std::vector<cv::KeyPoint> img1_keypoints;
    cv::Mat img1_descriptors;
    std::vector<cv::KeyPoint> img2_keypoints;
    cv::Mat img2_descriptors;
    std::vector<cv::DMatch> Matches;

    ////////////////////// PCL rigid motion estimation ///////////////////////////
    Eigen::Matrix4f TotaltransforMation = Eigen::Matrix4f::Identity();
    Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity();
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr Transcloud_2(new pcl::PointCloud<pcl::PointXYZRGBA>);
    //////////////////////////////////////////////////////////////////////////////
    printf("From %d to %d\n", img_idx, img_idx + (img_num - 1) * img_num2);

    // Camera position: the origin plus three 0.2 m axis endpoints
    pcl::PointCloud<pcl::PointXYZ> Camera_pose0;
    pcl::PointCloud<pcl::PointXYZ> Camera_pose;
    Camera_pose0.push_back(pcl::PointXYZ(0, 0, 0));
    Camera_pose0.push_back(pcl::PointXYZ(0.2, 0, 0));
    Camera_pose0.push_back(pcl::PointXYZ(0, 0.2, 0));
    Camera_pose0.push_back(pcl::PointXYZ(0, 0, 0.2));

    ///////////// Looping ///////////////////////////
    int d = 0;
    for (int i = 0; i < img_num - 1; ++i, d += img_num2) {
        sprintf(imgname, "%s/rgb_%d.jpg", folder_name, img_idx + d);
        sprintf(pcdname, "%s/pcd_%d", folder_name, img_idx + d);
        sprintf(imgname2, "%s/rgb_%d.jpg", folder_name, img_idx + d + img_num2);
        sprintf(pcdname2, "%s/pcd_%d", folder_name, img_idx + d + img_num2);
        printf("comparing %s & %s\n", imgname, imgname2);

        //////////// Reading data /////////////////////
        img1 = cv::imread(imgname, CV_LOAD_IMAGE_GRAYSCALE);
        colorImage_1 = cv::imread(imgname, CV_LOAD_IMAGE_COLOR);
printf("reading\n"); FILE* fin = fopen(pcdname,"rb"); fread(pointCloud_XYZforRGB_1.data,1,pointCloud_XYZforRGB_1.step*pointCloud_XYZforRGB_1.rows,fin); fclose(fin); img2 = cv::imread(imgname2,CV_LOAD_IMAGE_GRAYSCALE); colorImage_2 = cv::imread(imgname2,CV_LOAD_IMAGE_COLOR); printf("reading\n"); fin = fopen(pcdname2,"rb"); fread(pointCloud_XYZforRGB_2.data,1,pointCloud_XYZforRGB_2.step*pointCloud_XYZforRGB_2.rows,fin); fclose(fin); cloud_1->clear(); cloud_2->clear(); Mat2PCL_XYZRGB_ALL(colorImage_1,pointCloud_XYZforRGB_1,*cloud_1); Mat2PCL_XYZRGB_ALL(colorImage_2,pointCloud_XYZforRGB_2,*cloud_2); ///////////////////Finding 2D features////////////////////////// img1_keypoints.clear(); img2_keypoints.clear(); detector->detect(img1,img1_keypoints); extractor->compute(img1,img1_keypoints,img1_descriptors); detector2->detect(img2,img2_keypoints); extractor->compute(img2,img2_keypoints,img2_descriptors); Matches.clear(); printf("cv::Matching\n"); rMatcher.match(Matches, img1_keypoints, img2_keypoints,img1_descriptors,img2_descriptors); //printf("drawing\n"); drawMatches(img1, img1_keypoints, img2, img2_keypoints,Matches, img_Matches, cv::Scalar::all(-1), cv::Scalar::all(-1),vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); cv::imshow("Good matches",img_Matches); cv::waitKey(10); ///////////////////Find corresponding 3D pints//////////////////////////////////////// //We should draw point cloud from the Matches... //////////////////3D geometry/////////////////////////////////////////////////////// ///////////////////////// //Compute PnP //////////////////////// cv::Mat rvec(1,3,cv::DataType<double>::type); cv::Mat tvec(1,3,cv::DataType<double>::type); cv::Mat rotationMatrix(3,3,cv::DataType<double>::type); vector<cv::Point2f> p2ds; vector<cv::Point3f> p3ds; Mat2PCL_XYZRGB_MATCH_PnP(pointCloud_XYZforRGB_1,p3ds,p2ds,img1_keypoints,img2_keypoints,Matches); printf("3D/2D points:%d,%d\n",p3ds.size(),p2ds.size()); if(p3ds.size() > 5){ cv::solvePnPRansac(p3ds,p2ds,Camera_Matrix,disCoef,rvec,tvec); cv::Rodrigues(rvec,rotationMatrix); PnPret2Mat4f(rotationMatrix,tvec,Ti); Ti = Ti.inverse(); } ///////////////////////Compose the translation, and transform the point cloud//////////////// Transcloud_2->clear(); std::cout<<"\nLocal motion from PnP is \n"<<Ti; TotaltransforMation = TotaltransforMation*Ti; pcl::transformPointCloud(*cloud_2,*Transcloud_2,TotaltransforMation); std::cout<<"\nTotal motion from PnP is \n"<<TotaltransforMation<<endl; //Add camera coordinate pcl::transformPointCloud(Camera_pose0,Camera_pose,TotaltransforMation); sprintf(pcdname2,"X%d",img_idx+d+img_num2); vislm.addLine(Camera_pose[0],Camera_pose[1],255,0,0,pcdname2); sprintf(pcdname2,"Y%d",img_idx+d+img_num2); vislm.addLine(Camera_pose[0],Camera_pose[2],0,255,0,pcdname2); sprintf(pcdname2,"Z%d",img_idx+d+img_num2); vislm.addLine(Camera_pose[0],Camera_pose[3],0,0,255,pcdname2); if(i==0){ vislm.addPointCloud(cloud_1->makeShared(),pcdname); //PCLviewer.showCloud(cloud_1,"init"); out_pose<<Eigen::Matrix4f::Identity()<<endl; } vislm.addPointCloud(Transcloud_2->makeShared(),pcdname2); out_pose<<TotaltransforMation<<endl; out_pose2<<TotaltransforMation.inverse()<<endl; } vislm.resetCamera(); vislm.spin(); out_pose.close(); out_pose2.close(); }