Example #1
// Compute the essential matrix from a set of paired keypoints
void RobustMatcher::computeEssentialMat(const KeyPointVec &kpts1, const KeyPointVec &kpts2,
  const cv::Mat &K, cv::Mat &essentialMat, cv::Mat &inliers_mask)
{
  std::vector<cv::Point2f> kpts1_pts, kpts2_pts;

  // copy the keypoint coordinates into plain point vectors
  for (size_t i = 0; i < kpts1.size(); ++i)
  {
    kpts1_pts.push_back(kpts1[i].pt);
    kpts2_pts.push_back(kpts2[i].pt);
  }

  // default camera parameters (unused below; the intrinsics are read from K instead)
  double f = 55;                           // focal length in mm
  double sx = 22.3, sy = 14.9;             // sensor size in mm
  double width = 640, height = 480;        // image size in pixels

  double focal = K.at<double>(0,0);
  cv::Point2d pp = cv::Point2d(K.at<double>(0,2),K.at<double>(1,2));
  int method = cv::RANSAC;
  double prob = 0.999;
  
  // compute opencv essential mat
  essentialMat = 
    findEssentialMat(kpts1_pts, kpts2_pts, focal, pp, method, prob, ransac_thresh_, inliers_mask);

}
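The hard-coded sensor parameters in the snippet above go unused; a minimal sketch, assuming a RobustMatcher instance named matcher and keypoint vectors kpts1/kpts2 from its matching step, of how they could be turned into the intrinsic matrix K that the function expects:

// Hypothetical usage sketch: build K from the default sensor parameters the
// snippet above leaves unused (55 mm lens, 22.3 x 14.9 mm sensor, 640 x 480 image).
// "matcher", "kpts1" and "kpts2" are assumed, not part of the original example.
double f = 55, sx = 22.3, sy = 14.9, width = 640, height = 480;
cv::Mat K = (cv::Mat_<double>(3, 3) <<
    f * width / sx,  0,               width / 2.0,     // fx  0  cx
    0,               f * height / sy, height / 2.0,    //  0 fy  cy
    0,               0,               1);

cv::Mat E, inliers_mask;
matcher.computeEssentialMat(kpts1, kpts2, K, E, inliers_mask);
// non-zero rows of inliers_mask mark the RANSAC inlier correspondences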
Example #2
	bool Epipolar::isValidPair(vector<DMatch>& matches, vector<KeyPoint>& key1, vector<KeyPoint>& key2, Mat& cam, Mat& distor, Mat& ess, Mat& inliersMask, double inlierPercent){
		vector<Point2f>pts1, pts2;
		inliersMask.release();
		Mat rot, trans;
		size_t n = matches.size();
		Utility::getPointMatches(key1, key2, matches, pts1, pts2);
		// after undistortPoints() (with no P argument) the points are in normalized
		// image coordinates, hence unit focal length and zero principal point below
		undistortPoints(pts1, pts1, cam, distor);
		undistortPoints(pts2, pts2, cam, distor);
		ess = findEssentialMat(pts1, pts2, 1.0, Point2d(0, 0), RANSAC, 0.999, 1.25, inliersMask);
		int inliers = recoverPose(ess, pts1, pts2, rot, trans, 1.0, Point2d(0, 0), inliersMask);
		return ((double)inliers / n) > inlierPercent;
	}
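A minimal usage sketch for isValidPair, assuming a default-constructible Epipolar, two hypothetical input images img1/img2, and placeholder intrinsics:

// Hypothetical usage: detect and match ORB features, then let isValidPair()
// decide whether the pair passes a 50% essential-matrix inlier test.
cv::Ptr<cv::ORB> orb = cv::ORB::create();
std::vector<cv::KeyPoint> key1, key2;
cv::Mat desc1, desc2;
orb->detectAndCompute(img1, cv::noArray(), key1, desc1);
orb->detectAndCompute(img2, cv::noArray(), key2, desc2);

cv::BFMatcher bf(cv::NORM_HAMMING, true);        // cross-checked matching
std::vector<cv::DMatch> matches;
bf.match(desc1, desc2, matches);                 // desc1 = query, desc2 = train

cv::Mat cam = (cv::Mat_<double>(3, 3) << 700, 0, 320, 0, 700, 240, 0, 0, 1);  // assumed intrinsics
cv::Mat distor = cv::Mat::zeros(1, 5, CV_64F);   // assume no lens distortion
cv::Mat ess, inliersMask;
Epipolar epi;
bool ok = epi.isValidPair(matches, key1, key2, cam, distor, ess, inliersMask, 0.5);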
Example #3
	void Epipolar::calcEssMatrix(vector<DMatch>& goodMatches, vector<KeyPoint>& keys1, vector<KeyPoint>& keys2, Mat& cam, Mat& distor, Mat& ess, double inlierPercent){
		int inliers = 0;
		vector<Point2f>pts1, pts2;
		Mat rot, trans, inlierMask;
		size_t n = goodMatches.size();
		// DMatch convention: queryIdx indexes the first keypoint set (keys1),
		// trainIdx the second one (keys2)
		for (size_t j = 0; j < n; j++)
		{
			pts1.push_back(keys1[goodMatches[j].queryIdx].pt);
			pts2.push_back(keys2[goodMatches[j].trainIdx].pt);
		}
		// points are in normalized coordinates after undistortPoints(), so focal = 1.0, pp = (0, 0)
		undistortPoints(pts1, pts1, cam, distor);
		undistortPoints(pts2, pts2, cam, distor);
		ess = findEssentialMat(pts1, pts2, 1.0, Point2d(0, 0), RANSAC, 0.999, 1.25, inlierMask);
		inliers = recoverPose(ess, pts1, pts2, rot, trans, 1.0, Point2d(0, 0), inlierMask);
		// signal failure to the caller by returning an empty matrix
		if ((double)inliers / n < inlierPercent){
			ess.release();
		}
	}
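Because calcEssMatrix signals rejection by releasing ess, a caller checks for an empty matrix afterwards; a minimal sketch (camera matrix, distortion, matches and keypoints are placeholders as before):

// Hypothetical usage: compute E from pre-filtered matches and test whether
// it survived the inlier-percentage check.
cv::Mat cam = (cv::Mat_<double>(3, 3) << 700, 0, 320, 0, 700, 240, 0, 0, 1);
cv::Mat distor = cv::Mat::zeros(1, 5, CV_64F);
cv::Mat ess;
Epipolar epi;
epi.calcEssMatrix(goodMatches, key1, key2, cam, distor, ess, 0.5);
if (ess.empty())
    std::cerr << "essential matrix rejected: inlier ratio below 50%" << std::endl;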
Example #4
bool find_transform(Mat& K, vector<Point2f>& p1, vector<Point2f>& p2, Mat& R, Mat& T, Mat& mask)
{
    // intrinsics: fx = K(0,0), fy = K(1,1), cx = K(0,2), cy = K(1,2)
    double focal_length = 0.5 * (K.at<double>(0, 0) + K.at<double>(1, 1));
    Point2d principal_point(K.at<double>(0, 2), K.at<double>(1, 2));

    Mat E = findEssentialMat(p1, p2, focal_length, principal_point, RANSAC, 0.999, 1.0, mask);
    if (E.empty()) return false;

    double feasible_count = countNonZero(mask);
    cout << (int)feasible_count << " -in- " << p1.size() << endl;
    // require at least 15 RANSAC inliers and an inlier ratio above 60%
    if (feasible_count <= 15 || (feasible_count / p1.size()) < 0.6)
        return false;

    int pass_count = recoverPose(E, p1, p2, R, T, focal_length, principal_point, mask);

    // require at least 70% of the inliers to pass the cheirality check in recoverPose
    if (((double)pass_count) / feasible_count < 0.7)
        return false;

    return true;
}
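A minimal usage sketch for find_transform; the point vectors and camera matrix below are placeholders, not part of the original example:

// Hypothetical usage: estimate the relative pose between two views from
// matched pixel coordinates and an assumed camera matrix K.
vector<Point2f> p1, p2;                 // filled by feature matching (omitted here)
Mat K = (Mat_<double>(3, 3) << 700, 0, 320, 0, 700, 240, 0, 0, 1);
Mat R, T, mask;
if (find_transform(K, p1, p2, R, T, mask))
    cout << "R = " << R << "\nT = " << T << endl;
else
    cout << "pair rejected by the inlier checks" << endl;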
Example #5
void add_Points( cv::Mat R[],cv::Mat t[],const std::vector< cv::Point2f > points_1[],
        const std::vector< cv::Point2f > points_2[],cv::Mat& points3D,const int add,std::vector< cv::Point2f > mask3D[],cv::Mat& img_matches){

    // obtain an inlier mask on the new matches via the two-view epipolar constraint;
    // camIntrinsic is the camera matrix defined elsewhere in the file, and the
    // returned essential matrix itself is not needed here, only the mask
    cv::Mat mask;
    findEssentialMat(points_1[add-1], points_2[add-1], camIntrinsic, cv::RANSAC, 0.999, 0.25, mask);
    std::vector< cv::Point2f > pointsx,pointsComparex;
    for(int i=0;i<points_1[add-1].size();i++){
        if (mask.at<uchar>(i,0) != 0){
            pointsx.push_back(points_1[add-1][i]);
            pointsComparex.push_back(points_2[add-1][i]);
        }
    }
    std::cout<<"mask result: "<<points_1[add-1].size()<<"	"<<pointsx.size()<<std::endl;

    
    // draw red circles on inlier matches, blue circles on matches rejected by the mask
    for (int j=0; j<points_1[add-1].size();j++){
        if (mask.at<uchar>(j,0) != 0)
            circle(img_matches, points_1[add-1][j], 10, cv::Scalar(0,0,255), 3);
        else
            circle(img_matches, points_1[add-1][j], 10, cv::Scalar(255,0,0), 3);
    }
    cv::namedWindow("Matches", cv::WINDOW_NORMAL);
    cv::imshow("Matches", img_matches);
    cv::waitKey();
       
    // form projection matrix of the 2nd last camera
    cv::Mat projMatr1(3,4,CV_32F);
    cv::Mat Rt1(3,4,CV_32F);  // Rt = [R | t]
    R[add-1].convertTo(R[add-1],CV_32F);
    t[add-1].convertTo(t[add-1],CV_32F);
    hconcat(R[add-1], t[add-1], Rt1);  // Rt1 = [R | t]
    projMatr1 = camIntrinsic * Rt1;    // camIntrinsic must also be CV_32F for this product
    // form projection matrix of the last camera
    cv::Mat projMatr2(3,4,CV_32F);
    cv::Mat Rt(3,4,CV_32F);  // Rt = [R | t]
    R[add].convertTo(R[add],CV_32F);
    t[add].convertTo(t[add],CV_32F);
    hconcat(R[add], t[add], Rt);  // Rt concatenate
    projMatr2 = camIntrinsic * Rt;

    // triangulate only the masked (inlier) matches; triangulatePoints allocates the output
    cv::Mat points4Dtemp = cv::Mat_<float>(4, (int)pointsx.size());
    cv::triangulatePoints(projMatr1, projMatr2, pointsx, pointsComparex, points4Dtemp);
    
    // fill the new columns of mask3D: the 2D observations for the two views of this
    // pair, and a (-1,-1) placeholder for every other view (m = number of views)
    cv::Point2f placeholder(-1, -1);
    for (int i = 0; i < points4Dtemp.cols; i++){
        for (int j = 0; j < m; j++)
        {
            if (j == (add-1))
                mask3D[j].push_back(pointsx[i]);
            else if (j == add)
                mask3D[j].push_back(pointsComparex[i]);
            else
                mask3D[j].push_back(placeholder);
        }
    }

    // dehomogenize: divide each 4D column by its w component to get a 3D point
    cv::Mat points3Dtemp(3, (int)pointsx.size(), CV_32F);
    for (int i = 0; i < (int)pointsx.size(); i++)
    {
        float w = points4Dtemp.at<float>(3, i);
        points3Dtemp.at<float>(0, i) = points4Dtemp.at<float>(0, i) / w;
        points3Dtemp.at<float>(1, i) = points4Dtemp.at<float>(1, i) / w;
        points3Dtemp.at<float>(2, i) = points4Dtemp.at<float>(2, i) / w;
    }
    hconcat(points3D, points3Dtemp, points3D);  // append the new 3D points as extra columns
    n = points3D.cols;                          // n (defined elsewhere) tracks the total point count
}
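add_Points relies on several variables defined elsewhere in its source file; a sketch of plausible file-scope declarations for them (the values, and m in particular, are assumptions):

// Hypothetical file-scope declarations assumed by add_Points().
// The intrinsic values and the number of views are placeholders.
const int m = 10;                       // total number of views in the sequence
int n = 0;                              // running count of reconstructed 3D points
cv::Mat camIntrinsic = (cv::Mat_<float>(3, 3) <<
    700.f, 0.f,   320.f,
    0.f,   700.f, 240.f,
    0.f,   0.f,   1.f);                 // CV_32F so it can be multiplied with the [R | t] matrices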