Exemplo n.º 1
0
int main(int argc, char* argv[])
{
	cv::Mat cvmGray1 = imread("rgb0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Mat cvmGray2 = imread("rgb1.bmp", CV_LOAD_IMAGE_GRAYSCALE);

	SURF surf(100,4,2,false,true);
	// detecting keypoints & computing descriptors
	FREAK* pFreak = new FREAK(); 

	vector<KeyPoint> vKeypoints1; 
	vector<KeyPoint> vKeypoints2; 

	Mat cvmDescriptor1;
	Mat cvmDescriptor2;

	vector<DMatch> vMatches;
	BruteForceMatcher<HammingLUT> matcher;  


	
	surf(cvmGray1, cv::Mat(), vKeypoints1);
	pFreak->compute( cvmGray1, vKeypoints1, cvmDescriptor1 );

	surf(cvmGray2, cv::Mat(), vKeypoints2);
	double t = (double)getTickCount();

	pFreak->compute( cvmGray2, vKeypoints2, cvmDescriptor2 );
	t = ((double)getTickCount() - t)/getTickFrequency();

	matcher.match(cvmDescriptor1, cvmDescriptor2, vMatches);  

	

	std::cout << "whole time [s]: " << t << std::endl;	
    sort (vMatches.begin(), vMatches.end(), sort_pred);
    vector<DMatch> closest;

    int nSize = (int)vMatches.size();//>300?300:matches.size();
    cout << "matched point pairs: " << nSize << endl;
	for( int i=0;i < 100;i++) {
        closest.push_back( vMatches[i] );
        cout << vMatches[i].distance << " ";
    }
    // drawing the results
    Mat cvmImgMatches;

	cout << "FOUND " << vKeypoints1.size() << " keypoints on first image" << endl;
	cout << "FOUND " << vKeypoints2.size() << " keypoints on second image" << endl;

    cv::drawMatches( cvmGray1, vKeypoints1, cvmGray2, vKeypoints2, closest, cvmImgMatches);
    
    namedWindow("matches", 0);
    imshow("matches", cvmImgMatches);
    waitKey(0);

    return 0;
}
Exemplo n.º 2
0
// Estimate the homography mapping `orig` onto `test` using FAST corners,
// FREAK binary descriptors, Hamming brute-force matching, and RANSAC.
// Returns an empty Mat when too few good correspondences are found.
Mat getHomography(Mat orig, Mat test) {
    // Detect FAST corners (intensity threshold 10) in both images.
    vector<KeyPoint> kp_orig, kp_test;
    FAST(orig, kp_orig, 10);
    FAST(test, kp_test, 10);
    __android_log_write(ANDROID_LOG_INFO, "vision.cpp", "Keypoints computed");

    // TODO remove this
    char temp[50];
    // Cast to int: passing size_t through "%d" is undefined behavior on LP64.
    sprintf(temp, "%d, %d", (int)kp_test.size(), (int)kp_orig.size());
    __android_log_write(ANDROID_LOG_INFO, "vision.cpp-size", temp);

    // Compute FREAK descriptors (may drop keypoints near the border).
    FREAK ext;
    Mat desc_orig, desc_test;
    ext.compute(orig, kp_orig, desc_orig);
    ext.compute(test, kp_test, desc_test);

    // Hamming distance matcher — FREAK descriptors are binary strings.
    BFMatcher matcher(NORM_HAMMING);
    vector<DMatch> matches;
    matcher.match(desc_orig, desc_test, matches);

    __android_log_write(ANDROID_LOG_INFO, "vision.cpp", "Matching done");

    // Find the min/max match distance (match() yields one DMatch per
    // query row, so matches.size() == desc_orig.rows).
    double min_dist = 100, max_dist = 0;
    for(int i=0; i<desc_orig.rows; i++) {
        double dist = matches[i].distance;
        if(dist < min_dist) min_dist = dist;
        if(dist > max_dist) max_dist = dist;
    }

    // Keep matches within 3x the best distance — a common heuristic.
    double acceptable_dist = 3*min_dist;
    vector<DMatch> good_matches;
    for(int i=0; i<desc_orig.rows; i++) {
        if(matches[i].distance < acceptable_dist) {
            good_matches.push_back(matches[i]);
        }
    }

    // findHomography requires at least 4 point correspondences;
    // bail out instead of letting OpenCV throw.
    if(good_matches.size() < 4) {
        __android_log_write(ANDROID_LOG_INFO, "vision.cpp",
                            "Too few good matches for homography");
        return Mat();
    }

    vector<Point2f> orig_pts;
    vector<Point2f> test_pts;

    for( size_t i = 0; i < good_matches.size(); i++ ) {
        //-- Get the keypoints from the good matches.
        DMatch match = good_matches[i];
        KeyPoint kp1 = kp_orig[ match.queryIdx ];
        // BUG FIX: trainIdx indexes the *test* image's keypoints; the
        // original wrongly looked it up in kp_orig.
        KeyPoint kp2 = kp_test[ match.trainIdx ];
        orig_pts.push_back( kp1.pt );
        test_pts.push_back( kp2.pt );
    }
    Mat H = findHomography( orig_pts, test_pts, CV_RANSAC );
    __android_log_write(ANDROID_LOG_INFO, "vision.cpp", "Computed Homography");

    return H;
}
Exemplo n.º 3
0
int main(int argc, char *argv[])
{
    if(argc != 2) {
        help(argv);
        return -1;
    }

    Mat img;
    if(!load_image(argv[1], img)) {
        cout << "Error loading image: " << argv[1] << endl;
        return -1;
    }

    vector<KeyPoint> keypoints;

    // Dense detector
    // The detector generates several levels of features.
    // Feature scale, step size, and size of boundary are multiplied by "featureScaleMul".
    DenseFeatureDetector detector(
        initFeatureScale,
        featureScaleLevels,
        featureScaleMul,
        initXyStep,
        initImgBound,
        false,        // varyXyStepWithScale 
        true          // varyImgBoundWithScale
    );

    // Dense sampling
    detector.detect(img, keypoints);

    //descriptor (64d vectors)
    Mat descriptors;

    //extractor
    FREAK extractor;
    extractor.compute(img, keypoints, descriptors);

    //cout << format(descriptors, "csv") << endl;
    
    int num_rows = descriptors.rows;
    int num_columns = descriptors.cols;
    for(int row = 0; row < num_rows; row++) {
        for(int col = 0; col < num_columns; col++) {
            cout << (char)descriptors.data[row * num_columns + col];
        }
    }

    return 0;
}
Exemplo n.º 4
0
int main(int argc, char** argv)
{
    // Demo entry point: mode 0 = static image with mouse interaction,
    // mode 1 = live video feature tracking, mode 2 = dataset playback.
    int flag_use_image = 0;
    if( argc != 2 )
      {
        std::cout<< "Usage: ./init num" << std::endl;
        std::cout<< "num: 0 - image" << std::endl
                 << "     1 - video" << std::endl
                 << "     2 - dataset" << std::endl;
        return -1;
    }
    else
    {
        std::string val = argv[1];
        if(val == "0")
        {
            // Default mode: static image (flag stays 0).
        }
        else if(val == "1")
        {
            flag_use_image = 1;
        }
        else if(val == "2")
        {
            flag_use_image = 2;
        }
        else
        {
            std::cout<< "num error" << std::endl;
        }
    }

    std::string winName = "Image";
    namedWindow(winName, WINDOW_NORMAL);
    // mat_canvas is a file-level global shared with the mouse callback.
    mat_canvas = imread( "data/book.jpg");

    if(mat_canvas.data == NULL)
    {
        std::cout<< "Image is not opened." << std::endl;
        return -1;
    }

    //-- Static image mode: interact via mouse, just keep refreshing.
    if(flag_use_image == 0)
    {
        setMouseCallback(winName, mouseEvent);

        while(1)
        {
            imshow(winName, mat_canvas );
            waitKey(30);
        }
    }
    //-- use dataset
    else if(flag_use_image == 2)
    {
        useDataset();

        while(1)
        {
            imshow(winName, mat_canvas );
            waitKey(30);
        }
    }
    else // video input: tracking features
    {
        VideoCapture cap;

        cap.open(1);                 // camera index 1 (external camera)
        if(!cap.isOpened())  // check if we succeeded
            return -1;
        cap.set(CV_CAP_PROP_FRAME_WIDTH, 800);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 600);

        namedWindow("Keypoints", WINDOW_NORMAL);
        Mat mat_image;
        int num_vecKeypoints;
        int num_trackingPoints = 50;  // cap on keypoints kept per frame
        Mat mat_descriptors;

        //-- Step 1: Detect the keypoints using Detector
        OrbFeatureDetector detector;
        FREAK extractor;

        while(1)
        {
            cap >> mat_image;

            std::vector<KeyPoint> vec_keypoints, vec_goodKeypoints;

            detector.detect( mat_image, vec_keypoints );
            num_vecKeypoints = vec_keypoints.size();

            // Keep the strongest responses first, then truncate to the
            // tracking budget.
            std::sort(vec_keypoints.begin(), vec_keypoints.end(),
                      jlUtilities::sort_feature_response);

            if(num_vecKeypoints > num_trackingPoints)
            {
                num_vecKeypoints = num_trackingPoints;
                vec_keypoints.erase(vec_keypoints.begin() + num_vecKeypoints,
                                   vec_keypoints.end());
            }

            extractor.compute( mat_image, vec_keypoints, mat_descriptors );

            // write mat to file
            std::string fileName = "mat_descriptors.yml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "descriptors" << mat_descriptors;
            fs.release();
            std::cout<< fileName << " is generated." << std::endl;

            //-- Draw keypoints
            Mat mat_kpImage;

            drawKeypoints( mat_image, vec_keypoints, mat_kpImage,
                           Scalar::all(-1), DrawMatchesFlags::DEFAULT );

            // BUG FIX: iterate over the keypoints actually kept
            // (num_vecKeypoints). The original looped to
            // num_trackingPoints (50) and read past the end of
            // vec_keypoints whenever fewer than 50 were detected.
            for (int i=0; i<num_vecKeypoints; i++)	{
                cv::circle(mat_kpImage,
                    vec_keypoints[i].pt,	// center
                    3,							// radius
                    cv::Scalar(0,0,255),		// color
                    -1);						// negative thickness=filled

                char szLabel[50];
                sprintf(szLabel, "%d", i);
                putText (mat_kpImage, szLabel, vec_keypoints[i].pt,
                    cv::FONT_HERSHEY_PLAIN, // font face
                    1.0,					// font scale
                    cv::Scalar(255,0,0),	// font color
                    1);						// thickness
            }

            //-- Show detected (drawn) keypoints
            imshow("Keypoints", mat_kpImage );

            waitKey(30);
        }
    }

    return 0;
}
Exemplo n.º 5
0
int main( int argc, char** argv ) {
    // check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
    // for OpenCV general detection/matching framework details

    if( argc != 3 ) {
        help(argv);
        return -1;
    }

    // Load images
    Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgA.data ) {
        std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
        return -1;
    }

    Mat imgB = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgB.data ) {
        std::cout << " --(!) Error reading image " << argv[2] << std::endl;
        return -1;
    }

    std::vector<KeyPoint> keypointsA, keypointsB;
    Mat descriptorsA, descriptorsB;
    std::vector<DMatch> matches;

    // DETECTION
    // Any openCV detector such as
    SurfFeatureDetector detector(1000,4);

    // DESCRIPTOR
    // Our proposed FREAK descriptor
    // (roation invariance, scale invariance, pattern radius corresponding to SMALLEST_KP_SIZE,
    // number of octaves, optional vector containing the selected pairs)
    // FREAK extractor(true, true, 22, 4, std::vector<int>());
    FREAK extractor;

    // MATCHER
    // The standard Hamming distance can be used such as
    // BruteForceMatcher<Hamming> matcher;
    // or the proposed cascade of hamming distance using SSSE3
    BruteForceMatcher<Hamming> matcher;

    // detect
    double t = (double)getTickCount();
    detector.detect( imgA, keypointsA );
    detector.detect( imgB, keypointsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "detection time [s]: " << t/1.0 << std::endl;

    std::cout << " nb key points : " << keypointsA.size() << "," <<keypointsB.size() << std::endl;
    // extract
    t = (double)getTickCount();
    extractor.compute( imgA, keypointsA, descriptorsA );
    extractor.compute( imgB, keypointsB, descriptorsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "extraction time [s]: " << t << std::endl;

    // match
    t = (double)getTickCount();
    matcher.match(descriptorsA, descriptorsB, matches);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "matching time [s]: " << t << std::endl;

    // Draw matches
    Mat imgMatch;
    drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);

    namedWindow("matches", CV_WINDOW_KEEPRATIO);
    imshow("matches", imgMatch);
    waitKey(0);
}