int main(int argc, char** argv){
    float maximum_x, maximum_y;
    float minimum_y, minimum_x;

    if(argc != 3){
        cout << "error input the file names" << endl;
        return 0;
    }

    const char *sz_reference_file_path_part1 ="/Users/yangyang/Github/drone/Server/upload/files/";
    char *sz_reference_file_path = NULL;
    if(NULL == sz_reference_file_path){
        sz_reference_file_path = (char*) malloc(1 + strlen(sz_reference_file_path_part1) + strlen(argv[1]));
    }
    strcpy(sz_reference_file_path, sz_reference_file_path_part1);
    strcat(sz_reference_file_path, argv[1]);

    //Mat reference_object = imread("reference.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    //Mat reference_object = imread("/home/webmaster/drone/Server/upload/files/reference.jpg",1); 
    Mat reference_object = imread(sz_reference_file_path, 1); 

    if(sz_reference_file_path != NULL){
        free(sz_reference_file_path);
        sz_reference_file_path = NULL;
    }

    //VideoCapture cap(0); // open the video camera no. 0
    //if (!cap.isOpened())  // if not success, exit program
    //{
        //cout << "Cannot open the video cam" << endl;
        //return -1;
    //}
    Mat image, image_orig;
    //cap.read(image);
    
    const char *sz_image_file_path_part1 = "/Users/yangyang/Github/drone/Server/upload/files/";
    char *sz_image_file_path = NULL;
    if(NULL == sz_image_file_path){
        sz_image_file_path = (char*) malloc(1 + strlen(sz_image_file_path_part1) + strlen(argv[1]));
    }
    strcpy(sz_image_file_path, sz_image_file_path_part1);
    strcat(sz_image_file_path, argv[2]);

    image_orig = imread(sz_image_file_path, 1);

    image = image_orig;
    //cvtColor(image_orig, image, CV_RGB2GRAY);

    Mat des1, des2;
    SURFDetector surf; 
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    std::vector<DMatch> matches;

    surf(reference_object, Mat(), keypoints_object, des1);
    surf(image, Mat(), keypoints_scene, des2);
    
    SURFMatcher<BFMatcher> matcher;
    matcher.match(des1, des2, matches);

    double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < des1.rows; i++ )
    { double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    std::vector< DMatch > good_matches;

    for( int i = 0; i < des1.rows; i++ )
    { if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i]); }
    }

    Mat img_matches;
    //drawMatches(reference_object, keypoints_object, image, keypoints_scene,
               //good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               //vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( int i = 0; i < (int)good_matches.size(); i++ )
    {
    //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    Mat H = findHomography( obj, scene, CV_RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( reference_object.cols, 0 );
    obj_corners[2] = cvPoint( reference_object.cols, reference_object.rows ); obj_corners[3] = cvPoint( 0, reference_object.rows );
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform( obj_corners, scene_corners, H);

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    //line( img_matches, scene_corners[0] + Point2f( reference_object.cols, 0), scene_corners[1] + Point2f( reference_object.cols, 0), Scalar(0, 255, 0), 4 );
    //line( img_matches, scene_corners[1] + Point2f( reference_object.cols, 0), scene_corners[2] + Point2f( reference_object.cols, 0), Scalar( 0, 255, 0), 4 );
    //line( img_matches, scene_corners[2] + Point2f( reference_object.cols, 0), scene_corners[3] + Point2f( reference_object.cols, 0), Scalar( 0, 255, 0), 4 );
    //line( img_matches, scene_corners[3] + Point2f( reference_object.cols, 0), scene_corners[0] + Point2f( reference_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    
    //-- Drow lines on original corners
    line( image, scene_corners[0] , scene_corners[1] , Scalar(255, 0, 0), 4 );//red
    line( image, scene_corners[1] , scene_corners[2] , Scalar( 0, 0, 255), 4 );//blue
    line( image, scene_corners[2] , scene_corners[3] , Scalar( 0, 255, 0), 4 );//green
    line( image, scene_corners[3] , scene_corners[0] , Scalar( 0, 0, 0), 4 );//black
    //cout<<"scene_corners[0] = " << scene_corners[0] << endl;
    //cout<<"scene_corners[1] = " << scene_corners[1] << endl;
    //cout<<"scene_corners[2] = " << scene_corners[2] << endl;
    //cout<<"scene_corners[3] = " << scene_corners[3] << endl;
    

    //imshow( "Good Matches & Object detection", img_matches );
    //imshow("object in orginal image", image_orig);

    /**
     * for server to receive
     */
    //cout<< scene_corners[0].x << endl;//bottom right
    //cout<< scene_corners[0].y << endl;
    //cout<< scene_corners[1].x << endl;//top right
    //cout<< scene_corners[1].y << endl;
    //cout<< scene_corners[2].x << endl;//top left
    //cout<< scene_corners[2].y << endl;
    //cout<< scene_corners[3].x << endl;//bottom left
    //cout<< scene_corners[3].y << endl;

    float x[4] = {scene_corners[0].x, scene_corners[1].x, scene_corners[2].x, scene_corners[3].x};
    float y[4] = {scene_corners[0].y, scene_corners[1].y, scene_corners[2].y, scene_corners[3].y};
    maximum_x = find_max(x);
    minimum_x = find_mim(x);
    minimum_y = find_mim(y);
    maximum_y = find_max(y);

    //cout << "maximum x is:"<<maximum_x << endl;
    //cout << "maximum y is:"<< maximum_y << endl;

    //cout << "minimum x is:" <<minimum_x << endl;
    //cout << "minimum y is:"<<minimum_y << endl;

    Rect2d roi;
    roi.x = minimum_x, roi.y = minimum_y, roi.width = (maximum_x - minimum_x), roi.height = (maximum_y - minimum_y);
    Mat img1=image_orig(roi);
    //imshow("roi", img1);



    //Ptr<Tracker> tracker = Tracker::create("KCF");
    //tracker -> init(frame,roi);
    //printf("start the tracking process");

    waitKey(0);
    return 0;
}
Example #2
0
////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// use cpu findHomography interface to calculate the transformation matrix
int main(int argc, char* argv[])
{
  std::vector<Mat> frames;

  // read input
  for (int i = 1; i < argc; i++) {
    Mat image = imread(argv[i], CV_LOAD_IMAGE_COLOR);
    if (!image.data) {
      std::cerr << "Ignoring image " << i << std::endl;
      continue;
    }
    if (i == 1) {
      Size orgSize = image.size();
      float orgRatio = orgSize.width / 1.0 / orgSize.height;
      if (!(ratio > 0)) {
        ratio = orgRatio;
      }
      outputSize = Size(lines * ratio, lines);
      if (orgRatio > ratio) {
        size.width = (1.0 + crop) * outputSize.width * orgRatio;
        size.height = (1.0 + crop) * outputSize.width;
      } else {
        size.width = (1.0 + crop) * outputSize.width;
        size.height = (1.0 + crop) * outputSize.width / orgRatio;
      }
      size.width += size.width % 2;
      size.height += size.height % 2;
      std::cout << "Input size: " << orgSize << std::endl;
      std::cout << "Process size: " << size << std::endl;
      std::cout << "Output size: " << outputSize << std::endl;
    }
    Mat frame;
    resize(image, frame, size);
    frames.push_back(image/*frame*/);
  }

    double surf_time = 0.;

    //declare input/output
    std::vector<KeyPoint> keypoints1, keypoints2;
    std::vector<DMatch> matches;

    UMat _descriptors1, _descriptors2;
    Mat descriptors1 = _descriptors1.getMat(ACCESS_RW),
        descriptors2 = _descriptors2.getMat(ACCESS_RW);

    //instantiate detectors/matchers
    SURFDetector surf;

    SURFMatcher<BFMatcher> matcher;
    UMat img1, img2;

  for (int k = 1; k < frames.size(); k++) {

    cvtColor(frames[k - 1], img1, CV_BGR2GRAY);
    cvtColor(frames[k], img2, CV_BGR2GRAY);

    for (int i = 0; i <= LOOP_NUM; i++)
    {
        surf(img1.getMat(ACCESS_READ), Mat(), keypoints1, descriptors1);
        surf(img2.getMat(ACCESS_READ), Mat(), keypoints2, descriptors2);
        matcher.match(descriptors1, descriptors2, matches);
    }
    std::cout << "FOUND " << keypoints1.size() << " keypoints on image " << (k - 1) << std::endl;
    std::cout << "FOUND " << keypoints2.size() << " keypoints on image " << k << std::endl;

    Mat H;
    std::vector<Point2f> corner;
    Mat img_matches = drawGoodMatches(img1.getMat(ACCESS_READ), img2.getMat(ACCESS_READ), keypoints1, keypoints2, matches, corner, H);

    Mat out;
    warpPerspective(frames[k - 1], out, H, frames[k].size(), INTER_CUBIC);

    std::ostringstream oss1, oss2;
    oss1 << "SURF_" << k << "_imag.jpg";
    oss2 << "SURF_" << k << "_warp.jpg";
    imwrite(oss1.str(), frames[k]); 
    imwrite(oss2.str(), out); 

  }

    return EXIT_SUCCESS;
}
Example #3
0
////////////////////////////////////////////////////
// This program demonstrates the usage of SURF_OCL.
// use cpu findHomography interface to calculate the transformation matrix
//
// Loads two hard-coded images (imageA.png / imageB.png), times SURF
// detection + matching over LOOP_NUM iterations, draws the good matches and
// saves/shows the result.
int main(int argc, char* argv[])
{
    /*const char* keys =
        "{ h help     | false            | print help message  }"
        "{ l left     | box.png          | specify left image  }"
        "{ r right    | box_in_scene.png | specify right image }"
        "{ o output   | SURF_output.jpg  | specify output save path }"
        "{ m cpu_mode | false            | run without OpenCL }";

    CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help"))
    {
        std::cout << "Usage: surf_matcher [options]" << std::endl;
        std::cout << "Available options:" << std::endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }
    if (cmd.has("cpu_mode"))
    {
        ocl::setUseOpenCL(false);
        std::cout << "OpenCL was disabled" << std::endl;
    }*/

    UMat img1, img2;

    std::string outpath = "out.jpg";

    // Left image: loaded grayscale into a UMat so SURF can run via OpenCL.
    std::string leftName = "imageA.png";
    imread(leftName, IMREAD_GRAYSCALE).copyTo(img1);
    if (img1.empty())
    {
        std::cout << "Couldn't load " << leftName << std::endl;
        return EXIT_FAILURE;
    }

    // Right image.
    std::string rightName = "imageB.png";
    imread(rightName, IMREAD_GRAYSCALE).copyTo(img2);
    if (img2.empty())
    {
        std::cout << "Couldn't load " << rightName << std::endl;
        return EXIT_FAILURE;
    }

    double surf_time = 0.;

    //declare input/output
    std::vector<KeyPoint> keypoints1, keypoints2;
    std::vector<DMatch> matches;

    UMat _descriptors1, _descriptors2;
    Mat descriptors1 = _descriptors1.getMat(ACCESS_RW),
        descriptors2 = _descriptors2.getMat(ACCESS_RW);

    //instantiate detectors/matchers
    // (removed an unused `FREAK freak;` local that was never referenced)
    SURFDetector surf;

    SURFMatcher<BFMatcher> matcher;

    //-- start of timing section: the first iteration warms up, timing
    //   starts at i == 1 so LOOP_NUM timed iterations are measured.
    for (int i = 0; i <= LOOP_NUM; i++)
    {
        if (i == 1) workBegin();
        surf(img1.getMat(ACCESS_READ), Mat(), keypoints1, descriptors1);
        surf(img2.getMat(ACCESS_READ), Mat(), keypoints2, descriptors2);
        matcher.match(descriptors1, descriptors2, matches);
    }
    workEnd();
    std::cout << "FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
    std::cout << "FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;

    // Average per-iteration time over the LOOP_NUM timed passes.
    surf_time = getTime();
    std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl << "\n";

    std::vector<Point2f> corner;
    Mat img_matches = drawGoodMatches(img1.getMat(ACCESS_READ), img2.getMat(ACCESS_READ), keypoints1, keypoints2, matches, corner);

    //-- Show detected matches and save them to outpath.
    namedWindow("surf matches", 0);
    imshow("surf matches", img_matches);
    imwrite(outpath, img_matches);

    waitKey(0);
    return EXIT_SUCCESS;
}