Example #1
	// Load the grayscale template image and build its image pyramid
	void loadtemplate(std::string name) {
		object = imread( name, CV_LOAD_IMAGE_GRAYSCALE );
		if(object.empty()) {
			loglne("Cannot load template image, exit!");
			exit(-1);
		}
		buildPyramid(object, objpyr, ldetector.nOctaves-1);

		// Try to load a previously trained ferns model; otherwise train one and cache it
		string model_filename = name + "_model.xml.gz";
		loglni("[loadtemplate] try to load "<<model_filename<<"...");
		FileStorage fs(model_filename, FileStorage::READ);
		if( fs.isOpened() ) {
			detector.read(fs.getFirstTopLevelNode());
			loglni("[loadtemplate] successfully loaded "<<model_filename.c_str());
		} else {
			loglni("[loadtemplate] try to train the model...");
			ldetector.setVerbose(true);
			// Keep the (at most) 100 template keypoints that are most stable under random warps
			ldetector.getMostStable2D(object, objKeypoints, 100, gen);
			detector.setVerbose(true);
			// Train the ferns-based planar object detector on the template pyramid
			detector.train(objpyr, objKeypoints, patchSize.width,
				100, 11, 10000, ldetector, gen);
			loglni("[loadtemplate] training DONE! saving...");
			if( fs.open(model_filename, FileStorage::WRITE) )
				detector.write(fs, "ferns_model");
		}
		fs.release();

		// Template corner points: top-left, top-right, bottom-right, bottom-left
		cpts.push_back(Point2f(0,0));
		cpts.push_back(Point2f(object.cols,0));
		cpts.push_back(Point2f(object.cols,object.rows));
		cpts.push_back(Point2f(0,object.rows));
	}
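
Examples #1 and #2 appear to be member functions of the same tracker class; loglni/loglne, validateH, evaluateH, draw3D, GetCameraPose, CapKeyFrame and the helper::* utilities are project-specific helpers not shown here. The following is a minimal sketch of the data members the two snippets assume, with names and types inferred purely from their usage (the real class may declare them differently):

	// Hypothetical member declarations inferred from the snippets above (not from the original source)
	cv::Mat object, H;                          // template image; current homography estimate
	std::vector<cv::Mat> objpyr, imgpyr;        // template / frame image pyramids
	std::vector<cv::KeyPoint> objKeypoints, imgKeypoints;
	std::vector<cv::Point2f> cpts;              // template corner points
	cv::Size patchSize;                         // ferns patch size, e.g. cv::Size(32,32)
	cv::LDetector ldetector;                    // keypoint detector (OpenCV 2.x API)
	cv::PlanarObjectDetector detector;          // ferns-based planar detector (OpenCV 2.x API)
	cv::PatchGenerator gen;                     // random-warp patch generator used for training
	bool savekeys;                              // whether to capture key frames
	int framecnt;                               // frame counter passed to CapKeyFrame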
Example #2
//Override
	// Process one video frame: detect the template, draw the overlay, and estimate the camera pose
	inline void operator()(cv::Mat& frame) {
		if(frame.empty()) return;
		static helper::PerformanceMeasurer PM(1000);
		PM.tic();

		// Convert to grayscale and build the same pyramid structure the detector was trained on
		Mat gray;
		cvtColor(frame, gray, CV_RGB2GRAY);
		buildPyramid(gray, imgpyr, ldetector.nOctaves-1);
		double rms=-1, zncc=-1;
		double camR[3][3]={{1,0,0},{0,1,0},{0,0,1}},camT[3]={0};

		vector<int> pairs;
		vector<Point2f> dst_corners;
		objKeypoints = detector.getModelPoints();
		// Detect up to 300 keypoints in the frame pyramid, then match them against the
		// trained model to estimate the homography H and the projected template corners
		ldetector(imgpyr, imgKeypoints, 300);
		bool found = detector(imgpyr, imgKeypoints, H, dst_corners, &pairs);

		if(found && validateH(dst_corners) ) {
			// Homography accepted: measure match quality, then draw the overlay and recover the pose
			evaluateH(gray, object, rms, zncc);
			static double crns[4][2] = {
				{0,object.rows},
				{object.cols,object.rows},
				{object.cols,0},
				{0,0}
			};
			helper::drawHomography(frame, H, crns);
			draw3D(frame);
			GetCameraPose(camR,camT);
		} else {
			// Detection failed: report NaN pose and quality scores
			for(int i=0; i<3; ++i) {
				camT[i]=numeric_limits<double>::quiet_NaN();
				for(int j=0; j<3; ++j)
					camR[i][j]=numeric_limits<double>::quiet_NaN();
			}
			rms = zncc = numeric_limits<double>::quiet_NaN();
		}
		double dur = PM.toc();
		cout<<"[evaluateH] RMS="<<rms<<"|ZNCC="<<zncc<<endl;
		cout<<"process duration="<<dur<<endl;

		if(savekeys) {
			CapKeyFrame(framecnt++, frame, camR, camT, rms, zncc, dur);
		}
	}
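
One possible way to drive the functor in Example #2, shown only as an illustration; the original project wires the callback into its own capture helpers, and FernTracker is a hypothetical name for the class sketched after Example #1:

int main() {
	// Hypothetical driver loop (FernTracker stands in for the class the snippets belong to)
	cv::VideoCapture cap(0);
	FernTracker tracker;
	tracker.loadtemplate("template.png");   // Example #1: load or train the ferns model
	cv::Mat frame;
	while(cap.read(frame)) {
		tracker(frame);                     // Example #2: detect, draw overlay, estimate pose
		cv::imshow("frame", frame);
		if(cv::waitKey(1) == 27) break;     // ESC quits
	}
	return 0;
}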
Example #3
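Example #3 as listed starts at main(); to build it, a preamble roughly like the one below is also needed. The header set is an assumption about the OpenCV version: LDetector, PlanarObjectDetector and PatchGenerator lived in features2d before 2.4, moved to the legacy module in 2.4, and were removed in OpenCV 3. The help() stand-in replaces the usage text that the original sample defines but this listing omits.

// Assumed preamble for an OpenCV 2.4-era build of this sample
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/legacy/legacy.hpp"   // LDetector, PlanarObjectDetector, PatchGenerator
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

// Stand-in for the sample's help(); the original prints a longer usage description
static void help()
{
    printf("Ferns-based planar object detection demo.\n"
           "Usage: <program> [<object image> [<scene image>]]\n"
           "Defaults: box.png and box_in_scene.png\n");
}
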
int main(int argc, char** argv)
{
    int i;

    const char* object_filename = argc > 1 ? argv[1] : "box.png";
    const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";

    help();

    Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
    Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );

    if( !object.data || !scene.data )
    {
        fprintf( stderr, "Cannot load %s and/or %s\n",
                object_filename, scene_filename );
        exit(-1);
    }

    double imgscale = 1;
    Mat image;

    resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);

    namedWindow("Object", 1);
    namedWindow("Image", 1);
    namedWindow("Object Correspondence", 1);

    Size patchSize(32, 32);
    LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
    ldetector.setVerbose(true);
    PlanarObjectDetector detector;

    vector<Mat> objpyr, imgpyr;
    int blurKSize = 3;
    double sigma = 0;
    GaussianBlur(object, object, Size(blurKSize, blurKSize), sigma, sigma);
    GaussianBlur(image, image, Size(blurKSize, blurKSize), sigma, sigma);
    buildPyramid(object, objpyr, ldetector.nOctaves-1);
    buildPyramid(image, imgpyr, ldetector.nOctaves-1);

    vector<KeyPoint> objKeypoints, imgKeypoints;
    PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);

    string model_filename = format("%s_model.xml.gz", object_filename);
    printf("Trying to load %s ...\n", model_filename.c_str());
    FileStorage fs(model_filename, FileStorage::READ);
    if( fs.isOpened() )
    {
        detector.read(fs.getFirstTopLevelNode());
        printf("Successfully loaded %s.\n", model_filename.c_str());
    }
    else
    {
        printf("The file not found and can not be read. Let's train the model.\n");
        printf("Step 1. Finding the robust keypoints ...\n");
        ldetector.setVerbose(true);
        ldetector.getMostStable2D(object, objKeypoints, 100, gen);
        printf("Done.\nStep 2. Training ferns-based planar object detector ...\n");
        detector.setVerbose(true);

        detector.train(objpyr, objKeypoints, patchSize.width, 100, 11, 10000, ldetector, gen);
        printf("Done.\nStep 3. Saving the model to %s ...\n", model_filename.c_str());
        if( fs.open(model_filename, FileStorage::WRITE) )
            detector.write(fs, "ferns_model");
    }
    printf("Now find the keypoints in the image, try recognize them and compute the homography matrix\n");
    fs.release();

    vector<Point2f> dst_corners;
    // Stack the object image (top) and the scene image (bottom) into one canvas for visualization
    Mat correspond( object.rows + image.rows, std::max(object.cols, image.cols), CV_8UC3);
    correspond = Scalar(0.);
    Mat part(correspond, Rect(0, 0, object.cols, object.rows));
    cvtColor(object, part, CV_GRAY2BGR);
    part = Mat(correspond, Rect(0, object.rows, image.cols, image.rows));
    cvtColor(image, part, CV_GRAY2BGR);

    vector<int> pairs;
    Mat H;

    double t = (double)getTickCount();
    objKeypoints = detector.getModelPoints();
    ldetector(imgpyr, imgKeypoints, 300);

    std::cout << "Object keypoints: " << objKeypoints.size() << "\n";
    std::cout << "Image keypoints: " << imgKeypoints.size() << "\n";
    bool found = detector(imgpyr, imgKeypoints, H, dst_corners, &pairs);
    t = (double)getTickCount() - t;
    printf("%gms\n", t*1000/getTickFrequency());

    if( found )
    {
        // Draw the detected object outline (shifted down by object.rows into the scene half)
        for( i = 0; i < 4; i++ )
        {
            Point r1 = dst_corners[i%4];
            Point r2 = dst_corners[(i+1)%4];
            line( correspond, Point(r1.x, r1.y+object.rows),
                 Point(r2.x, r2.y+object.rows), Scalar(0,0,255) );
        }
    }

    // Draw matched keypoint pairs: model points on top, scene points shifted below by object.rows
    for( i = 0; i < (int)pairs.size(); i += 2 )
    {
        line( correspond, objKeypoints[pairs[i]].pt,
             imgKeypoints[pairs[i+1]].pt + Point2f(0,(float)object.rows),
             Scalar(0,255,0) );
    }

    imshow( "Object Correspondence", correspond );
    Mat objectColor;
    cvtColor(object, objectColor, CV_GRAY2BGR);
    // Mark each model keypoint (filled red dot) and its detection scale (green circle)
    for( i = 0; i < (int)objKeypoints.size(); i++ )
    {
        circle( objectColor, objKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
        circle( objectColor, objKeypoints[i].pt, (1 << objKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
    }
    Mat imageColor;
    cvtColor(image, imageColor, CV_GRAY2BGR);
    for( i = 0; i < (int)imgKeypoints.size(); i++ )
    {
        circle( imageColor, imgKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
        circle( imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
    }

    imwrite("correspond.png", correspond );
    imshow( "Object", objectColor );
    imshow( "Image", imageColor );

    waitKey(0);

    return 0;
}