bool TrackerForProject::init( const cv::Mat& frame, const cv::Rect& initial_position )
{
    position_ = initial_position;
    cv::cvtColor(frame, prevFrame_, CV_BGR2GRAY);

    cv::Mat prev_(prevFrame_(position_));

    SurfFeatureDetector detector;
    detector.detect(prev_, keypoints1);

    return true;
}
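A minimal usage sketch for this init (the capture source, the TrackerForProject instance, and the initial bounding box are assumptions, not part of the original code):

cv::VideoCapture cap(0);                 // hypothetical camera source
cv::Mat firstFrame;
cap >> firstFrame;                       // first BGR frame
TrackerForProject tracker;               // assumed default-constructible
cv::Rect initialBox(100, 100, 80, 80);   // assumed initial object position
tracker.init(firstFrame, initialBox);    // converts to gray and detects SURF keypoints in the ROI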
    void addTrainImage(string name)
    {
    	/* Add a new template image and compute its descriptor */

    	Mat train_img = imread(_train_img_dir + "template_" + name + ".jpg");

    	if(!train_img.empty())
    	{
			resize(train_img, train_img, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);
			_train_images.push_back(train_img);
			_train_sign_names.push_back(name);

			vector<KeyPoint> points;
			_detector.detect( train_img, points );
			_train_keypoints.push_back(points);

			Mat descriptors;
			_extractor.compute( train_img, points, descriptors);
			_train_descriptors.push_back(descriptors);	
		}
		else
		{
			cout << ERROR_STR << "Could not load train image " << _train_img_dir << "template_" << name << ".jpg" << endl;
		}
    }
void init(Mat img){
    Pose = Mat::eye(4, 4, CV_64F);
    PreviousImageGrayScale = img;
    PreviousFeatures.clear();
    SurfDetector.detect(img, PreviousFeatures);
    SurfDescriptor.compute(img, PreviousFeatures, PreviousFeatureDescriptors);
}
Example #4
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    //std_msgs::String imsignal;
    //std_msgs::String comsignal;
    cv_bridge::CvImageConstPtr cv_ptr;
    //char filename[40];
    cv_ptr =  cv_bridge::toCvShare(msg, enc::BGR8);
    Mat im,biimage;
    im = cv_ptr->image;
    //cvtColor(cv_ptr->image,im,CV_BGR2GRAY);
    inRange(im,Scalar(80,0,0), Scalar(255,255,50),biimage); //(85,80,80);
   // p.Uppercolor = Scalar(125,255,255);
    //imshow( "before", im );
    //waitKey(1);
    //imshow("after",biimage);
    SimpleBlobDetector::Params params;

    params.filterByColor =true;
    params.filterByCircularity =false;
    params.filterByConvexity =false;
    params.filterByInertia = false;
    params.blobColor = 255;
    // Filter by Area.
    params.filterByArea = true;
    params.minArea = 10000;
    params.maxArea = 800000;
    // Inertia filtering is disabled above, so this ratio (valid range 0..1) is unused
    params.minInertiaRatio = 0.1f;
    params.minDistBetweenBlobs = 10;
    SimpleBlobDetector detector(params);
    std::vector<KeyPoint> keypoints;
    detector.detect(biimage,keypoints);
    Mat im_with_keypoints;
    drawKeypoints( biimage, keypoints, im_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("test",im_with_keypoints);
    waitKey(1);
    for (std::vector<KeyPoint>::iterator it = keypoints.begin(); it != keypoints.end(); ++it)
    {
        KeyPoint k =  *it;
        cout << k.pt << endl;
        x_bar=k.pt.x;
        y_bar=k.pt.y;
    if (x_bar > (im_with_keypoints.cols/2) - (im_with_keypoints.cols/15) &&
        x_bar < (im_with_keypoints.cols/2) + (im_with_keypoints.cols/15))
    {
        std_msgs::String sent;
        sent.data =  "getposition";
        com_pub.publish(sent);
        ros::Duration(0.05).sleep();
        sent.data =  "BR";
        com_pub.publish(sent);
        ros::Duration(2).sleep();
    }
}
}
Example #5
    virtual void process() {
        std::vector<KeyPoint> ipts;
        detector.detect(inputImage,ipts);

        Mat descriptors;
        extractor.compute( inputImage, ipts, descriptors );

//        printf("num points %d\n",(int)ipts.size());
    }
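The same detect/compute pair as a self-contained OpenCV 2.x program (file name and Hessian threshold are illustrative; SURF lives in the nonfree module):

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
using namespace cv;

int main()
{
    Mat inputImage = imread("scene.jpg", CV_LOAD_IMAGE_GRAYSCALE); // hypothetical input
    SurfFeatureDetector detector(400);                 // minHessian = 400
    SurfDescriptorExtractor extractor;

    std::vector<KeyPoint> ipts;
    detector.detect(inputImage, ipts);                 // find interest points

    Mat descriptors;
    extractor.compute(inputImage, ipts, descriptors);  // one descriptor row per keypoint
    return 0;
}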
Example #6
bool findMatch(CvPoint &offset, FlannBasedMatcher matcher, SurfFeatureDetector detector, SurfDescriptorExtractor extractor, Mat des_object[])
{
	bool noMatch = true;
	Mat des_image, img_matches;
	vector<KeyPoint> kp_image;
	vector<vector<DMatch > > matches;
	vector<DMatch > good_matches;
	int iter = 0;
	Mat image = imread("/home/pi/opencv/photo.jpg" , CV_LOAD_IMAGE_GRAYSCALE );
	detector.detect( image, kp_image );
	extractor.compute( image, kp_image, des_image );
	while ( noMatch )
	{
		//printf("before kp and des detection 2\n");
	    	
		
		matcher.knnMatch(des_object[iter], des_image, matches, 2);
		for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
		{
		    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
		    {
			good_matches.push_back(matches[i][0]);
		    }
		}
		
		//printf("Number of matches: %d\n", good_matches.size());
		if (good_matches.size() >= 10)
		{
			CvPoint center = cvPoint(0,0);
			for ( int z = 0 ; z < good_matches.size() ; z++ )
			{
				int index = good_matches.at(z).trainIdx;
				center.x += kp_image.at(index).pt.x;
				center.y += kp_image.at(index).pt.y;
			}
			center.x = center.x/good_matches.size();
			center.y = center.y/good_matches.size();
			int radius = 5;
			circle( image, center, radius, Scalar(0,0,255), 3, 8, 0 );
			namedWindow("test");
			imshow("test", image);
			imwrite("centerPoint.jpg", image);
			waitKey(5000);
			int offsetX = center.x - image.cols/2;
			int offsetY = center.y - image.rows/2;
			offset = cvPoint(offsetX, offsetY);			
			noMatch = false;
		}
		//printf("draw good matches\n");
		//Show detected matches
		if ( iter++ == 3 || !noMatch )
			break;
		
		good_matches.clear();
	}
	return noMatch;   // note: true means no match was found
}
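A usage sketch for findMatch; note the inverted return value (true means nothing was found). The four template descriptor Mats are assumed to be precomputed elsewhere:

SurfFeatureDetector detector(500);   // illustrative Hessian threshold
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat des_object[4];                   // filled beforehand via detector.detect + extractor.compute
CvPoint offset;
if (!findMatch(offset, matcher, detector, extractor, des_object))
    printf("target offset from image center: (%d, %d)\n", offset.x, offset.y);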
Example #7
/*
 * Test Calonder classifier to match keypoints on given image:
 *      classifierFilename - name of file from which classifier will be read,
 *      imgFilename - test image filename.
 *
 * To calculate keypoint descriptors you may use RTreeClassifier class (as to train),
 * but it is convenient to use CalonderDescriptorExtractor class which is wrapper of
 * RTreeClassifier.
 */
static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12;
    if( img1.empty() )
    {
        cout << "Test image can not be read." << endl;
        exit(-1);
    }
    warpPerspectiveRand( img1, img2, H12, theRNG() );

    // Extract keypoints from test images
    SurfFeatureDetector detector;
    vector<KeyPoint> keypoints1; detector.detect( img1, keypoints1 );
    vector<KeyPoint> keypoints2; detector.detect( img2, keypoints2 );

    // Compute descriptors
    CalonderDescriptorExtractor<float> de( classifierFilename );
    Mat descriptors1;  de.compute( img1, keypoints1, descriptors1 );
    Mat descriptors2;  de.compute( img2, keypoints2, descriptors2 );

    // Match descriptors
    BFMatcher matcher(NORM_L1);
    vector<DMatch> matches;
    matcher.match( descriptors1, descriptors2, matches );

    // Prepare inlier mask
    vector<char> matchesMask( matches.size(), 0 );
    vector<Point2f> points1; KeyPoint::convert( keypoints1, points1 );
    vector<Point2f> points2; KeyPoint::convert( keypoints2, points2 );
    Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
    for( size_t mi = 0; mi < matches.size(); mi++ )
    {
        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
            matchesMask[mi] = 1;
    }

    // Draw
    Mat drawImg;
    drawMatches( img1, keypoints1, img2, keypoints2, matches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask );
    string winName = "Matches";
    namedWindow( winName, WINDOW_AUTOSIZE );
    imshow( winName, drawImg );
    waitKey();
}
Example #8
/*
 * Trains Calonder classifier and writes trained classifier in file:
 *      imgFilename - name of .txt file which contains list of full filenames of train images,
 *      classifierFilename - name of binary file in which classifier will be written.
 *
 * To train Calonder classifier RTreeClassifier class need to be used.
 */
static void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    // Reads train images
    ifstream is( imgFilename.c_str(), ifstream::in );
    vector<Mat> trainImgs;
    while( !is.eof() )
    {
        string str;
        getline( is, str );
        if (str.empty()) break;
        Mat img = imread( str, IMREAD_GRAYSCALE );
        if( !img.empty() )
            trainImgs.push_back( img );
    }
    if( trainImgs.empty() )
    {
        cout << "All train images can not be read." << endl;
        exit(-1);
    }
    cout << trainImgs.size() << " train images were read." << endl;

    // Extracts keypoints from train images
    SurfFeatureDetector detector;
    vector<BaseKeypoint> trainPoints;
    vector<IplImage> iplTrainImgs(trainImgs.size());
    for( size_t imgIdx = 0; imgIdx < trainImgs.size(); imgIdx++ )
    {
        iplTrainImgs[imgIdx] = trainImgs[imgIdx];
        vector<KeyPoint> kps; detector.detect( trainImgs[imgIdx], kps );

        for( size_t pointIdx = 0; pointIdx < kps.size(); pointIdx++ )
        {
            Point2f p = kps[pointIdx].pt;
            trainPoints.push_back( BaseKeypoint(cvRound(p.x), cvRound(p.y), &iplTrainImgs[imgIdx]) );
        }
    }

    // Trains Calonder classifier on extracted points
    RTreeClassifier classifier;
    classifier.train( trainPoints, theRNG(), 48, 9, 100 );
    // Writes classifier
    classifier.write( classifierFilename.c_str() );
}
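A plausible main() wiring the two Calonder helpers above together (the argument layout is an assumption):

int main(int argc, char** argv)
{
    if (argc != 4)
    {
        cout << "Usage: " << argv[0] << " train_list.txt classifier.bin test_image.png" << endl;
        return -1;
    }
    trainCalonderClassifier(argv[2], argv[1]);  // train on the listed images and write the classifier
    testCalonderClassifier(argv[2], argv[3]);   // read it back and match on the test image
    return 0;
}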
KDvoid SURF_Descriptor ( KDint nIdx )
{
	Mat		tDst;
	Mat		tImg1;
	Mat		tImg2;

	tImg1 = imread ( "/res/image/box.png", CV_LOAD_IMAGE_GRAYSCALE );
	tImg2 = imread ( "/res/image/box_in_scene.png", CV_LOAD_IMAGE_GRAYSCALE );

	// -- Step 1: Detect the keypoints using SURF Detector
	KDint  nMinHessian = 400;

	SurfFeatureDetector   tDetector ( nMinHessian );
	std::vector<KeyPoint> aKeypoints1, aKeypoints2;

	tDetector.detect ( tImg1, aKeypoints1 );
	tDetector.detect ( tImg2, aKeypoints2 );

	// -- Step 2: Calculate descriptors (feature vectors)
	SurfDescriptorExtractor  tExtractor;
	Mat  tDescriptors1, tDescriptors2;

	tExtractor.compute ( tImg1, aKeypoints1, tDescriptors1 );
	tExtractor.compute ( tImg2, aKeypoints2, tDescriptors2 );
/*
	// -- Step 3: Matching descriptor vectors with a brute force matcher
	BruteForceMatcher< L2<KDfloat> >  tMatcher;
	std::vector< DMatch >             aMatches;

	tMatcher.match ( tDescriptors1, tDescriptors2, aMatches );

	// -- Draw matches
	drawMatches ( tImg1, aKeypoints1, tImg2, aKeypoints2, aMatches, tDst ); 

	g_pController->setFrame ( 0, tDst );
*/
}
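Steps 3 and 4 above are commented out; a sketch of the same matching via BFMatcher, the replacement for BruteForceMatcher in later OpenCV 2.x releases:

BFMatcher tMatcher(NORM_L2);
std::vector<DMatch> aMatches;
tMatcher.match(tDescriptors1, tDescriptors2, aMatches);

drawMatches(tImg1, aKeypoints1, tImg2, aKeypoints2, aMatches, tDst);
g_pController->setFrame(0, tDst);   // same controller call as in the commented-out block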
    void tryFindImage_features(Mat input)
    {
    	/* Compare the input image against the set of templates and pick the best match */

    	resize(input, input, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);

    	vector<KeyPoint> keyPoints;
    	_detector.detect(input, keyPoints);

    	Mat descriptors;
    	_extractor.compute(input, keyPoints, descriptors);

    	int max_value = 0, max_position = 0; 

    	for(int i = 0; i < (int)_train_descriptors.size(); i++)
    	{
    		vector< vector<DMatch> > matches;

    		_matcher.knnMatch(descriptors, _train_descriptors[i], matches, 50);

    		int good_matches_count = 0;
		   
		    for (size_t j = 0; j < matches.size(); ++j)
		    {
		        if (matches[j].size() < 2)
		            continue;

		        const DMatch &m1 = matches[j][0];
		        const DMatch &m2 = matches[j][1];

		        if (m1.distance <= 0.7 * m2.distance)
		            good_matches_count++;
		    }

		    if(good_matches_count > max_value)
		    {
		    	max_value = good_matches_count;
		    	max_position = i;
		    }
    	}

    	cout << STATUS_STR << "Detected sign: " << _train_sign_names[max_position] << endl;
    }
Mat find_next_homography(Mat image, Mat image_next, vector<KeyPoint> keypoints_0, Mat descriptors_0,
						 SurfFeatureDetector detector, SurfDescriptorExtractor extractor, 
						 BFMatcher matcher, vector<KeyPoint>& keypoints_next, Mat& descriptors_next)
{

	//step 1 detect feature points in next image
	vector<KeyPoint> keypoints_1;
	detector.detect(image_next, keypoints_1);

	Mat img_keypoints_surf0, img_keypoints_surf1;
	drawKeypoints(image, keypoints_0, img_keypoints_surf0);
	drawKeypoints(image_next, keypoints_1, img_keypoints_surf1);
	//cout << "# im0 keypoints" << keypoints_0.size() << endl;
    //cout << "# im1 keypoints" << keypoints_1.size() << endl;
	imshow("surf 0", img_keypoints_surf0);
	imshow("surf 1", img_keypoints_surf1);

    //step 2: extract feature descriptors from feature points
	Mat descriptors_1;
	extractor.compute(image_next, keypoints_1, descriptors_1);

	//step 3: feature matching
	//cout << "fd matching" << endl;
	vector<DMatch> matches;
	vector<Point2f> matched_0;
	vector<Point2f> matched_1;

	matcher.match(descriptors_0, descriptors_1, matches);
	Mat img_feature_matches;
	drawMatches(image, keypoints_0, image_next, keypoints_1, matches, img_feature_matches );
	imshow("Matches", img_feature_matches);

	for (int i = 0; i < matches.size(); i++ )
	{
		matched_0.push_back(keypoints_0[matches[i].queryIdx].pt);	
		matched_1.push_back(keypoints_1[matches[i].trainIdx].pt);	
	}
	keypoints_next = keypoints_1;
	descriptors_next = descriptors_1;
	return findHomography(matched_0, matched_1, RANSAC);

}
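A sketch of chaining find_next_homography over a frame sequence to accumulate motion (the frames vector is hypothetical):

std::vector<Mat> frames;                    // assumed grayscale frame sequence
SurfFeatureDetector detector(400);
SurfDescriptorExtractor extractor;
BFMatcher matcher(NORM_L2);

std::vector<KeyPoint> kp_prev;
Mat desc_prev;
detector.detect(frames[0], kp_prev);
extractor.compute(frames[0], kp_prev, desc_prev);

Mat H_total = Mat::eye(3, 3, CV_64F);       // accumulated homography, frame 0 -> frame t
for (size_t t = 1; t < frames.size(); t++)
{
    std::vector<KeyPoint> kp_next;
    Mat desc_next;
    Mat H = find_next_homography(frames[t-1], frames[t], kp_prev, desc_prev,
                                 detector, extractor, matcher, kp_next, desc_next);
    H_total = H * H_total;                  // compose the frame-to-frame motions
    kp_prev = kp_next;
    desc_prev = desc_next;
}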
Example #12
vector<Mat> getHistAndLabels(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, int dictionarySize) {

    // setup variable and object I need
    IplImage *img2;
    Mat labels(0, 1, CV_32FC1);
    Mat trainingData(0, dictionarySize, CV_32FC1);
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;
    vector<string> files = vector<string>();

    helper.GetFileList(EVAL_DIR, files);

    float labelVal;

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {
            string sFileName = TRAINING_DIR;
            sFileName.append(files[iz]);
            const char * imageName = sFileName.c_str ();

            img2 = cvLoadImage(imageName,0);
            if (img2) {
                detector.detect(img2, keypoint1);
                bowDE.compute(img2, keypoint1, bowDescriptor1);
                trainingData.push_back(bowDescriptor1);
                labelVal = iz+1;
                labels.push_back(labelVal);
            }


        }
    }

    vector<Mat> retVec;
    retVec.push_back(trainingData);
    retVec.push_back(labels);
    return retVec;

}
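A sketch of how the returned histogram/label pair would typically feed a CvSVM (the SVM parameters here are illustrative):

vector<Mat> histAndLabels = getHistAndLabels(detector, bowDE, dictionarySize);
CvSVMParams params;
params.svm_type = CvSVM::C_SVC;
params.kernel_type = CvSVM::RBF;
CvSVM svm;
svm.train(histAndLabels[0], histAndLabels[1], cv::Mat(), cv::Mat(), params); // data, labels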
void collectclasscentroids() {
	IplImage *img;
	int samplesOnGroup = 60;
	int trainingGroups = 4;
	int allSamples = samplesOnGroup * trainingGroups;
	for (int j = 1; j <= trainingGroups; j++)
		for (int i = 1; i <= samplesOnGroup; i++) {
			sprintf(ch, "%s%d%s%d%s", "train/", j, " (", i, ").jpg");
			//cout << ch << endl;
			printf("\rTraining : %3d %%",((((j-1)*samplesOnGroup)+i)*100/allSamples));
			const char* imageName = ch;
			img = cvLoadImage(imageName, 0);
			vector<KeyPoint> keypoint;
			detector.detect(img, keypoint);
			Mat features;
			extractor->compute(img, keypoint, features);
			bowTrainer.add(features);
		}
	printf("\n");
	return;
}
Example #14
Mat getSingleImageHistogram(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, string evalFile) {

    // setup variable and object I need
    IplImage *img2;
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;


    int isImage = helper.instr(evalFile, "jpg", 0, true);
    if (isImage > 0) {

        const char * imageName = evalFile.c_str ();
        img2 = cvLoadImage(imageName,0);
        if (img2) {
            detector.detect(img2, keypoint1);
            bowDE.compute(img2, keypoint1, bowDescriptor1);
        }
    }

    return bowDescriptor1;
}
Example #15
float getClassMatch(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, IplImage* &img2, int dictionarySize, string sFileName, CvSVM &svm) {
    float response;

    vector<KeyPoint> keypoint2;
    Mat bowDescriptor2;
    Mat evalData(0, dictionarySize, CV_32FC1);
    Mat groundTruth(0, 1, CV_32FC1);
    Mat results(0, 1, CV_32FC1);


    detector.detect(img2, keypoint2);
    bowDE.compute(img2, keypoint2, bowDescriptor2);


    //evalData.push_back(bowDescriptor2);
    //groundTruth.push_back((float) classID);
    response = svm.predict(bowDescriptor2);
    //results.push_back(response);


    return response;
}
Example #16
Mat getHistograms(SurfFeatureDetector &detector, BOWImgDescriptorExtractor &bowDE, int dictionarySize, vector<string> &collectionFilenames, string evalDir) {

    // setup variable and object I need
    IplImage *img2;
    Mat trainingData(0, dictionarySize, CV_32FC1);
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    Helper helper;
    vector<string> files = vector<string>();

    helper.GetFileList(evalDir, files);

    cout << "Number of Collection Files to Process: " << files.size()-2 << endl;

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {
            cout << "     Processing " << files[iz] << endl;

            collectionFilenames.push_back(files[iz]);
            string sFileName = EVAL_DIR;
            sFileName.append(files[iz]);
            const char * imageName = sFileName.c_str ();

            img2 = cvLoadImage(imageName,0);
            if (img2) {
                detector.detect(img2, keypoint1);
                bowDE.compute(img2, keypoint1, bowDescriptor1);
                trainingData.push_back(bowDescriptor1);
            }


        }
    }

    return trainingData;
}
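All of these BOW helpers assume a detector and a BOWImgDescriptorExtractor whose vocabulary has already been set; a sketch of that setup (the cluster count is illustrative):

initModule_nonfree();                                   // register SURF
SurfFeatureDetector detector(400);
Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor();
Ptr<DescriptorMatcher> matcher = new FlannBasedMatcher();
int dictionarySize = 1000;                              // illustrative vocabulary size
BOWKMeansTrainer bowTrainer(dictionarySize);
BOWImgDescriptorExtractor bowDE(extractor, matcher);
// ... add SURF descriptors of the training images to bowTrainer ...
Mat vocabulary = bowTrainer.cluster();
bowDE.setVocabulary(vocabulary);                        // after this, the helpers above can run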
int main(int argc, char** argv)
{
    if( argc < 2 )
    {
        printPrompt( argv[0] );
        return -1;
    }

    initModule_nonfree();

    // Get Input Data
    ifstream file(argv[1]);
    if ( !file.is_open() )
        return -1;
    
    string str;
    
        // Image Name
    getline( file, str ); getline( file, str );
    string image_name = str;
        // Cloud Name
    getline( file, str ); getline( file, str );
    string cloud_name = str;
        // width of images to be created.
    getline( file, str ); getline( file, str );
    int w = atoi(str.c_str());
        // height of images to be created
    getline( file, str ); getline( file, str );
    int h = atoi(str.c_str());
        // resolution of voxel grids
    getline( file, str ); getline( file, str );
    float r = atof(str.c_str());
        // f (distance from pinhole)
    getline( file, str ); getline( file, str );
    float f = atof(str.c_str());
        // thetax (initial rotation about X Axis of map)
    getline( file, str ); getline( file, str );
    float thetaX = atof(str.c_str());
        // thetay (initial rotation about Y Axis of map)
    getline( file, str ); getline( file, str );
    float thetaY = atof(str.c_str());
        // number of points to go to
    getline( file, str ); getline( file, str );
    float nop = atoi(str.c_str());
        // Number of divisions
    getline( file, str ); getline( file, str );
    float divs = atoi(str.c_str());
        // Number of images to return
    getline( file, str ); getline( file, str );
    int numtoreturn = atoi(str.c_str());    
        // Should we load or create photos?
    getline( file, str ); getline( file, str );
    string lorc =str.c_str();
        // Directory to look for photos
    getline( file, str ); getline( file, str );
    string dir =str.c_str();
        // Directory to look for kp and descriptors
    getline( file, str ); getline( file, str );
    string kdir =str.c_str();
        // save photos?
    getline( file, str ); getline( file, str );
    string savePhotos =str.c_str();
    
    file.close();
    // Done Getting Input Data

    map<vector<float>, Mat> imagemap;
    map<vector<float>, Mat> surfmap;
    map<vector<float>, Mat> siftmap;
    map<vector<float>, Mat> orbmap;
    map<vector<float>, Mat> fastmap;
    imagemap.clear();

    vector<KeyPoint> SurfKeypoints;
    vector<KeyPoint> SiftKeypoints;
    vector<KeyPoint> OrbKeypoints;
    vector<KeyPoint> FastKeypoints;
    Mat SurfDescriptors;
    Mat SiftDescriptors;
    Mat OrbDescriptors;
    Mat FastDescriptors;

    int minHessian = 300;

    SurfFeatureDetector SurfDetector (minHessian);
    SiftFeatureDetector SiftDetector (minHessian);
    OrbFeatureDetector OrbDetector (minHessian);
    FastFeatureDetector FastDetector (minHessian);


    SurfDescriptorExtractor SurfExtractor;
    SiftDescriptorExtractor SiftExtractor;
    OrbDescriptorExtractor OrbExtractor;

    if ( !fs::exists( dir ) || lorc == "c" )
    { // Load Point Cloud and render images
        PointCloud<PT>::Ptr cloud (new pcl::PointCloud<PT>);
        io::loadPCDFile<PT>(cloud_name, *cloud);

        Eigen::Affine3f tf = Eigen::Affine3f::Identity();
        tf.rotate (Eigen::AngleAxisf (thetaX, Eigen::Vector3f::UnitX()));
        pcl::transformPointCloud (*cloud, *cloud, tf);
        tf = Eigen::Affine3f::Identity();
        tf.rotate (Eigen::AngleAxisf (thetaY, Eigen::Vector3f::UnitY()));
        pcl::transformPointCloud (*cloud, *cloud, tf);

        // Create images from point cloud
        imagemap = render::createImages(cloud, nop, w, h, r, f);

        if (savePhotos == "y")
        {
            for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
            {
                // Create image name and storagename
                string imfn = dir + "/";
                string kpfn = kdir + "/";
                for (int j = 0; j < i->first.size(); j++)
                {
                    imfn += boost::to_string(i->first[j]) + " ";
                    kpfn += boost::to_string(i->first[j]) + " ";
                }
                imfn += ".jpg";
                imwrite(imfn, i->second);

                // Detect keypoints, add to keypoint map. Same with descriptors

                SurfDetector.detect(i->second, SurfKeypoints);
                SiftDetector.detect(i->second, SiftKeypoints);
                OrbDetector.detect(i->second, OrbKeypoints);
                FastDetector.detect(i->second, FastKeypoints);

                SurfExtractor.compute(i->second, SurfKeypoints, SurfDescriptors);
                SiftExtractor.compute(i->second, SiftKeypoints, SiftDescriptors);
                OrbExtractor.compute(i->second, OrbKeypoints, OrbDescriptors);
                SiftExtractor.compute(i->second, FastKeypoints, FastDescriptors); // FAST has no extractor; SIFT descriptors at FAST keypoints

                // Store KP and Descriptors in yaml file.

                kpfn += ".yml";
                FileStorage store(kpfn, cv::FileStorage::WRITE);
                write(store,"SurfKeypoints",SurfKeypoints);
                write(store,"SiftKeypoints",SiftKeypoints);
                write(store,"OrbKeypoints", OrbKeypoints);
                write(store,"FastKeypoints",FastKeypoints);
                write(store,"SurfDescriptors",SurfDescriptors);
                write(store,"SiftDescriptors",SiftDescriptors);
                write(store,"OrbDescriptors", OrbDescriptors);
                write(store,"FastDescriptors",FastDescriptors);
                store.release();

                surfmap[i->first] = SurfDescriptors;
                siftmap[i->first] = SiftDescriptors;
                orbmap[i->first]  = OrbDescriptors;
                fastmap[i->first] = FastDescriptors;
            }
        }
    } 
    else 
    { // load images from the folder dir
        // First look into the folder to get a list of filenames
        vector<fs::path> ret;
        const char * pstr = dir.c_str();
        fs::path p(pstr);
        get_all(pstr, ret);

        for (int i = 0; i < ret.size(); i++)
        {
            // Load Image via filename
            string fn = ret[i].string();
            istringstream iss(fn);
            vector<string> tokens;
            copy(istream_iterator<string>(iss), istream_iterator<string>(), back_inserter<vector<string> >(tokens));

            // Construct ID from filename
            vector<float> ID;
            for (int i = 0; i < 6; i++) // 6 because there are three location floats and three direction floats
                ID.push_back(::atof(tokens[i].c_str()));
            string imfn = dir + "/" + fn;

            // Read image and add to imagemap.
            Mat m = imread(imfn);
            imagemap[ID] = m;

            // Create Filename for loading Keypoints and descriptors
            string kpfn = kdir + "/";
            for (int j = 0; j < ID.size(); j++)
            {
                kpfn += boost::to_string(ID[j]) + " ";
            }

            kpfn = kpfn+ ".yml";
            
            // Create filestorage item to read from and add to map.
            FileStorage store(kpfn, cv::FileStorage::READ);

            FileNode n1 = store["SurfKeypoints"];
            read(n1,SurfKeypoints);
            FileNode n2 = store["SiftKeypoints"];
            read(n2,SiftKeypoints);
            FileNode n3 = store["OrbKeypoints"];
            read(n3,OrbKeypoints);
            FileNode n4 = store["FastKeypoints"];
            read(n4,FastKeypoints);
            FileNode n5 = store["SurfDescriptors"];
            read(n5,SurfDescriptors);
            FileNode n6 = store["SiftDescriptors"];
            read(n6,SiftDescriptors);
            FileNode n7 = store["OrbDescriptors"];
            read(n7,OrbDescriptors);
            FileNode n8 = store["FastDescriptors"];
            read(n8,FastDescriptors);

            store.release();

            surfmap[ID] = SurfDescriptors;
            siftmap[ID] = SiftDescriptors;
            orbmap[ID]  = OrbDescriptors;
            fastmap[ID] = FastDescriptors;
        }
    }

    TickMeter tm;
    tm.reset();
    cout << "<\n  Analyzing Images ..." << endl;

    // We have a bunch of images, now we compute their grayscale and black and white.
    map<vector<float>, Mat> gsmap;
    map<vector<float>, Mat> bwmap;
    for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
    {
        vector<float> ID = i->first;
        Mat Image = i->second;
        GaussianBlur( Image, Image, Size(5,5), 0, 0, BORDER_DEFAULT );


        gsmap[ID] = averageImage::getPixSumFromImage(Image, divs);
        bwmap[ID] = averageImage::aboveBelow(gsmap[ID]);
    }
    Mat image = imread(image_name);
    Mat gsimage = averageImage::getPixSumFromImage(image, divs);
    Mat bwimage = averageImage::aboveBelow(gsimage);

    // cout << gsimage <<endl;
    imwrite("GS.png", gsimage);
    namedWindow("GSIMAGE (Line 319)");
    imshow("GSIMAGE (Line 319)", gsimage);
    waitKey(0);

    vector<KeyPoint> imgSurfKeypoints;
    vector<KeyPoint> imgSiftKeypoints;
    vector<KeyPoint> imgOrbKeypoints;
    vector<KeyPoint> imgFastKeypoints;
    Mat imgSurfDescriptors;
    Mat imgSiftDescriptors;
    Mat imgOrbDescriptors;
    Mat imgFastDescriptors;

    SurfDetector.detect(image, imgSurfKeypoints);
    SiftDetector.detect(image, imgSiftKeypoints);
    OrbDetector.detect(image, imgOrbKeypoints);
    FastDetector.detect(image, imgFastKeypoints);

    SurfExtractor.compute(image, imgSurfKeypoints, imgSurfDescriptors);
    SiftExtractor.compute(image, imgSiftKeypoints, imgSiftDescriptors);
    OrbExtractor.compute(image, imgOrbKeypoints, imgOrbDescriptors);
    SiftExtractor.compute(image, imgFastKeypoints, imgFastDescriptors); // SIFT descriptors at FAST keypoints, as above


    tm.start();

    cout << ">\n<\n  Comparing Images ..." << endl;

    // We have their features, now compare them!
    map<vector<float>, float> gssim; // Gray Scale Similarity
    map<vector<float>, float> bwsim; // Above Below Similarity
    map<vector<float>, float> surfsim;
    map<vector<float>, float> siftsim;
    map<vector<float>, float> orbsim;
    map<vector<float>, float> fastsim;

    for (map<vector<float>, Mat>::iterator i = gsmap.begin(); i != gsmap.end(); ++i)
    {
        vector<float> ID = i->first;
        gssim[ID] = similarities::getSimilarity(i->second, gsimage);
        bwsim[ID] = similarities::getSimilarity(bwmap[ID], bwimage); 
        surfsim[ID] = similarities::compareDescriptors(surfmap[ID], imgSurfDescriptors);
        siftsim[ID] = similarities::compareDescriptors(siftmap[ID], imgSiftDescriptors);
        orbsim[ID] = 0;//similarities::compareDescriptors(orbmap[ID], imgOrbDescriptors);
        fastsim[ID] = 0;//similarities::compareDescriptors(fastmap[ID], imgFastDescriptors);
    }

    map<vector<float>, int> top;

    bool gotone = false;
    typedef map<vector<float>, int>::iterator iter;

    // Choose the best ones!
    for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
    {
        vector<float> ID = i->first;

        int sim = /* gssim[ID] + 0.5*bwsim[ID] + */ 5*surfsim[ID] + 0.3*siftsim[ID] + orbsim[ID] + fastsim[ID];

        // cout << surfsim[ID] << "\t";
        // cout << siftsim[ID] << "\t";
        // cout << orbsim[ID] << "\t";
        // cout << fastsim[ID] << endl;

        if (!gotone)
        {
            top[ID] = sim;
            gotone = true;
        }

        iter it = top.begin();
        iter end = top.end();
        int max_value = it->second;
        vector<float> max_ID = it->first;
        for( ; it != end; ++it) 
        {
            int current = it->second;
            if(current > max_value) 
            {
                max_value = it->second;
                max_ID = it->first;
            }
        }
        // cout << "Sim: " << sim << "\tmax_value: " << max_value << endl;
        if ((int)top.size() < numtoreturn)
            top[ID] = sim;
        else
        {
            if (sim < max_value)
            {
                top[ID] = sim;
                top.erase(max_ID);
            }
        }
    }
    tm.stop();
    double s = tm.getTimeSec();


    cout << ">\n<\n  Writing top " << numtoreturn << " images ..." << endl;

    int count = 1;
    namedWindow("Image");
    namedWindow("Match");
    namedWindow("ImageBW");
    namedWindow("MatchBW");
    namedWindow("ImageGS");
    namedWindow("MatchGS");

    imshow("Image", image);
    imshow("ImageBW", bwimage);
    imshow("ImageGS", gsimage);


    vector<KeyPoint> currentPoints;

    for (iter i = top.begin(); i != top.end(); ++i)
    {
        vector<float> ID = i->first;

        cout << "  Score: "<< i->second << "\tGrayScale: " << gssim[ID] << "\tBW: " << bwsim[ID] << "  \tSURF: " << surfsim[ID] << "\tSIFT: " << siftsim[ID] << endl;
        string fn = "Sim_" + boost::to_string(count) + "_" + boost::to_string(i->second) + ".png";
        imwrite(fn, imagemap[ID]);
        count++;

        normalize(bwmap[ID], bwmap[ID], 0, 255, NORM_MINMAX, CV_64F);
        normalize(gsmap[ID], gsmap[ID], 0, 255, NORM_MINMAX, CV_64F);

        imshow("Match", imagemap[ID]);
        imshow("MatchBW", bwmap[ID]);
        imshow("MatchGS", gsmap[ID]);


        waitKey(0);

    }

    cout << ">\nComparisons took " << s << " seconds for " << imagemap.size() << " images (" 
        << (int) imagemap.size()/s << " images per second)." << endl;

    return 0;
}
bool compute(Mat CurrentImageGrayScale, Mat Kinverse, const int iteration){
    vector<KeyPoint> CurrentFeatures;
    SurfDetector.detect(CurrentImageGrayScale, CurrentFeatures);
    Mat CurrentFeatureDescriptors;
    SurfDescriptor.compute(CurrentImageGrayScale, CurrentFeatures, CurrentFeatureDescriptors);
    vector<DMatch> matches;
    matcher.match(PreviousFeatureDescriptors, CurrentFeatureDescriptors, matches);
    if (matches.size() > 200){
        nth_element(matches.begin(), matches.begin() + 200, matches.end());
        matches.erase(matches.begin() + 200, matches.end());   // keep only the 200 best matches
    }
    //Debug(matches, PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures);
    vector< pair<double,double> > FirstImageFeatures;
    vector< pair<double,double> > SecondImageFeatures;
    for(int i  = 0; i < matches.size(); i++){
        Point2f myft = PreviousFeatures[matches[i].queryIdx].pt;
        Mat FtMatForm = (Mat_<double>(3,1) << (double)myft.x, (double)myft.y, 1.0);
        FtMatForm = Kinverse*FtMatForm;       
        pair<double,double> tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
        FirstImageFeatures.push_back(tmp);
        
        myft = CurrentFeatures[matches[i].trainIdx].pt;
        FtMatForm = (Mat_<double>(3,1) << (double)myft.x, (double)myft.y, 1.0);
        FtMatForm = Kinverse*FtMatForm;       
        tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
        SecondImageFeatures.push_back(tmp);
    }
    vector<int> inliers_indexes;
    Mat RobustEssentialMatrix= Ransac(FirstImageFeatures, SecondImageFeatures, 0.00001, 8, 2000, inliers_indexes);
    //cout << RobustEssentialMatrix << endl;
    
    //Debug2(matches, PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures, inliers_indexes);
    
    Mat P = Mat::eye(3,4,CV_64F);
    if (!GetRotationAndTraslation(RobustEssentialMatrix, FirstImageFeatures, SecondImageFeatures, inliers_indexes, P)){
        cerr << "Recovering Translation and Rotation: Failed" << endl;
        return false;
    }
    //cout << P << endl;
    Mat Transformation = Mat::zeros(4,4, CV_64F);
    Transformation.at<double>(3,3) = 1.0;
    for(int i = 0 ; i < 3; i++)
        for(int j = 0; j < 4; j++)
            Transformation.at<double>(i, j) = P.at<double>(i, j);
    Mat TransformationInverse = Transformation.inv();
    Pose = Pose * TransformationInverse;
    cerr << Pose.at<double>(0, 3) << " " << Pose.at<double>(1, 3) << " " << Pose.at<double>(2, 3) << endl;    
    
    PreviousImageGrayScale = CurrentImageGrayScale;
    PreviousFeatures = CurrentFeatures;
    PreviousFeatureDescriptors = CurrentFeatureDescriptors;
    
    // old version (Harris-based matching), kept for reference
    
//    vector< pair<int,int> > correspondences = harrisFeatureMatcherMCC(PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures);
//    cout << "Iteracion" << iteration << "Cantidad de correspondencias " << correspondences.size() << endl;
//    vector< pair<double,double> > FirstImageFeatures;
//    vector< pair<double,double> > SecondImageFeatures;
//    for(int i  = 0; i < correspondences.size(); i++){
//        pair<int,int> myft = PreviousFeatures[correspondences[i].first];
//        Mat FtMatForm = (Mat_<double>(3,1) << (double)myft.first, (double)myft.second, 1.0);
//        FtMatForm = Kinverse*FtMatForm;       
//        pair<double,double> tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
//        FirstImageFeatures.push_back(tmp);
//        
//        myft = CurrentFeatures[correspondences[i].second];
//        FtMatForm = (Mat_<double>(3,1) << (double)myft.first, (double)myft.second, 1.0);
//        FtMatForm = Kinverse*FtMatForm;       
//        tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
//        SecondImageFeatures.push_back(tmp);
//    }
//    vector<int> inliers_indexes;
//    Mat RobustEssentialMatrix= Ransac(FirstImageFeatures, SecondImageFeatures, 0.98, 0.00001, 0.5, 8, FirstImageFeatures.size()/2, inliers_indexes);
//    cout << "Iteration" << iteration << "Final EssentialMatrix" << endl;
//    cout << RobustEssentialMatrix << endl;
//    
//    
//    vector<pair<int, int> > correspondences_inliers;
//    for(int i = 0; i < inliers_indexes.size(); i++)
//        correspondences_inliers.push_back(correspondences[inliers_indexes[i]]);
//    debugging2(PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures, correspondences_inliers);
//    
//    Mat P = Mat::eye(3,4,CV_64F);
//    if (!GetRotationAndTraslation(RobustEssentialMatrix, FirstImageFeatures, SecondImageFeatures, inliers_indexes, P))
//        return false;
//    cout << "Iteration" << iteration << "Camera Matrix" << endl;
//    cout << P << endl;
//    Mat Transformation = Mat::zeros(4,4, CV_64F);
//    Transformation.at<double>(3,3) = 1.0;
//    for(int i = 0 ; i < 3; i++)
//        for(int j = 0; j < 4; j++)
//            Transformation.at<double>(i, j) = P.at<double>(i, j);
//    Mat TransformationInverse = Transformation.inv();
//    Pose = Pose * TransformationInverse;
//    PreviousImageGrayScale = CurrentImageGrayScale;
//    PreviousFeatures = CurrentFeatures;
//    cerr << Pose.at<double>(0, 4) << Pose.at<double>(1, 4) << Pose.at<double>(2, 4) << endl;

    return true;
}
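A sketch of the intended init/compute loop for this odometry code, assuming grayscale frames on disk and known camera intrinsics K (all values illustrative):

Mat K = (Mat_<double>(3,3) << 700,   0, 320,
                                0, 700, 240,
                                0,   0,   1);           // assumed intrinsics
Mat Kinverse = K.inv();

Mat gray = imread("frame000.png", CV_LOAD_IMAGE_GRAYSCALE); // hypothetical frame files
init(gray);
for (int t = 1; t < 100; t++)                           // 100 frames assumed
{
    char name[64];
    sprintf(name, "frame%03d.png", t);
    gray = imread(name, CV_LOAD_IMAGE_GRAYSCALE);
    compute(gray, Kinverse, t);                         // updates Pose and prints its translation
}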
Example #19
void collectclasscentroids(SurfFeatureDetector &detector, Ptr<DescriptorExtractor> &extractor, BOWKMeansTrainer &bowTrainer, string trainingDir, bool runInBackground, bool writelog) {

    IplImage *img;
    vector<string> files = vector<string>();
    Helper helper;
    string event;
    char ch[30];

    // should put error correction here to check if directory exists

    helper.GetFileList(trainingDir, files);

    for (unsigned int iz = 0; iz < files.size(); iz++) {
        int isImage = helper.instr(files[iz], "jpg", 0, true);
        if (isImage > 0) {


            string sFileName = trainingDir;
            string sFeaturesDir = "/usr/local/share/archive-vision/build/features/";
            string sOutputImageFilename = "/usr/local/share/archive-vision/build/feature_point_images/";
            sFileName.append(files[iz]);
            sOutputImageFilename.append(files[iz]);
            sFeaturesDir.append(files[iz]);
            sFeaturesDir.append(".txt");
            const char * imageName = sFileName.c_str ();

            img = cvLoadImage(imageName,0);
            if (img) {
                string workingFile = files[iz];
                vector<KeyPoint> keypoint;
                detector.detect(img, keypoint);
                if (keypoint.size()) {
                    Mat features;
                    extractor->compute(img, keypoint, features);

                    event = "Processing " + workingFile;
                    helper.logEvent(event, 2, runInBackground, writelog);


                    //try to write out an image with the features highlighted
                    // Add results to image and save.
                    //				Mat output;
                    //				drawKeypoints(img, keypoint, output, Scalar(0, 128, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
                    //				imwrite(sOutputImageFilename, output);




                    // try writing out all the feature, each to its own YML file and see what
                    // they look like
                    //				helper.WriteToFile(sFeaturesDir, features, "features");

                    bowTrainer.add(features);
                } else {
                    event = workingFile + " contains no keypoints.";
                    helper.logEvent(event, 1, runInBackground, writelog);
                }
            }


        }
    }
    return;
}
Example #20
void read (Mat rgbimg, Mat depimg, Mat mask, PointCloud &pointcloud,
    PointCloud &keypoints, vector<KeyPoint> &key, int x1, int y1, double minHessian)
{
  vector<KeyPoint> keytemp;
  SurfFeatureDetector detector (minHessian);
  detector.detect (rgbimg, keytemp);

  // convert the feature points in the smaller mask into point cloud
  for (int k = 0; k < keytemp.size (); k++)
  {
    int i = (int) keytemp[k].pt.y;
    int j = (int) keytemp[k].pt.x;

    unsigned short depth = depimg.at<unsigned short> (i, j);

    if (mask.at<bool> (i, j) != 0 && depth != 0 && depth / MM_PER_M < DEPTH_THRESHOLD)
    {
      PointT point;
      double x = (WIDTH-(j + x1)-WIDTH/2) * depth / FOCAL / MM_PER_M;
      double y = (HEIGHT-(i + y1)-HEIGHT/2) * depth / FOCAL / MM_PER_M;
      double z = depth / MM_PER_M;

      point.x = x;
      point.y = y;
      point.z = z;

      Vec3b rgb = rgbimg.at<Vec3b> (i, j);
      point.b = (uint8_t) rgb[0];
      point.g = (uint8_t) rgb[1];
      point.r = (uint8_t) rgb[2];

      keypoints.points.push_back (point);
      // keep only the keypoints that fall inside the mask and have valid depth
      key.push_back (keytemp[k]);
    }
  }

  // convert all the points in the mask into point cloud
  for (int i = 0; i < rgbimg.rows; i++)
  {
    for (int j = 0; j < rgbimg.cols; j++)
    {
      unsigned short depth = depimg.at<unsigned short> (i, j);
      if (mask.at<bool> (i, j) != 0 && depth != 0 && depth / MM_PER_M < DEPTH_THRESHOLD)
      {
        PointT point;
        double x = (WIDTH-(j + x1)-WIDTH/2) * depth / FOCAL / MM_PER_M;
        double y = (HEIGHT-(i + y1)-HEIGHT/2) * depth / FOCAL / MM_PER_M;
        double z = depth / MM_PER_M;

        point.x = x;
        point.y = y;
        point.z = z;

        Vec3b rgb = rgbimg.at<Vec3b> (i, j);
        point.b = (uint8_t) rgb[0];
        point.g = (uint8_t) rgb[1];
        point.r = (uint8_t) rgb[2];

        pointcloud.points.push_back (point);
      }
    }
  }

  pointcloud.width = (uint32_t) pointcloud.points.size ();
  pointcloud.height = 1;
  keypoints.width = (uint32_t) keypoints.points.size ();
  keypoints.height = 1;

}
bool TrackerForProject::filterRANSAC(cv::Mat newFrame_, vector<Point2f> &corners, vector<Point2f> &nextCorners)
{
	int ransacReprojThreshold = 3;

	cv::Mat prev_(prevFrame_(position_));
	cv::Mat new_(newFrame_);

	// detecting keypoints
    SurfFeatureDetector detector;

	detector.detect(prev_, keypoints1);

    vector<KeyPoint> keypoints2;
    detector.detect(new_, keypoints2);

    // computing descriptors
    SurfDescriptorExtractor extractor;
    Mat descriptors1;
    extractor.compute(prev_, keypoints1, descriptors1);
    Mat descriptors2;
    extractor.compute(newFrame_, keypoints2, descriptors2);

    // matching descriptors
    BFMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
	
	std::cout << matches.size() << std::endl;

	vector<Point2f> points1, points2;

    // fill the arrays with the points
    for (int i = 0; i < matches.size(); i++)
    {
		points1.push_back(keypoints1[matches[i].queryIdx].pt);
    }
    for (int i = 0; i < matches.size(); i++)
    {
        points2.push_back(keypoints2[matches[i].trainIdx].pt);
    }

    Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);

    Mat points1Projected;
    perspectiveTransform(Mat(points1), points1Projected, H);

	vector<KeyPoint> keypoints3;

	for(int i = 0; i < matches.size(); i++)
	{
		Point2f p1 = points1Projected.at<Point2f>(i);   // points1 was filled in match order, so row i belongs to matches[i]
		Point2f p2 = keypoints2.at(matches[i].trainIdx).pt;
		if(((p2.x - p1.x) * (p2.x - p1.x) +
			(p2.y - p1.y) * (p2.y - p1.y) <= ransacReprojThreshold * ransacReprojThreshold)&& ((p2.x > position_.x - 10) 
			&& (p2.x < position_.x + position_.width + 10) && (p2.y > position_.y - 10) &&(p2.y < position_.y + position_.height + 10)) )
		{
			corners.push_back(keypoints1.at(matches[i].queryIdx).pt);
			nextCorners.push_back(keypoints2.at(matches[i].trainIdx).pt);

			keypoints3.push_back(keypoints2.at(matches[i].trainIdx));
		}		
	}

	for(int i = 0; i < corners.size(); i++)
	{
		corners[i].x += position_.x;
		corners[i].y += position_.y;
	}

	keypoints1 = keypoints3;

	for(int i = 0; i < keypoints1.size(); i++)
	{
		keypoints1[i].pt.x -= position_.x;
		keypoints1[i].pt.y -= position_.y;
	}

    if (keypoints1.empty())
    {
        return false;
    }

    return true;
}
Example #22
int main( int argc, char* argv[])
{
	// input file names
	string imageName1;
	string imageName2;


	// parse command-line parameters
	for( int i = 1; i < argc; i++){
		if( string(argv[ i]) == "-i1" && i + 1 < argc){
			imageName1 = argv[ ++i];
		} else if( string(argv[ i]) == "-i2" && i + 1 < argc){
			imageName2 = argv[ ++i];
		} else if( string(argv[ i]) == "-h"){
			cout << "Use: " << argv[0] << "  -i1 imageName1 -i2 imageName2" << endl;
			cout << "Merges two images into one. The images have to share some common area and have to be taken from one location." << endl;
			return 0;
		} else {
			cerr << "Error: Unrecognized command line parameter \"" << argv[ i] << "\" use -h to get more information." << endl;
		}
	}

	// check that the mandatory parameters were given
	if( imageName1.empty() || imageName2.empty()){
		cerr << "Error: Some mandatory command line options were not specified. Use -h for more information." << endl;
		return -1;
	}


	// load the input images as grayscale
	Mat img1 = imread( imageName1, 0);
	Mat img2 = imread( imageName2, 0);

	if( img1.data == NULL || img2.data == NULL){
		cerr << "Error: Failed to read input image files." << endl;
		return -1;
	}

	// SURF local feature detector
	SurfFeatureDetector detector;

	// detect the local features
	vector< KeyPoint> keyPoints1, keyPoints2;
	detector.detect( img1, keyPoints1);
	detector.detect( img2, keyPoints2);
	cout << keyPoints1.size() << " " << keyPoints2.size() << endl;

	// SURF descriptor extractor
	SurfDescriptorExtractor descriptorExtractor;

	// compute the SURF descriptors
	Mat descriptors1, descriptors2;
	descriptorExtractor.compute( img1, keyPoints1, descriptors1);
	descriptorExtractor.compute( img2, keyPoints2, descriptors2);

	// this vector exists only for the correspondence-search interface
	vector< Mat> descriptorVector2;
	descriptorVector2.push_back( descriptors2);

	// matcher that can search for similar vectors in high-dimensional spaces fairly efficiently
	FlannBasedMatcher matcher;
	// add the descriptors among which nearest neighbors will later be searched
	matcher.add( descriptorVector2);
	// build the search structure over the inserted descriptors
	matcher.train();

	// find the most similar descriptors (from image 2) for descriptors1 (regions of image 1)
	vector<cv::DMatch > matches;
	matcher.match( descriptors1, matches);

	// sort the correspondences from the best (smallest mutual distance in descriptor space)
	sort( matches.begin(), matches.end(), compareDMatch);
	// keep only the 200 best correspondences
	matches.resize( min( 200, (int) matches.size()));

	// prepare the corresponding pairs
	Mat img1Pos( matches.size(), 1, CV_32FC2);
	Mat img2Pos( matches.size(), 1, CV_32FC2);

	// fill the matrices with the point positions
	for( int i = 0; i < (int)matches.size(); i++){
		img1Pos.at< Vec2f>( i)[0] = keyPoints1[ matches[ i].queryIdx].pt.x;
		img1Pos.at< Vec2f>( i)[1] = keyPoints1[ matches[ i].queryIdx].pt.y;
		img2Pos.at< Vec2f>( i)[0] = keyPoints2[ matches[ i].trainIdx].pt.x;
		img2Pos.at< Vec2f>( i)[1] = keyPoints2[ matches[ i].trainIdx].pt.y;
	}

	// Fill in: compute the 3x3 homography with RANSAC, using a single OpenCV function.
	/** FILL DONE **/
	Mat homography = findHomography( img1Pos, img2Pos, CV_RANSAC );


	// output buffer for rendering the merged images
	Mat outputBuffer( 1024, 1280, CV_8UC1);

	// We want to draw the merged image into outputBuffer so that it touches the borders without exceeding them.
	// Image 2 is "glued" to image 1; the composite then has to be scaled and shifted to the desired position.
	// For that we need the minimum and maximum coordinates of the drawn images. For image 1 this is simple: the extremes
	// come directly from its dimensions. For image 2 we must project its corner points into image-1 space using the homography obtained earlier.

	float minX = 0;
	float minY = 0;
	float maxX = (float) img1.cols;
	float maxY = (float) img1.rows;

	// corners of image 2
	vector< Vec3d> corners;
	corners.push_back( Vec3d( 0, 0, 1));
	corners.push_back( Vec3d( img2.cols, 0, 1));
	corners.push_back( Vec3d( img2.cols, img2.rows, 1));
	corners.push_back( Vec3d( 0, img2.rows, 1));

	// project the image-2 corners into image-1 space and update the extremes
	for( int i = 0; i < (int)corners.size();i ++){

		// Fill in: transform Mat( corners[ i]) into image-1 space using the homography.
		// Mind which direction the homography maps; use homography or homography.inv() accordingly.
		/**FILL ALMOST DONE**/
		Mat projResult = homography.inv() * Mat( corners[ i]);

		minX = std::min( minX, (float) (projResult.at<double>( 0) / projResult.at<double>( 2)));
		maxX = std::max( maxX, (float) (projResult.at<double>( 0) / projResult.at<double>( 2)));
		minY = std::min( minY, (float) (projResult.at<double>( 1) / projResult.at<double>( 2)));
		maxY = std::max( maxY, (float) (projResult.at<double>( 1) / projResult.at<double>( 2)));
	}




	// Shift and scale the merged image so that it is as large as possible while staying inside the output buffer.

	// The scale factor must be chosen so that the result fits both vertically and horizontally
	double scaleFactor = min( outputBuffer.cols / ( maxX - minX), outputBuffer.rows / ( maxY - minY));

	// Fill in: prepare one matrix (scaleMatrix) that scales by scaleFactor and another (translateMatrix) that shifts the result by -minX and -minY.
	// After this, the image will be inside the output buffer.
	Mat scaleMatrix = Mat::eye( 3, 3, CV_64F);
	Mat translateMatrix = Mat::eye( 3, 3, CV_64F);
	/**FILL DONE**/
    scaleMatrix.at<double>(0,0) = scaleFactor;
    scaleMatrix.at<double>(1,1) = scaleFactor;

    translateMatrix.at<double>(0,2) = -(double)minX; 
    translateMatrix.at<double>(1,2) = -(double)minY;
   
    cout << endl << minX << " " << minY << endl << translateMatrix << endl << endl;
    
	Mat centerMatrix = scaleMatrix * translateMatrix;


	// Transform image 1
	warpPerspective( img1, outputBuffer, centerMatrix, outputBuffer.size(), 1, BORDER_TRANSPARENT);

	// Transform image 2
	warpPerspective( img2, outputBuffer, centerMatrix * homography.inv(), outputBuffer.size(), 1, BORDER_TRANSPARENT);

	cout << "normMatrix" << endl;
	cout << centerMatrix << endl << endl;

	cout << "normMatrix" << endl;
	cout << homography << endl << endl;

#if VISUAL_OUTPUT
	imshow( "IMG1", img1);
	imshow( "IMG2", img2);
	imshow( "MERGED", outputBuffer);
	waitKey();
#endif
	return 0;
}
int main(int argc, char* argv[])
{

	int i, j;
	int samplesOnGroup = 60;
	int trainingGroups = 4;
	int allSamples = samplesOnGroup * trainingGroups;
	IplImage *img2;
	cout << "Vector quantization..." << endl;
	collectclasscentroids();
	vector<Mat> descriptors = bowTrainer.getDescriptors();
	int count = 0;
	for (vector<Mat>::iterator iter = descriptors.begin(); iter != descriptors.end(); iter++)
	{
		count += iter->rows;
	}
	cout << "Clustering " << count << " features" << endl;
	//choosing cluster's centroids as dictionary's words
	Mat dictionary = bowTrainer.cluster();
	bowDE.setVocabulary(dictionary);
	cout << "Extracting histograms in the form of BOW for each image " << endl;
	Mat labels(0, 1, CV_32FC1);
	Mat trainingData(0, dictionarySize, CV_32FC1);
	int k = 0;
	vector<KeyPoint> keypoint1;
	Mat bowDescriptor1;
	//extracting histogram in the form of bow for each image 
	for (j = 1; j <= trainingGroups; j++)
		for (i = 1; i <= samplesOnGroup; i++)
		{
			sprintf(ch, "%s%d%s%d%s", "eval/", j, " (", i, ").jpg");
			const char* imageName = ch;
			img2 = cvLoadImage(imageName, 0);
			detector.detect(img2, keypoint1);
			bowDE.compute(img2, keypoint1, bowDescriptor1);
			trainingData.push_back(bowDescriptor1);
			labels.push_back((float)j);
		}
	//Setting up SVM parameters
	CvSVMParams params;
	params.kernel_type = CvSVM::RBF;
	params.svm_type = CvSVM::C_SVC;
	params.gamma = 0.50625000000000009;
	params.C = 312.50000000000000;
	params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.000001);
	CvSVM svm;



	printf("%s\n", "Training SVM classifier");

	bool res = svm.train(trainingData, labels, cv::Mat(), cv::Mat(), params);

	cout << "Processing evaluation data..." << endl;


	Mat groundTruth(0, 1, CV_32FC1);
	Mat evalData(0, dictionarySize, CV_32FC1);
	k = 0;
	vector<KeyPoint> keypoint2;
	Mat bowDescriptor2;


	Mat results(0, 1, CV_32FC1);
	for (j = 1; j <= trainingGroups; j++)
		for (i = 1; i <= samplesOnGroup; i++)
		{
			sprintf(ch, "%s%d%s%d%s", "eval/", j, " (", i, ").jpg");
			printf("\rEvaluating : %3d %%", ((((j - 1)*samplesOnGroup) + i) * 100 / allSamples));
			const char* imageName = ch;
			img2 = cvLoadImage(imageName, 0);
			detector.detect(img2, keypoint2);
			bowDE.compute(img2, keypoint2, bowDescriptor2);
			evalData.push_back(bowDescriptor2);
			groundTruth.push_back((float)j);
			float response = svm.predict(bowDescriptor2);
			results.push_back(response);
		}
	printf("\n");


	//calculate the number of unmatched classes 
	double errorRate = (double)countNonZero(groundTruth - results) / evalData.rows;
	printf("Error rate = %.2f %% \n", errorRate);
	printf("Press ENTER to exit...");
	getchar();
	return 0;
}
Example #24
    void regressionTest()
    {
        assert( !dextractor.empty() );

        // Read the test image.
        string imgFilename =  string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;

        Mat img = imread( imgFilename );
        if( img.empty() )
        {
            ts->printf( cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str() );
            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
            return;
        }

        vector<KeyPoint> keypoints;
        FileStorage fs( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::READ );
        if( fs.isOpened() )
        {
            read( fs.getFirstTopLevelNode(), keypoints );

            Mat calcDescriptors;
            double t = (double)getTickCount();
            dextractor->compute( img, keypoints, calcDescriptors );
            t = getTickCount() - t;
            ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)cvGetTickFrequency()*1000.)/calcDescriptors.rows );

            if( calcDescriptors.rows != (int)keypoints.size() )
            {
                ts->printf( cvtest::TS::LOG, "Count of computed descriptors and keypoints count must be equal.\n" );
                ts->printf( cvtest::TS::LOG, "Count of keypoints is            %d.\n", (int)keypoints.size() );
                ts->printf( cvtest::TS::LOG, "Count of computed descriptors is %d.\n", calcDescriptors.rows );
                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
                return;
            }

            if( calcDescriptors.cols != dextractor->descriptorSize() || calcDescriptors.type() != dextractor->descriptorType() )
            {
                ts->printf( cvtest::TS::LOG, "Incorrect descriptor size or descriptor type.\n" );
                ts->printf( cvtest::TS::LOG, "Expected size is   %d.\n", dextractor->descriptorSize() );
                ts->printf( cvtest::TS::LOG, "Calculated size is %d.\n", calcDescriptors.cols );
                ts->printf( cvtest::TS::LOG, "Expected type is   %d.\n", dextractor->descriptorType() );
                ts->printf( cvtest::TS::LOG, "Calculated type is %d.\n", calcDescriptors.type() );
                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
                return;
            }

            // TODO read and write descriptor extractor parameters and check them
            Mat validDescriptors = readDescriptors();
            if( !validDescriptors.empty() )
                compareDescriptors( validDescriptors, calcDescriptors );
            else
            {
                if( !writeDescriptors( calcDescriptors ) )
                {
                    ts->printf( cvtest::TS::LOG, "Descriptors can not be written.\n" );
                    ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
                    return;
                }
            }
        }
        else
        {
            ts->printf( cvtest::TS::LOG, "Compute and write keypoints.\n" );
            fs.open( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::WRITE );
            if( fs.isOpened() )
            {
                SurfFeatureDetector fd;
                fd.detect(img, keypoints);
                write( fs, "keypoints", keypoints );
            }
            else
            {
                ts->printf(cvtest::TS::LOG, "File for writting keypoints can not be opened.\n");
                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
                return;
            }
        }
    }
void SURFfeature::SURFfeaturematch(vector<CvPoint2D32f> &match_query, vector<CvPoint2D32f> &match_train)
{
    std::vector<cv::KeyPoint> kpts;
    std::vector<cv::KeyPoint> Quepts;



    SurfFeatureDetector surf(Th1);
    surf.detect(ImageGray1, kpts);
    surf.detect(ImageGray2, Quepts);

    // extract descriptors
    SurfDescriptorExtractor fde;
    cv::Mat desc;
    cv::Mat src;

    fde.compute(ImageGray1, kpts, desc);
    fde.compute(ImageGray2, Quepts, src);

    // brute-force matching of descriptors (replaced below by a FLANN k-NN search)
    //BruteForceMatcher<L2<float>> matcher ; //L2 norm
    std::vector<cv::DMatch> vec_matches;
    //matcher. match ( desc , src , vec_matches ) ;

    cv::Mat distance;
    int k=2; // find two neighbors
    cv::Mat results;

    cv::flann::Index flannIndex(desc, cv::flann::KDTreeIndexParams(), cvflann::FLANN_DIST_EUCLIDEAN);

    // search (nearest neighbor)
    flannIndex.knnSearch(src, results, distance, k, cv::flann::SearchParams() );

    float ErroRatio = 0.8;
    for(int i=0; i<results.rows; i++)   // one result row per query descriptor from ImageGray2
    {
        // Apply NNDR
        //if(results.at<int>(i,0) >= 0 && results.at<int>(i,1) >= 0 && distance.at<float>(i,0) <= ErroRatio * distance.at<float>(i,1))
        //{
            int idx = results.at<int>(i,0);            // nearest neighbor of query row i in desc (ImageGray1)
            match_query.push_back(kpts.at(idx).pt);    // matched point in ImageGray1
            match_train.push_back(Quepts.at(i).pt);    // query point from ImageGray2
        //}
    }


//    for (size_t i = 0; i < vec_matches.size(); ++i)
//    {
//        const DMatch& match = vec_matches[i];
//        CvPoint2D32f pointA;
//        CvPoint2D32f pointB;
//        pointA.x=kpts[match.queryIdx].pt.x;
//        pointA.y=kpts[match.queryIdx].pt.y;
//
//        pointB.x=Quepts[match.trainIdx].pt.x;
//        pointB.y=Quepts[match.trainIdx].pt.y;
//
//        match_query.push_back(pointA);
//        match_train.push_back(pointB);
//    }

    desc.release();
    src.release();
    

}
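The NNDR check above is commented out; a sketch of the same loop with the ratio test enabled (result row i corresponds to a descriptor from ImageGray2; its nearest-neighbor index points into kpts):

for (int i = 0; i < results.rows; i++)
{
    int idx = results.at<int>(i, 0);
    // accept a match only if the best neighbor is clearly closer than the second best
    if (idx >= 0 && results.at<int>(i, 1) >= 0 &&
        distance.at<float>(i, 0) <= ErroRatio * distance.at<float>(i, 1))
    {
        match_query.push_back(kpts.at(idx).pt);   // matched point in ImageGray1
        match_train.push_back(Quepts.at(i).pt);   // query point from ImageGray2
    }
}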
Example #26
void sift_demo( Mat dst, Mat dst2 ){   // despite the name, this demo uses SURF features
 
  SurfFeatureDetector detector (1500); 

  std::vector<KeyPoint> keypoints_1,keypoints_2;  
  detector.detect(dst, keypoints_1);
  detector.detect(dst2, keypoints_2);
  //drawKeypoints(dst, keypoints_1, dst);
  
  SurfDescriptorExtractor extractor;
  Mat descriptors_1, descriptors_2;
  extractor.compute( dst, keypoints_1, descriptors_1 );
  extractor.compute( dst2, keypoints_2, descriptors_2 );
  
  //-- Step 3: Matching descriptor vectors with a brute force matcher
  BFMatcher matcher(NORM_L2);
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );
  pcl::PointXYZRGBA point;
    
  double max_dist = 0; double min_dist = 100;

    // filter out failed matches: first find the distance range
  for( int i = 0; i < descriptors_1.rows; i++ )
  {
    double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  std::vector< DMatch > good_matches;
  for( int i = 0; i < descriptors_1.rows; i++ ){
    if( matches[i].distance < 3*min_dist ){
      good_matches.push_back( matches[i]);
    }
  }
  nuage2.clear();
  //nuage2=nuage;
 for(int i=0;i<dst.cols;i++){
    for (int j=0;j<dst.rows;j++){

      point.x=i;
      point.y=j;

      point.g=nuage.at(i,j).g;
      point.b=nuage.at(i,j).b;
      point.r=nuage.at(i,j).r;

      // z==z is false only for NaN: skip points with no valid depth
      if (nuage.at(i,j).z==nuage.at(i,j).z){
        point.z=nuage.at(i,j).z*100;
        nuage2.push_back(point);
      }
    }
  }
 
  //-- Draw matches
  Mat img_matches;
  drawMatches( dst, keypoints_1, dst2, keypoints_2, good_matches, img_matches );  
  imshow("Matches", img_matches );
   
}