static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
                       vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher,
                       const vector<Mat>& trainImages, const vector<string>& trainImagesNames )
{
    cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;

    descriptorMatcher.add( trainDescriptors );
    descriptorMatcher.train();

    descriptorMatcher.match( queryDescriptors, matches );

    CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );

    cout << "Number of matches: " << matches.size() << endl;
    cout << ">" << endl;

    for( size_t i = 0; i < trainDescriptors.size(); i++ ){

        std::vector< std::vector<DMatch> > matches2;
        std::vector<DMatch> good_matches;

        descriptorMatcher.knnMatch( queryDescriptors, trainDescriptors[i], matches2, 2 );
        CV_Assert( queryDescriptors.rows == (int)matches2.size() || matches2.empty() );

        const float ratio = 0.8f; // as in Lowe's paper; can be tuned
        for( size_t j = 0; j < matches2.size(); j++ ){
            // keep a match only if both nearest neighbors exist and the ratio test passes
            if( matches2[j].size() >= 2 && matches2[j][0].distance < ratio * matches2[j][1].distance ){
                good_matches.push_back( matches2[j][0] );
            }
        }

        // good_matches is only counted here; nothing downstream consumes it
        cout << "currentMatchSize : " << good_matches.size() << endl;
    }

    
}
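A minimal driver for matchDescriptors might look like the sketch below. The file names, the SURF-based setup, and the runMatchingDemo wrapper are illustrative assumptions, not part of the original example; SURF is chosen because its CV_32F descriptors are compatible with FLANN's default KD-tree index.

// Hypothetical usage sketch (OpenCV 2.4.x API; file names are placeholders).
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int runMatchingDemo()
{
    Mat queryImage = imread("query.png", 0);               // placeholder file name
    vector<string> trainImagesNames;
    trainImagesNames.push_back("train0.png");              // placeholder file name
    vector<Mat> trainImages;
    trainImages.push_back(imread(trainImagesNames[0], 0));

    SurfFeatureDetector detector;
    SurfDescriptorExtractor extractor;

    // detect keypoints and compute descriptors for the query image
    vector<KeyPoint> queryKeypoints;
    Mat queryDescriptors;
    detector.detect(queryImage, queryKeypoints);
    extractor.compute(queryImage, queryKeypoints, queryDescriptors);

    // one descriptor matrix per train image, as matchDescriptors expects
    vector<Mat> trainDescriptors(trainImages.size());
    for (size_t i = 0; i < trainImages.size(); i++) {
        vector<KeyPoint> kp;
        detector.detect(trainImages[i], kp);
        extractor.compute(trainImages[i], kp, trainDescriptors[i]);
    }

    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matchDescriptors(queryDescriptors, trainDescriptors, matches,
                     matcher, trainImages, trainImagesNames);
    return 0;
}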
Example 2
/**
 * @function main
 */
int main(int argc, char **argv)
{
    if (argc < 3) {
        cout << "Usage: " << argv[0] << " image1 image2" << endl;
        return -1;
    }

    image1 = imread(argv[1], 1);
    image2 = imread(argv[2], 1);
    if (image1.empty() || image2.empty()) {
        cout << "Error: could not read the input images." << endl;
        return -1;
    }
    rows = image1.rows;
    cols = image1.cols;


    namedWindow("image1", WINDOW_AUTOSIZE);
    imshow("image1", image1);
    namedWindow("image2", WINDOW_AUTOSIZE);
    imshow("image2", image2);

    Mat image1_gray;
    Mat image2_gray;


    /// Convert both images to grayscale.
    cvtColor(image1, image1_gray, COLOR_BGR2GRAY);
    cvtColor(image2, image2_gray, COLOR_BGR2GRAY);

    /// Detector parameters
    int blockSize = 2;
    int apertureSize = 3;
    double k = 0.04;

    /// Detecting corners
    /*
       void cornerHarris(InputArray src, OutputArray dst, int blockSize, int ksize, double k, int borderType=BORDER_DEFAULT)

       src – single-channel 8-bit or floating-point source image.
       dst – destination image containing the Harris responses; same size as src, type CV_32FC1.
       blockSize – neighborhood size.
       ksize – aperture parameter for the Sobel operator.
       k – Harris detector free parameter.
       borderType – pixel extrapolation method.
     */
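    /*
       For reference (a standard fact about the Harris detector, not from the
       original comments): cornerHarris computes per pixel the response

           R = det(M) - k * trace(M)^2

       where M is the 2x2 matrix of image-gradient products summed over a
       blockSize x blockSize neighborhood; corners yield large positive R.
     */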

    Mat image1dst;
    Mat image2dst;

    image1dst = Mat::zeros(image1.size(), CV_32FC1);
    image2dst = Mat::zeros(image2.size(), CV_32FC1);


    cornerHarris(image1_gray, image1dst, blockSize, apertureSize, k, BORDER_DEFAULT);
    cornerHarris(image2_gray, image2dst, blockSize, apertureSize, k, BORDER_DEFAULT);

    int threshHarris = 100;

    /// Normalizing
    /*
       void normalize(InputArray src, OutputArray dst, double alpha=1, double beta=0, int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray() )
       src – input array.
       dst – output array of the same size as src .
       alpha – norm value to normalize to or the lower range boundary in case of the range normalization.
       beta – upper range boundary in case of the range normalization; it is not used for the norm normalization.
       normType – normalization type (see the details below).
       dtype – when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth =CV_MAT_DEPTH(dtype).
       mask – optional operation mask.
     */
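    /*
       For reference, NORM_MINMAX maps values linearly (alpha = lower bound,
       beta = upper bound):

           dst(x) = (src(x) - min(src)) * (beta - alpha) / (max(src) - min(src)) + alpha

       so the calls below stretch the Harris responses into the range [0, 255].
     */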

    Mat image1dst_norm;
    Mat image2dst_norm;

    normalize(image1dst, image1dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    normalize(image2dst, image2dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());

    /*
       On each element of the input array, convertScaleAbs performs three operations
       sequentially: scaling, taking the absolute value, and conversion to an
       unsigned 8-bit type.
     */
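    /*
       Concretely, with the default alpha = 1 and beta = 0:

           dst(x) = saturate_cast<uchar>( |alpha * src(x) + beta| )

       so here it simply takes absolute values and clamps them to 8 bits.
     */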

    Mat image1dst_norm_scaled;
    Mat image2dst_norm_scaled;

    convertScaleAbs(image1dst_norm, image1dst_norm_scaled);
    convertScaleAbs(image2dst_norm, image2dst_norm_scaled);

    KeyPoint kp;

    for (int j = 0; j < image1dst_norm.rows; j++) {
        for (int i = 0; i < image1dst_norm.cols; i++) {
            if ((int) image1dst_norm.at<float>(j, i) > threshHarris) {
                // at(j, i) indexes (row, col), so x is the column and y is the row
                kp.pt.x = (float) i;
                kp.pt.y = (float) j;
                // needed, not sure why
                kp.size = 100.0;
                keypoints1.push_back(kp);
            }
        }
    }

    for (int j = 0; j < image2dst_norm.rows; j++) {
        for (int i = 0; i < image2dst_norm.cols; i++) {
            if ((int) image2dst_norm.at<float>(j, i) > threshHarris) {
                kp.pt.x = (float) i;
                kp.pt.y = (float) j;
                kp.size = 100.0;
                keypoints2.push_back(kp);
            }
        }
    }


    BriefDescriptorExtractor briefDesc(64);

    Mat descriptors1, descriptors2;
    briefDesc.compute(image1, keypoints1, descriptors1);
    briefDesc.compute(image2, keypoints2, descriptors2);

    FlannBasedMatcher matcher;
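    // Note: FLANN's default KD-tree index assumes CV_32F descriptors, while BRIEF
    // produces binary CV_8U descriptors. In practice this matcher should be built
    // with flann::LshIndexParams (or replaced by BFMatcher with NORM_HAMMING)
    // for the matching below to behave correctly.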

    Mat descriptorAuxKp1;
    Mat descriptorAuxKp2;


    vector<int> associateIdx;

    for (int i = 0; i < descriptors1.rows; i++) {
        // copy row i of the descriptor matrix, which holds the descriptor values
        // computed for keypoints1[i]
        descriptors1.row(i).copyTo(descriptorAuxKp1);

        // keep only the descriptor rows of the image-2 keypoints that we want to
        // compare against the image-1 keypoint currently being processed
        descriptorAuxKp2.create(0, 0, CV_8UC1);

        // associateIdx maps the indices returned by matches back to keypoint indices
        associateIdx.clear();

        for (int j = 0; j < descriptors2.rows; j++) {

            float p1x = keypoints1[i].pt.x;
            float p1y = keypoints1[i].pt.y;
            float p2x = keypoints2[j].pt.x;
            float p2y = keypoints2[j].pt.y;

            float distance = sqrt(pow((p1x - p2x), 2) + pow((p1y - p2y), 2));

            // among the rows of descriptors2, keep only those whose associated
            // keypoints lie within a fixed distance of the current (i-th) keypoint
            if (distance < 10) {
                descriptorAuxKp2.push_back(descriptors2.row(j));
                associateIdx.push_back(j);
            }
        }

        // match one image-1 keypoint against the kept image-2 keypoints;
        // clear() keeps descriptors from previous iterations from accumulating
        matcher.clear();
        matcher.add(vector<Mat>(1, descriptorAuxKp1));
        matcher.train();

        matcher.match(descriptorAuxKp2, matches);

        // restore the match attributes to meaningful values
        for (size_t idxMatch = 0; idxMatch < matches.size(); idxMatch++) {
            // the query row was a kept image-2 descriptor: map it back to its keypoint
            matches[idxMatch].trainIdx = associateIdx[matches[idxMatch].queryIdx];
            // every kept descriptor was compared against keypoint i of image 1
            matches[idxMatch].queryIdx = i;
        }

        // append the matches found for this keypoint to those found so far
        matchesWithDist.insert(matchesWithDist.end(), matches.begin(), matches.end());
    }



    // sort matchesWithDist by descriptor distance (DMatch::operator< compares
    // distances) rather than by Euclidean distance; nth_element partially sorts
    // so that the 25 best matches come first
    if (matchesWithDist.size() > 25)
        nth_element(matchesWithDist.begin(), matchesWithDist.begin() + 24, matchesWithDist.end());

    Mat imageMatches;
    drawMatches(image1, keypoints1,	// 1st image and its keypoints
		image2, keypoints2,	// 2nd image and its keypoints
		matchesWithDist,	// the matches
		imageMatches,	// the image produced
		Scalar::all(-1),	// color of the lines
		Scalar(255, 255, 255)	//color of the keypoints
	);


    namedWindow(matches_window, WINDOW_AUTOSIZE);
    imshow(matches_window, imageMatches);
    imwrite("resultat.png", imageMatches);



    /// Create a window and a trackbar
    namedWindow(transparency_window, WINDOW_AUTOSIZE);
    createTrackbar("Threshold: ", transparency_window, &thresh, max_thresh, interface);








    interface(0, 0);

    waitKey(0);
    return 0;
}
Example 3
int main( int argc, char* argv[])
{
	// names of the input files to process
	string imageName1;
	string imageName2;


	// parse the command line arguments
	for( int i = 1; i < argc; i++){
		if( string(argv[ i]) == "-i1" && i + 1 < argc){
			imageName1 = argv[ ++i];
		} else if( string(argv[ i]) == "-i2" && i + 1 < argc){
			imageName2 = argv[ ++i];
		} else if( string(argv[ i]) == "-h"){
			cout << "Use: " << argv[0] << "  -i1 imageName1 -i2 imageName2" << endl;
			cout << "Merges two images into one. The images have to share some common area and have to be taken from one location." << endl;
			return 0;
		} else {
			cerr << "Error: Unrecognized command line parameter \"" << argv[ i] << "\" use -h to get more information." << endl;
		}
	}

	// check that the mandatory parameters were given
	if( imageName1.empty() || imageName2.empty()){
		cerr << "Error: Some mandatory command line options were not specified. Use -h for more information." << endl;
		return -1;
	}


	// load the images as grayscale
	Mat img1 = imread( imageName1, 0);
	Mat img2 = imread( imageName2, 0);

	if( img1.empty() || img2.empty()){
		cerr << "Error: Failed to read input image files." << endl;
		return -1;
	}

	// SURF local feature detector
	SurfFeatureDetector detector;

	// run the keypoint detection itself
	vector< KeyPoint> keyPoints1, keyPoints2;
	detector.detect( img1, keyPoints1);
	detector.detect( img2, keyPoints2);
	cout << keyPoints1.size() << " " << keyPoints2.size() << endl;

	// SURF descriptor extractor
	SurfDescriptorExtractor descriptorExtractor;

	// compute the SURF descriptors themselves
	Mat descriptors1, descriptors2;
	descriptorExtractor.compute( img1, keyPoints1, descriptors1);
	descriptorExtractor.compute( img2, keyPoints2, descriptors2);

	// this vector exists only for the correspondence-search function
	vector< Mat> descriptorVector2;
	descriptorVector2.push_back( descriptors2);

	// an object that can search fairly efficiently for similar vectors in high-dimensional spaces
	FlannBasedMatcher matcher;
	// add the descriptors among which nearest neighbors will later be searched
	matcher.add( descriptorVector2);
	// build the search structure over the inserted descriptors
	matcher.train();

	// find the most similar descriptors (from image 2) for descriptors1 (regions from image 1)
	vector<cv::DMatch > matches;
	matcher.match( descriptors1, matches);

	// sort the correspondences from best to worst (smallest mutual distance in descriptor space first)
	sort( matches.begin(), matches.end(), compareDMatch);
	// use only the 200 best correspondences
	matches.resize( min( 200, (int) matches.size()));

	// prepare the corresponding point pairs
	Mat img1Pos( matches.size(), 1, CV_32FC2);
	Mat img2Pos( matches.size(), 1, CV_32FC2);

	// fill the matrices with keypoint positions
	for( int i = 0; i < (int)matches.size(); i++){
		img1Pos.at< Vec2f>( i)[0] = keyPoints1[ matches[ i].queryIdx].pt.x;
		img1Pos.at< Vec2f>( i)[1] = keyPoints1[ matches[ i].queryIdx].pt.y;
		img2Pos.at< Vec2f>( i)[0] = keyPoints2[ matches[ i].trainIdx].pt.x;
		img2Pos.at< Vec2f>( i)[1] = keyPoints2[ matches[ i].trainIdx].pt.y;
	}

	// Compute the 3x3 homography matrix using the RANSAC algorithm, with a single OpenCV function.
	/** FILL DONE **/
	Mat homography = findHomography( img1Pos, img2Pos, CV_RANSAC );
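	// findHomography(img1Pos, img2Pos) returns H such that, in homogeneous
	// coordinates, p2 ~ H * p1. Projecting image-2 points back into image-1
	// space therefore needs the inverse, which is why homography.inv() is
	// used below.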


	// output buffer for drawing the merged images
	Mat outputBuffer( 1024, 1280, CV_8UC1);

	// We want to draw the merged result into outputBuffer so that it touches the borders without exceeding them.
	// Image 2 is "glued" onto image 1; this composite then has to be scaled and shifted to the desired position.
	// For that we need the minimum and maximum coordinates of the drawn images. For image 1 this is easy: the extremes
	// come directly from its dimensions. For image 2 we must project its corner points into image-1 space using the homography obtained earlier.

	float minX = 0;
	float minY = 0;
	float maxX = (float) img1.cols;
	float maxY = (float) img1.rows;

	// corners of image 2
	vector< Vec3d> corners;
	corners.push_back( Vec3d( 0, 0, 1));
	corners.push_back( Vec3d( img2.cols, 0, 1));
	corners.push_back( Vec3d( img2.cols, img2.rows, 1));
	corners.push_back( Vec3d( 0, img2.rows, 1));

	// project the corners of image 2 into image-1 space and update the minima and maxima
	for( int i = 0; i < (int)corners.size();i ++){

		// Transform Mat( corners[ i]) into image-1 space using the homography.
		// Mind the direction of the homography; use either homography or homography.inv() accordingly.
		/**FILL ALMOST DONE**/
		Mat projResult = homography.inv() * Mat( corners[ i]);

		minX = std::min( minX, (float) (projResult.at<double>( 0) / projResult.at<double>( 2)));
		maxX = std::max( maxX, (float) (projResult.at<double>( 0) / projResult.at<double>( 2)));
		minY = std::min( minY, (float) (projResult.at<double>( 1) / projResult.at<double>( 2)));
		maxY = std::max( maxY, (float) (projResult.at<double>( 1) / projResult.at<double>( 2)));
	}




	// Shift and scale the merged image so that the result is as large as possible while staying inside the output buffer.

	// The scale factor must be chosen so that the result fits both vertically and horizontally
	double scaleFactor = min( outputBuffer.cols / ( maxX - minX), outputBuffer.rows / ( maxY - minY));

	// Prepare one matrix (scaleMatrix) that scales by scaleFactor and a second (translateMatrix) that shifts the result by -minX and -minY.
	// After this, the image fits inside the output buffer.
	Mat scaleMatrix = Mat::eye( 3, 3, CV_64F);
	Mat translateMatrix = Mat::eye( 3, 3, CV_64F);
	/**FILL DONE**/
	scaleMatrix.at<double>(0,0) = scaleFactor;
	scaleMatrix.at<double>(1,1) = scaleFactor;

	translateMatrix.at<double>(0,2) = -(double)minX;
	translateMatrix.at<double>(1,2) = -(double)minY;

	cout << endl << minX << " " << minY << endl << translateMatrix << endl << endl;

	Mat centerMatrix = scaleMatrix * translateMatrix;
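	// Reading right to left, centerMatrix = S * T first translates by
	// (-minX, -minY) and then scales by scaleFactor, i.e.
	//     x' = scaleFactor * (x - minX),  y' = scaleFactor * (y - minY),
	// which maps the joint bounding box of both images onto the output buffer.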


	// Transform image 1
	warpPerspective( img1, outputBuffer, centerMatrix, outputBuffer.size(), 1, BORDER_TRANSPARENT);

	// Transform image 2
	warpPerspective( img2, outputBuffer, centerMatrix * homography.inv(), outputBuffer.size(), 1, BORDER_TRANSPARENT);

	cout << "normMatrix" << endl;
	cout << centerMatrix << endl << endl;

	cout << "normMatrix" << endl;
	cout << homography << endl << endl;

#if VISUAL_OUTPUT
	imshow( "IMG1", img1);
	imshow( "IMG2", img2);
	imshow( "MERGED", outputBuffer);
	waitKey();
#endif

	return 0;
}
//--------------------------------------[ main() function ]-----------------------------------------
//          Description: entry point of the console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main( ) 
{
	//[0] change the console text color
	system("color 6F"); 


	//[1] load the image, display it, and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("Original image", trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	//[2] detect SURF keypoints and compute descriptors for the training image
	vector<KeyPoint> train_keyPoint;
	Mat trainDescriptor;
	SurfFeatureDetector featureDetector(80);
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SurfDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

	//[3] create a FLANN-based descriptor matcher
	FlannBasedMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescriptor);
	matcher.add(train_desc_collection);
	matcher.train();

	//[4] create the video capture object and define the frame counter
	VideoCapture cap(0);
	unsigned int frameCount = 0;// frame count

	//[5] loop until the 'q' key is pressed
	while(char(waitKey(1)) != 'q')
	{
		//<1> per-frame setup
		int64 time0 = getTickCount();
		Mat testImage, testImage_gray;
		cap >> testImage;// grab a frame from the camera into testImage
		if(testImage.empty())
			continue;

		//<2> convert the image to grayscale
		cvtColor(testImage, testImage_gray, CV_BGR2GRAY);

		//<3> detect SURF keypoints and compute descriptors for the test image
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(testImage_gray, test_keyPoint);
		featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

		//<4> match the training and test descriptors
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		// <5> keep the good matches using Lowe's ratio test
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			if(matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		//<6> draw the matches and show the window
		Mat dstImage;
		drawMatches(testImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("Match window", dstImage);

		//<7> print frame-rate information
		cout << "Current frame rate: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}