Code Example #1
int OpenCVImageProcessor::getImageInfo(Mat srcImage, std::string *path, std::vector<std::string> desc){
    
    FileStorage fs(*path,FileStorage::READ);
    int minHessian = 400;
    Mat src_image,object_descriptor,scene_descriptor,resizedImage;
    Size size(320,220);
    resize(srcImage, resizedImage, size);
    cvtColor( resizedImage,src_image, cv::COLOR_BGR2GRAY);
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    
    detector.detect( src_image, keypoints_object );
    SurfDescriptorExtractor extractor;
    extractor.compute( src_image, keypoints_object, object_descriptor );
    std::vector<cv::DMatch> matches;
    printf("se desi %d; %d %d\n",src_image.cols,src_image.rows,desc.size());
    for(int i=0;i<desc.size();i++){
        FileNode descriptor=fs[desc[i]];
        std::string keypoints=desc[i];
        keypoints.append("_keypoints");
        FileNode keypointsfn=fs[keypoints];
        read(descriptor, scene_descriptor);
        read(keypointsfn , keypoints_scene);
        RobustMatcher matcher;
        int result=matcher.matchRnasac(matches, keypoints_object, keypoints_scene, object_descriptor, scene_descriptor);
        if(result>0) return (int)i;
    }
    return -1;
}
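
For context, getImageInfo() above expects a YAML file in which each known object stores its descriptor matrix under its own name and its keypoints under "<name>_keypoints". A minimal writer sketch (not from the original project; the function name is illustrative):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

// Sketch: write one known object into the file later read by getImageInfo().
// cv::write() for std::vector<cv::KeyPoint> is provided by features2d.
void saveObject(cv::FileStorage &fs, const std::string &name,
                const cv::Mat &descriptors,
                const std::vector<cv::KeyPoint> &keypoints)
{
    cv::write(fs, name, descriptors);              // read back via fs[desc[i]]
    cv::write(fs, name + "_keypoints", keypoints); // read back via fs[name + "_keypoints"]
}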
Code Example #2
File: main.cpp Project: AhmedSaad/Kinstruct
void alignFrames(Mat *colorA, Mat *colorB, Mat *depthA, Mat *depthB, Transformation *inverse)
{
	// Convert both frames to grayscale
	Mat grayA, grayB;
	cvtColor(*colorA, grayA, CV_RGB2GRAY);
	cvtColor(*colorB, grayB, CV_RGB2GRAY);

	// Prepare the matcher
	RobustMatcher rmatcher;

	Ptr<FeatureDetector> pfd = new SurfFeatureDetector(10);

	rmatcher.setFeatureDetector(pfd);
	// Match the two images
	vector<DMatch> matches;
	vector<KeyPoint> keypointsA, keypointsB;
	Mat fundemental= rmatcher.match(grayA, grayB, matches, keypointsA, keypointsB);
	

	Mat img_matches;
	drawMatches( grayA, keypointsA, grayB, keypointsB,
               matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  
    //-- Show detected matches
    imshow( "Good Matches", img_matches );

	vector<Point2f> selPoints1, selPoints2;
	KeyPoint::convert(keypointsA, selPoints1);
	KeyPoint::convert(keypointsB, selPoints2);

	vector<Point3f *> setA, setB;
	for(int i = 0; i < matches.size(); i++)
	{
		Point2f point2D = selPoints1.at(matches.at(i).queryIdx);
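		// depthA packs a 16-bit depth value into the two low bytes of each Vec3b pixel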
		int bit0 = depthA->at<Vec3b>(point2D.y, point2D.x)[0];
		int bit1 = depthA->at<Vec3b>(point2D.y, point2D.x)[1];
		double depth = (bit0 | bit1 << 8 );
		Point3f *point3D = new Point3f(point2D.x, point2D.y, depth / 100); 
		setA.push_back(point3D);

		point2D = selPoints2.at(matches.at(i).trainIdx);
		bit0 = depthB->at<Vec3b>(point2D.y, point2D.x)[0];
		bit1 = depthB->at<Vec3b>(point2D.y, point2D.x)[1];
		depth = (bit0 | bit1 << 8 );
		Point3f *point3DB = new Point3f(point2D.x, point2D.y, depth / 100); 
		setB.push_back(point3DB);
	}


	Transformation result(false);
	HornMethod hornMethod;
	hornMethod.getTransformation(setA, setB, &result);
	inverse->invert(&result);
	// clean up the heap-allocated points in setA and setB
	for (size_t i = 0; i < setA.size(); i++) {
		delete setA[i];
		delete setB[i];
	}
}
Code Example #3
bool recognize(const cv::Mat &in, cv::Rect roi) {

    bool result= false;
	cv::Mat image1 = cv::imread("data/sim.png",0);

    // Prepare the matcher
    RobustMatcher rmatcher;
    rmatcher.setConfidenceLevel(0.98);
    rmatcher.setMinDistanceToEpipolar(1.0);
    rmatcher.setRatio(0.65f);

    cv::Ptr<cv::FeatureDetector> pfd = new cv::SurfFeatureDetector(10);
    rmatcher.setFeatureDetector(pfd);

    cv::Mat face = in(roi);


    // Match the two images
    std::vector<cv::DMatch> matches;
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    rmatcher.match(image1, face ,matches, keypoints1, keypoints2);


    // draw the matches
    cv::Mat imageMatches;
    cv::drawMatches(image1,keypoints1,          // 1st image and its keypoints
                    face,keypoints2,            // 2nd image and its keypoints
                    matches,                    // the matches
                    imageMatches,               // the image produced
                    cv::Scalar(255,255,255));   // color of the lines


    if (rmatcher.getRecognition()){
        result=true;
    }

    return result;
}
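
A hypothetical call site for recognize() (sketch; the file name and ROI values are illustrative, e.g. the ROI would come from a face detector):

cv::Mat frame = cv::imread("data/frame.png");   // illustrative input image
cv::Rect faceRoi(100, 80, 120, 120);            // illustrative region of interest
if (recognize(frame, faceRoi))
    std::cout << "match against data/sim.png accepted" << std::endl;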
Code Example #4
File: main_registration.cpp Project: 12rohanb/opencv
/**  Main program  **/
int main()
{

  help();

  // load a mesh given the *.ply file path
  mesh.load(ply_read_path);

  // set parameters
  int numKeyPoints = 10000;

  //Instantiate robust matcher: detector, extractor, matcher
  RobustMatcher rmatcher;
  Ptr<FeatureDetector> detector = ORB::create(numKeyPoints);
  rmatcher.setFeatureDetector(detector);

  /**  GROUND TRUTH OF THE FIRST IMAGE  **/

  // Create & Open Window
  namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO);

  // Set up the mouse events
  setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );

  // Open the image to register
  Mat img_in = imread(img_path, IMREAD_COLOR);

  if (!img_in.data) {
    cout << "Could not open or find the image" << endl;
    return -1;
  }

  Mat img_vis = img_in.clone();

  // Set the number of points to register
  int num_registrations = n;
  registration.setNumMax(num_registrations);

  cout << "Click the box corners ..." << endl;
  cout << "Waiting ..." << endl;

  // Loop until all the points are registered
  while ( waitKey(30) < 0 )
  {
    // Refresh debug image
    img_vis = img_in.clone();

    // Current registered points
    vector<Point2f> list_points2d = registration.get_points2d();
    vector<Point3f> list_points3d = registration.get_points3d();

    // Draw current registered points
    drawPoints(img_vis, list_points2d, list_points3d, red);

    // If the registration is not finished, draw which 3D point we have to register.
    // If the registration is finished, break the loop.
    if (!end_registration)
    {
      // Draw debug text
      int n_regist = registration.getNumRegist();
      int n_vertex = pts[n_regist];
      Point3f current_point3d = mesh.getVertex(n_vertex-1);

      drawQuestion(img_vis, current_point3d, green);
      drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
    }
    else
    {
      // Draw debug text
      drawText(img_vis, "END REGISTRATION", green);
      drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), green);
      break;
    }

    // Show the image
    imshow("MODEL REGISTRATION", img_vis);
  }

  /** COMPUTE CAMERA POSE **/

  cout << "COMPUTING POSE ..." << endl;

  // The list of registered points
  vector<Point2f> list_points2d = registration.get_points2d();
  vector<Point3f> list_points3d = registration.get_points3d();

  // Estimate pose given the registered points
  bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE);
  if ( is_correspondence )
  {
    cout << "Correspondence found" << endl;

    // Compute all the 2D points of the mesh to verify the algorithm and draw it
    vector<Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
    draw2DPoints(img_vis, list_points2d_mesh, green);

  } else {
    cout << "Correspondence not found" << endl << endl;
  }

  // Show the image
  imshow("MODEL REGISTRATION", img_vis);

  // Show image until ESC pressed
  waitKey(0);


   /** COMPUTE 3D of the image Keypoints **/

  // Containers for keypoints and descriptors of the model
  vector<KeyPoint> keypoints_model;
  Mat descriptors;

  // Compute keypoints and descriptors
  rmatcher.computeKeyPoints(img_in, keypoints_model);
  rmatcher.computeDescriptors(img_in, keypoints_model, descriptors);

  // Check if keypoints are on the surface of the registration image and add to the model
  for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
    Point2f point2d(keypoints_model[i].pt);
    Point3f point3d;
    bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
    if (on_surface)
    {
        model.add_correspondence(point2d, point3d);
        model.add_descriptor(descriptors.row(i));
        model.add_keypoint(keypoints_model[i]);
    }
    else
    {
        model.add_outlier(point2d);
    }
  }

  // save the model into a *.yaml file
  model.save(write_path);

  // Out image
  img_vis = img_in.clone();

  // The list of the points2d of the model
  vector<Point2f> list_points_in = model.get_points2d_in();
  vector<Point2f> list_points_out = model.get_points2d_out();

  // Draw some debug text
  string num = IntToString((int)list_points_in.size());
  string text = "There are " + num + " inliers";
  drawText(img_vis, text, green);

  // Draw some debug text
  num = IntToString((int)list_points_out.size());
  text = "There are " + num + " outliers";
  drawText2(img_vis, text, red);

  // Draw the object mesh
  drawObjectMesh(img_vis, &mesh, &pnp_registration, blue);

  // Draw the found keypoints depending on whether or not they lie on the surface
  draw2DPoints(img_vis, list_points_in, green);
  draw2DPoints(img_vis, list_points_out, red);

  // Show the image
  imshow("MODEL REGISTRATION", img_vis);

  // Wait until ESC pressed
  waitKey(0);

  // Close and Destroy Window
  destroyWindow("MODEL REGISTRATION");

  cout << "GOODBYE" << endl;

}
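
The model saved to write_path above is typically reloaded by a later detection stage. A minimal sketch, assuming Model exposes a load() counterpart to save() plus accessors for the stored data (the class itself is not shown in this listing):

// Sketch only; Model::load(), get_points3d() and get_descriptors() are assumptions.
Model model_detection;
model_detection.load(write_path);   // the *.yaml written by model.save() above

vector<Point3f> points3d_model = model_detection.get_points3d();
Mat descriptors_model = model_detection.get_descriptors();
// These would then be matched against the keypoints of a new frame, e.g. with rmatcher.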
Code Example #5
File: ransac.cpp Project: tyuownu/ubuntu_opencv
int main(int argc,char **argv)
{
	// Read input images (expects: prog img1 img2 depth1 depth2)
	if(argc < 5)
	{
		std::cout<<"Usage: "<<argv[0]<<" img1 img2 depth1 depth2\n";
		return 1;
	}
	char* str1 = argv[1]; char* str2 = argv[2];
	char* str3 = argv[3]; char* str4 = argv[4];
	cv::Mat image1 = cv::imread(str1,0);
	cv::Mat image2 = cv::imread(str2,0);
	cv::Mat imageD1 = cv::imread(str3,CV_LOAD_IMAGE_ANYDEPTH);
	cv::Mat imageD2 = cv::imread(str4,CV_LOAD_IMAGE_ANYDEPTH);
//	cv::Mat image1= cv::imread("/home/tyu/ubuntu_opencv/image/1.png",0);
//	cv::Mat image2= cv::imread("/home/tyu/ubuntu_opencv/image/2.png",0);
//	cv::Mat imageD1 = cv::imread("/home/tyu/ubuntu_opencv/image/1d.png",CV_LOAD_IMAGE_ANYDEPTH);
//	cv::Mat imageD2 = cv::imread("/home/tyu/ubuntu_opencv/image/2d.png",CV_LOAD_IMAGE_ANYDEPTH);
	if (!image1.data || !image2.data)
	{
		std::cout<<"Cannt load the image data!\n";
		return 0;
	}

    // Display the images
//	cv::namedWindow("Right Image");
//	cv::imshow("Right Image",image1);
//	cv::namedWindow("Left Image");
//	cv::imshow("Left Image",image2);

	// Prepare the matcher
	RobustMatcher rmatcher;
	rmatcher.setConfidenceLevel(0.98);
	rmatcher.setMinDistanceToEpipolar(1.0);
	rmatcher.setRatio(0.65f);
	cv::Ptr<cv::FeatureDetector> pfd= new cv::SurfFeatureDetector(15000); 
	rmatcher.setFeatureDetector(pfd);

	// Match the two images
	std::vector<cv::DMatch> matches;
	std::vector<cv::KeyPoint> keypoints1, keypoints2;
	cv::Mat fundemental= rmatcher.match(image1,image2,matches, keypoints1, keypoints2);
	std::cout<<fundemental<<std::endl;
	std::cout<<keypoints1.size()<<std::endl;

	// draw the matches
//	cv::Mat imageMatches;
/*	cv::drawMatches(image1,keypoints1,  // 1st image and its keypoints
		            image2,keypoints2,  // 2nd image and its keypoints
					matches,			// the matches
					imageMatches,		// the image produced
					cv::Scalar(1,255,255)); // color of the lines*/
//	std::cout<<"imageMatches\'s size is :"<<imageMatches.size()<<std::endl;

/*	/////draw lines///////
	line(image1,
			cv::Point(0,255.3),
			cv::Point(640,255.3),
			cv::Scalar(0,0,0)
			);

		);*/
//	cv::namedWindow("Matches");
//	cv::imshow("Matches",imageMatches);


	/////////////////////
	
	// Convert keypoints into Point2f	
//	std::vector<cv::Point2f> points1, points2;
	std::vector<cv::Point3f> points_xyz1,points_xyz2;
	compute3dPosition(imageD1,imageD2,keypoints1,keypoints2,points_xyz1,points_xyz2);

	std::cout<<points_xyz1.size()<<std::endl;
	std::cout<<points_xyz2.size()<<std::endl;





	cv::Mat p1 = cv::Mat(points_xyz1).t();
	cv::Mat p2 = cv::Mat(points_xyz2).t();
	cv::Mat mat;
//	mat = p1;
//	std::cout<<mat<<std::endl;
	int iterations = 0;
	double distance = 100.0;
	double Error = 0.0;
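	// NOTE: this refinement loop never executes (iterations starts at 0 and
	// the condition is iterations < 0); it appears to have been left disabled.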
	while(iterations < 0)
	{
		cv::Mat np1 , np2, np1_, np2_;
		int num = 0;
		for(int i=0; i<points_xyz1.size(); i++)
		{
			num++;
			cv::Mat temp = p1.col(i).clone();
//			temp.copyTo(np1.col(i));
			np1.push_back(temp);
			temp = p2.col(i).clone();
			np2.push_back(temp);
		}
		iterations++;
	}

/*	for (std::vector<cv::DMatch>::const_iterator it= matches.begin();
			 it!= matches.end();
			 ++it) {

			 // Get the position of left keypoints
			 float xpoint1= keypoints1[it->queryIdx].pt.x;
			 float ypoint1= keypoints1[it->queryIdx].pt.y;
			 
			 double z = imageD1.at<ushort>(ypoint1,xpoint1);
			 
			 // Get the position of right keypoints
			 float xpoint2= keypoints2[it->trainIdx].pt.x;
			 float ypoint2= keypoints2[it->trainIdx].pt.y;
			 
			 
			 double z1 = imageD2.at<ushort>(ypoint2,xpoint2);
			 if(z != 0 &&z1 != 0)
			{
				points_xyz1.push_back(cv::Point3f(xpoint1,ypoint1,z/5000));
				points_xyz2.push_back(cv::Point3f(xpoint2,ypoint2,z1/5000));
				points1.push_back(cv::Point2f(xpoint1,ypoint1));
				points2.push_back(cv::Point2f(xpoint2,ypoint2));
//				cv::circle(image1,cv::Point(xpoint1,ypoint1),5,cv::Scalar(1,1,255),3);
//				cv::circle(image2,cv::Point(xpoint2,ypoint2),5,cv::Scalar(1,1,255),3);
				
			 }
			 else
				 continue;
			 
			 
	}*/




	std::ofstream p1out("p1.txt"), p2out("p2.txt");
	if(!p1out||!p2out)
		std::cout<<"Can not open the file!"<<std::endl;

//	out<<"fundemental = "<<std::endl<<std::endl;
//	out<<fundemental<<std::endl<<std::endl;
	
//	out<<"p1 = "<<std::endl<<std::endl;
//true location
	//write p1 location to out.txt
	for(int i = 0 ; i < points_xyz1.size(); i++)
		p1out<<points_xyz1[i].x<<" "<<points_xyz1[i].y<<" "<<points_xyz1[i].z<<std::endl;
//	out<<std::endl;
	//write p2 location to out.txt
//	out<<"p2 = "<<std::endl<<std::endl;
	for(int i = 0 ; i < points_xyz2.size(); i++)
		p2out<<points_xyz2[i].x<<" "<<points_xyz2[i].y<<" "<<points_xyz2[i].z<<std::endl;
//	out<<"p1 = "<<std::endl<<std::endl<<points_xyz1<<std::endl;
//	out<<"p2 = "<<std::endl<<std::endl<<points_xyz2<<std::endl;
//	out.close();
	p1out.close();p2out.close();

	//////////////////////////centroid/////////
//	std::cout<<"The centroid is :"<<std::endl<<centroid(points_xyz1)<<std::endl;
//	std::cout<<"The distance between each points to the centroid: "<<std::endl<<centroid_dist(points_xyz1,centroid(points_xyz1))<<std::endl;
	




/*	// Draw the epipolar lines
	std::vector<cv::Vec3f> lines1; 
	cv::computeCorrespondEpilines(cv::Mat(points1),1,fundemental,lines1);
		
	for (std::vector<cv::Vec3f>::const_iterator it= lines1.begin();
			 it!=lines1.end(); ++it) {

			 cv::line(image2,cv::Point(0,-(*it)[2]/(*it)[1]),
				             cv::Point(image2.cols,-((*it)[2]+(*it)[0]*image2.cols)/(*it)[1]),
							 cv::Scalar(255,255,255));
	}

	std::vector<cv::Vec3f> lines2; 
	cv::computeCorrespondEpilines(cv::Mat(points2),2,fundemental,lines2);
	
	for (std::vector<cv::Vec3f>::const_iterator it= lines2.begin();
		     it!=lines2.end(); ++it) {

			 cv::line(image1,cv::Point(0,-(*it)[2]/(*it)[1]),
				             cv::Point(image1.cols,-((*it)[2]+(*it)[0]*image1.cols)/(*it)[1]),
							 cv::Scalar(255,255,255));
	}

    // Display the images with epipolar lines
	cv::namedWindow("Right Image Epilines (RANSAC)");
	cv::imshow("Right Image Epilines (RANSAC)",image1);
	cv::namedWindow("Left Image Epilines (RANSAC)");
	cv::imshow("Left Image Epilines (RANSAC)",image2);

	cv::waitKey();*/
	return 0;
}
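
For reference, a plausible compute3dPosition() consistent with the commented-out block above: look up the 16-bit depth at each keypoint and keep only pairs where both depths are valid; the /5000 scale follows the TUM RGB-D convention used in that block. The signature is inferred from the call site, and pairing keypoints1[i] with keypoints2[i] is an assumption, since the function is called without the match list:

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

// Sketch only.
void compute3dPosition(const cv::Mat &imageD1, const cv::Mat &imageD2,
                       const std::vector<cv::KeyPoint> &keypoints1,
                       const std::vector<cv::KeyPoint> &keypoints2,
                       std::vector<cv::Point3f> &points_xyz1,
                       std::vector<cv::Point3f> &points_xyz2)
{
	for (size_t i = 0; i < keypoints1.size() && i < keypoints2.size(); ++i) {
		const cv::Point2f &p1 = keypoints1[i].pt;
		const cv::Point2f &p2 = keypoints2[i].pt;
		double z1 = imageD1.at<ushort>(p1.y, p1.x);   // raw 16-bit depth
		double z2 = imageD2.at<ushort>(p2.y, p2.x);
		if (z1 != 0 && z2 != 0) {                     // keep only valid depth pairs
			points_xyz1.push_back(cv::Point3f(p1.x, p1.y, z1 / 5000));
			points_xyz2.push_back(cv::Point3f(p2.x, p2.y, z2 / 5000));
		}
	}
}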
Code Example #6
void ImageRecognizer::queryBoWFeature(std::vector<std::string>* pReturnArray,
		const std::string& imagePath) {
	pReturnArray->clear();
	boost::shared_ptr<DBAdapter> dbAdapter(new HbaseAdapter);
	cv::Mat image = cv::imread(imagePath, 1);
	LocalFeature localFeature;
	LocalFeatureExtractor localFeatureExtractor;
	localFeatureExtractor.extractFeature(image, localFeature.keypoint,
			localFeature.descriptor);
	BoWHistogram histogram;
	ANNVocabulary::instance()->quantizeFeature(localFeature, &histogram);
	const std::vector<int64_t>& visualwordIdArray =
			histogram.visualwordIdArray();
	const std::vector<double>& frequencyArray = histogram.frequencyArray();
	std::cout << "histogram size: " << histogram.size() << std::endl;
	// TODO: modify the query pipeline
	boost::unordered_map<int64_t, double> candidateScoreMap;
	Ticker ticker;
	ticker.start();
	for (size_t i = 0; i < visualwordIdArray.size(); ++i) {
		std::vector<Posting> postingArray;
		InvertedIndexClient invertedIndexClient;
		invertedIndexClient.loadPostingList(&postingArray,
				visualwordIdArray[i]);
		for (size_t j = 0; j < postingArray.size(); ++j) {
			candidateScoreMap[postingArray[j].imageId] += postingArray[j].score
					* frequencyArray[i];
		}
	}
	ticker.stop();
	ticker.start();
	std::vector<RankItem<int64_t, double> > candidateRankList;
	candidateRankList.reserve(candidateScoreMap.size());
	for (boost::unordered_map<int64_t, double>::iterator iter =
			candidateScoreMap.begin(); iter != candidateScoreMap.end();
			++iter) {
		RankItem<int64_t, double> item(iter->first, iter->second);
		candidateRankList.push_back(item);
	}
	size_t candidateSize = (size_t) GlobalConfig::CANDIDATE_COUNT;
	if (candidateRankList.size() > candidateSize) {
		std::nth_element(candidateRankList.begin(),
				candidateRankList.begin() + candidateSize,
				candidateRankList.end());
	}
	ticker.stop();
	ticker.start();
	std::vector<double> queryVector;
	histogram.flatten(&queryVector, ANNVocabulary::instance()->size());
	std::vector<RankItem<int64_t, double> > rankList;
	for (size_t i = 0; i < candidateRankList.size() && i < candidateSize; ++i) {
		int64_t otherImageId = candidateRankList[i].index;
		BoWHistogram otherHistogram;
		otherHistogram.load(otherImageId);
		double score = otherHistogram.innerProduct(queryVector);
		RankItem<int64_t, double> item(otherImageId, -1.0 * score);
		rankList.push_back(item);
	}
	std::sort(rankList.begin(), rankList.end());
	ticker.stop();
	if (GlobalConfig::USE_VERIFICATION) {
		for (int i = 0;
				i < GlobalConfig::VERIFICATION_COUNT
						&& i < (int) rankList.size(); ++i) {
			RobustMatcher rmatcher;
			LocalFeature otherLocalFeature;
			otherLocalFeature.load(rankList[i].index);
			std::vector<cv::DMatch> matches;
			cv::Mat homography = rmatcher.match(localFeature, otherLocalFeature,
					&matches);
			if (matches.size() >= 7) {
				std::string path;
				dbAdapter->loadCell(&path, GlobalConfig::IMAGE_TABLE,
						rankList[i].index, GlobalConfig::IMAGE_HASH_COLUMN);
				pReturnArray->push_back(path);
			}
		}
	} else {
		for (size_t i = 0; i < rankList.size(); ++i) {
			std::string path;
			dbAdapter->loadCell(&path, GlobalConfig::IMAGE_TABLE,
					rankList[i].index, GlobalConfig::IMAGE_HASH_COLUMN);
			pReturnArray->push_back(path);
		}
	}
}
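
The ranking above relies on RankItem ordering by score; the real template is not shown. A minimal sketch consistent with its use: std::nth_element and std::sort order by the value field, which is why the inner-product score is negated before insertion so the best score sorts first.

// Sketch only; member names inferred from the call sites.
template <typename IndexT, typename ValueT>
struct RankItem {
	IndexT index;   // e.g. an image id
	ValueT value;   // ranking key; lower sorts first
	RankItem(IndexT i, ValueT v) : index(i), value(v) {}
	bool operator<(const RankItem &other) const { return value < other.value; }
};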
Code Example #7
File: main.cpp Project: b0ki/Forschungsprojekt2
void disjunkt_image_test() {
	vector<string>* dateien = list_directory(
			"/Users/christian/Pictures/Referenzbilder/");
	RobustMatcher* rmatcher = new RobustMatcher();

	for (vector<string>::iterator it = dateien->begin(); it != dateien->end();
			it++) {
		cout << *it << endl;

		// Load the image from the file
		cv::Mat image = cv::imread(*it, 0);
		unsigned int most_matches = 0; // stores the highest match count seen so far
		string image2_filename; // stores the file name of the best-matching image
		string second_best_match_image_name;

		unsigned int second_most_matches = 0;

		std::vector<cv::KeyPoint> keypoints1;
		std::vector<cv::KeyPoint> keypoints2;
		std::vector<cv::DMatch> matches;


		std::list<ImageMatch*>* allImageMatches = new std::list<ImageMatch*>();
		for (vector<string>::iterator it2 = dateien->begin();
				it2 != dateien->end(); it2++) {
			// Load this image as well
			cv::Mat image2 = cv::imread(*it2, 0);


			// Compute keypoints and descriptors on both images and test with the RobustMatcher whether the image is recognized.
			rmatcher->match(image, image2, matches, keypoints1, keypoints2);

			if (matches.size() > 0) {
				// Collect all results with matches > 0 in a list
				ImageMatch* imageMatch = new ImageMatch(*it2, matches.size());
				allImageMatches->push_back(imageMatch);
			}

			//cout << *it << " matches " << *it2 << " with " << matches.size() << " points" << endl;
			/*int matches_size = matches.size();
			if (matches_size < most_matches) {
				if (matches_size > second_most_matches) {
					second_most_matches = matches_size;
					second_best_match_image_name = *it2;
				}
			}

			if (matches_size > most_matches) {
				most_matches = matches_size;
				image2_filename = *it2;
			}*/
		}

		// If allImageMatches is not empty, sort it
		allImageMatches->sort(ImageMatch::compareImageMatch);

		// Store the file name of image 1 as a key in a map.
		// The file name of image 2 with the most matches is stored as the value.
/*

		cout << "Ergebnis: " << *it << " == " << image2_filename << " matches: " << most_matches << endl;

		double similarity;
		// €hnliChkeit zum zweitbesten Ermitteln
		if (most_matches > 0) {
			similarity = (double) second_most_matches / (double) most_matches;
		} else {
			similarity = 0;
		}
		cout << "€hnlichkeit zum zweitbesten Bild: " << similarity << " Name: " << second_best_match_image_name << endl;
*/
		cout << "sortierte liste mit ImageMatches: " << endl;
		for (std::list<ImageMatch*>::iterator list_it = allImageMatches->begin(); list_it != allImageMatches->end(); list_it++) {
			ImageMatch* imageMatch = *list_it;
			cout << "Matches: " << imageMatch->getMatchesSize() << endl;
		}

		// Now the similarity to the other images can be determined:
		// walk through the list of matches.
		double similarity = 1;
		int firstValue = 0;
		for (std::list<ImageMatch*>::iterator list_it = allImageMatches->begin(); list_it != allImageMatches->end(); list_it++) {
			ImageMatch* imageMatch = *list_it;
			if (firstValue == 0) {
				firstValue = imageMatch->getMatchesSize();
			} else {
				int otherValue = imageMatch->getMatchesSize();
				similarity -= (double) otherValue / (double) firstValue;

			}
		}

		cout << "End-€hnlichkeit des besten Matches: " << similarity << endl;
	}
}
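
disjunkt_image_test() depends on an ImageMatch class that is not shown. A minimal sketch consistent with the calls above; sorting descending by match count (so the best match comes first) is an assumption:

#include <string>

// Sketch only.
class ImageMatch {
	std::string filename;
	size_t matchCount;
public:
	ImageMatch(const std::string &f, size_t n) : filename(f), matchCount(n) {}
	size_t getMatchesSize() const { return matchCount; }
	// comparator for std::list<ImageMatch*>::sort
	static bool compareImageMatch(ImageMatch *a, ImageMatch *b) {
		return a->getMatchesSize() > b->getMatchesSize();
	}
};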
Code Example #8
File: main.cpp Project: b0ki/Forschungsprojekt2
void method1() {
	// vector of keypoints
	std::vector<cv::KeyPoint> refKeypoints;
	std::vector<cv::KeyPoint> keypoints1;
	std::vector<cv::KeyPoint> keypoints2;
	//std::vector<cv::KeyPoint> keypoints3;

	// Construct the SURF feature detector object
	//cv::SurfFeatureDetector surf(5000.0);

	// Reference image
	cv::Mat referenz =
			cv::imread(
					"/Users/christian/SecureDropBox/Photos/SURF/IMG_20120228_192758.jpg",
					0); // open in b&w

	// Other images
	cv::Mat image1 =
			cv::imread(
					"/Users/christian/SecureDropBox/Photos/SURF/IMG_20120228_192836.jpg",
					0);
	//cv::Mat image2 = cv::imread("/Users/christian/Dropbox/Photos/SURF/blume2.jpg");

	Helper* helper = new Helper();
	// Compute keypoints
	double t = (double) getTickCount();
	helper->detectKeypoints(referenz, refKeypoints);
	t = ((double) getTickCount() - t) / getTickFrequency();
	cout << "Times passed compute keypoints ref image: " << t << endl;

	cv::Mat out;
	drawKeypoints(referenz, refKeypoints, out, cv::Scalar(0, 255, 255),
			DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	//cv::imwrite("/Users/christian/Desktop/Master_LaTex/Implementierung/Bilder/Bild_Keypoints.png", out);

	cv::namedWindow("Color Reduces Image");
	cv::imshow("Color Reduces Image", out);
	cv::waitKey(0);

	cout << "anzahl der keypoints: " << refKeypoints.size() << endl;

	// do something ...

	helper->detectKeypoints(image1, keypoints1);
	//surf.detect(image2,keypoints2);

	//	cout << "size keypoints image1: " << keypoints1.size() << endl;
	//	cout << "size keypoints image2: " << keypoints2.size() << endl;
	//cout << "size keypoints image3: " << keypoints3.size() << endl;

	// Construction of the SURF descriptor extractor+
	//cv::SurfDescriptorExtractor surfDesc;
	// Extraction of the SURF descriptors
	cv::Mat refDescriptors;
	cv::Mat descriptors1;
	cv::Mat descriptors2;

	// Compute the descriptors on all images.
	t = (double) getTickCount();

	helper->extractDescriptors(referenz, refKeypoints, refDescriptors);
	t = ((double) getTickCount() - t) / getTickFrequency();
	cout << "Times passed compute descriptir ref image: " << t << endl;

	cout << "Anzahl der descriptoren: " << *refDescriptors.size << endl;

	helper->extractDescriptors(image1, keypoints1, descriptors1);
	//surfDesc.compute(image2, keypoints2, descriptors2);

	// Draw Keypoints
	cv::Mat image_out;
	/*cv::drawKeypoints(referenz,			// original image
	 refKeypoints,					// vector of keypoints
	 image_out,						// the output image
	 cv::Scalar(255,255,255),	// keypoint color
	 cv::DrawMatchesFlags::DEFAULT);	// drawing flag
	 */
	//cv::waitKey(0);
	// Construction of the matcher
	cv::BruteForceMatcher<cv::L2<float> > matcher;

	// Match the two image descriptors
	std::vector<cv::DMatch> matches_image1;
	//std::vector<cv::DMatch> matches_image2;

	// do something ...

	//Helper *helper = new Helper();
	Helper::saveDescriptorsInFile("/Users/christian/keypoints.xml",
			descriptors1);
	cv::Mat descriptors_loaded;
	t = (double) getTickCount();
	Helper::loadDescriptorsFromFile("/Users/christian/keypoints.xml",
			descriptors_loaded);
	t = ((double) getTickCount() - t) / getTickFrequency();
	cout << "Times passed to load descriptors: " << t << endl;

	t = (double) getTickCount();
	// 1. Match all images against the reference image
	matcher.match(refDescriptors, descriptors_loaded, matches_image1);
	//matcher.match(refDescriptors, descriptors2, matches_image2);

	t = ((double) getTickCount() - t) / getTickFrequency();
	cout << "Times passed to COMPUTE matches ref images: " << t << endl;

	Helper::saveKeypointsInFile("/Users/christian/keypoints.xml", keypoints1);
	//helper->saveDescriptorsInFile("/Users/christian/descriptors.xml", descriptors1);

	cout << "keypoints1 size: " << keypoints1.size() << endl;

	std::vector<cv::KeyPoint> keypoints_loaded;
	t = (double) getTickCount();
	Helper::loadKeyPointsFromFile("/Users/christian/keypoints.xml",
			keypoints_loaded);
	t = ((double) getTickCount() - t) / getTickFrequency();
	cout << "Times passed to load keypoints: " << t << endl;

	cout << "keypoints loaded size: " << keypoints_loaded.size() << endl;
	//cv::drawMatches(referenz, refKeypoints, image1, keypoints_loaded, matches_image1, image_out, cv::Scalar(0,255,0), cv::Scalar(0,0,255), new vector<vector<char> >(),  cv::DrawMatchesFlags::DEFAULT);
	cv::drawMatches(referenz, refKeypoints, image1, keypoints_loaded,
			matches_image1, image_out, cv::Scalar(0, 255, 255),
			cv::Scalar(0, 0, 0));
	//cv::imwrite("/Users/christian/Desktop/Master_LaTex/Implementierung/Bilder/bruteforce.png", image_out);
	/*
	 if (matches_image1 < matches_image2) {
	 cout << "matches_image1 is smaller" << endl;
	 } else {
	 cout << "matches_image1 is larger" << endl;
	 }*/

	//cout << "distance image1 and image2: " << sum_distance(matches) << endl;
	//cv::waitKey(0);*/
	/*
	 cv::Mat readDescriptors_ref, readDescriptors_image1;
	 cout << "size descriptors1: " << *descriptors1.size << endl;
	 t = (double)getTickCount();
	 cv::FileStorage fs("test.xml", FileStorage::WRITE);
	 fs << "descriptors_ref_image" << refDescriptors;
	 fs << "image1" << descriptors1;
	 fs.release();
	 t = ((double)getTickCount() - t)/getTickFrequency();
	 cout << "Time elapsed writing descriptors to file: " << t << endl;

	 cv::Mat descriptors4;

	 t = (double)getTickCount();
	 cv::FileStorage fsRead("test.xml", FileStorage::READ);
	 fsRead["descriptors_ref_image"] >> readDescriptors_ref;
	 fsRead["image1"] >> readDescriptors_image1;
	 fsRead.release();
	 t = ((double)getTickCount() - t)/getTickFrequency();
	 cout << "Time elapsed reading descriptors from file: " << t << endl;
	 cout << "image1 descriptors size: " << *readDescriptors_image1.size << endl;
	 */

	cv::namedWindow("Image");
	cv::imshow("Image", image_out);
	//cv::imwrite("keypoints_plain_gray.jpg",image_out);

	cv::waitKey(0);

	// Robust Matcher

	// prepare the matcher
	RobustMatcher rmatcher;
	/*rmatcher.setConfidenceLevel(0.98);
	 rmatcher.setMinDistanceToEpipolar(1.0);
	 rmatcher.setRatio(0.65f);*/
	//cv::Ptr<cv::FeatureDetector> pfd = new cv::SurfFeatureDetector(5000);
	//rmatcher.setFeatureDetector(pfd);
	// Get the images
	// Reference image
	//image1 = cv::imread("/Users/christian/SecureDropBox/Photos/SURF/IMG_20120228_192758.jpg");
	// Test image 1
	//cv::Mat image2 = cv::imread("/Users/christian/SecureDropBox/Photos/SURF/IMG_20120228_192836.jpg");
	// Test image 2:
	image1 =
			cv::imread(
					"/Users/christian/SecureDropBox/Photos/SURF/IMG_20120228_192758.jpg",
					0);

	// Test image 3:
	cv::Mat image2 =
			cv::imread(
					"/Users/christian/SecureDropBox/Photos/SURF/IMG_20120228_192836.jpg",
					0);

	// Match the two images;
	std::vector<cv::DMatch> matches;
	std::vector<cv::KeyPoint> keypoints3, keypoints4;
	cv::Mat fundemental = rmatcher.match(image1, image2, matches, keypoints1,
			keypoints2);

	/*cv::drawKeypoints(,			// original image
	 keypoints2,					// vector of keypoints
	 image_out,						// the output image
	 cv::Scalar(255,255,255),	// keypoint color
	 cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);	// drawing flag
	 */

	cv::drawMatches(image1, keypoints1, image2, keypoints2, matches, image_out,
			cv::Scalar(0, 255, 255), cv::Scalar(0, 0, 0));
	/*
	 // Print the sum of the distances:
	 cout << "sumDistance image1 and image 2: " << sum_distance(matches) << endl;

	 fundemental = rmatcher.match(image1, image3, matches, keypoints1, keypoints3);
	 //cv::drawMatches(image1, keypoints1, image3, keypoints3, matches, image_out, cv::Scalar(255,255,255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	 cout << "sumDistance image1 and image 3: " << sum_distance(matches) << endl;

	 fundemental = rmatcher.match(image1, image4, matches, keypoints1, keypoints4);
	 //cv::drawMatches(image1, keypoints1, image4, keypoints4, matches, image_out, cv::Scalar(255,255,255), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	 cout << "sumDistance image1 and image 4: " << sum_distance(matches) << endl;
	 */

	cv::namedWindow("RobustMatches");
	cv::imshow("Color Reduces Image", image_out);
	cv::waitKey(0);

	//cv::imwrite("/Users/christian/Desktop/Master_LaTex/Implementierung/Bilder/robust.png",image_out);

}
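
The Helper persistence functions used above are not shown. A plausible sketch, inferred from the call sites and from the commented-out FileStorage block, written here as free functions (names and signatures are assumptions):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

// Sketch only: persist keypoints with cv::FileStorage.
static void saveKeypointsInFile(const std::string &path,
                                const std::vector<cv::KeyPoint> &keypoints) {
	cv::FileStorage fs(path, cv::FileStorage::WRITE);
	cv::write(fs, "keypoints", keypoints);
}

static void loadKeyPointsFromFile(const std::string &path,
                                  std::vector<cv::KeyPoint> &keypoints) {
	cv::FileStorage fs(path, cv::FileStorage::READ);
	cv::read(fs["keypoints"], keypoints);
}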
Code Example #9
File: PnP_Localization.cpp Project: TerAtO86/PCLTest
void ComputePnP(const char* folder_name,int start_idx,int total_num,int jump_num){//Compute two-view PnP
	int img_idx = start_idx;
	int img_num = total_num;
	int img_num2 = jump_num;

	char imgname[64];
	char pcdname[64];
	char imgname2[64];
	char pcdname2[64];


	ofstream out_pose,out_pose2;
	out_pose.open("RT.txt");
	out_pose2.open("invRT.txt");
	cv::Mat colorImage_1(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_8UC4);
	cv::Mat pointCloud_XYZforRGB_1(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_32FC3,cv::Scalar::all(0));

	cv::Mat colorImage_2(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_8UC4);
	cv::Mat pointCloud_XYZforRGB_2(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_32FC3,cv::Scalar::all(0));

	cv::Mat img1,img2;
  /////////////////////PCL objects//////////////////////////

  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2 (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1f (new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2f (new pcl::PointCloud<pcl::PointXYZRGBA>);

  
	pcl::visualization::PCLVisualizer vislm; 

////////////////////Find matched points between two RGB images////////////////////////
	ifstream cam_in("CamPara.txt");
	cam_in>>CamIntrinsic[0][0]>>CamIntrinsic[0][1]>>CamIntrinsic[0][2]
	>>CamIntrinsic[1][0]>>CamIntrinsic[1][1]>>CamIntrinsic[1][2]
	>>CamIntrinsic[2][0]>>CamIntrinsic[2][1]>>CamIntrinsic[2][2];

	cam_in.close();

	const cv::Mat Camera_Matrix(3,3,CV_64F,CamIntrinsic);
	const cv::Mat disCoef(1,5,CV_64F,DisCoef);

	cv::Mat img_Matches;

	int numKeyPoints = 400; 
	int numKeyPoints2 = 400; 

	RobustMatcher rMatcher; 
 

	cv::Ptr<cv::FeatureDetector> detector = new cv::FastFeatureDetector(0); 
	cv::Ptr<cv::FeatureDetector> detector2 = new cv::FastFeatureDetector(0); 
	cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor(); 
	cv::Ptr<cv::DescriptorMatcher> Matcher= new cv::BFMatcher(cv::NORM_HAMMING);
	rMatcher.setFeatureDetector(detector); 
	rMatcher.setDescriptorExtractor(extractor); 
	rMatcher.setDescriptorMatcher(Matcher); 

	std::vector<cv::KeyPoint> img1_keypoints; 
	cv::Mat img1_descriptors; 
	std::vector<cv::KeyPoint> img2_keypoints; 
	cv::Mat img2_descriptors; 

	std::vector<cv::DMatch > Matches; 

 //////////////////////PCL rigid motion estimation///////////////////////////
	Eigen::Matrix4f TotaltransforMation = Eigen::Matrix4f::Identity(); 
	Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity();

	pcl::PointCloud<pcl::PointXYZRGBA>::Ptr Transcloud_2 (new pcl::PointCloud<pcl::PointXYZRGBA>);

//////////////////////////////////////////////////////////////////////////////

	printf("From %d to %d",img_idx,img_idx+(img_num-1)*img_num2);

	//Camera position
	pcl::PointCloud<pcl::PointXYZ> Camera_pose0;
	pcl::PointCloud<pcl::PointXYZ> Camera_pose;

	Camera_pose0.push_back(pcl::PointXYZ(0,0,0));
	Camera_pose0.push_back(pcl::PointXYZ(0.2,0,0));
	Camera_pose0.push_back(pcl::PointXYZ(0,0.2,0));
	Camera_pose0.push_back(pcl::PointXYZ(0,0,0.2));


	/////////////Looping///////////////////////////
	int d = 0;
	for(int i=0; i<img_num-1; ++i, d+=img_num2){

		sprintf(imgname,"%s/rgb_%d.jpg",folder_name,img_idx+d);
		sprintf(pcdname,"%s/pcd_%d",folder_name,img_idx+d);
		sprintf(imgname2,"%s/rgb_%d.jpg",folder_name,img_idx+d+img_num2);
		sprintf(pcdname2,"%s/pcd_%d",folder_name,img_idx+d+img_num2);

		printf("comparing %s & %s\n",imgname,imgname2);

		 ////////////Reading data/////////////////////
		 img1 = cv::imread(imgname,CV_LOAD_IMAGE_GRAYSCALE);
		 colorImage_1 = cv::imread(imgname,CV_LOAD_IMAGE_COLOR);
 
		 printf("reading\n");
		 FILE* fin = fopen(pcdname,"rb");
		 fread(pointCloud_XYZforRGB_1.data,1,pointCloud_XYZforRGB_1.step*pointCloud_XYZforRGB_1.rows,fin);
		 fclose(fin);


		 img2 = cv::imread(imgname2,CV_LOAD_IMAGE_GRAYSCALE);
		 colorImage_2 = cv::imread(imgname2,CV_LOAD_IMAGE_COLOR);


		 printf("reading\n");
		 fin = fopen(pcdname2,"rb");
		 fread(pointCloud_XYZforRGB_2.data,1,pointCloud_XYZforRGB_2.step*pointCloud_XYZforRGB_2.rows,fin);
		 fclose(fin);

		 cloud_1->clear();
		 cloud_2->clear();
		 Mat2PCL_XYZRGB_ALL(colorImage_1,pointCloud_XYZforRGB_1,*cloud_1);
		 Mat2PCL_XYZRGB_ALL(colorImage_2,pointCloud_XYZforRGB_2,*cloud_2);

        ///////////////////Finding 2D features//////////////////////////

		img1_keypoints.clear();
		img2_keypoints.clear();

		detector->detect(img1,img1_keypoints); 
		extractor->compute(img1,img1_keypoints,img1_descriptors); 
	
		detector2->detect(img2,img2_keypoints); 
		extractor->compute(img2,img2_keypoints,img2_descriptors); 

		Matches.clear();

		printf("cv::Matching\n");

		rMatcher.match(Matches, img1_keypoints, img2_keypoints,img1_descriptors,img2_descriptors); 

		//printf("drawing\n");
		drawMatches(img1, img1_keypoints, img2, img2_keypoints,Matches, img_Matches, cv::Scalar::all(-1), cv::Scalar::all(-1),vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
		cv::imshow("Good matches",img_Matches);
		cv::waitKey(10);
		///////////////////Find corresponding 3D points////////////////////////////////////////
		//We should draw point cloud from the Matches...  
		//////////////////3D geometry///////////////////////////////////////////////////////

		 ///////////////////////// 
		 //Compute PnP
		 //////////////////////// 
		cv::Mat rvec(1,3,cv::DataType<double>::type);
		cv::Mat tvec(1,3,cv::DataType<double>::type);
		cv::Mat rotationMatrix(3,3,cv::DataType<double>::type);

		vector<cv::Point2f> p2ds;
		vector<cv::Point3f> p3ds;
		 
		Mat2PCL_XYZRGB_MATCH_PnP(pointCloud_XYZforRGB_1,p3ds,p2ds,img1_keypoints,img2_keypoints,Matches);
		printf("3D/2D points:%d,%d\n",p3ds.size(),p2ds.size());

		if(p3ds.size() > 5){
			cv::solvePnPRansac(p3ds,p2ds,Camera_Matrix,disCoef,rvec,tvec);
			cv::Rodrigues(rvec,rotationMatrix);
			PnPret2Mat4f(rotationMatrix,tvec,Ti);
			Ti = Ti.inverse();


		}
		///////////////////////Compose the translation, and transform the point cloud////////////////

		Transcloud_2->clear();


		std::cout<<"\nLocal motion from PnP is \n"<<Ti;
		TotaltransforMation = TotaltransforMation*Ti;
		pcl::transformPointCloud(*cloud_2,*Transcloud_2,TotaltransforMation);

		std::cout<<"\nTotal motion from PnP is \n"<<TotaltransforMation<<endl; 

		//Add camera coordinate
		pcl::transformPointCloud(Camera_pose0,Camera_pose,TotaltransforMation);
		sprintf(pcdname2,"X%d",img_idx+d+img_num2);
		vislm.addLine(Camera_pose[0],Camera_pose[1],255,0,0,pcdname2);
		sprintf(pcdname2,"Y%d",img_idx+d+img_num2);
		vislm.addLine(Camera_pose[0],Camera_pose[2],0,255,0,pcdname2);
		sprintf(pcdname2,"Z%d",img_idx+d+img_num2);
		vislm.addLine(Camera_pose[0],Camera_pose[3],0,0,255,pcdname2);

		if(i==0){
			vislm.addPointCloud(cloud_1->makeShared(),pcdname);
			//PCLviewer.showCloud(cloud_1,"init");
			out_pose<<Eigen::Matrix4f::Identity()<<endl;
		}

		vislm.addPointCloud(Transcloud_2->makeShared(),pcdname2);
		out_pose<<TotaltransforMation<<endl;
		out_pose2<<TotaltransforMation.inverse()<<endl;
	}
	vislm.resetCamera(); 
	vislm.spin();

	out_pose.close();
	out_pose2.close();
}
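
PnPret2Mat4f() is not shown above. A plausible sketch, with the signature inferred from the call site: pack the 3x3 rotation produced by cv::Rodrigues and the translation from solvePnPRansac into a homogeneous Eigen 4x4 matrix.

#include <Eigen/Core>
#include <opencv2/core/core.hpp>

// Sketch only; assumes R is 3x3 CV_64F and t holds three doubles.
static void PnPret2Mat4f(const cv::Mat &R, const cv::Mat &t, Eigen::Matrix4f &T)
{
	T = Eigen::Matrix4f::Identity();
	for (int r = 0; r < 3; ++r) {
		for (int c = 0; c < 3; ++c)
			T(r, c) = (float)R.at<double>(r, c);
		T(r, 3) = (float)t.at<double>(r);
	}
}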