void ofxSurfStaticMatch(ofxSurfImage * src, ofxSurfImage * dst, IpPairVec * matches) {
    surfDetDes(src->getCvImage(), src->ipts, false, 4, 4, 2, 0.0006f);
    surfDetDes(dst->getCvImage(), dst->ipts, false, 4, 4, 2, 0.0006f);
    getMatches(src->ipts, dst->ipts, *matches);
}
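A call-site sketch for the helper above (the wrapper function name and setup are hypothetical; both ofxSurfImage objects are assumed to already hold pixel data). The caller owns the match vector and the function fills it in place:

// Sketch: caller-side usage of ofxSurfStaticMatch (assumed setup).
void exampleStaticMatch(ofxSurfImage &srcImg, ofxSurfImage &dstImg)
{
    IpPairVec matches;                        // caller owns the storage
    ofxSurfStaticMatch(&srcImg, &dstImg, &matches);
    // matches[i].first / matches[i].second are the paired Ipoints
}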
Example #2
void playMatchGen(team_t* team1, team_t* team2, int* goals1, int* goals2) {
  match_t* matches;
  player_t *players1, *players2;
  int i, numMatches, numPlayers1, numPlayers2;
  double g1 = 0.0, g2 = 0.0;

  getMatches(team1, team2, &matches, &numMatches);
  if (numMatches) {
    for (i=0; i < numMatches; ++i) {
      getPlayersOfMatch(matches + i,
                        &players1, &numPlayers1,
                        &players2, &numPlayers2);
      g1 += getGoalsPerGame(players1, numPlayers1);
      free(players1);
      g2 += getGoalsPerGame(players2, numPlayers2);
      free(players2);
      g1 += (double)matches[i].goals1 / (double)numMatches;
      g2 += (double)matches[i].goals2 / (double)numMatches;
    }
    free(matches);
  } else if (team1->difference > team2->difference) {
    g1 = 1;
    g2 = 0;
  } else {
    g2 = 1;
    g1 = 0;
  }
  *goals1 = (int)g1;  /* explicit cast: the fractional part is truncated */
  *goals2 = (int)g2;
}
Example #3
bool FaceRecognition::matchImages(cv::Mat p_ImagePrimary, cv::Mat p_ImageSecondary)
{
    std::vector<cv::KeyPoint> primaryKeyPoints = getKeyPoints(p_ImagePrimary);
    std::vector<cv::KeyPoint> secondaryKeyPoints = getKeyPoints(p_ImageSecondary);

    std::vector<cv::Point2f> primaryPoints;
    std::vector<cv::Point2f> secondaryPoints;

    std::vector<cv::DMatch> goodMatches = getMatches(p_ImagePrimary, p_ImageSecondary);
    if(goodMatches.size() < 4)   // findHomography needs at least 4 point pairs
        return false;

    for( size_t i = 0; i < goodMatches.size(); i++ )
    {
        primaryPoints.push_back( primaryKeyPoints[ goodMatches[i].queryIdx ].pt );
        secondaryPoints.push_back( secondaryKeyPoints[ goodMatches[i].trainIdx ].pt );
    }

    cv::Mat homography;
    try
    {
        homography = cv::findHomography(primaryPoints, secondaryPoints, CV_RANSAC);
    }
    catch(const std::exception &)
    {
        return false;
    }
    return !homography.empty();
}
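Note that cv::findHomography normally reports failure by returning an empty matrix rather than throwing, which is why the empty-check above matters. A stronger acceptance test (a sketch, assuming the same OpenCV API and the primaryPoints/secondaryPoints vectors above; the inlier threshold of 10 is an illustrative choice) inspects the optional RANSAC inlier mask instead; the tail of the function could then read:

    cv::Mat mask;
    cv::Mat H = cv::findHomography(primaryPoints, secondaryPoints, CV_RANSAC, 3.0, mask);
    if (H.empty())
        return false;
    // Require a minimum number of RANSAC inliers, not just a non-empty result.
    return cv::countNonZero(mask) >= 10;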
Example #4
int mainStaticMatch()
{
  IplImage *img1, *img2;
  img1 = cvLoadImage("imgs/img1.jpg");
  img2 = cvLoadImage("imgs/img2.jpg");
  if (!img1 || !img2) { std::cerr << "Failed to load input images" << std::endl; return -1; }

  IpVec ipts1, ipts2;
  surfDetDes(img1,ipts1,false,4,4,2,0.0001f);
  surfDetDes(img2,ipts2,false,4,4,2,0.0001f);

  IpPairVec matches;
  getMatches(ipts1,ipts2,matches);

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    drawPoint(img1,matches[i].first);
    drawPoint(img2,matches[i].second);
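    // The +/-w x-offset below assumes img1 and img2 are displayed side by
    // side, so each match line points from a feature toward its pair.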
  
    const int & w = img1->width;
    cvLine(img1,cvPoint(matches[i].first.x,matches[i].first.y),cvPoint(matches[i].second.x+w,matches[i].second.y), cvScalar(255,255,255),1);
    cvLine(img2,cvPoint(matches[i].first.x-w,matches[i].first.y),cvPoint(matches[i].second.x,matches[i].second.y), cvScalar(255,255,255),1);
  }

  std::cout<< "Matches: " << matches.size();

  cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
  cvShowImage("1", img1);
  cvShowImage("2",img2);
  cvWaitKey(0);

  return 0;
}
Example #5
bool SubstructLibrary::hasMatch(const ROMol &query, bool recursionPossible,
                                bool useChirality, bool useQueryQueryMatches,
                                int numThreads) {
  const int maxResults = 1;
  return getMatches(query, recursionPossible, useChirality,
                    useQueryQueryMatches, numThreads, maxResults)
             .size() > 0;
}
Example #6
QList<QChar> StrokesMatcher::doMatching()
{
    int strokeCount = inputCharacter.getStrokeCount();
    QStringList list=strokesData.values(strokeCount);
    for (int i=0; i<list.count(); i++) {
        compareTo=loadNextCharacterStrokeData(list.at(i));
        QPair<QChar,double> match = compareToNext();
        if (match.second>=0) addMatch(match);
    }
    return getMatches();
}
Example #7
 void FeaturesTracker::drawMatches(){
     std::vector<cv::KeyPoint> pKeyPts = getPatternKeyPoints();
     std::vector<cv::KeyPoint> keyPts = getQueryKeyPoints();
     std::vector<cv::DMatch> matches = getMatches();
     cv::Point2f in, out;
     for (auto & m : matches) {
         in = keyPts[m.queryIdx].pt;
         out = pKeyPts[m.trainIdx].pt;
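         // The 640-px x-offset assumes the pattern image is drawn 640 px
         // to the right of the query frame (i.e. the frame is 640 px wide).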
         ofLine(in.x, in.y, out.x+640, out.y);
     }
 }
Example #8
Tags::TagList Tags::getMatches( const QString & tagpart, bool partial, const QStringList & types )
{
    Tags::TagList list;

    // build a compound tag list from all the configured tag files
    QStringList::iterator it;
    for ( it = _tagFiles.begin(); it != _tagFiles.end(); it++ )
    {
        list += getMatches((*it).ascii(), tagpart, partial, types);
    }

    return list;
}
Example #9
void ofxSurfMotion::update() {
    oldIpts = cam->ipts;
    motion.clear();
    surfDetDes(cam->getCvImage(),cam->ipts,true,3,4,2,0.0004f);
    getMatches(cam->ipts,oldIpts,matches);
    for(uint i=0; i<matches.size(); i++) {
        Motion m;
        m.src.set(matches[i].first.x,matches[i].first.y);
        m.dst.set(matches[i].second.x,matches[i].second.y);
        float dx = matches[i].first.dx;
        float dy = matches[i].first.dy;
        m.speed = sqrtf(dx*dx + dy*dy);
        motion.push_back(m);
    }
}
Example #10
int mainMotionPoints(void)
{
  // Initialise capture device
  CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
  if(!capture) error("No Capture");

  // Create a window 
  cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );

  // Declare Ipoints and other stuff
  IpVec ipts, old_ipts, motion;
  IpPairVec matches;
  IplImage *img;

  // Main capture loop
  while( 1 ) 
  {
    // Grab frame from the capture source
    img = cvQueryFrame(capture);
    if (!img) break;

    // Detect and describe interest points in the image
    old_ipts = ipts;
    surfDetDes(img, ipts, true, 3, 4, 2, 0.0004f);

    // Fill match vector
    getMatches(ipts,old_ipts,matches);
    for (unsigned int i = 0; i < matches.size(); ++i) 
    {
      const float & dx = matches[i].first.dx;
      const float & dy = matches[i].first.dy;
      float speed = sqrt(dx*dx+dy*dy);
      if (speed > 5 && speed < 30) 
        drawIpoint(img, matches[i].first, 3);
    }
        
    // Display the result
    cvShowImage("OpenSURF", img);

    // If ESC key pressed exit loop
    if( (cvWaitKey(10) & 255) == 27 ) break;
  }

  // Release the capture device
  cvReleaseCapture( &capture );
  cvDestroyWindow( "OpenSURF" );
  return 0;
}
Example #11
int mainStaticMatch( IplImage *img1, IplImage *img2)
{
//IplImage *img1, *img2;
//img1 = cvLoadImage("../imgs/img1.jpg");
//img2 = cvLoadImage("../imgs/img2.jpg");

  IpVec ipts1, ipts2;

  LARGE_INTEGER llPerfCount = {0};
  QueryPerformanceCounter(&llPerfCount);
  __int64 beginPerfCount = llPerfCount.QuadPart;

  //surfDetDes(img1,ipts1,false,4,4,2,0.0001f);
  //surfDetDes(img2,ipts2,false,4,4,2,0.0001f);
  surfDetDes(img1,ipts1,true,4,4,2,0.0001f);
  surfDetDes(img2,ipts2,true,4,4,2,0.0001f);

  IpPairVec matches;
  getMatches(ipts1,ipts2,matches);

  QueryPerformanceCounter(&llPerfCount);
  __int64 endPerfCount = llPerfCount.QuadPart;
  LARGE_INTEGER liPerfFreq={0};
  QueryPerformanceFrequency(&liPerfFreq);
  std::cout << __FUNCTION__ << " execute time: "
	  <<  float(endPerfCount - beginPerfCount) * 1000 / liPerfFreq.QuadPart  << " ms" << std::endl;

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    drawPoint(img1,matches[i].first);
    drawPoint(img2,matches[i].second);
  
    const int & w = img1->width;
    cvLine(img1,cvPoint(matches[i].first.x,matches[i].first.y),cvPoint(matches[i].second.x+w,matches[i].second.y), cvScalar(255,255,255),1);
    cvLine(img2,cvPoint(matches[i].first.x-w,matches[i].first.y),cvPoint(matches[i].second.x,matches[i].second.y), cvScalar(255,255,255),1);
  }

  std::cout<< "Matches: " << matches.size();

  cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
  cvShowImage("1", img1);
  cvShowImage("2",img2);
  cvWaitKey(0);

  return 0;
}
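The QueryPerformanceCounter timing above is Windows-only. A portable equivalent (a sketch using standard C++11 <chrono>; the helper name is hypothetical) would wrap the same detect/describe/match region:

#include <chrono>

// Sketch: portable wall-clock timing for the region measured above.
template <typename Fn>
double timeMs(Fn&& region)
{
    auto t0 = std::chrono::steady_clock::now();
    region();   // e.g. the surfDetDes + getMatches calls
    auto t1 = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::milli>(t1 - t0).count();
}

Used as, for example, double ms = timeMs([&]{ surfDetDes(img1, ipts1, true, 4, 4, 2, 0.0001f); surfDetDes(img2, ipts2, true, 4, 4, 2, 0.0001f); getMatches(ipts1, ipts2, matches); });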
Example #12
int mainStaticMatch()
{
  // Make images as Mats; convert to IplImage for OpenSURF library actions
  cv::Mat mimg1, mimg2;
  mimg1=cv::imread("OpenSURF/imgs/img1.jpg", CV_LOAD_IMAGE_COLOR);
  mimg2=cv::imread("OpenSURF/imgs/img2.jpg", CV_LOAD_IMAGE_COLOR);

  IplImage iimg1, iimg2;
  iimg1=mimg1;
  iimg2=mimg2;

  IplImage *img1, *img2;
  img1 = &iimg1;
  img2 = &iimg2;

  IpVec ipts1, ipts2;
  surfDetDes(img1,ipts1,false,4,4,2,0.0001f);
  surfDetDes(img2,ipts2,false,4,4,2,0.0001f);

  IpPairVec matches;
  getMatches(ipts1,ipts2,matches);

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    drawPoint(img1,matches[i].first);
    drawPoint(img2,matches[i].second);
  
    const int & w = img1->width;
    cvLine(img1,cvPoint(matches[i].first.x,matches[i].first.y),cvPoint(matches[i].second.x+w,matches[i].second.y), cvScalar(255,255,255),1);
    cvLine(img2,cvPoint(matches[i].first.x-w,matches[i].first.y),cvPoint(matches[i].second.x,matches[i].second.y), cvScalar(255,255,255),1);
  }

  std::cout<< "Matches: " << matches.size();

  cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
  cvShowImage("1", img1);
  cvShowImage("2",img2);
  cvWaitKey(0);

  return 0;
}
Example #13
static bool
matchNext(bool lineStart) {
    lineStart = !skipUnimportantWhitespace() && lineStart;

    size_t variadicLength;
    const SVariadicWordDefinition* variadicWord;

    size_t constantLength;
    const SLexConstantsWord* constantWord;

    if (!getMatches(lineStart, &variadicLength, &variadicWord, &constantLength, &constantWord))
        return false;

    if (constantWord != NULL && constantLength >= variadicLength) {
        return acceptConstantWord(constantLength, constantWord);
    } else if (variadicLength > 0) {
        return acceptVariadic(variadicLength, variadicWord, lineStart);
    } else {
        return acceptString() || acceptChar();
    }
}
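matchNext() implements a longest-match rule that breaks ties in favor of the constant word. A stand-alone restatement of just that selection rule (a sketch with hypothetical names, not the project's API):

#include <cstddef>

// Hypothetical selector mirroring matchNext(): prefer the longer lexeme,
// with ties going to the constant word; otherwise fall back to
// string/char handling.
enum class TokenPath { ConstantWord, Variadic, Fallback };

static TokenPath pickPath(bool hasConstant, std::size_t constantLength,
                          std::size_t variadicLength) {
    if (hasConstant && constantLength >= variadicLength)
        return TokenPath::ConstantWord;
    if (variadicLength > 0)
        return TokenPath::Variadic;
    return TokenPath::Fallback;
}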
Example #14
void playPenalty(team_t* team1, team_t* team2,
                 int* goals1, int* goals2) {
  match_t* matches;
  int numMatches;
  int i;
  int diff = 0;

  getMatches(team1, team2, &matches, &numMatches);
  for (i = 0; i < numMatches; ++i) {
    if (matches[i].finalRound) {
      diff += matches[i].goals1;
      diff -= matches[i].goals2;
    }
  }
  free(matches);

  /* Award the penalty once, after the full goal difference is known. */
  if (diff >= 0) {
    *goals1 += 1;
  }
  else {
    *goals2 += 1;
  }
}
Example #15
void ofxSurfVideoMatch(ofxSurfImage * cam, ofxSurfImage * mrk, IpPairVec * matches) {
    surfDetDes(cam->getCvImage(),cam->ipts,false,4,4,2,0.001f);
    getMatches(cam->ipts,mrk->ipts,*matches);
}
Example #16
int main(int argc, char ** argv) {


	//Set the arguments
	//std::string feat_detector = "SURF";
	//int threshold = 1000
	bool hamming=false;
	std::string feat_detector = "SURF";
	std::string feat_descriptor = "SURF";

	//Create the Feature extraction
	OSFeatureExtraction feature(10,200);

	//Create data analysis object
	//DataAnalysis dataAnalysis;


	//The directory where the files are stored TESTING
	std::string dir = "../brisk/images/PicsMG/Matching_Pics_Right_Overlapping";
	std::string dir1 = "../brisk/images/PicsMG/Matching_Pics_Right_Overlapping";//PicsOG/Matching_Images_OG_Left
	//Names of the two image files
	std::string name1 = "4";
	std::string name2 = "24";

	//Threshold
	float threshold = 325.125f;

	//Choose the images to compare
	//    name1 = to_string<int>(ii);
	//    if(ii==jj)
	//    continue;
	//
	//    name2 = to_string<int>(jj);

	cout<<"Image in directory 1: "<<name1<<", Image in directory 2: "<<name2<<endl;


	// names of the two images
	std::string fname1;
	std::string fname2;

	//Declare the two images
	IplImage *imgYUV1, *imgYUV2;

	//Determine the region of interest
	int horizonStart = 200;
	int horizonLine = 240;

	//******************************************************************
	//Compute the image that will be stored in the image bank
	//The Grayscale image
	IplImage *imgGray1;

	fname1 =dir + "/"+ name1+".jpg";
	imgYUV1 = cvLoadImage(fname1.c_str());
	cout<<"Size of the image is: "<<imgYUV1->height<<", "<<imgYUV1->width<<endl;
	//Create an image from the YUV image
	imgGray1 = cvCreateImage(cvSize(imgYUV1->width,imgYUV1->height),IPL_DEPTH_8U,1);

	//Convert the image to grayscale
	cvCvtColor(imgYUV1,imgGray1,CV_RGB2GRAY);

	//Get a row of pixels by sub-sampling every 4th pixel and taking the mean;
	//getRowOfPixels allocates and returns the 640x1 result image itself
	IplImage *imgFinal1 = getRowOfPixels(imgGray1, horizonStart, horizonLine);

	//Generate a vector of interest points
	IpVec interestPoints1;

	//Detect the interest points
	surfDet(imgFinal1,interestPoints1, 4, 3, 2, threshold);

	//Compute the SURF descriptor
	surfDes(imgFinal1,interestPoints1, false);

	//******************************************************************

	//Read in the current image
	//*****************************************************************
	fname2 = dir1+ "/"+ name2+".jpg";
	imgYUV2 = cvLoadImage(fname2.c_str());

	//*****************************************************************
	//Set the regions of interest in which we want to work
	//cvSetImageROI(imgYUV1, cvRect(0,0,imgYUV1->width,horizonLine));
	//cvSetImageROI(imgYUV2, cvRect(0,0,imgYUV2->width,horizonLine));

	//Note that the image dimensions do not change for the image
	//Create Grayscale images from the YUV images
	IplImage *imgGray2;
	imgGray2 = cvCreateImage(cvSize(imgYUV2->width,imgYUV2->height),IPL_DEPTH_8U,1);

	//cvSetImageROI(imgGray1, cvRect(0,0,imgYUV1->width,horizonLine));
	//cvSetImageROI(imgGray2, cvRect(0,0,imgYUV2->width,horizonLine));

#if (DEBUG_MODE)
	cout<<"The image WIDTH is: "<<imgYUV1->width<<endl;
	cout<<"The image HEIGHT is: "<<imgYUV1->height<<endl;
	cout<<"The image depth is: "<<imgYUV1->depth<<endl;
	cout<<"The number of channels is: "<<imgYUV1->nChannels<<endl;
	cout<<"The image data order is: "<<imgYUV1->dataOrder<<endl;
#endif

	//cvAddS(imgYUV1, cvScalar(100), imgYUV1);

	timespec ts, te, matchings, matchinge, detectors, detectore, extractors, extractore;
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
	//Convert the images to grayscale
	cvCvtColor(imgYUV2,imgGray2,CV_RGB2GRAY);

	//Compute a vertical average of the row of pixels above the horizon;
	//as above, getRowOfPixels allocates and returns the 640x1 result image
	IplImage *imgFinal2 = getRowOfPixels(imgGray2, horizonStart, horizonLine);

#if (DEBUG_MODE)
	cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
	cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
	cvShowImage("1", imgFinal1);
	cvShowImage("2",imgFinal2);
	// Save the frame into a file
	cvSaveImage("Imgs/image1Row.jpg" ,imgFinal1);
	// Save the frame into a file
	cvSaveImage("Imgs/image2Row.jpg" ,imgFinal2);
	cvWaitKey(200);
#endif

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &detectors);
	//MC: Generate a vector of Interest Points
	//*****************************************************************
	IpVec interestPoints2;
	//*****************************************************************
	// create the OpenSURF detector:
	//*****************************************************************
	surfDet(imgFinal2,interestPoints2, 4, 3, 2, threshold);
	//*****************************************************************

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &detectore);
	float detectionTime = diff(detectors,detectore).tv_nsec/1000000.0f;


	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &extractors);

	// get the OpenSURF descriptors
	// first image. Computes the descriptor for each of the keypoints.
	//Outputs a 64 bit vector describing the keypoints.
	//*****************************************************************
	surfDes(imgFinal2,interestPoints2, false);//False means we do not wish to use upright SURF (U-SURF)
	//*****************************************************************
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &extractore);
	float extractionTime = diff(extractors,extractore).tv_nsec/1000000.0f;

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &matchings);

	//OpenSURF matching
	//*****************************************************************
	cout<<"The number of interestPoints 1 is: "<<interestPoints1.size()<<endl;
	cout<<"The number of interestPoints 2 is: "<<interestPoints2.size()<<endl;
	IpPairVec matches;
	getMatches(interestPoints1, interestPoints2, matches, 0);
	//*****************************************************************

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &matchinge);
	float matchingTime = diff(matchings,matchinge).tv_nsec/1000000.0f;



	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &te);
	float overallTime = diff(ts,te).tv_nsec/1000000.0f;

	//Find the number of valid matches
	feature.performRansac(matches);

	//Get the matching Statistics
	feature.calculateMatchingScore(*imgFinal2,interestPoints1, interestPoints2, matches);

//	ofstream writeFile;
//	std::string filename = "../../data/implementation/matchingData.txt";
//	writeFile.open(filename.c_str(), ios::app);//ios::app
//	//Output the matches
//	for (int i=1;i<matches.size();i++)
//	{
//		cout<<"First Match coords x,y: "<<matches[i].first.x<<", "<<matches[i].first.y<<endl;
//		cout<<"Second Match coords x,y: "<<matches[i].second.x<<", "<<matches[i].second.y<<endl;
//		cout<<"The distance between interest points is: "<<matches[i].first-matches[i].second<<endl;
//
//		writeFile <<matches[i].first.x<<", "<<matches[i].second.x<<", "<<matches[i].first-matches[i].second<<"\n";
//
//	}
//
//	//close the file
//	writeFile.close();
	//cv::Mat outimg;


	//Write the data to a file
	//    ofstream writeFile;
	//
	//    std::string filename = "../../data/implementation/matchingData.txt";
	//    writeFile.open(filename.c_str(), ios::app);//ios::app


	//To store the incorrect matches
#if(DISPLAY || DEBUG_MODE)
	std::vector<Ipoint> leftPoints;
	std::vector<Ipoint> rightPoints;
#endif

#if (DEBUG_MODE)
	//cout<<"The total number of keypoints in image 1 is: "<<keypoints.size()<<endl;
	//cout<<"The total number of keypoints in image 2 is: "<<keypoints2.size()<<endl;
#endif


#if 1// (DEBUG_MODE)
	cout<<"****************************************"<<endl;
	cout<<"The matching score for the image (condsidering all matches) is "<<feature.imageMatchingScore<<endl;
	cout<<"The total number of matches is "<<feature.totalNumMatches<<endl;
	cout<<"The total number of valid matches is "<<feature.totalNumValidMatches<<endl;
	cout<<"The total number of invalid Matches is: "<<feature.totalNumMatches -feature.totalNumValidMatches <<endl;
	cout<<"****************************************"<<endl;
#endif
#if 1//(DEBUG_MODE)
	std::cout<<"The times:"<<endl;
	std::cout<<"Detection Time: "<<detectionTime<<" us"<<endl;
	std::cout<<"Extraction Time: "<<extractionTime<<" us"<<endl;
	std::cout<<"Matching Time: "<<matchingTime<<" us"<<endl;
	std::cout<<"Overall Time: "<<overallTime<<" ms"<<endl;
#endif
	//	threshold = atoi(argv[3]+5);
	//	//    writeFile <<threshold<<", "<<name1<<", "<<name2<<", "<<keypoints.size()<<", "<<keypoints2.size()<<", "<<imageMatchingScoreBest<<", "<<imageMatchingScore<<","<<totalNumMatches<<", "<<totalNumBestMatches<<"\n";
	//	//    //close the file
	//	//    writeFile.close();
	//#if (DEBUG_MODE)
	//	cout<<"The total number of keypoints in image 1 is: "<<keypoints.size()<<endl;
	//	cout<<"The total number of keypoints in image 2 is: "<<keypoints2.size()<<endl;
	//#endif
	//
	//
	//#if (DISPLAY)
	//	drawMatches(imgYUV2, keypoints2, imgYUV1, keypoints,matches,outimg,
	//			cv::Scalar(0,255,0), cv::Scalar(0,0,255),
	//			std::vector<std::vector<char> >(), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
	////NOT_DRAW_SINGLE_POINTS
	//
	//	int colourChanger = 0;
	//	for (int k = 0; k<leftPoints.size(); k++)
	//	{
	//
	//		circle(imgYUV2,cv::Point(leftPoints[k].x, leftPoints[k].y), 5, cv::Scalar(colourChanger, 100, 255), 4, 8, 0);
	//
	//#if(DEBUG_MODE)
	//		cout<<"Incorrect coord Left row,col : "<<leftPoints[k].y<<", "<<leftPoints[k].x<<endl;
	//#endif
	//		colourChanger = colourChanger+30;
	//	}
	//	colourChanger = 0;
	//	for (int k = 0; k<rightPoints.size(); k++)
	//	{
	//		circle(imgYUV1,cv::Point(rightPoints[k].x, rightPoints[k].y), 5, cv::Scalar(colourChanger, 100, 255), 4, 8, 0);
	//#if(DEBUG_MODE)
	//		cout<<"Incorrect coord Right row,col : "<<rightPoints[k].y<<", "<<rightPoints[k].x<<endl;
	//#endif
	//		colourChanger = colourChanger+30;
	//	}
	//
	//
	//
	//	cv::namedWindow("Matches");
	//	cv::imshow("Matches", outimg);
	//	//imgYUV1 is right. imgYUV2 is left
	//#if(DEBUG_MODE)
	//	cv::imshow("keypoints", imgYUV1);
	//	cv::imshow("keypoints2", imgYUV2);
	//#endif
	//
	//	cv::waitKey();
	//#endif

	std::cout<< "Matches: " << feature.totalNumValidMatches<<endl;

	//Image created for drawing
	for (unsigned int i = 0; i < matches.size(); ++i)
	{
		matches[i].first.clusterIndex = i;
		matches[i].second.clusterIndex = i;

		if(matches[i].first.x!=-1){
			drawPoint(imgYUV1,matches[i].first);
			drawPoint(imgYUV2,matches[i].second);

			const int & w = imgYUV1->width;
			cvLine(imgYUV1,cvPoint(matches[i].first.x,matches[i].first.y),cvPoint(matches[i].second.x+w,matches[i].second.y), cvScalar(0,128,0),1.5);
			cvLine(imgYUV2,cvPoint(matches[i].first.x-w,matches[i].first.y),cvPoint(matches[i].second.x,matches[i].second.y), cvScalar(0,128,0),1.5);
		}
	}


	cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
	cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
	cvShowImage("1", imgYUV1);
	cvShowImage("2",imgYUV2);
	// Save the frame into a file
	cvSaveImage("Imgs/image1.jpg" ,imgYUV1);
	// Save the frame into a file
	cvSaveImage("Imgs/image2.jpg" ,imgYUV2);
	cvWaitKey(0);

	return 0;
}
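The diff() helper used for the timings above is not part of this listing; the conventional timespec implementation it implies (a sketch, assuming <time.h> is already included) is:

// Sketch of the assumed diff() helper: end - start, with the nanosecond
// field normalised into [0, 1e9).
timespec diff(timespec start, timespec end)
{
    timespec d;
    d.tv_sec  = end.tv_sec  - start.tv_sec;
    d.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (d.tv_nsec < 0) {
        d.tv_sec  -= 1;
        d.tv_nsec += 1000000000L;
    }
    return d;
}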
Example #17
int mainStaticMatch()
{

    clock_t start,end1,end2,end3,end4,end5;
    start = clock();

    IplImage *img1, *img2;
    img1 = cvLoadImage("../data/1.JPG");
    img2 = cvLoadImage("../data/2.JPG");
    if (!img1 || !img2) { std::cerr << "Failed to load input images" << std::endl; return -1; }


    end1 = clock();

    IpVec ipts1, ipts2;
    surfDetDes(img1,ipts1,false,4,4,2,0.0008f);
    surfDetDes(img2,ipts2,false,4,4,2,0.0008f);

    std::cout << "im1" << std::endl;
    std::cout << "Size:" << ipts1.size() << std::endl;

    std::cout << "im2" << std::endl;
    std::cout << "Size:" << ipts2.size() << std::endl;
    end2 = clock();

    IpPairVec matches;
    getMatches(ipts1,ipts2,matches);

    end3 = clock();

    for (unsigned int i = 0; i < matches.size(); ++i)
    {
        drawPoint(img1,matches[i].first);
        drawPoint(img2,matches[i].second);

        const int & w = img1->width;
        cvLine(img1,cvPoint(matches[i].first.x,matches[i].first.y),cvPoint(matches[i].second.x+w,matches[i].second.y), cvScalar(255,255,255),1);
        cvLine(img2,cvPoint(matches[i].first.x-w,matches[i].first.y),cvPoint(matches[i].second.x,matches[i].second.y), cvScalar(255,255,255),1);
    }

    std::cout << "Matches: " << matches.size() << std::endl;
    /*
      cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
      cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
      cvShowImage("1", img1);
      cvShowImage("2", img2);
      cvWaitKey(0);
    */
    end4 = clock();

//  cvSaveImage("result_gpu1.jpg",img1);
//	cvSaveImage("result_gpu2.jpg",img2);

    // Stitch two images
    IplImage *img = cvCreateImage(cvSize(img1->width + img2->width,
                                         img1->height),img1->depth,img1->nChannels);
    cvSetImageROI( img, cvRect( 0, 0, img1->width, img1->height ) );
    cvCopy(img1, img);
    cvSetImageROI( img, cvRect(img1->width,0, img2->width, img2->height) );
    cvCopy(img2, img);
    cvResetImageROI(img);
    cvSaveImage("result_gpu.jpg",img);

    end5 = clock();
    double dif1 = (double)(end1 - start) / CLOCKS_PER_SEC;
    double dif2 = (double)(end2 - end1) / CLOCKS_PER_SEC;
    double dif3 = (double)(end3 - end2) / CLOCKS_PER_SEC;
    double dif4 = (double)(end4 - end3) / CLOCKS_PER_SEC;
    double dif5 = (double)(end5 - end4) / CLOCKS_PER_SEC;
    double total = (double)(end5 - start) / CLOCKS_PER_SEC;
    std::cout.setf(std::ios::fixed,std::ios::floatfield);
    std::cout.precision(5);
    std::cout << "Time(load):" << dif1 << std::endl;
    std::cout << "Time(descriptor):" << dif2 << std::endl;
    std::cout << "Time(match):" << dif3 << std::endl;
    std::cout << "Time(plot):" << dif4 << std::endl;
    std::cout << "Time(save):" << dif5 << std::endl;
    std::cout << "Time(Total):" << total << std::endl;
    return 0;
}
Example #18
bool PatternDetector::findPattern(const cv::Mat& image, PatternTrackingInfo& info)
{
    // Convert input image to gray
    getGray(image, m_grayImg);
    
    // Extract feature points from input gray image
    extractFeatures(m_grayImg, m_queryKeypoints, m_queryDescriptors);
    
    // Get matches with current pattern
    getMatches(m_queryDescriptors, m_matches);

#if _DEBUG
    cv::showAndSave("Raw matches", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif

#if _DEBUG
    cv::Mat tmp = image.clone();
#endif

    // Find homography transformation and detect good matches
    bool homographyFound = refineMatchesWithHomography(
        m_queryKeypoints, 
        m_pattern.keypoints, 
        homographyReprojectionThreshold, 
        m_matches, 
        m_roughHomography);

    if (homographyFound)
    {
#if _DEBUG
        cv::showAndSave("Refined matches using RANSAC", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif
        // If homography refinement enabled improve found transformation
        if (enableHomographyRefinement)
        {
            // Warp image using found homography
            cv::warpPerspective(m_grayImg, m_warpedImg, m_roughHomography, m_pattern.size, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
#if _DEBUG
            cv::showAndSave("Warped image",m_warpedImg);
#endif
            // Get refined matches:
            std::vector<cv::KeyPoint> warpedKeypoints;
            std::vector<cv::DMatch> refinedMatches;

            // Detect features on warped image
            extractFeatures(m_warpedImg, warpedKeypoints, m_queryDescriptors);

            // Match with pattern
            getMatches(m_queryDescriptors, refinedMatches);

            // Estimate new refinement homography
            homographyFound = refineMatchesWithHomography(
                warpedKeypoints, 
                m_pattern.keypoints, 
                homographyReprojectionThreshold, 
                refinedMatches, 
                m_refinedHomography);
#if _DEBUG
            cv::showAndSave("MatchesWithRefinedPose", getMatchesImage(m_warpedImg, m_pattern.grayImg, warpedKeypoints, m_pattern.keypoints, refinedMatches, 100));
#endif
            // Get a result homography as result of matrix product of refined and rough homographies:
            info.homography = m_roughHomography * m_refinedHomography;

            // Transform contour with rough homography
#if _DEBUG
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif

            // Transform contour with precise homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, info.homography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(200,0,0));
#endif
        }
        else
        {
            info.homography = m_roughHomography;

            // Transform contour with rough homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif
        }
    }

#if _DEBUG
    if (1)
    {
        cv::showAndSave("Final matches", getMatchesImage(tmp, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
    }
    std::cout << "Features:" << std::setw(4) << m_queryKeypoints.size() << " Matches: " << std::setw(4) << m_matches.size() << std::endl;
#endif

    return homographyFound;
}
Example #19
int mainMatch(void)
{
  // Initialise capture device
  CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
  if(!capture) error("No Capture");

  // Declare Ipoints and other stuff
  IpPairVec matches;
  IpVec ipts, ref_ipts;
  
  // This is the reference object we wish to find in video frame
  // Replace the line below with IplImage *img = cvLoadImage("imgs/object.jpg"); 
  // where object.jpg is the planar object to be located in the video
  IplImage *img = cvLoadImage("../imgs/object.jpg"); 
  if (img == NULL) error("Need to load reference image in order to run matching procedure");
  CvPoint src_corners[4] = {{0,0}, {img->width,0}, {img->width, img->height}, {0, img->height}};
  CvPoint dst_corners[4];

  // Extract reference object Ipoints
  surfDetDes(img, ref_ipts, false, 3, 4, 3, 0.004f);
  drawIpoints(img, ref_ipts);
  showImage(img);

  // Create a window 
  cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );

  // Main capture loop
  while( true ) 
  {
    // Grab frame from the capture source
    img = cvQueryFrame(capture);
    if (!img) break;
     
    // Detect and describe interest points in the frame
    surfDetDes(img, ipts, false, 3, 4, 3, 0.004f);

    // Fill match vector
    getMatches(ipts,ref_ipts,matches);
    
    // This call finds where the object corners should be in the frame
    if (translateCorners(matches, src_corners, dst_corners))
    {
      // Draw box around object
      for(int i = 0; i < 4; i++ )
      {
        CvPoint r1 = dst_corners[i%4];
        CvPoint r2 = dst_corners[(i+1)%4];
        cvLine( img, cvPoint(r1.x, r1.y),
          cvPoint(r2.x, r2.y), cvScalar(255,255,255), 3 );
      }

      for (unsigned int i = 0; i < matches.size(); ++i)
        drawIpoint(img, matches[i].first);
    }

    // Draw the FPS figure
    drawFPS(img);

    // Display the result
    cvShowImage("OpenSURF", img);

    // If ESC key pressed exit loop
    if( (cvWaitKey(10) & 255) == 27 ) break;
  }

  // Release the capture device
  cvReleaseCapture( &capture );
  cvDestroyWindow( "OpenSURF" );
  return 0;
}
Example #20
Tags::TagList Tags::getExactMatches( const QString & file, const QString & tag )
{
	setTagsFile( file );
	return getMatches( tag, false );
}
Example #21
Tags::TagList Tags::getPartialMatches( const QString & file, const QString & tagpart )
{
	setTagsFile( file );
	return getMatches( tagpart, true );
}
Example #22
Tags::TagList Tags::getPartialMatches( const QString & tagpart )
{
    return getMatches( tagpart, true );
}
Example #23
Tags::TagList Tags::getExactMatches( const QString & tag )
{
    return getMatches( tag, false );
}
Example #24
Tags::TagList Tags::getMatches( const QString & file, const QString & tagpart,  bool partial, const QStringList & types )
{
	setTagsFile( file );
	return getMatches( tagpart, partial, types);
}
Example #25
std::vector<unsigned int> SubstructLibrary::getMatches(
    const ROMol &query, bool recursionPossible, bool useChirality,
    bool useQueryQueryMatches, int numThreads, int maxResults) {
  return getMatches(query, 0, mols->size(), recursionPossible, useChirality,
                    useQueryQueryMatches, numThreads, maxResults);
}
Example #26
void ofxOpenSurf :: threadedFunction() 
{
    while( isThreadRunning() )
    {
        if( bSurfStaticImage )
        {
            if( srcImage != srcImageTemp )
            {
                lock();
                srcImage = srcImageTemp;
                unlock();
                
                surfDetDes
                ( 
                    srcImage->getCvImage(),     // image to find Ipoints in
                    srcIptsTemp,                // reference to vector of Ipoints
                    false,                      // run in rotation invariant mode?
                    4,                          // number of octaves to calculate
                    4,                          // number of intervals per octave
                    2,                          // initial sampling step
                    0.0006                      // blob response threshold
                 );
            }
            
            if( trgImage != trgImageTemp )
            {
                lock();
                trgImage = trgImageTemp;
                unlock();
                
                surfDetDes
                ( 
                    trgImage->getCvImage(),     // image to find Ipoints in
                    trgIptsTemp,                // reference to vector of Ipoints
                    false,                      // run in rotation invariant mode?
                    4,                          // number of octaves to calculate
                    4,                          // number of intervals per octave
                    2,                          // initial sampling step
                    0.0006                      // blob response threshold
                );
            }
            
            getMatches( srcIptsTemp, trgIptsTemp, matchesTemp );
            
            lock();
            srcIpts = srcIptsTemp;
            trgIpts = trgIptsTemp;
            matches = matchesTemp;
            bSurfStaticImage = false;
            unlock();

        }
        
        if( bSurfMotionImage )
        {
            if( srcImage != srcImageTemp )
            {
                lock();
                srcImage = srcImageTemp;
                unlock();
                
                surfDetDes
                ( 
                    srcImage->getCvImage(),     // image to find Ipoints in
                    srcIptsTemp,                // reference to vector of Ipoints
                    false,                      // run in rotation invariant mode?
                    4,                          // number of octaves to calculate
                    4,                          // number of intervals per octave
                    2,                          // initial sampling step
                    0.0004                      // blob response threshold
                );
            }
            
            lock();
            trgImage = trgImageTemp;
            unlock();
            
            surfDetDes
            ( 
                trgImage->getCvImage(),     // image to find Ipoints in
                trgIptsTemp,                // reference to vector of Ipoints
                false,                      // run in rotation invariant mode?
                4,                          // number of octaves to calculate
                4,                          // number of intervals per octave
                2,                          // initial sampling step
                0.001                       // blob response threshold
            );
            
            getMatches( srcIptsTemp, trgIptsTemp, matchesTemp );
            
            lock();
            srcIpts = srcIptsTemp;
            trgIpts = trgIptsTemp;
            matches = matchesTemp;
            bSurfMotionImage = false;
            unlock();
        }
    }
}
Example #27
 int FeaturesTracker::numMatches(){
     std::vector<cv::DMatch> matches = getMatches();
     return matches.size();
 }
Example #28
void msrmtupdate(unsigned char *img, double updt[][9], IpVec *refpts,
                 double *refhist, int w, int h)
{
    long double tot=0,dist2=0;
    double hist[257 * 10]={0};
    static double ptr[4],p;
    int key[10][2];
    IpVec pts,tpts;
    IplImage *cl,*tmp2;
    cl=cvCreateImage(cvSize(w,h),IPL_DEPTH_8U,3);
    memcpy(cl->imageData,(img),cl->imageSize);
    tmp2=cvCloneImage(cl);
    surfDetDes(cl,tpts ,false, 5, 4, 2, 0.00004f);
    IpPairVec matches;
    IpVec ipts, ref_ipts;
    CvPoint src_corners[4] = {cvPoint(0,0), cvPoint(80,0), cvPoint(80, 60),
    cvPoint(0, 60)};
    CvPoint dst_corners[4];
    getMatches(tpts,*refpts,matches);
    int tt=translateCorners(matches, src_corners, dst_corners);
    if (tt)
     {
         // Draw box around object
         for(int i = 0; i < 4; i++ )

         {
             CvPoint r1 = dst_corners[i%4];
             CvPoint r2 = dst_corners[(i+1)%4];
             cvLine( cl, cvPoint(r1.x, r1.y), cvPoint(r2.x, r2.y), cvScalar(255,255,255),3 );
         }
         for (unsigned int i = 0; i < matches.size(); ++i)
         drawIpoint(cl, matches[i].first);
     }
    CvPoint cpt;
    cpt.x=((dst_corners[0].x)+(dst_corners[2].x))/2;
    cpt.y=((dst_corners[0].y)+(dst_corners[2].y))/2;
    p++;
    if(tt)
    {
        if((abs(ptr[2]-abs(dst_corners[0].x-dst_corners[1].x))>=30 ||
        abs(ptr[3]-abs(dst_corners[0].y-dst_corners[3].y))>=30 ||
        !isrect(dst_corners)) && p>3 )
        {
             tt=0;
        }
        else
        {
            cvCvtColor(tmp2,cl ,CV_RGB2HSV);
            ptr[0]=cpt.x;ptr[1]=cpt.y;
            ptr[2]=abs(dst_corners[0].x-dst_corners[1].x);
            ptr[3]=abs(dst_corners[0].y-dst_corners[3].y);
            crhist((unsigned char *)cl->imageData,hist,w,h,ptr);
            dist2=.1*(double)exp(-2*pow(comphist(hist,refhist),2));
        }
    }
    for(int i=0;i<N;i++)
    {
        if(tt && dist2>.05 )
        {
            updt[i][0]=cpt.x;
            updt[i][1]=cpt.y;
            updt[i][2]=ptr[2];
            updt[i][3]=ptr[3];
            updt[i][4]=1;
            updt[i][5]=1;
            updt[i][8]=1;
            tot++;
        }
        else
        {
            double pt[4];
            for(int k=0;k<4;k++)
            {
                pt[k]=updt[i][k];
            }
            cvCvtColor(tmp2,cl, CV_RGB2HSV);
            crhist((unsigned char *)cl->imageData,hist,w,h,pt);
            dist2=.1*(double)exp(-100*pow(comphist(hist,refhist),2));
            updt[i][8]=dist2;
            tot+=updt[i][8];
        }
    }
    for(int i=0;i<N;i++)
      updt[i][8]/=(double)tot;

}
Example #29
 std::vector<cv::DMatch> FeaturesTracker::returnMatches(){
     std::vector<cv::DMatch> matches = getMatches();
     return matches;
 }
Example #30
//point clouds can cause memory leaks, so we use depth images instead
bool FlannMatcher::getFinalTransform(cv::Mat& image1,cv::Mat& image2,
                       cv::Mat& depth1,cv::Mat& depth2,
                       std::vector<cv::DMatch>& bestMatches,
                       Eigen::Matrix4f& bestTransform)
{
    vector<cv::KeyPoint> keypoints1,keypoints2;
    vector<cv::DMatch> matches;
    
    getMatches(depth1,depth2,image1,image2,matches,keypoints1,keypoints2);
    
    vector<Eigen::Vector3f> eigenPoints1,eigenPoints2;
    for(int i=0;i<matches.size();++i)
    {
        cv::Point2f p2d1;
        cv::Point2f p2d2;

        p2d1=keypoints1[matches[i].queryIdx].pt;
        p2d2=keypoints2[matches[i].trainIdx].pt;
        
        //calculate the first x,y,z
        unsigned short d1=depth1.at<unsigned short>(round(p2d1.y),round(p2d1.x));
        double z1=double(d1)/camera_factor;
        double x1=(p2d1.x-cx)*z1/fx;
        double y1=(p2d1.y-cy)*z1/fy;

        //calculate the second x,y,z
        unsigned short d2=depth2.at<unsigned short>(round(p2d2.y),round(p2d2.x));
        double z2=double(d2)/camera_factor;
        double x2=(p2d2.x-cx)*z2/fx;
        double y2=(p2d2.y-cy)*z2/fy;

        //push them into eigenPoints
        eigenPoints1.push_back(Eigen::Vector3f(x1,y1,z1));
        eigenPoints2.push_back(Eigen::Vector3f(x2,y2,z2));
    }
    
    /***********************/
    bool validTrans=false;
    pcl::TransformationFromCorrespondences tfc;
    int k=3;
    double bestError=1E10;
    float bestRatio=0.0;
    int numValidMatches=matches.size();
    
    vector<int> bestInliersIndex;
    
    bestMatches.clear();
    
    if(numValidMatches<k)
    	return false;
    
    for(int iteration=0;iteration<maxIterations;++iteration)
    {
        tfc.reset();
    	
    	for(int i=0;i<k;++i)
    	{
    	   int id_match=rand()%numValidMatches;
    	   /*
    	   Eigen::Vector3f from(pc1->at(keypoints1[matches[id_match].queryIdx].pt.x,matches[id_match].queryIdx].pt.y).x,
    	                        pc1->at(keypoints1[matches[id_match].queryIdx].pt.x,matches[id_match].queryIdx].pt.y).y,
    	                        pc1->at(keypoints1[matches[id_match].queryIdx].pt.x,matches[id_match].queryIdx].pt.y).z);
    	   Eigen::Vector3f to(pc2->at(keypoints2[matches[id_match].trainIdx].pt.x,matches[id_match].trainIdx].pt.y).x,
    	                      pc2->at(keypoints2[matches[id_match].trainIdx].pt.x,matches[id_match].trainIdx].pt.y).y,
    	                      pc2->at(keypoints2[matches[id_match].trainIdx].pt.x,matches[id_match].trainIdx].pt.y).z);                     
    	   tfc.add(from,to);
    	   */
    	   tfc.add(eigenPoints1[id_match],eigenPoints2[id_match]);
    	}
    	Eigen::Matrix4f transformation = tfc.getTransformation().matrix();
    	
    	vector<int> indexInliers;
	double maxInlierDistance = 0.05;
	double meanError;
	float ratio;
	
	evaluateTransform(transformation,
	                  eigenPoints1,eigenPoints2,
	                  maxInlierDistance*maxInlierDistance,
	                  indexInliers,
	                  meanError,
	                  ratio);
        
        if(meanError<0 || meanError >= maxInlierDistance)
                continue;
        if (meanError < bestError)
	{
	     if (ratio > bestRatio)
			bestRatio = ratio;

	     if (indexInliers.size()<10 || ratio<0.3)
			continue;	// not enough inliers found
	}
	
	tfc.reset();
	
	for(int idInlier = 0; idInlier < indexInliers.size(); idInlier++)
	{
	    int idMatch  = indexInliers[idInlier];
	    tfc.add(eigenPoints1[idMatch],eigenPoints2[idMatch]);
	}
	transformation = tfc.getTransformation().matrix();
	
	evaluateTransform(transformation,
	                  eigenPoints1,eigenPoints2,
	                  maxInlierDistance*maxInlierDistance,
	                  indexInliers,
	                  meanError,
	                  ratio);
	                  
	if (meanError < bestError)
	{
	     if (ratio > bestRatio)
			bestRatio = ratio;

	     if (indexInliers.size()<10 || ratio<0.3)
			continue;	// not enough inliers found
			
	     bestTransform=transformation;
	     bestError=meanError;
	     //cout<<"indexInliers size is: "<<indexInliers.size()<<endl;
	     bestInliersIndex=indexInliers;
	     
	}                  
    }
    
    if(bestInliersIndex.size()>0)
    {
        std::cout<<"**********************************"<<std::endl;
        std::cout<<"we get----> "<<bestInliersIndex.size()<<"/"<<eigenPoints1.size()<<" inliers!!"<<std::endl;
        std::cout<<"inliers percentage: "<<bestInliersIndex.size()*100/eigenPoints1.size()<<"% !"<<std::endl;
        std::cout<<"**********************************"<<std::endl;
        cout<<"transformation: "<<endl<<bestTransform<<endl;
    
        for(int i=0;i<bestInliersIndex.size();++i)
	{
	    //std::cout<<"inliers i is: "<<bestInliersInliers[i]<<endl;
	    bestMatches.push_back(matches[bestInliersIndex[i]]);
	}
        validTrans=true;
        
        /*
        //draw
        cv::Mat img_matches;
        cv::drawMatches(image1,keypoints1,image2,keypoints2,
                    matches,img_matches,CV_RGB(255,0,0));
        cv::drawMatches(image1,keypoints1,image2,keypoints2,
                    bestMatches,img_matches,CV_RGB(0,255,0));
        cv::imshow("ransac matches",img_matches);
        */
        drawInliers(image1,image2,keypoints1,keypoints2,matches,bestMatches);
        cv::waitKey(10);
    }
    else
    {
      cout<<"bestRatio is: "<<bestRatio<<" ,but no valid Transform founded!!"<<endl;
      validTrans=false;
    }
   return validTrans;

}
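The 2-D-to-3-D lifting inside the match loop above is standard pinhole back-projection. Factored out as a helper (a sketch; camera_factor, fx, fy, cx and cy are assumed to be the same intrinsics the function above uses as globals):

// Sketch: back-project pixel (u, v) with raw depth d into camera space,
// exactly as done per match in getFinalTransform().
Eigen::Vector3f backProject(float u, float v, unsigned short d)
{
    double z = double(d) / camera_factor;   // raw depth units -> metres
    double x = (u - cx) * z / fx;
    double y = (v - cy) * z / fy;
    return Eigen::Vector3f(x, y, z);
}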