Example #1
int main( int argc, char** argv ) {
    IplImage *src, *templ,*ftmp[6]; //ftmp will hold results
	
	CvPoint minloc[6], maxloc[6];
	double minval[6], maxval[6];

    int i;
    if( argc == 3){
//Read in the source image to be searched:
        if((src=cvLoadImage(argv[1], 1))== 0) {
            printf("Error on reading src image %s\n",argv[i]);
            return(-1);
        }
//Read in the template to be used for matching:
        if((templ=cvLoadImage(argv[2], 1))== 0) {
            printf("Error on reading template %s\n",argv[2]);
            return(-1);
        }
//ALLOCATE OUTPUT IMAGES:
        int iwidth = src->width - templ->width + 1;
        int iheight = src->height - templ->height + 1;
        for(i=0; i<6; ++i){
            ftmp[i] = cvCreateImage(
                cvSize(iwidth,iheight),32,1);
        }
//DO THE MATCHING OF THE TEMPLATE WITH THE IMAGE:
        for(i=0; i<6; ++i){
            cvMatchTemplate( src, templ, ftmp[i], i);
            //cvNormalize(ftmp[i],ftmp[i],1,0,CV_MINMAX);
            cvMinMaxLoc(ftmp[i], &minval[i], &maxval[i], &minloc[i], &maxloc[i], 0);
            std::cerr /* << i << ": minval: " << minval[i] << " maxval: " << maxval[i] */
                      << " minloc: " << minloc[i].x << ", " << minloc[i].y
                      << " maxloc: " << maxloc[i].x << ", " << maxloc[i].y
                      << "\n";
        }
//DISPLAY
        cvNamedWindow( "Template", 0 );
        cvShowImage( "Template", templ );
        cvNamedWindow( "Image", 0 );
        cvShowImage( "Image", src );
        cvNamedWindow( "SQDIFF", 0 );
        cvShowImage( "SQDIFF", ftmp[0] );
        cvNamedWindow( "SQDIFF_NORMED", 0 );
        cvShowImage( "SQDIFF_NORMED", ftmp[1] );
        cvNamedWindow( "CCORR", 0 );
        cvShowImage( "CCORR", ftmp[2] );
        cvNamedWindow( "CCORR_NORMED", 0 );
        cvShowImage( "CCORR_NORMED", ftmp[3] );
        cvNamedWindow( "CCOEFF", 0 );
        cvShowImage( "CCOEFF", ftmp[4] );
        cvNamedWindow( "CCOEFF_NORMED", 0 );
        cvShowImage( "CCOEFF_NORMED", ftmp[5] );
//LET USER VIEW RESULTS:
        cvWaitKey(0);
    } else { 
        printf("Call should be:"
               "matchTemplate image template \n");
    }
}
Example #2
ofPoint matchFinder::getPoint() {
	// get the size for our result image
	CvSize result_size = cvSize(input.getWidth() - tpl.getWidth() + 1,
								input.getHeight() - tpl.getHeight() + 1);
	
	// create the result image for the comparison
	IplImage *result_image = cvCreateImage(result_size, IPL_DEPTH_32F, 1);

	// make the comparison
	cvMatchTemplate(input.getCvImage(), tpl.getCvImage(), result_image, CV_TM_SQDIFF);
	
	// copy to ofCv image: convert the 32F result to 8-bit first
	// (CV_TM_SQDIFF values are unbounded, so the fixed scale here only gives a rough visualization)
	IplImage *result_char = cvCreateImage(cvSize(result_image->width, result_image->height), 8, 1);
	cvConvertScale(result_image, result_char, 255.0);
	
	ofcv_result_image.allocate(result_size.width, result_size.height);
	ofcv_result_image = result_char;
	
	// get the location of the best match
	CvPoint min_loc;
	CvPoint max_loc;
	cvMinMaxLoc(result_image, &min_val, &max_val, &min_loc, &max_loc, 0);
		
	// clean up (the ofxCv image copies the data, so both temporaries can be released)
	cvReleaseImage(&result_image);
	cvReleaseImage(&result_char);
	
	// return value
	ofPoint p = ofPoint(min_loc.x, min_loc.y);
	return p;
}
Example #3
double template_original_match(IplImage* original_image,IplImage* template_image)
{
	//Resize the template and the original sub-image.
	//The scale difference of 8 pixels was found by repeated trials:
	//it yields a 9x9 grid of candidate offsets, and taking the maximum over those matches
	//is more reliable than resizing both images to the same size and matching only once.
	
	IplImage* resized_original_image = cvCreateImage(cvSize(MATCHING_WIDTH + 8, MATCHING_HEIGHT + 8),original_image->depth,original_image-> nChannels);
	IplImage* resized_template_image = cvCreateImage(cvSize(MATCHING_WIDTH, MATCHING_HEIGHT),template_image->depth,template_image-> nChannels);
	IplImage* matching_result = cvCreateImage( cvSize(resized_original_image->width - resized_template_image -> width + 1,resized_original_image->height - resized_template_image->height + 1), IPL_DEPTH_32F, 1 );
	
	double min_val;  
	double max_val;
	CvPoint min_loc;
	CvPoint max_loc;  

	cvResize(original_image,resized_original_image);
	cvResize(template_image,resized_template_image);

	//cvSmooth(resized_original_image,resized_original_image);
	//cvSmooth(resized_template_image,resized_template_image);


	//The match with max_val is the best match
	cvMatchTemplate(resized_original_image,resized_template_image,matching_result,CV_TM_CCOEFF_NORMED);
	cvMinMaxLoc(matching_result, &min_val, &max_val, &min_loc, &max_loc, NULL);

	return max_val;
	
}
Example #4
File: lk.cpp  Project: jmfs/libobtrack
/**
 * Calculates normalized cross correlation for every point.
 * @param imgI      Image 1.
 * @param imgJ      Image 2.
 * @param points0   Array of points of imgI
 * @param points1   Array of points of imgJ
 * @param nPts      Length of array/number of points.
 * @param status    Selects which point pairs are calculated:
 *                  if status[i] == 1 => match[i] is calculated.
 *                  else match[i] = 0.0
 * @param match     Output: Array will contain ncc values.
 *                  0.0 if not calculated.
 * @param winsize   Size of quadratic area around the point
 *                  which is compared.
 * @param method    Specifies how image regions are compared;
 *                  see cvMatchTemplate
 */
void normCrossCorrelation(IplImage *imgI, IplImage *imgJ,
    CvPoint2D32f *points0, CvPoint2D32f *points1, int nPts, char *status,
    float *match, int winsize, int method)
{
  IplImage *rec0 = cvCreateImage(cvSize(winsize, winsize), 8, 1);
  IplImage *rec1 = cvCreateImage(cvSize(winsize, winsize), 8, 1);
  IplImage *res = cvCreateImage(cvSize(1, 1), IPL_DEPTH_32F, 1);

  int i;
  for (i = 0; i < nPts; i++)
  {
    if (status[i] == 1)
    {
      cvGetRectSubPix(imgI, rec0, points0[i]);
      cvGetRectSubPix(imgJ, rec1, points1[i]);
      cvMatchTemplate(rec0, rec1, res, method);
      match[i] = ((float *) (res->imageData))[0];
    }
    else
    {
      match[i] = 0.0;
    }
  }
  cvReleaseImage(&rec0);
  cvReleaseImage(&rec1);
  cvReleaseImage(&res);
}
void cv::matchTemplate( const Mat& image, const Mat& templ, Mat& result, int method )
{
    result.create( std::abs(image.rows - templ.rows) + 1,
                   std::abs(image.cols - templ.cols) + 1, CV_32F );
    CvMat _image = image, _templ = templ, _result = result;
    cvMatchTemplate( &_image, &_templ, &_result, method );    
}
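A minimal usage sketch of the C++ wrapper above (not part of the original examples; it assumes the OpenCV 2.x-era headers, and the file paths and the function name run_match are placeholders). The wrapper sizes the result image itself, so the caller only reads off the best location with cv::minMaxLoc:

#include <opencv2/opencv.hpp>

int run_match(const char* imgPath, const char* tplPath)
{
    cv::Mat image = cv::imread(imgPath, 1);
    cv::Mat templ = cv::imread(tplPath, 1);
    if (image.empty() || templ.empty())
        return -1;

    cv::Mat result;                       // sized by matchTemplate itself
    cv::matchTemplate(image, templ, result, CV_TM_SQDIFF);

    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);

    // for CV_TM_SQDIFF the best match is the minimum
    cv::rectangle(image, minLoc,
                  cv::Point(minLoc.x + templ.cols, minLoc.y + templ.rows),
                  cv::Scalar(0, 0, 255), 2);
    cv::imshow("match", image);
    cv::waitKey(0);
    return 0;
}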
Example #6
int main(int argc, char** argv){
	int i;
	if(argc != 3){
		printf("Error 1: 2 arguments expected, %d given.\n",argc-1);
		return 0;
	}
	IplImage* source = cvLoadImage(argv[1],CV_LOAD_IMAGE_COLOR);
	IplImage* tmpl   = cvLoadImage(argv[2],CV_LOAD_IMAGE_COLOR);
	int ww = source->width - tmpl->width + 1;
	int hh = source->height - tmpl->height + 1;
	IplImage *result = cvCreateImage(cvSize(ww,hh),IPL_DEPTH_32F, 1);//source->nChannels);

	cvMatchTemplate(source, tmpl, result, CV_TM_SQDIFF);

	CvPoint minLoc;
	CvPoint maxLoc;
	double minVal;
	double maxVal;
	cvMinMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, 0);
	cvRectangle(source, minLoc, cvPoint(minLoc.x+tmpl->width,minLoc.y+tmpl->height),cvScalar(0,0,255,1),1,8,0);

	cvNamedWindow("match",CV_WINDOW_AUTOSIZE);
	cvShowImage("match",source);
	cvWaitKey(0);
	cvReleaseImage(&source);
	cvReleaseImage(&tmpl);
	cvReleaseImage(&result);
	cvDestroyWindow("match");
}
Example #7
void MatchTemplatePlugin::ProcessStatic
( int i, ImagePlus *img, ImagePlus *oimg,
 int method, CvSize winsize, IplImage* &map){
	CvRect orect = cvBoundingRect(oimg->contourArray[i],1);
	RestrictRectLoc(orect, cvRect(0,0,img->orig->width,img->orig->height));
	cvSetImageROI(oimg->orig, orect);
	CvRect rect = cvRect(MAX(0,orect.x-winsize.width), MAX(0,orect.y-winsize.height),orect.width+2*winsize.width, orect.height+2*winsize.height);
	rect.width = MIN(rect.width,oimg->orig->width-rect.x);
	rect.height = MIN(rect.height,oimg->orig->height-rect.y);
	cvSetImageROI(img->orig, rect);

	CvSize mapsize = MyPoint(MyPoint(rect)-MyPoint(orect)+wxPoint(1,1)).ToCvSize();
	if (map && MyPoint(cvGetSize(map))!=MyPoint(mapsize))
		cvReleaseImage(&map);
	if( !map )
        map = cvCreateImage(mapsize, IPL_DEPTH_32F, 1);

	cvMatchTemplate( img->orig, oimg->orig, map, method );
	cvResetImageROI(img->orig);
	cvResetImageROI(oimg->orig);
	CvPoint minloc;
	CvPoint maxloc;
	double minval, maxval;
	cvMinMaxLoc( map, &minval, &maxval, &minloc, &maxloc);
	bool minisbest = (method == CV_TM_SQDIFF || method==CV_TM_SQDIFF_NORMED);
	rect.x = rect.x + (minisbest ? minloc.x : maxloc.x);
	rect.y = rect.y + (minisbest ? minloc.y : maxloc.y);

	CvPoint shift = cvPoint(rect.x - orect.x, rect.y - orect.y);
	ShiftContour(oimg->contourArray[i],img->contourArray[i],shift);
	ShiftFeatPoints(oimg->feats[i], img->feats[i], cvPointTo32f(shift));
}
Example #8
/**
* Locate the user's eye with template matching
*
* @param    IplImage* img     the source image
* @param    IplImage* tpl     the eye template
* @param    CvRect*   window  search within this window,
*                            will be updated with the recent search window
* @param    CvRect*   eye     output parameter, will contain the current
*                            location of user's eye
* @return    int               '1' if found, '0' otherwise
*/
int
locate_eye(IplImage* img, IplImage* tpl, CvRect* window, CvRect* eye)
{
	IplImage*    tm;
	CvRect        win;
	CvPoint        minloc, maxloc, point;
	double        minval, maxval;
	int            w, h;
	/* get the centroid of eye */
	point = cvPoint(
		(*eye).x + (*eye).width / 2,
		(*eye).y + (*eye).height / 2
		);
	/* set up the search window;
	adjust the predefined WIN_WIDTH and WIN_HEIGHT above
	as needed */
	win = cvRect(
		point.x - WIN_WIDTH / 2,
		point.y - WIN_HEIGHT / 2,
		WIN_WIDTH,
		WIN_HEIGHT
		);
	/* make sure that the search window is still within the frame */
	if (win.x < 0)
		win.x = 0;
	if (win.y < 0)
		win.y = 0;
	if (win.x + win.width > img->width)
		win.x = img->width - win.width;
	if (win.y + win.height > img->height)
		win.y = img->height - win.height;
	/* create new image for template matching result where:
	width  = W - w + 1, and
	height = H - h + 1 */
	w = win.width - tpl->width + 1;
	h = win.height - tpl->height + 1;
	tm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 1);
	/* apply the search window */
	cvSetImageROI(img, win);
	/* template matching */
	cvMatchTemplate(img, tpl, tm, CV_TM_SQDIFF_NORMED);
	cvMinMaxLoc(tm, &minval, &maxval, &minloc, &maxloc, 0);
	/* release things */
	cvResetImageROI(img);
	cvReleaseImage(&tm);
	/* only good matches */
	if (minval > TM_THRESHOLD)
		return 0;
	/* return the search window */
	*window = win;
	/* return eye location */
	*eye = cvRect(
		win.x + minloc.x,
		win.y + minloc.y,
		TPL_WIDTH,
		TPL_HEIGHT
		);
	return 1;
}
Example #9
IplImage* pattern_matching(const IplImage* img,const IplImage* pattern)
/*Return the probability map of the pattern in the img*/
{
	CvSize input_size= cvGetSize(img);
	CvSize pattern_size= cvGetSize(pattern);
	IplImage*  probability_map=cvCreateImage(cvSize(input_size.width-pattern_size.width+1,input_size.height-pattern_size.height+1),IPL_DEPTH_32F,1);
	cvMatchTemplate(img,pattern,probability_map,CV_TM_CCOEFF_NORMED);
	return probability_map;
}
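The map returned by pattern_matching is owned by the caller. A minimal, hypothetical usage sketch (assuming img and pattern are already-loaded images as above, and <stdio.h> is available) that reads off the best match and releases the map:

	IplImage* map = pattern_matching(img, pattern);
	double minval, maxval;
	CvPoint minloc, maxloc;
	cvMinMaxLoc(map, &minval, &maxval, &minloc, &maxloc, 0);
	/* with CV_TM_CCOEFF_NORMED the best match is the maximum */
	printf("best match at (%d,%d), score %f\n", maxloc.x, maxloc.y, maxval);
	cvReleaseImage(&map);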
Example #10
//------FUNCTION USED TO DETERMINE DENOMINATION----------
void detectDenomination(IplImage *img,int thresx,int thresy)
{
	CvPoint		minloc, maxloc;
	double		minval, maxval;
	float max = -1;
	int index = -1;
	//--------4 TEMPLATE NOTES OF NoteFinal----------
	for(int i=0; i<4; i++)
	{
		//IplImage *tpl=cvLoadImage(Gandhitpl[i]);
		IplImage *source = cvLoadImage(NoteFinal[i]);

		//declare a destination IplImage object with correct size, depth and channels
		IplImage *destination = cvCreateImage(
			cvSize(thresx, thresy), source->depth, source->nChannels);

		//use cvResize to resize source to a destination image
		cvResize(source, destination);
		IplImage *tpl = destination;

		int res_width  = img->width  - tpl->width  + 1;
		int res_height = img->height - tpl->height + 1;

		IplImage *res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );
		/* choose template matching method to be used */
		//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF );
		//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF_NORMED );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCORR );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCORR_NORMED );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF );
		cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );
		cvMinMaxLoc( res, &minval, &maxval, &minloc, &maxloc, 0 );

		//cvNamedWindow("showtplmatch",1);
		//cvShowImage("showtplmatch",img);
		//printf("\n i= %d, max = %f",i,maxval);
		//cvWaitKey(100);
		if(max < maxval)
		{
			max = maxval;
			index = i;
		}

		// release the per-template images before the next iteration (tpl aliases destination)
		cvReleaseImage(&source);
		cvReleaseImage(&destination);
		cvReleaseImage(&res);
	}
	
	// ----- THE TPL NOTE FOR WHICH THE MATCH WAS MAXIMUM , THAT INDEX COUNT IS INCREMENTED 
	maxnote[index]++;
	
}
Example #11
double comparison::run_template_matching_comparison(int type){
	int height = src2->height - src1->height + 1;
	int width  = src2->width  - src1->width  + 1;
	/* create a new image to store the template matching result */
	IplImage *ftmp = cvCreateImage( cvSize(width, height), IPL_DEPTH_32F, 1);
	/* match src1 against src2 with the given method */
	cvMatchTemplate(src2, src1, ftmp, type);
	/* find the maximum value and its location */
	CvPoint minloc, maxloc;
	double  minval, maxval;
	cvMinMaxLoc(ftmp, &minval, &maxval, &minloc, &maxloc, 0);
	cvReleaseImage(&ftmp);
	return maxval;
}
Example #12
File: metric.cpp  Project: manasabhat/ITG
float correlation( IplImage img1,IplImage img2) 
{
#if PRINT_DEBUGS
	cvShowImage("img1",&img1);
	cvWaitKey(0);
	cvShowImage("img2",&img2);
	cvWaitKey(0);
#endif
	IplImage* res = cvCreateImage(cvSize(1,1),IPL_DEPTH_32F,1);
	cvMatchTemplate(&img1,&img2,res,CV_TM_CCORR_NORMED);
	CvScalar scal = cvGet2D( res,0,0);
	return( scal.val[0]);
}
Example #13
void gandhijitplMatch(GandhitplMatch detectedimg,int index)
{

	CvPoint		minloc, maxloc;
	double		minval, maxval;
	
	for(int i=0; i<4;i++)
	{
		//IplImage *tpl=cvLoadImage(Gandhitpl[i]);
IplImage *source = cvLoadImage(Gandhitpl[i]);

	printf("gandhitplmatchloaded");

 //declare a destination IplImage object with correct size, depth and channels
      IplImage *destination = cvCreateImage
	  ( cvSize(detectedimg.width ,detectedimg.height),source->depth, source->nChannels );

//use cvResize to resize source to a destination image
	cvResize(source, destination);
	IplImage *tpl=destination;
		


		int res_width=detectedimg.faceimg->width-tpl->width+1;
		int res_height=detectedimg.faceimg->height-tpl->height+1;


	IplImage *res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );
	/* choose template matching method to be used */
	//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF );
	//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF_NORMED );
	//cvMatchTemplate( img, tpl, res, CV_TM_CCORR );
	//cvMatchTemplate( img, tpl, res, CV_TM_CCORR_NORMED );
	//cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF );
	cvMatchTemplate( detectedimg.faceimg, tpl, res, CV_TM_CCOEFF_NORMED );
	cvMinMaxLoc( res, &minval, &maxval,&minloc,&maxloc, 0);
		
	cvNamedWindow("showtplmatch",1);
	cvShowImage("showtplmatch",detectedimg.faceimg);
//	printf("\n i= %d, max = %f",i,maxval);
//	cvWaitKey(100);
	if(globalmaximum<maxval)
		{
			globalmaximum=maxval;
			globalmaxindex=index;
		}
	}
	

}
Example #14
void TemlateBit(IplImage* dest,const char *filename,CvPoint *ptDest,CvPoint *ptSrc){
	//IplImage *img;
	IplImage *tpl;
	IplImage *res;
	CvPoint minloc,maxloc;
	double minval,maxval;
	int img_width,img_height;
	int tpl_width,tpl_height;
	int res_width,res_height;
	if (0==dest)
	{
		return ;
	}
	tpl=cvLoadImage(filename,CV_LOAD_IMAGE_COLOR);
	if (0==tpl)
	{
		return ;
	}
	img_width=dest->width;
	img_height=dest->height;
	tpl_width=tpl->width;
	tpl_height=tpl->height;
	res_width=img_width-tpl_width+1;
	res_height=img_height-tpl_height+1;

	res=cvCreateImage(cvSize(res_width,res_height),IPL_DEPTH_32F,1);
	cvMatchTemplate(dest,tpl,res,CV_TM_SQDIFF);
	cvMinMaxLoc(res,&minval,&maxval,&minloc,&maxloc,0);
	//cvRectangle(dest,cvPoint(minloc.x/2,minloc.y/2),cvPoint(minloc.x/2+tpl_width/2,minloc.y/2+tpl_height/2),cvScalar(0,0,255,0),1,0,0);
	//ptDest=cvPoint(minloc.x,minloc.y);
	//ptSrc=cvPoint(minloc.x+tpl_width,minloc.y+tpl_height);
	ptDest->x=minloc.x;
	ptDest->y=minloc.y;
	ptSrc->x=minloc.x+tpl_width;
	ptSrc->y=minloc.y+tpl_height;
    //printf("ptDest.x=%d ptDest.y=%d \n ptSrc.x=%d ptSrc.y=%d \n",ptDest.x,ptDest.y,ptSrc.x,ptSrc.y);
	//cvNamedWindow("reference",CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("template",CV_WINDOW_AUTOSIZE);
	//cvShowImage("reference",dest);
	//cvShowImage("template",tpl);

	//cvWaitKey(0);

	//cvDestroyWindow("reference");
	//cvDestroyWindow("template");
	//cvReleaseImage(&dest);
	cvReleaseImage(&tpl);
	cvReleaseImage(&res);

}
Example #15
//given a large patch and a smaller patch, finds the peak of the NCC value to compute the translation between patches
void findMatchingTranslation(IplImage *imageIpl,IplImage *templIpl,double *x,double *y,double *score,int mType)
{
    int lP_width=imageIpl->width;
    int lP_height=imageIpl->height;
    int sP_width=templIpl->width;
    //int sP_height=templIpl->height;
    int result_width = lP_width - sP_width + 1;
    int result_height = lP_height - sP_width + 1;

    IplImage* resultIpl = cvCreateImage(cvSize(result_width, result_height), IPL_DEPTH_32F, 1);


    cvMatchTemplate(imageIpl, templIpl, resultIpl, CV_TM_CCOEFF_NORMED);


    //famattodo: crop borders. Is it necessary using NCC from OpenCV? I don't think so.
    vector<Point2D*> peaks = findPeaks((float*)resultIpl->imageData, result_width, result_height, peakThresholdFillContours, 1, 0, sP_width, 0,mType);


    //famatdebug
    //ofstream outfv("result_fc.txt");
    //print(resultIpl,outfv);
    //outfv.close();
    //cout<<"Finished writing translation matching with (x,y)="<<*peaks[0]<<endl;
    //cout<<"peaks size="<<peaks.size()<<endl;
    //--------------------------

    cvReleaseImage(&resultIpl);

    if (!peaks.empty())
    {
        *x=peaks[0]->x;
        *y=peaks[0]->y;
        *score=peaks[0]->score;
    }
    else
    {
        *x=-1.0;
        *y=-1.0;
        *score=-1.0;
    }


    //deallocate memory for peaks
    for (unsigned int kk=0; kk<peaks.size(); kk++)
        delete peaks[kk];
    peaks.clear();

}
Example #16
int main(int argc, const char * argv[]) {
    
    IplImage *src, *templ, *ftmp[6];
    int i;
    
    if (argc == 3 && ((src = cvLoadImage(argv[1], 1)) != 0) && ((templ = cvLoadImage(argv[2], 1)) != 0)) {
        // Allocate output images
        int i_width = src->width - templ->width + 1;
        int i_height = src->height - templ->height + 1;
        for ( i = 0; i < 6; i++ ) {
            ftmp[i] = cvCreateImage( cvSize(i_width, i_height), 32, 1 );
        }
        // Do template matching
        for ( i = 0; i < 6; i++) {
            cvMatchTemplate( src, templ, ftmp[i], i );
            cvNormalize( ftmp[i], ftmp[i], 1, 0, CV_MINMAX );
        }
        
        //DISPLAY
        cvNamedWindow( "Template", 0 );
        cvShowImage(   "Template", templ );
        cvNamedWindow( "Image", 0 );
        cvShowImage(   "Image", src );
        
        cvNamedWindow( "SQDIFF", 0 );
        cvShowImage(   "SQDIFF", ftmp[0] );
        
        cvNamedWindow( "SQDIFF_NORMED", 0 );
        cvShowImage(   "SQDIFF_NORMED", ftmp[1] );
        
        cvNamedWindow( "CCORR", 0 );
        cvShowImage(   "CCORR", ftmp[2] );
        
        cvNamedWindow( "CCORR_NORMED", 0 );
        cvShowImage(   "CCORR_NORMED", ftmp[3] );
        
        cvNamedWindow( "CCOEFF", 0 );
        cvShowImage(   "CCOEFF", ftmp[4] );
        
        cvNamedWindow( "CCOEFF_NORMED", 0 );
        cvShowImage(   "CCOEFF_NORMED", ftmp[5] );
        
        cvWaitKey();

    }
    
    return 0;
}
Example #17
/**
 * Estimates the room number digits by smartly analyzing an input ROI or window
 * and applying template matching to sub-windows representing the digits' images
 * respectively in order to find the best match.
 * @param in: Datastructure containing input image, circle radius and center.
 */
void calcRoom(PointImage* in) {
	char path[20]; //temp to hold path to template images
	IplImage* tpl, *tempRes, *img = NULL;
	IplImage* templates[10]; //array of loaded template images
	int room[3], matchIndex = -1; //array holding room detected digits
	CvRect rect;
	double maxMatch = 1.5, currMatch = 1.5; //their max range is typically 1.0.

	//Load the templates
	for (int k = 0; k < 10; k++) {
		sprintf(path, "%s%d%s", "templates/", k, ".png");
		templates[k] = cvLoadImage(path, CV_LOAD_IMAGE_GRAYSCALE);
	}

	//find best match for every sub-window (the three subimages from ROI)
	for (int j = 0; j < 3; j++) {
		for (int i = 0; i <= 9; i++) {
			tpl = &templates[i];
			cvReleaseImage(&img); //free the image from the previous iteration (no-op when NULL)
			img = preProcess(in, tpl);
			rect = cvRect(j * cvRound(img->width / 3), 0, cvRound(img->width / 3), img->height);
			cvSetImageROI(img, rect);
			tempRes = cvCreateImage(cvSize(rect.width - tpl->width + 1, rect.height - tpl->height + 1), IPL_DEPTH_32F, 1);
			cvMatchTemplate(img, tpl, tempRes, CV_TM_SQDIFF_NORMED);
			currMatch = calcMatchingPercent(tempRes);
			if (currMatch < maxMatch) {
				maxMatch = currMatch;
				matchIndex = i;
			}
			cvReleaseImage(&tempRes);
		}
		//Assign current detected digit.
		room[j] = matchIndex;
		//Reset temporary vars.
		currMatch = 1.5;
		maxMatch = 1.5;
		matchIndex = -1;
	}
	//print the room no.
	room[1] = (room[1] == (CV_TM_CCOEFF_NORMED | 6)) ? 1 : room[1];
	printf("Room no: %d %d %d\n", room[0], room[1], room[2]);

	//Memory cleanup.
	cvReleaseImage(&img);
	cvReleaseImage(&in->img);
	for (int l = 0; l < 10; l++) {
		cvReleaseImage((IplImage**) &templates[l]);
	}
}
Example #18
int templateMatch(struct window *window, int frame, int diam, CvMat *tmpl) {
    // Init
    struct frame *fr = get_frame(window->frames, frame);

//    printf("Guess is (%d, %d), diameter is %d\n", window->guess.x, window->guess.y, diam);
    float init_x = (float)window->guess.x-diam, init_y = (float)window->guess.y-diam;

    // See if we can guess were the ball might be
    CvRect rect = cvRect(init_x, init_y, diam*2, diam*2);
    // Make sure rect is with image
    rect.x = rect.x < 0 ? 0 : rect.x;
    rect.y = rect.y < 0 ? 0 : rect.y;
    rect.width = rect.x+rect.width > fr->image->cols ? fr->image->cols-rect.x : rect.width;
    rect.height = rect.y+rect.height > fr->image->rows ? fr->image->rows-rect.y : rect.height;
    // Get sub rect
    CvMat *sub = cvCreateMatHeader(rect.height, rect.width, CV_32F);
    cvGetSubRect(fr->image, sub, rect);

    CvMat *res = cvCreateMat(sub->rows - tmpl->rows+1, sub->cols - tmpl->cols+1, CV_32F);

    // Match
    cvMatchTemplate(sub, tmpl, res, CV_TM_SQDIFF);

    // Find value and location of min = upper-left corner of template match
    CvPoint pt;
    double val;
    cvMinMaxLoc(res, &val, 0, &pt, 0, 0);
//    printf("#%d: value of match is %f\n", frame, val);
    if (val > 20000000) { // Works on sample video
//        printf("Doubling search area\n");
        templateMatch(window, frame, diam*2, tmpl);
        return 0;
    }

    // Match result
    struct MatchResult mr;
    mr.x = init_x+pt.x;
    mr.y = init_y+pt.y;
    mr.found = 1;

    fr->match = mr;

    window->guess.x = mr.x;
    window->guess.y = mr.y;

    cvReleaseMat(&sub);
    cvReleaseMat(&res);

    return 0;
}
Example #19
ofVec2f Simple3DTracker::_predictNextPosition(ofVec2f currentPosition, float* minCost)
{
    int bestx = currentPosition.x, besty = currentPosition.y;
    float bestcost = 9999999, cost, distance;
    const float alpha = _weightedMatchingCoefficient;

    if(!_template || !_tmp || !_tmp2)
        return currentPosition;

    // template matching
    IplImage* haystack = _cameraImage();
    cvMatchTemplate(haystack, _template->getCvImage(), _tmp2, CV_TM_CCOEFF);
    cvNormalize(_tmp2, _tmp2, 1.0, 0.0, CV_MINMAX);

    // find the best match
    for(int y = 0; y < _tmp2->height; y++) {
        const float *src = (const float*)(_tmp2->imageData + y * _tmp2->widthStep);
        unsigned char *dst = (unsigned char*)(_tmp->getCvImage()->imageData + y * _tmp->getCvImage()->widthStep);
        for(int x = 0; x < _tmp2->width; x++) {
            dst[x] = (unsigned char)(src[x] * 255.0f);
            distance = currentPosition.distance(ofVec2f(x, y));
            if(distance <= _lookupRadius) {
                cost = (alpha * (1.0f - src[x])) + ((1.0f - alpha) * distance / _lookupRadius);
                if(cost <= bestcost) { // weighted matching
                    bestx = x;
                    besty = y;
                    bestcost = cost;
                }
            }
        }
    }
    _tmp->flagImageChanged();

    // get the resulting position...
    ofVec2f result(bestx + _template->width/2, besty + _template->height/2);

    // return the min cost?
    if(minCost)
        *minCost = bestcost;

    // update the template?
    if(result.distance(currentPosition) >= UPDATETPL_MINDIST)
        _setTemplate(result);

    // done!
    return result;
}
Example #20
IplImage*  ProcTemplateMatch::process(const IplImage* pInput)
{

	if (_pPimpl->pOutImg)
    	cvReleaseImage(&_pPimpl->pOutImg);

	// load the template image
    IplImage *pTemplateImg = cvLoadImage( _pPimpl->szTemplateImg );

	// image that will hold the correlation coefficients
    CvSize size;
    size.width = pInput->width - pTemplateImg->width+1;
    size.height = pInput->height - pTemplateImg->height+1;

    IplImage *pCoeffImg = cvCreateImage( size, IPL_DEPTH_32F, 1 );

	// compute the correlation coefficients and write them into pCoeffImg
    cvMatchTemplate(pInput, pTemplateImg, pCoeffImg, CV_TM_CCOEFF_NORMED);

    CvPoint p;
	// find the location where the correlation coefficient is maximal
    cvMinMaxLoc(pCoeffImg, &min, &max, NULL, &p);
    left = p.x;
    top = p.y;
    width = pTemplateImg->width;
    height = pTemplateImg->width;

    _pPimpl->pOutImg = pCoeffImg;


#ifdef _DEBUG
    IplImage *pMatchedImg = cvCloneImage(pInput);
    // draw a rectangular box around the detected object
    cvRectangle(pMatchedImg, p, cvPoint(p.x + pTemplateImg->width,
                      p.y + pTemplateImg->height), CV_RGB(255,0,0));


	cvNamedWindow( this->name(), CV_WINDOW_AUTOSIZE);
    cvShowImage( this->name(),  pMatchedImg);
    cvReleaseImage(&pMatchedImg);
#endif

    cvReleaseImage(&pTemplateImg);
    return _pPimpl->pOutImg;
    
}
Example #21
void templateMatching() {
	IplImage *image = cvCloneImage(sourceImage);
	IplImage *binaryObject = cvCreateImage(cvGetSize(objectImage), IPL_DEPTH_8U, 1);
	IplImage *binaryTemplate = cvCreateImage(cvGetSize(templateImage), IPL_DEPTH_8U, 1);
	IplImage *differenceMapImage = cvCreateImage(cvSize(objectImage->width-templateImage->width+1, objectImage->height-templateImage->height+1), IPL_DEPTH_32F, 1);
	CvPoint minLocation;
	
	cvThreshold(objectImage, binaryObject, level, 255, CV_THRESH_BINARY);
	cvThreshold(templateImage, binaryTemplate, level, 255, CV_THRESH_BINARY);
	
	cvMatchTemplate(binaryObject, binaryTemplate, differenceMapImage, CV_TM_SQDIFF);
	cvMinMaxLoc(differenceMapImage, NULL, NULL, &minLocation, NULL, NULL);
	
	cvRectangle(image, minLocation, cvPoint(minLocation.x+templateImage->width, minLocation.y+templateImage->height), CV_RGB(255,0,0), 3, 1, 0);
	
	cvShowImage(windowName, image);
}
Example #22
/* track object */
void trackObject()
{
    CvPoint minloc, maxloc;
    double  minval, maxval;

    /* setup position of search window */
    int win_x0 = object_x0 - ( ( WINDOW_WIDTH  - TPL_WIDTH  ) / 2 );
    int win_y0 = object_y0 - ( ( WINDOW_HEIGHT - TPL_HEIGHT ) / 2 );
    
	/*
	 * Ooops, some bugs here.
	 * If the search window exceed the frame boundaries,
	 * it will trigger errors.
	 *
	 * Add some code to make sure that the search window 
	 * is still within the frame.
	 */
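    /* A hedged sketch of that missing check (assumes the same globals,
       frame, WINDOW_WIDTH and WINDOW_HEIGHT, used below): clamp the
       search window so the ROI stays inside the frame. */
    if( win_x0 < 0 ) win_x0 = 0;
    if( win_y0 < 0 ) win_y0 = 0;
    if( win_x0 + WINDOW_WIDTH > frame->width )
        win_x0 = frame->width - WINDOW_WIDTH;
    if( win_y0 + WINDOW_HEIGHT > frame->height )
        win_y0 = frame->height - WINDOW_HEIGHT;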
	
    /* search object in search window */
    cvSetImageROI( frame, 
                   cvRect( win_x0, 
                           win_y0, 
                           WINDOW_WIDTH, 
                           WINDOW_HEIGHT ) );
    cvMatchTemplate( frame, tpl, tm, CV_TM_SQDIFF_NORMED );
    cvMinMaxLoc( tm, &minval, &maxval, &minloc, &maxloc, 0 );
    cvResetImageROI( frame );
    
    /* if object found... */
    if( minval <= THRESHOLD ) {
        /* save object's current location */
        object_x0 = win_x0 + minloc.x;
        object_y0 = win_y0 + minloc.y;

        /* and draw a box there */
        cvRectangle( frame,
                     cvPoint( object_x0, object_y0 ),
                     cvPoint( object_x0 + TPL_WIDTH, 
					          object_y0 + TPL_HEIGHT ),
                     cvScalar( 0, 0, 255, 0 ), 1, 0, 0 );
    } else {
        /* if not found... */
        fprintf( stdout, "Lost object.\n" );
        is_tracking = 0;
    }
}
Example #23
GdkPixbuf*
find_objects (ObjectsMap *map)
{
  int obj_count;
  IplImage *res_ipl;
  IplImage *f_result;
  GdkPixbuf *res_pbuf;
  int obj_width, obj_height;
  int map_width, map_height;
  double min, max;
  CvPoint min_pnt, max_pnt;

  g_assert(map->map != NULL && map->objects != NULL &&
      map->n_of_objects != 0);

  map_width = map->map->width;
  map_height = map->map->height;
  res_ipl = cvCreateImage(cvGetSize(map->map),
                          map->map->depth,
                          N_CHANNELS_RGB);
  cvCvtColor(map->map, res_ipl, CV_GRAY2BGR);

  obj_count = map->n_of_objects;
  for (int i = 0; i < obj_count; ++i)
    {
      obj_width = map->objects[i]->width;
      obj_height = map->objects[i]->height;

      f_result = cvCreateImage(cvSize(map_width - obj_width + 1,
                                      map_height - obj_height + 1),
                               IPL_DEPTH_32F,
                               N_CHANNELS_GRAY);
      cvMatchTemplate(map->map, map->objects[i], f_result, CV_TM_CCOEFF);
      cvMinMaxLoc(f_result, &min, &max, &min_pnt, &max_pnt, NULL);
      place_rectangle_with_position(res_ipl, &max_pnt, obj_width, obj_height, POS_UP_LEFT);

      cvReleaseImage(&f_result);
    }

  res_pbuf = ipl2pixbuf(res_ipl);
  cvReleaseImage(&res_ipl);

  return res_pbuf;
}
Example #24
static void
gst_template_match_match (IplImage * input, IplImage * templ,
                          IplImage * dist_image, double *best_res, CvPoint * best_pos, int method)
{
    double dist_min = 0, dist_max = 0;
    CvPoint min_pos, max_pos;
    cvMatchTemplate (input, templ, dist_image, method);
    cvMinMaxLoc (dist_image, &dist_min, &dist_max, &min_pos, &max_pos, NULL);
    if ((CV_TM_SQDIFF_NORMED == method) || (CV_TM_SQDIFF == method)) {
        *best_res = dist_min;
        *best_pos = min_pos;
        if (CV_TM_SQDIFF_NORMED == method) {
            *best_res = 1 - *best_res;
        }
    } else {
        *best_res = dist_max;
        *best_pos = max_pos;
    }
}
Example #25
    CvPoint Analysis::getLocation(IplImage *source, IplImage *pattern, bool upperLeft) {
        IplImage* matchRes;
        double minVal, maxVal;
        CvPoint minLoc, maxLoc;
        matchRes = cvCreateImage(cvSize(
                source->width - pattern->width + 1,
                source->height - pattern->height + 1
                ), IPL_DEPTH_32F, 1);

        cvMatchTemplate(source, pattern, matchRes, CV_TM_SQDIFF);
        cvMinMaxLoc(matchRes, &minVal, &maxVal, &minLoc, &maxLoc, 0);

        cvReleaseImage(&matchRes);
        if (!upperLeft) {
            minLoc.x += pattern->width;
            minLoc.y += pattern->height;
        }
        return minLoc;
    }
Example #26
void beadCenter(IplImage *patchIpl,IplImage *perfectMarkerTemplIpl,float *xx,float *yy,int mType)
{
    int result_width = patchIpl->width - perfectMarkerTemplIpl->width + 1;
    int result_height = patchIpl->height - perfectMarkerTemplIpl->height + 1;

    IplImage *resultIpl = cvCreateImage(cvSize(result_width, result_height), IPL_DEPTH_32F, 1);
    cvMatchTemplate(patchIpl, perfectMarkerTemplIpl, resultIpl, CV_TM_CCOEFF_NORMED);

    //famatdebug
    //ofstream outTempl("resultBeadCenter_fc.txt");
    //print(resultIpl,outTempl);
    //outTempl.close();
    //cout<<"Finished writing result bead center"<<endl;
    //----------------

    //we find the closest peak to (xx,yy)
    //note:highest peak is not always the best option (for example, when two markers are together)

    //famattodo: crop borders. Is it necessary using NCC from OpenCV? I don't think so.
    int maxNumPeaks=5;
    vector<Point2D*> peaks = findPeaks((float*)resultIpl->imageData, result_width, result_height, peakThresholdFillContours, maxNumPeaks, 0, perfectMarkerTemplIpl->width, 0,mType);

    //find the closest peak
    //at most we allow a move as large as the radius of a marker
    double minDist=0.25*(perfectMarkerTemplIpl->width-1);
    double dist=1e11;
    //the center is 0.5*(result->width-1). The -1 is because array limits are from 0->width-1
    float cx=0.5f*(result_width-1),cy=0.5f*(result_height-1);//findPeaks returns (x,y) coodinates in array result. We need (delta_x,delta_y)
    for(unsigned int kk=0; kk<peaks.size(); kk++)
    {
        dist=min(dist,sqrt((double)(*xx-(peaks[kk]->x-cx))*(*xx-(peaks[kk]->x-cx))+(*yy-(peaks[kk]->y-cy))*(*yy-(peaks[kk]->y-cy))));
        if(dist<minDist)
        {
            (*xx)=peaks[kk]->x-cx;
            (*yy)=peaks[kk]->y-cy;
        }
        delete peaks[kk];//deallocate memory for peaks
    }

    //deallocate memory
    peaks.clear();
    cvReleaseImage(&resultIpl);
}
Example #27
int BotTemplateMatch(int slot_x, int slot_y, IplImage *Source, IplImage *TemplateImage, int start_x, int start_y, int end_x, int end_y)
{

	IplImage *aaresult = cvCreateImage(cvSize(Source->width - TemplateImage->width + 1,Source->height - TemplateImage->height + 1),32,1);
	cvMatchTemplate( Source, TemplateImage, aaresult, CV_TM_CCOEFF_NORMED);

	double min,max;
	CvPoint minpos, maxpos;
	cvMinMaxLoc(aaresult,&min,&max,&minpos,&maxpos);
	cvReleaseImage(&aaresult);

	if(max > 0.8)
	{
		return 1;
	}
	else
	{
		return 0;
	}

}
Example #28
void MatchTemplate(IplImage* imgSrc,IplImage* imgTemp)

{   

	//double a=0.;    
	CvSize sizeSrc = cvGetSize(imgSrc);   
	CvSize sizeTemp = cvGetSize(imgTemp);   
	CvSize sizeResult = cvSize(sizeSrc.width-sizeTemp.width+1,sizeSrc.height-sizeTemp.height+1);   
	IplImage* imgResult = cvCreateImage(sizeResult,IPL_DEPTH_32F,1);   
	cvMatchTemplate(imgSrc,imgTemp,imgResult,CV_TM_SQDIFF);   

	// with CV_TM_SQDIFF the best match is the MINIMUM value
	float dMin = 1e30f;
	CvPoint point = cvPoint(0,0);

	//if(!a) MessageBox("Not Successful!",MB_OK);

	for (int cx=0; cx < imgResult->width; cx++)
	{
		for (int cy=0; cy < imgResult->height; cy++)
		{
			float fTemp = CV_IMAGE_ELEM(imgResult,float,cy,cx);
			if (fTemp < dMin) // found a closer match
			{
				dMin = fTemp;
				point = cvPoint(cx,cy); // record its location
			}
		}
	}
	CvPoint point2 = cvPoint(point.x+sizeTemp.width,point.y+sizeTemp.height); // opposite corner
	cvRectangle(imgSrc,point,point2,cvScalar(255));   
	cvNamedWindow( "Test", CV_WINDOW_AUTOSIZE );   
	cvShowImage("Test",imgSrc);   
	/*   for(;;)  
	{  
	int k = cvWaitKey(5);  
	if( k == 27 ) break;  
	}  
	*/
}  
Example #29
int findTempl( IplImage* src, IplImage* templ ) {
	int w = src->width - templ->width + 1;
	int h = src->height - templ->height + 1;

	IplImage* res = cvCreateImage( cvSize( w, h ), IPL_DEPTH_32F, 1 );

	cvMatchTemplate( src, templ, res, CV_TM_SQDIFF_NORMED );

	double    threshold = 0.1;
	int      x, y;
	CvScalar s;

	/* loop the comparison result array */
	for( y = 0 ; y < res->height ; y++ ) {
		for( x = 0 ; x < res->width ; x++ ) {
			/* get an element */
			s = cvGet2D( res, y, x );
			
			/* if value below the threshold, similar object is found */
			if( s.val[0] <= threshold ) {
				/* draw a box to mark the object */
				cvRectangle( res, 
					cvPoint( x, y ), 
					cvPoint( x + templ->width, y + templ->height ),
					cvScalar( 0, 0, 0, 0 ), 
					3, 0, 0 );
			}
		}
	}
	
	cvNamedWindow( "FindTemplate", CV_WINDOW_AUTOSIZE );
	cvShowImage( "FindTemplate", res);
	cvWaitKey();
	
	cvReleaseImage( &res );
	cvDestroyWindow( "FindTemplate" );
	return 0;
}
Example #30
// Match each temp with given source, return match value
double MatchTemplate(IplImage* imgSrc, IplImage* imgTemp, CvPoint* ppoint) {
	CvSize sizeSrc = cvGetSize(imgSrc);
	CvSize sizeTemp = cvGetSize(imgTemp);
	CvSize sizeResult = cvSize(sizeSrc.width - sizeTemp.width + 1,
			sizeSrc.height - sizeTemp.height + 1);
	IplImage* imgResult = cvCreateImage(sizeResult, IPL_DEPTH_32F, 1);
	cvMatchTemplate(imgSrc, imgTemp, imgResult, CV_TM_CCORR_NORMED);

	double max;
	cvMinMaxLoc(imgResult, 0, &max, 0, ppoint);
	/* Match as
	 float dMax = 0.0;
	 CvPoint point = cvPoint(0, 0);
	 for (int cx = 0; cx < sizeResult.width; cx++) {
	 for (int cy = 0; cy < sizeResult.height; cy++) {
	 float fTemp = CV_IMAGE_ELEM(imgResult,float,cy,cx);
	 if (dMax < fTemp) 					// 找到最接近的位置
	 {
	 dMax = fTemp;
	 point = cvPoint(cx, cy); 				// record the position
	 }
	 }
	 }*/

	if (globalArgs.verbosity) {
		CvPoint point2 = cvPoint(ppoint->x + sizeTemp.width,
				ppoint->y + sizeTemp.height); 	// 对角位置
		cvRectangle(imgSrc, *ppoint, point2, cvScalar(255));
		cvNamedWindow("Test", CV_WINDOW_AUTOSIZE);
		cvShowImage("Test", imgSrc);

		cvWaitKey(0);
		cvDestroyWindow("Test");
	}
	cvReleaseImage(&imgResult);
	return max;
}