/*	Blink detection: checks the bounding rect of the single
	connected component against the search window and the eye's
	centroid to decide whether the eye is blinking.
	*/
int is_blink(CvSeq* comp, int num, CvRect window, CvRect eye)
{
		if (comp == 0 || num != 1)
			return 0;
 
		CvRect r1 = cvBoundingRect(comp, 1);
 
		/* the component must lie entirely within the search window */
		if (r1.x < window.x)
			return 0;
		if (r1.y < window.y)
			return 0;
		if (r1.x + r1.width > window.x + window.width)	
			return 0;
		if (r1.y + r1.height > window.y + window.height)
			return 0;
 
		/* get the centroid of the eye */
		CvPoint pt = cvPoint(
			eye.x + eye.width/2 ,
			eye.y + eye.height/2 
			);
 
		/* the component must contain the eye's centroid */
		if (pt.x <= r1.x || pt.x >= r1.x + r1.width)
			return 0;
		if (pt.y <= r1.y || pt.y >= r1.y + r1.height)
			return 0;

		return 1;
}
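Standing alone, is_blink expects the caller to have already run connected-component analysis. A minimal driver sketch, assuming `diff` is a thresholded frame-difference image and `win`/`eye` come from an earlier tracking stage (all three names are hypothetical):

/* hypothetical driver: `diff`, `win` and `eye` are assumed inputs */
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* comp = 0;
int num = cvFindContours(diff, storage, &comp, sizeof(CvContour),
                         CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
if (is_blink(comp, num, win, eye))
    printf("blink!\n");
cvReleaseMemStorage(&storage);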
Example #2
void split_sign()
{
	CvSeq * sc;
	CvSeq * c;
	CvSeq * cmax = NULL;	/* largest contour found so far */
	cvClearMemStorage(mem);
	cvFindContours(sign_vision, mem, &sc, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));

	/* keep the contour with the largest area */
	double smax = 0;
	double s;
	c = sc;
	while (c != NULL)
	{
		s = cvContourArea(c, CV_WHOLE_SEQ, 0);
		if (s > smax)
		{
			smax = s;
			cmax = c;
		}
		c = c->h_next;
	}
	if (cmax == NULL)	/* no contours found: nothing to split */
		return;

	/* copy the bounding box of the largest contour into reg_vision */
	sign_rect = cvBoundingRect(cmax, 0);
	cvSetImageROI(vision, sign_rect);
	reg_vision = cvCreateImage(cvSize(sign_rect.width, sign_rect.height), 8, 3);
	cvCopyImage(vision, reg_vision);
	cvResetImageROI(vision);
}
Example #3
/*
 * Compares the histogram of this contour's bounding-box region in the
 * frame against the model histogram (the contour processed is this->c).
 * @src Frame image
 * @testImageHistogram histogram from the model image
 * @h_bins number of hue bins
 * @s_bins number of saturation bins
 * @min match threshold: the Bhattacharyya distance must fall below it
 */
int Contours::histogramMatchingFilter(IplImage * src, CvHistogram * testImageHistogram, int h_bins, int s_bins, double min){
	CvRect box;
	double val;

	//get contour bounding box
	box = cvBoundingRect(this->c, 0);

	//get the subimage bounded by the box (ROI + copy instead of
	//cvGetSubArr, which expects a CvMat header, not an IplImage)
	IplImage * src_bbox = cvCreateImage(cvSize(box.width, box.height), src->depth, src->nChannels);
	cvSetImageROI(src, box);
	cvCopy(src, src_bbox, NULL);
	cvResetImageROI(src);

	//get the subimage histogram
	utils::Histogram * h = new Histogram(h_bins, s_bins);
	CvHistogram* hist = h->getHShistogramFromRGB(src_bbox);
	//compare it with the object histogram
	val = cvCompareHist(hist, testImageHistogram, CV_COMP_BHATTACHARYYA);

	cvReleaseHist(&hist);
	cvReleaseImage(&src_bbox);
	delete h;

	return (val < min);
}
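Note that CV_COMP_BHATTACHARYYA yields a distance in [0, 1] with 0 meaning identical histograms, so the filter accepts a contour when the distance falls below min rather than above it.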
void MatchTemplatePlugin::ProcessStatic
( int i, ImagePlus *img, ImagePlus *oimg,
 int method, CvSize winsize, IplImage* &map){
	// template = the object's bounding box in the old frame
	CvRect orect = cvBoundingRect(oimg->contourArray[i],1);
	RestrictRectLoc(orect, cvRect(0,0,img->orig->width,img->orig->height));
	cvSetImageROI(oimg->orig, orect);
	// search area = the template box grown by winsize on every side, clipped to the image
	CvRect rect = cvRect(MAX(0,orect.x-winsize.width), MAX(0,orect.y-winsize.height),orect.width+2*winsize.width, orect.height+2*winsize.height);
	rect.width = MIN(rect.width,oimg->orig->width-rect.x);
	rect.height = MIN(rect.height,oimg->orig->height-rect.y);
	cvSetImageROI(img->orig, rect);

	// (re)allocate the response map if its size changed
	CvSize mapsize = MyPoint(MyPoint(rect)-MyPoint(orect)+wxPoint(1,1)).ToCvSize();
	if (map && MyPoint(cvGetSize(map))!=MyPoint(mapsize))
		cvReleaseImage(&map);
	if( !map )
		map = cvCreateImage(mapsize, IPL_DEPTH_32F, 1);

	cvMatchTemplate( img->orig, oimg->orig, map, method );
	cvResetImageROI(img->orig);
	cvResetImageROI(oimg->orig);

	// for the SQDIFF methods the best match is the minimum of the map,
	// for all other methods it is the maximum
	CvPoint minloc;
	CvPoint maxloc;
	double minval, maxval;
	cvMinMaxLoc( map, &minval, &maxval, &minloc, &maxloc);
	bool minisbest = (method == CV_TM_SQDIFF || method==CV_TM_SQDIFF_NORMED);
	rect.x = rect.x + (minisbest ? minloc.x : maxloc.x);
	rect.y = rect.y + (minisbest ? minloc.y : maxloc.y);

	// move the contour and its feature points by the detected displacement
	CvPoint shift = cvPoint(rect.x - orect.x, rect.y - orect.y);
	ShiftContour(oimg->contourArray[i],img->contourArray[i],shift);
	ShiftFeatPoints(oimg->feats[i], img->feats[i], cvPointTo32f(shift));
}
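For reference, the response-map size here follows cvMatchTemplate's convention: for a W x H search area and a w x h template the result is (W - w + 1) x (H - h + 1), i.e. the difference of the two rects plus one pixel in each dimension.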
/**
 * Experimentally-derived heuristics to determine whether
 * the connected components form an eye pair or not.
 *
 * @param	CvSeq*  comp the connected components
 * @param	int     num  the number of connected components
 * @param   CvRect* eye  output parameter, will contain the location of the 
 *                       first component
 * @return	int          '1' if eye pair, '0' otherwise
 */
int
is_eye_pair(CvSeq* comp, int num, CvRect* eye)
{
	if (comp == 0 || num != 2)
		return 0;

	CvRect r1 = cvBoundingRect(comp, 1);
	comp = comp->h_next;

	if (comp == 0)
		return 0;

	CvRect r2 = cvBoundingRect(comp, 1);

	/* the widths of the components are about the same */
	if (abs(r1.width - r2.width) >= 5)
		return 0;

	/* the heights of the components are about the same */
	if (abs(r1.height - r2.height) >= 5)
		return 0;

	/* vertical distance is small */
	if (abs(r1.y - r2.y) >= 5)
		return 0;

	/* reasonable horizontal distance, based on the components' width */
	int dist_ratio = abs(r1.x - r2.x) / r1.width;
	if (dist_ratio < 2 || dist_ratio > 5)
		return 0;

	/* get the centroid of the 1st component */
	CvPoint point = cvPoint(
		r1.x + (r1.width / 2),
		r1.y + (r1.height / 2)
	);

	/* return eye boundaries */
	*eye = cvRect(
		point.x - (TPL_WIDTH / 2),
		point.y - (TPL_HEIGHT / 2),
		TPL_WIDTH,
		TPL_HEIGHT
	);

	return 1;
}
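A hypothetical call site, assuming `comp` and `num` again come from cvFindContours on a difference image, `frame` is the current frame, and `tpl` is a preallocated TPL_WIDTH x TPL_HEIGHT image (all four names are assumptions):

/* hypothetical call site: `comp`, `num`, `frame` and `tpl` are assumed */
CvRect eye;
if (is_eye_pair(comp, num, &eye)) {
    cvSetImageROI(frame, eye);    /* cut the eye template out of the frame */
    cvCopy(frame, tpl, NULL);
    cvResetImageROI(frame);
}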
Example #6
/*
 * Calculates the up-right bounding rectangle of a point set.
 * @overload bounding_rect
 * @return [CvRect] Bounding rectangle
 * @opencv_func cvBoundingRect
 */
VALUE
rb_bounding_rect(VALUE self)
{
  CvRect rect;
  try {
    rect = cvBoundingRect(CVCONTOUR(self), 1);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return cCvRect::new_object(rect);
}
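A note on the second argument that these examples pass to cvBoundingRect: per the OpenCV 1.x docs, when the sequence carries a CvContour header, update=0 returns the rectangle cached in the header's rect field (cvFindContours fills it in), while update=1 recomputes the rectangle and writes it back into that field; for a plain point sequence or matrix, update must be 0 and the rectangle is always computed. A small illustration:

/* illustration of the update flag (OpenCV 1.x C API); `contour` is
 * assumed to carry a CvContour header, e.g. from cvFindContours() */
CvRect cached = cvBoundingRect(contour, 0);        /* read the cached rect */
CvRect fresh  = cvBoundingRect(contour, 1);        /* recompute and re-cache */
CvRect field  = ((CvContour*)contour)->rect;       /* now equal to `fresh` */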
Example #7
void CamShiftPlugin::ProcessStatic
( int i, ImagePlus *img, ImagePlus *oimg, int *hsizes, CvTermCriteria criteria,
IplImage** &planes, CvHistogram* &hist, IplImage* &backproject, CvRect &orect, CvPoint &ocenter, CvRect &searchwin, CvMat* &rotation, CvMat* &shift, bool oready){
	if (hist && hist->mat.dim[0].size!=hsizes[0])
		cvReleaseHist(&hist);
	if( !hist )
        hist = cvCreateHist( 3, hsizes, CV_HIST_ARRAY, NULL, 0);
    if( !backproject )
		backproject = cvCreateImage( cvGetSize(img->orig), IPL_DEPTH_8U, 1 );
	if( !planes ){
	    planes = (IplImage**) malloc(3 * sizeof(IplImage*));
        for (int p=0; p<3; p++)
			planes[p] = cvCreateImage( cvGetSize(img->orig), 8, 1 );
	}
	if (!rotation)
		rotation = cvCreateMat(2,3,CV_32FC1);
	if (!shift)
		shift = cvCreateMat(2,1,CV_32FC1);

	if (!oready){
		// first call for this object: build the model histogram from
		// the object's bounding box in the old frame
		orect = cvBoundingRect(oimg->contourArray[i],1);
		cvCvtPixToPlane( oimg->orig, planes[0], planes[1], planes[2], 0 );
		for (int p=0; p<3; p++)
			cvSetImageROI(planes[p],orect);
		cvCalcHist( planes, hist, 0, NULL );
		cvNormalizeHist(hist, 255);
		for (int p=0; p<3; p++)
			cvResetImageROI(planes[p]);
		searchwin = orect; //cvRect(0,0,img->orig->width, img->orig->height);
		ocenter = cvPoint(orect.x+orect.width/2, orect.y+orect.height/2);
	}
	//The following checks shouldn't be needed.
	RestrictRect(searchwin, cvRect(0,0,backproject->width,backproject->height));

	// back-project the model histogram onto the new frame and run CamShift
	cvCvtPixToPlane( img->orig, planes[0], planes[1], planes[2], 0 );
	cvCalcBackProject( planes, backproject, hist );
	CvBox2D track_box;
	CvConnectedComp track_comp;
	cvCamShift( backproject, searchwin,
	            criteria,
	            &track_comp, &track_box );
	searchwin = track_comp.rect;
	// express the result as a shift plus a rotation and apply both
	// to the contour and its feature points
	cvmSet(shift,0,0,track_box.center.x - ocenter.x);
	cvmSet(shift,1,0,track_box.center.y - ocenter.y);
	cv2DRotationMatrix(track_box.center, track_box.angle, 1.0, rotation);
	cvTransform(oimg->contourArray[i],img->contourArray[i],rotation,shift);
	TransformFeatPoints(oimg->feats[i], img->feats[i], rotation, shift);
}
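A hypothetical per-frame driver for this routine, tracking a single object; everything except ProcessStatic itself is an assumption. The heavy state is allocated lazily inside the first call, and oready tells later calls to reuse the model histogram:

// hypothetical driver: next_frame() and the ImagePlus frames are assumed;
// all tracker state persists across iterations
ImagePlus *img = NULL, *oimg = NULL;
IplImage** planes = NULL; CvHistogram* hist = NULL; IplImage* backproject = NULL;
CvRect orect, searchwin; CvPoint ocenter;
CvMat *rotation = NULL, *shift = NULL;
int hsizes[3] = {16, 16, 16};
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1);
bool oready = false;
while (next_frame(img, oimg)) {
	CamShiftPlugin::ProcessStatic(0, img, oimg, hsizes, criteria, planes,
		hist, backproject, orect, ocenter, searchwin, rotation, shift, oready);
	oready = true;	// the model histogram is built once and then reused
}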
Example #8
ClassifierOutputData ShapeClassifier::ClassifyFrame(IplImage *frame) {
	cvZero(guessMask);
	if (!isTrained) return outputData;
    if(!frame) return outputData;

    IplImage *copy = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
    IplImage *grayscale = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    IplImage *newMask = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    cvZero(newMask);

    cvCvtColor(frame, grayscale, CV_BGR2GRAY);
    cvCanny(grayscale, grayscale, SHAPE_CANNY_EDGE_LINK, SHAPE_CANNY_EDGE_FIND, SHAPE_CANNY_APERTURE);
	cvDilate(grayscale, grayscale, 0, 2);

    cvCvtColor(grayscale, copy, CV_GRAY2BGR);

    CvSeq *frameContours;
    CvMemStorage *storage = cvCreateMemStorage(0);
    cvFindContours(grayscale, storage, &frameContours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);

    for (CvSeq *contour = frameContours; contour != NULL; contour = contour->h_next) {
        if ( contour->total > SHAPE_MIN_CONTOUR_POINTS) {
            int contourNum = 0;
            for (CvSeq *matchContour = templateContours; matchContour != NULL; matchContour = matchContour->h_next) {
//                double match_error = cvMatchShapes(contour, matchContour, CV_CONTOURS_MATCH_I1, 0);
                double match_error = pghMatchShapes(contour, matchContour);
				if (match_error < (0.75-threshold*.75)) {
                    cvDrawContours(copy, contour, colorSwatch[contourNum], CV_RGB(0,0,0), 0, 3, 8, cvPoint(0,0));
		            CvRect rect = cvBoundingRect(contour, 1);

                    // draw rectangle in mask image
                    cvRectangle(newMask, cvPoint(rect.x, rect.y), cvPoint(rect.x+rect.width, rect.y+rect.height), cvScalar(0xFF), CV_FILLED, 8);
                }
                contourNum = (contourNum+1) % COLOR_SWATCH_SIZE;
            }
        }
    }
    cvResize(copy, applyImage);
    IplToBitmap(applyImage, applyBitmap);

	// copy the final output mask
    cvResize(newMask, guessMask);

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&copy);
    cvReleaseImage(&grayscale);
	cvReleaseImage(&newMask);

	UpdateStandardOutputData();
	return outputData;
}
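The commented-out line above shows the stock alternative: cvMatchShapes compares contours by their Hu moments, whereas pghMatchShapes is project-specific (presumably pairwise-geometric-histogram based). The drop-in variant would be:

// stock OpenCV alternative to pghMatchShapes (Hu-moment comparison)
double match_error = cvMatchShapes(contour, matchContour, CV_CONTOURS_MATCH_I1, 0);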
Example #9
vector<CvRect> CImageProcess::CheckBounds(IplImage *img)
{
	CvMemStorage* storage = cvCreateMemStorage( 0 );
	CvSeq* contours = NULL;
	IplImage *imgTemp = cvCloneImage( img );	// work on a copy: cvFindContours modifies its input
	cvFindContours( imgTemp, storage, &contours, sizeof( CvContour ), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE );
	vector<CvRect> rect;
	for( ; contours != NULL; contours = contours->h_next )
	{
		CvRect _rect = cvBoundingRect( contours, 0 );
		rect.push_back(_rect);
	//	cvRectangle( img, cvPoint( _rect.x, _rect.y ), cvPoint( _rect.x + _rect.width, _rect.y + _rect.height ), cvScalar(0,0,0), 0 );
	}
	cvReleaseImage( &imgTemp );	// free the clone and the contour storage
	cvReleaseMemStorage( &storage );
	return rect;
}
CvRect basicOCR::findFirstChar(CvSeq* seq, int column)
{
	CvRect rcFirst = {0};
	int y = 0;	// top of the first row below `column`
	int x = 0;

	// pass 1: find the topmost bounding rect whose y lies below `column`
	for (CvSeq* c = seq; c != NULL; c = c->h_next)
	{
		CvRect rc = cvBoundingRect(c, 0);
		if (rc.y > column)
		{
			if (y == 0 || rc.y < y)
			{
				y = rc.y;
				x = rc.x;
			}
		}
	}

	// pass 2: among rects in that row, pick the leftmost one
	for (CvSeq* c = seq; c != NULL; c = c->h_next)
	{
		CvRect rc = cvBoundingRect(c, 0);
		if ((rc.y >= (y - rc.height / 2)) && (rc.y <= (y + rc.height / 2)))	// in the same row
		{
			if (rc.x <= x)
			{
				x = rc.x;	// leftmost so far
				rcFirst = rc;
			}
		}
	}

	return rcFirst;	// all-zero rect if nothing was found
}
Example #11
int
main (int argc, char **argv)
{
    CvMemStorage *storage = cvCreateMemStorage( 0 );
    CvSeq *points = cvCreateSeq( CV_SEQ_ELTYPE_POINT, sizeof( CvSeq ), sizeof( CvPoint ), storage );
    /* push addressable points rather than addresses of temporaries */
    CvPoint pts[] = { {100, 50}, {50, 100}, {50, 150}, {150, 50} };
    for ( int i = 0; i < 4; i++ )
        cvSeqPush( points, &pts[i] );

    CvRect rect = cvBoundingRect( points, 0 );
    printf( "x=%d y=%d width=%d height=%d\n", rect.x, rect.y, rect.width, rect.height );

    cvReleaseMemStorage( &storage );
    return 0;
}
// Compute various contour features
void GetContourFeature(CvSeq *Contour) {
    // area
    double Area = fabs(cvContourArea(Contour, CV_WHOLE_SEQ));
    // perimeter
    double Perimeter = cvArcLength(Contour);

    // circularity: 1.0 for a perfect circle, smaller for everything else
    double CircleLevel = 4.0 * CV_PI * Area / (Perimeter * Perimeter);

    // upright (non-rotated) bounding rectangle (Feret diameter)
    CvRect rect = cvBoundingRect(Contour);

    if(perimeter_max < Perimeter) {
        perimeter_max = Perimeter;
        max_perimeter_contor = Contour;
    }
}
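As a sanity check on the circularity measure: a perfect circle has A = pi*r^2 and P = 2*pi*r, so 4*pi*A/P^2 = 4*pi*pi*r^2 / (4*pi^2*r^2) = 1, and by the isoperimetric inequality every other shape scores strictly below 1.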
Example #13
/**
 * @internal
 * Extracts the relevant information of the found blobs
 * @note when this method is called, the found blobs should be stored in m_blobs member
 */
void BlobFinder::extractBlobsInformation()
{
  // Order blobs (from biggest to smallest) -> this way the most relevant are at the beginning
  std::sort( m_blobs.begin(), m_blobs.end(), std::greater< Blob >() );

  // Discard blobs (if there is more than the max)
  // TODO

  // To store contour moments
  CvMoments moment;

  // to read contour points
  CvSeqReader contourReader;
  CvPoint     contourNode;

  // Calculate information about contours
  for( Blobs::size_type i = 0; (i < m_blobs.size()) && (i < m_maxBlobs); ++i )
  {
      // Current blob
      Blob& blob = m_blobs[ i ];

      // Get bbox
      blob.bbox = cvBoundingRect( blob.contour );

      // Get center through moments
      cvMoments( blob.contour, &moment );
      blob.center.x = (float)(moment.m10 / moment.m00);
      blob.center.y = (float)(moment.m01 / moment.m00);

      // Invert Y coordinate because our Y 0 is at top of the image,
      // and for Opencv is at the bottom of the image
      //blob.center.Y = inImage.GetHeight() - blob.center.Y;

    // Store the contour nodes
    cvStartReadSeq( blob.contour, &contourReader );
    for( int j = 0; j < blob.contour->total; ++j )
    {
	    // Read node of the contour
	    CV_READ_SEQ_ELEM( contourNode, contourReader );
	    blob.nodes.push_back( Point( (float)contourNode.x, (float)contourNode.y , 0) );
    }
  }

	// Store number of actual blobs
	m_nBlobs = min( (int)m_blobs.size(), (int)m_maxBlobs );
}
Example #14
static COMMAND_FUNC( do_aspect_ratio )
{
	OpenCV_Seq *ocv_seq_p;
	ocv_seq_p=PICK_OCV_SEQ("sequence");
	if( ocv_seq_p == NO_OPENCV_SEQ ) return;
	if( ocv_seq_p->ocv_seq == NULL ) {
		sprintf(ERROR_STRING, "do_aspect_ratio: sequence is NULL.\n");
		//WARN(ERROR_STRING);
		return;
	}
	CvRect r = cvBoundingRect(ocv_seq_p->ocv_seq, 1);
	float aspectRatio = (float)(r.width)/(float)(r.height);
	char aspect[30];
	sprintf(aspect, "%f", aspectRatio);
	ASSIGN_VAR("contour_aspect", aspect);
	/*sprintf(ERROR_STRING,"Aspect ratio = %f", aspectRatio);
	WARN(ERROR_STRING);*/
}
void whu_MyHand::whu_GetFingerAngle(double &m_Angle)
{
	whu_Context.WaitNoneUpdateAll();
	int min_val;
	int FingerNum = 0;			// number of detected fingertips
	static int m_HandOpen = 0;
	static int m_handClosed = 0;
	memcpy(imgDepth16u->imageData, whu_DepthMD.Data(), 640*480*2);
	cvConvertScale(imgDepth16u, depthShow, 255/4096.0, 0);	// convert the depth map to a grayscale image
	min_val = min_front(depthShow);		// nearest (front-most) gray value
	get_gray_hand(depthShow, min_val);

	// smallest upright rectangle containing the hand contour
	CvRect rect = cvBoundingRect( contours, 0 );
	// fingertip test: compare the chord a-b against the segment through p
	// (stop 10 points early so i+10 stays inside the sequence)
	for(int i = 20; i + 10 < contours->total; i++)
	{
		CvPoint *p = CV_GET_SEQ_ELEM(CvPoint, contours, i);
		CvPoint *a = CV_GET_SEQ_ELEM(CvPoint, contours, i-10);
		CvPoint *b = CV_GET_SEQ_ELEM(CvPoint, contours, i+10);
		if((double)((a->x-b->x)*(a->x-b->x)+(a->y-b->y)*(a->y-b->y))/(double)((a->x-p->x)*(a->x-p->x)+(a->y-p->y)*(a->y-p->y)) < 0.9)
		{
			// the candidate must also be far enough from the hand's centre
			if((p->x-rect.x-(rect.width)/2)*(p->x-rect.x-(rect.width)/2)+(p->y-rect.y-(rect.height)/2)*(p->y-rect.y-(rect.height)/2) > rect.height*rect.height/10)
			{
				FingerNum++;
			}
		}
	}
	// open hand -> increase the angle, closed hand -> decrease it
	if (FingerNum > 5)
	{
		if (m_Angle < 150)
			m_Angle = m_Angle + 15;
	}
	else
	{
		if (m_Angle > 60)
			m_Angle = m_Angle - 15;
	}
}
Example #16
int main(int argc, char** argv)
{
    char *lotto_ticket_img_file = argv[1];

    IplImage* lotto_ticket_img = cvLoadImage(lotto_ticket_img_file, 0);

    // Process image
    IplImage* processed_img = cvCreateImage(cvGetSize(lotto_ticket_img), lotto_ticket_img->depth, 1);

    cvThreshold(lotto_ticket_img, processed_img, 100, 100, CV_THRESH_BINARY_INV);

    // Find contours
    CvSeq*        contours = 0;
    CvMemStorage* storage  = cvCreateMemStorage(0);
 
    cvFindContours(processed_img, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP);

    for(; contours != 0; contours = contours->h_next) {
        CvRect rect = cvBoundingRect(contours);

        CvPoint point1;
        CvPoint point2;

        point1.x = rect.x;
        point2.x = (rect.x + rect.width);
        point1.y = rect.y;
        point2.y = (rect.y + rect.height);

        cvRectangle(lotto_ticket_img, point1, point2, cvScalar(100), 1, 8, 0);
    }

    // Display processd image in a window
    cvNamedWindow(WINDOW_NAME);
    cvShowImage(WINDOW_NAME, lotto_ticket_img);

    cvWaitKey(0);

    cvReleaseImage(&lotto_ticket_img);
    cvReleaseImage(&processed_img);
    cvReleaseMemStorage(&storage);

    cvDestroyWindow(WINDOW_NAME);

    return 0;
}
Example #17
void moBlobFinderModule::applyFilter(IplImage *src) {
	this->clearBlobs();
	this->storage = cvCreateMemStorage(0);	// created once per frame, released below
	cvCopy(src, this->output_buffer);

	CvSeq *contours = 0;
	cvFindContours(this->output_buffer, this->storage, &contours, sizeof(CvContour), CV_RETR_CCOMP);

	cvDrawContours(this->output_buffer, contours, cvScalarAll(255), cvScalarAll(255), 100);

	// Consider each contour a blob and extract the blob infos from it.
	int size;
	double ratio;
	int min_size = this->property("min_size").asInteger();
	int max_size = this->property("max_size").asInteger();
	CvSeq *cur_cont = contours;
	while (cur_cont != 0) {
		CvRect rect = cvBoundingRect(cur_cont, 0);
		size = rect.width * rect.height;

		// Check the ratio to make sure the blob can physically represent
		// a finger; the magic number 6 is used for now as the maximum
		// length/thickness ratio of a finger.
		if (rect.width < rect.height) {
			ratio = rect.height / (double)rect.width;
		} else {
			ratio = rect.width / (double)rect.height;
		}
		if ((ratio <= 6) && (size >= min_size) && (size <= max_size)) {
			moDataGenericContainer *blob = new moDataGenericContainer();
			blob->properties["implements"] = new moProperty("pos,size");
			blob->properties["x"] = new moProperty((rect.x + rect.width / 2) / (double) src->width);
			blob->properties["y"] = new moProperty((rect.y + rect.height / 2) / (double) src->height);
			blob->properties["width"] = new moProperty(rect.width);
			blob->properties["height"] = new moProperty(rect.height);
			this->blobs->push_back(blob);
			cvRectangle(this->output_buffer, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height), cvScalar(250,10,10), 1);
		}
		cur_cont = cur_cont->h_next;
	}
	cvReleaseMemStorage(&this->storage);
	this->output_data->push(this->blobs);
}
Example #18
int* getAvailablePlaces(IplImage *image){
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = 0;
	cvFindContours(image, storage, &contours);
	CvRect rect;
	double s = 0;
	double p = 0;
	int x;
	int y;
	static int arr[6];	// static: the caller receives a pointer to this array
	for(int i = 0; i < 6; i++)
		arr[i] = 0;
	for(CvSeq *seq = contours; seq != 0; seq = seq->h_next){
		s = cvContourArea(seq);
		p = cvArcLength(seq);
		// p*p/s is about 4*pi for a circle; also require a minimum area
		if(fabs(p*p/s) < 4*3.14 + 2 && s > 90){
			rect = cvBoundingRect(seq);
			x = rect.x + rect.width/2;
			y = rect.y + rect.height/2;
			// map the circle's centre onto the 2x3 grid of places
			if(30 < y && y < 50){
				if(150 < x && x < 180){
					arr[1] = 1;
				}
				if(200 < x && x < 220){
					arr[3] = 1;
				}
				if(240 < x && x < 270){
					arr[5] = 1;
				}
			}
			if(190 < y && y < 210){
				if(150 < x && x < 180){
					arr[0] = 1;
				}
				if(200 < x && x < 220){
					arr[2] = 1;
				}
				if(240 < x && x < 270){
					arr[4] = 1;
				}
			}
		}
	}
	cvReleaseMemStorage(&storage);
	return arr;
}
Example #19
/* Crop the region bounded by the contour's bounding box out of in_img */
IplImage* setImgROI_v(const IplImage* in_img, const CvSeq* contour)
{
	CvRect CvRectgl = cvBoundingRect((CvSeq*)contour);
	//create a temporary image the size of the bounding box
	IplImage* img_t = cvCreateImage(cvSize(CvRectgl.width, CvRectgl.height), in_img->depth, in_img->nChannels);
	cvSetImageROI((IplImage*)in_img, CvRectgl);
	cvCopy(in_img, img_t, NULL);
	cvResetImageROI((IplImage*)in_img);

	return img_t;	//caller is responsible for releasing the returned image
}
Example #20
int PlateFinder::CountCharacter(IplImage *plate) {
	int cnt = 0;
	IplImage *resizeImg, *binaryImg;

	resizeImg = cvCreateImage (cvSize(408, 70), IPL_DEPTH_8U, 3);
	binaryImg = cvCreateImage (cvSize(408, 70), IPL_DEPTH_8U,  1);

	cvResize(plate, resizeImg);
	cvCvtColor(resizeImg, binaryImg, CV_RGB2GRAY);
	cvAdaptiveThreshold(binaryImg, binaryImg, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 13, 2);

	//cvShowImage("binaryImg", binaryImg);

	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = 0;	// filled in by cvFindContours; no need to pre-create a sequence
	cvFindContours(binaryImg, storage, &contours);

	//cvShowImage("contours", binaryImg);

	while (contours) {
		CvRect rect = cvBoundingRect(contours);

		if (rect.width > 15 && rect.width < 50
			&& rect.height > 40 && rect.height < 65
			&& rect.width * rect.height > 1000) 
		{
			cvRectangle (resizeImg, cvPoint(rect.x, rect.y), 
				cvPoint(rect.x + rect.width, rect.y + rect.height), GREEN, 2);

			cnt++;
		}
		contours = contours->h_next;
	}

	//cvShowImage("resizeImg", resizeImg);

	cvReleaseImage(&resizeImg);
	cvReleaseImage(&binaryImg);
	cvReleaseMemStorage(&storage);

	return cnt;
}
Example #21
void ShapeClassifier::UpdateContourImage() {
    cvZero(filterImage);

	// first, determine how many template contours we need to draw by counting the length of the sequence
	int numContours = 0;
    for (CvSeq *contour = templateContours; contour != NULL; contour = contour->h_next) {
		 numContours++;
	}
	if (numContours > 0) {

		int gridSize = (int) ceil(sqrt((double)numContours));
		int gridX = 0;
		int gridY = 0;
		int gridSampleW = FILTERIMAGE_WIDTH / gridSize;
		int gridSampleH = FILTERIMAGE_HEIGHT / gridSize;
		int contourNum = 0;
		for (CvSeq *contour = templateContours; contour != NULL; contour = contour->h_next) {

			cvSetImageROI(filterImage, cvRect(gridX*gridSampleW, gridY*gridSampleH, gridSampleW, gridSampleH));

			CvRect bounds = cvBoundingRect(contour, 1);
			int contourSize = max(bounds.width, bounds.height);
			IplImage *contourImg = cvCreateImage(cvSize(contourSize, contourSize), filterImage->depth, filterImage->nChannels);

			cvZero(contourImg);
			cvDrawContours(contourImg, contour, colorSwatch[contourNum], CV_RGB(255,255,255), 0, 2, CV_AA, cvPoint(-bounds.x, -bounds.y));
			cvResize(contourImg, filterImage);
			cvReleaseImage(&contourImg);
			cvResetImageROI(filterImage);

			contourNum = (contourNum+1) % COLOR_SWATCH_SIZE;
			gridX++;
			if (gridX >= gridSize) {
				gridX = 0;
				gridY++;
			}
		}
	}
	
    IplToBitmap(filterImage, filterBitmap);
}
Example #22
void searchForMovement(IplImage *thresholdImage, IplImage *cameraFeed)
{
	bool objectDetected = false;
	CvSeq *contours = NULL;
	CvMemStorage *storage = cvCreateMemStorage(0);
	// work on a copy so the caller's threshold image is not clobbered
	IplImage* temp = cvCreateImage(cvGetSize(thresholdImage), thresholdImage->depth, thresholdImage->nChannels);
	cvCopy(thresholdImage, temp, NULL);
	cvFindContours(temp, storage, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);	// retrieves external contours

	objectDetected = (contours != NULL);

	if (objectDetected)
	{
		objectBoundingRectangle = cvBoundingRect(contours);	// bounding rectangle of the first contour
		//contourTmpMin = cvMinAreaRect2(pContour, 0);	// minimum-area enclosing rectangle
		int xpos = objectBoundingRectangle.x + objectBoundingRectangle.width / 2;
		int ypos = objectBoundingRectangle.y + objectBoundingRectangle.height / 2;
		theObject[0] = xpos, theObject[1] = ypos;
		int x = theObject[0];
		int y = theObject[1];

		// draw crosshairs on the object's centre
		cvCircle(cameraFeed, cvPoint(x, y), 10, cvScalar(0, 255, 0), 2);
		cvLine(cameraFeed, cvPoint(x, y), cvPoint(x, y - 25), cvScalar(0, 255, 0), 2);
		cvLine(cameraFeed, cvPoint(x, y), cvPoint(x, y + 25), cvScalar(0, 255, 0), 2);
		cvLine(cameraFeed, cvPoint(x, y), cvPoint(x - 25, y), cvScalar(0, 255, 0), 2);
		cvLine(cameraFeed, cvPoint(x, y), cvPoint(x + 25, y), cvScalar(0, 255, 0), 2);
		char showText[30];
		sprintf(showText, "X:%d, Y:%d", x, y);
		cvPutText(cameraFeed, showText, cvPoint(x, y + 30), &font, cvScalar(255, 0, 0));
	}
	cvReleaseImage(&temp);
	cvReleaseMemStorage(&storage);
}
CvRect basicOCR::findPrintRect(CvSeq* seq, int x, int y, CvRect rcFirst)
{
	CvRect rcPrint = {0};

	//printf("\n>>>Testing in basicOCR::findPrintRect<<<\n");
	//printf("x = %d, y = %d\n\n", x, y);
	for (CvSeq* c = seq; c != NULL; c = c->h_next)
	{
		CvRect rc = cvBoundingRect(c,0);
		if ((rc.y >= (y - rcFirst.height)) && (rc.y <= (y + rcFirst.height)))	// in the same row
		{
			//printf("rc.x = %d, rc.y = %d\n", rc.x, rc.y);
			// pick the leftmost rect at or to the right of x
			if (rc.x >= x && (rcPrint.x == 0 || rc.x <= rcPrint.x))
				rcPrint = rc;
		}
	}

	return rcPrint;
}
void BlobDetectionEngine::findBlobs(IplImage *grayImg, bool drawBlobs)
{
	cvFindContours(grayImg, mem, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
	int i = 0;
	for (ptr = contours; ptr != NULL; ptr = ptr->h_next) {
		//Filter small contours
		CvRect rect = cvBoundingRect(ptr);
		if (  rect.height * rect.width < minAreaFilter ){
			continue;
		}
		filtCont[i] = ptr;

		//CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
		CvScalar color = CV_RGB( 255, 255, 255 );
		cvDrawContours(visualImg, ptr, color, CV_RGB(0,0,0), 0, CV_FILLED, 8, cvPoint(0,0));
		cvRectangle(visualImg, cvPoint(rect.x +3, rect.y +3), cvPoint(rect.x + rect.width, rect.y + rect.height), color, 1);
		//sprintf(text, "B%d [%d,%d]", i, rect.x, rect.y);
		sprintf(text, "Blob %d", i);
		//cvPutText(visualImg, text, cvPoint(rect.x, rect.y), &font, color); 
		i++;
	}
	numOfFiltCont = i;
}
/* Detect marker regions */
int DisplayDetector::detectMarkerRegions(IplImage *srcBinarizedImageGRAY, MarkerRegion *dstRegions, int maxRegions, float maxSizeThreshold, float minSizeThreshold){

	CvSeq *contour = 0;
	int regionCounter = 0;

	CvMemStorage *storage = cvCreateMemStorage(0);

	cvFindContours( srcBinarizedImageGRAY, storage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );		// retrieve outer contours only

	if( contour != 0 ){

		/* check the candidate region list */
		while ( contour && regionCounter < maxRegions ){

			double regionSize = fabs( cvContourArea(contour, CV_WHOLE_SEQ) );

			if( regionSize < minSizeThreshold || regionSize > maxSizeThreshold ){
				contour = contour->h_next;
				continue;
			}

			dstRegions[regionCounter].size = (float)regionSize;
			dstRegions[regionCounter].boundingBox = cvBoundingRect(contour, 0);
			dstRegions[regionCounter].centroid.x = dstRegions[regionCounter].boundingBox.x + dstRegions[regionCounter].boundingBox.width / 2.0f;			// use the bounding-box centre as a simple centroid
			dstRegions[regionCounter].centroid.y = dstRegions[regionCounter].boundingBox.y + dstRegions[regionCounter].boundingBox.height / 2.0f;
			dstRegions[regionCounter].distanceFromOrigin = sqrtf(dstRegions[regionCounter].centroid.x * dstRegions[regionCounter].centroid.x 
				+ dstRegions[regionCounter].centroid.y * dstRegions[regionCounter].centroid.y);

			regionCounter++;
			contour = contour->h_next;
		} // while
	}

	cvReleaseMemStorage( &storage );

	return regionCounter;
} // detectMarkerRegions
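A hypothetical call site (binImg, the detector instance, and the thresholds are assumptions; note that the maximum size precedes the minimum in the parameter list):

/* hypothetical usage: keep regions between 50 and 5000 px^2 */
MarkerRegion regions[32];
int n = detector.detectMarkerRegions(binImg, regions, 32, 5000.0f, 50.0f);
for (int k = 0; k < n; k++)
    printf("region %d: size=%.0f centre=(%.1f, %.1f)\n",
           k, regions[k].size, regions[k].centroid.x, regions[k].centroid.y);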
Example #26
void EyeCorners::detectEyeCorners(IplImage* img){

	CvSeq* polygon;
	CvMemStorage* storage = cvCreateMemStorage(0);	// storage for both the contours and the polygon

	// edge image comes from the Canny detector (single-channel)
	CannyEdge cannyEdge;
	IplImage* edge = cannyEdge.detectEdge(img);

	CvSeq* first_contour = NULL;
	if(edge){
		int Nc = cvFindContours(edge, storage, &first_contour, sizeof(CvContour), CV_RETR_LIST);
		cout<<"Total contours detected:"<<Nc<<endl;
		cvShowImage("Canny", edge);
	}
	if(first_contour == NULL){	// no contours: nothing to mark
		cvReleaseMemStorage(&storage);
		return;
	}
	cout<<"Length="<<cvContourPerimeter(first_contour)<<endl;

	CvRect eyeRect = cvBoundingRect(first_contour, 1);
	cvRectangle(img, cvPoint(eyeRect.x, eyeRect.y), cvPoint(eyeRect.x+eyeRect.width, eyeRect.y+eyeRect.height), cvScalar(255,255,0), 1);
	polygon = cvApproxPoly(first_contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, 100);

	if(polygon){
		cout<<"draw polygon pts:"<<polygon->total<<endl;
		for( int i = 0; i < polygon->total; i++ ){
			// the approximated polygon stores integer CvPoint elements
			CvPoint* p = (CvPoint*)cvGetSeqElem( polygon, i );
			cvCircle( img, *p, 2, CV_RGB(255,0,0), 2, 8, 0 );
		}
	}
	cvReleaseMemStorage(&storage);
}
Example #27
//--------------------------------------------------------------------------------
int ContourFinder::findContours( ofxCvGrayscaleImage&  input,
									  int minArea,
									  int maxArea,
									  int nConsidered,
									  bool bFindHoles,
                                      bool bUseApproximation) {
	reset();

	// opencv will clobber the image it detects contours on, so we
	// copy it into a working image before detecting contours. That copy
	// is allocated if necessary (necessary = (a) not allocated, or
	// (b) the wrong size), so be careful if you pass different sized
	// images to "findContours": there is a performance penalty, but we
	// believe there is no memory leak to worry about. It is better to
	// create multiple ContourFinder objects for different sizes, e.g.
	// if you are finding contours in both a 640x480 and a 320x240
	// image, make two ContourFinder objects rather than reusing one,
	// because you will be penalized less.

	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else {
		if( inputCopy.width == input.width && inputCopy.height == input.height ) {
			inputCopy = input;
		} else {
			// we are allocated, but to the wrong size --
			// this has been checked for memory leaks, but a warning:
			// be careful if you call this function with a lot of
			// different sized "input" images! it allocates every
			// time a new size is passed in...
			//inputCopy.clear();
			inputCopy.allocate( input.width, input.height );
			inputCopy = input;
		}
	}

	CvSeq* contour_list = NULL;

	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	CvContourRetrievalMode  retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
    teste = inputCopy.getCvImage();

	cvFindContours( teste, contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// put the contours from the linked list, into an array for sorting
	while( (contour_ptr != NULL) ) {
		float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
		if( (area > minArea) && (area < maxArea) ) {
			if (nCvSeqsFound < TOUCH_MAX_CONTOUR_LENGTH) {
				cvSeqBlobs[nCvSeqsFound] = contour_ptr;	 // copy the pointer
				nCvSeqsFound++;
			}
		}
		contour_ptr = contour_ptr->h_next;
	}

	// sort the pointers based on size
	if( nCvSeqsFound > 0 ) {
		qsort( cvSeqBlobs, nCvSeqsFound, sizeof(CvSeq*), qsort_carea_compare);
	}

	// now, we have nCvSeqsFound contours, sorted by size in the array
    // cvSeqBlobs let's get the data out and into our structures that we like
	for( int i = 0; i < MIN(nConsidered, nCvSeqsFound); i++ ) {
		blobs.push_back( Blob() );
		float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );

		cvMoments( cvSeqBlobs[i], myMoments );

		// this is if using non-angle bounding box
		CvRect rect	= cvBoundingRect( cvSeqBlobs[i], 0 );
		blobs[i].boundingRect.x      = rect.x;
		blobs[i].boundingRect.y      = rect.y;
		blobs[i].boundingRect.width  = rect.width;
		blobs[i].boundingRect.height = rect.height;

        cvCamShift(teste, rect, cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ), &track_comp, &track_box);

		// this is for using angle bounding box
		CvBox2D32f box;
		box = cvMinAreaRect2( cvSeqBlobs[i] );

		blobs[i].angleBoundingRect.x	  = box.center.x;
		blobs[i].angleBoundingRect.y	  = box.center.y;
		blobs[i].angleBoundingRect.width  = box.size.height;
		blobs[i].angleBoundingRect.height = box.size.width;
		blobs[i].angle = box.angle;

		// assign other parameters
		blobs[i].area                = fabs(area);
		blobs[i].hole                = area < 0 ? true : false;
		blobs[i].length 			 = cvArcLength(cvSeqBlobs[i]);
		blobs[i].centroid.x			 = (int) (myMoments->m10 / myMoments->m00);
		blobs[i].centroid.y 		 = (int) (myMoments->m01 / myMoments->m00);
		blobs[i].lastCentroid.x 	 = (int) 0;
		blobs[i].lastCentroid.y 	 = (int) 0;

		// get the points for the blob:
		CvPoint           pt;
		CvSeqReader       reader;
		cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );

    	for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, cvSeqBlobs[i]->total); j++ ) {
			CV_READ_SEQ_ELEM( pt, reader );
            blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
		}
		blobs[i].nPts = blobs[i].pts.size();

	}

    nBlobs = blobs.size();

	// Free the storage memory.
	// Warning: do this inside this function otherwise a strange memory leak
	if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
	if( storage != NULL ) { cvReleaseMemStorage(&storage); }

	return nBlobs;
}
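Following the allocation caveat in the comments above, a minimal sketch of the recommended pattern (image names and thresholds are assumptions):

// one ContourFinder per image size, so neither ever reallocates
ContourFinder finderBig, finderSmall;
int nBig   = finderBig.findContours(grayBig,     20, (640*480)/3, 10, false, true);
int nSmall = finderSmall.findContours(graySmall, 20, (320*240)/3, 10, false, true);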
Example #28
int opticaltri( CvMat * &clean_texture, int verts )
{
	char * im1fname = "conhull-dirty-thresh.jpg";
	char * im2fname = "conhull-clean-thresh.jpg";

	int count = MAX_COUNT;
	char * status;
	
	CvPoint2D32f * source_points;
	CvPoint2D32f * dest_points;
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	// count = opticalflow( im1fname, im2fname, source_points, dest_points, status ); 
	count = findsiftpoints( "conhull-dirty.jpg", "conhull-clean.jpg", source_points, dest_points, status ); 

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);

	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 50;
	int offset = 200;	

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) );
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",(int)point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;


	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total*2; i++ ) {
		if((i == 0) || (i == total)) {
			cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );
		}
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				if(i < total)
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
				else
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_RIGHT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;
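				/* Barycentric test used below: with B = [[x0 x1 x2],
				   [y0 y1 y2], [1 1 1]], the product B^-1 * (x, y, 1)^T
				   gives the barycentric coordinates (l0, l1, l2) of the
				   point (x, y); it lies inside the triangle iff all
				   three are positive and l0 + l1 + l2 = 1, which is
				   exactly what the MIN_VAL / sum checks below verify. */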

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", (int)trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,(int)trivec.size());
		int is_corner = 0;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height - 1)) ||(curtri.points[j] == cvPoint(image1->width - 1,0)) ||(curtri.points[j] == cvPoint(image1->width - 1,image1->height - 1))) {
				is_corner++;
			}
			

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(is_corner < 3) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];

					/*
					if((i == 3019) && (y == 1329) && (x > 2505) && (x < 2584)) {
						printf("Range %d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
								sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
					}
					*/

					if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ )
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);
						
						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );	
					
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						cvReleaseMat( &sourcepoint );
					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			
			for(int k = 0; k < verts; k++) {
				double x = clean_texture->data.fl[2*k+0];
				double y = clean_texture->data.fl[2*k+1];
				
				// check to see if we're inside this triangle
				CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
				CvMat * result = cvCreateMat(3, 1, CV_32FC1);
				curp->data.fl[0] = x;
				curp->data.fl[1] = y;
				curp->data.fl[2] = 1;
				cvMatMul( baryinvvec[i], curp, result );
			
				if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					
					CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
					cvMatMul( newcorners, result, sourcepoint );	
				
					double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
					double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
					if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
						clean_texture->data.fl[2*k+0] = sourcex;
						clean_texture->data.fl[2*k+1] = sourcey;
						// cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
					}

					cvReleaseMat( &sourcepoint );
				}
				cvReleaseMat( &result );
				cvReleaseMat( &curp );
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}

	cvReleaseImage( &clean_nonthresh );

	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	// draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
std::list<utils::Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model){
	std::list<utils::Garbage*>::iterator it;
	for ( it=garbages.begin() ; it != garbages.end() ; it++ )
		delete *it;
	garbages.clear();

	//cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
	//object model

	//image for the histogram-based filter
	//could be a parameter

	utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	//~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
	//~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);



	//gets a frame for setting  image size
	//CvSize srcSize = cvSize(frameWidth,frameHeight);
	CvSize srcSize = cvGetSize(src);

	//images for HSV conversion
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );



	//Image for thresholding
	IplImage * threshImage=cvCreateImage(srcSize,8,1);

	//image for equalization
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

	//image for Morphing operations(Dilate-erode)
	IplImage * morphImage=cvCreateImage(srcSize,8,1);

	//image for image smoothing
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);

	//image for contour-finding operations
	IplImage * contourImage=cvCreateImage(srcSize,8,3);

	int frameCounter=1;
	int cont_index=0;

	//convolution kernel for morph operations
	IplConvKernel* element;

	CvRect boundingRect;

	//contours
	CvSeq * contours;

	//process this frame
	frameCounter++;

	//convert image to hsv
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

	//equalize Saturation Channel image
	cvEqualizeHist(s_plane,equalizedImage);

	//threshold the equalized Saturation channel image
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);

	//apply morphologic operations
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);

	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);

	//apply smooth gaussian-filter
	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);

	//get all contours
	contours = myFindContours(smoothImage);

	cont_index=0;
	cvCopy(src,contourImage,0);
	


	while(contours!=NULL){
		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct = new Contours(aContour);

	
	    int	pf = ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER);

		int raf = ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO);

		// int af = ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA);
		int baf = ct->boxAreaFilter(BOXFILTER_TOLERANCE);

        int hmf = ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN);


		//apply filters

		if( pf && raf && baf && hmf	){

				//if passed filters
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);
				
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
				//build garbage List
			
				//printf(" c %d,%d\n",boundingRect.x,boundingRect.y);

				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);



				utils::Garbage * aGarbage = new utils::Garbage(r);
//				printf("%d , %d - %d , %d\n",boundingRect.x,boundingRect.y,boundingRect.width,boundingRect.height);

				garbages.push_back(aGarbage);


			}

		delete ct;
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}

   // cvShowImage("output",contourImage);
   // cvWaitKey(0);
	delete h;

	cvReleaseHist(&testImageHistogram);
	cvReleaseStructuringElement(&element);
	cvReleaseImage(&threshImage);
	cvReleaseImage(&equalizedImage);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&smoothImage);
	cvReleaseImage(&contourImage);

	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);


	return garbages;
}
bool findBlueNYelContour(IplImage* img, CvMemStorage* &storage, CvPoint &centre, int color){  //color: blue==0, yellow==1
	CvSeq* contours;
	IplImage* timg = cvCloneImage( img ); // make a copy of input image
	IplImage* gray = cvCreateImage( cvGetSize(timg), 8, 1 );
	CvSeq* result;

	cvNamedWindow("rgbContour",0);

	IplImage* hsv = cvCreateImage( cvGetSize(timg), 8, 3 );   
	cvSmooth(hsv,hsv,2,3);
	if(color==0){
		findLP_HSV_BLUE(timg,hsv);
		cvNamedWindow("hsv_license_blue",0);
	}
	else {
		findLP_HSV_YEL(timg,hsv);
			cvNamedWindow("hsv_license_yel",0);
	}
	//	

	cvNamedWindow("before_erode",0);
	cvShowImage("before_erode",hsv);
	cvErode(hsv,hsv,0,1);
	cvNamedWindow("after_erode",0);
	cvShowImage("after_erode",hsv);
	cvDilate(hsv,hsv,0,4);
	cvNamedWindow("after_dilate",0);
	cvShowImage("after_dilate",hsv);
	cvCvtColor(hsv,hsv,CV_HSV2RGB);
	

	cvCvtColor(hsv,gray,CV_RGB2GRAY);
	cvThreshold(gray,gray,100,255,0);
	CvContourScanner scanner = NULL;
	scanner = cvStartFindContours(gray,storage,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
	//ImagePreprocess::contourFinder(gray,0,hsv_blue,4000,10000);
	// find contours and store them all as a list  
/*	cvFindContours( gray, storage, &contours, sizeof(CvContour),  
		CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );  */
	// test each contour  
	int t=0;
	while ((contours = cvFindNextContour(scanner)) != NULL)
	{
		// approximate contour with accuracy proportional  
		// to the contour perimeter  
		result = cvApproxPoly( contours, sizeof(CvContour), storage,  
			CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.04, 0 );  
		double tempArea = fabs(cvContourArea(result,CV_WHOLE_SEQ));
		double peri=cvContourPerimeter(result);
		CvRect rct=cvBoundingRect(result,1);
		// square contours should have 4 vertices after approximation  
		// relatively large area (to filter out noisy contours)  
		// and be convex.  
		// Note: absolute value of an area is used because  
		// area may be positive or negative - in accordance with the  
		// contour orientation  
		if(tempArea<3500 || tempArea>10000 ||
			result->total < 4 || result->total > 10 ||
			peri<340 || peri>500
			|| rct.width/(1.0*rct.height)>3.85 || rct.width/(1.0*rct.height)<2.47 || rct.width<135 || rct.width>175
			){
			cvSubstituteContour(scanner, NULL);	// reject: not a plate candidate
		}
		else{  
			
	//	cout<<"height: "<<rct.height<<" width: "<<rct.width<<" rate: "<<rct.width/(rct.height*1.0)<<endl;
	//			cout<<"edge num: "<<result->total<<endl;
	//			cout<<"area : "<<fabs(cvContourArea(result,CV_WHOLE_SEQ))<<endl;
	//			cout<<"peri : "<<cvContourPerimeter(result)<<endl;
				CvScalar color = CV_RGB( rand()&255, rand()&255, rand()&255 );
	//			cvDrawContours( timg, result, color, color, -1, 3, 8 );
	//			cvDrawContours( hsv, result, color, color, -1, 3, 8 );
				t++;
				// centroid from the polygon's spatial moments
				CvMoments moments;
				cvMoments( result, &moments, 0 );
				int xc = moments.m10/moments.m00, yc = moments.m01/moments.m00;
				//		double angle3=atan(2*moments.mu11/(moments.mu20-moments.mu02))/2;
		//		cout<<"long: "<<longAxis<<"short: "<<shortAxis<<endl;
				centre=cvPoint(xc,yc);
	//			cvCircle( hsv, centre, 3, color, 3, 8, 0 );
	//			cvCircle( timg, centre, 3, color, 3, 8, 0 );
		}
		// take the next contour  
//		contours = contours->h_next;  			
	}
	result = cvEndFindContours(&scanner);
	cvShowImage("rgbContour",timg);
	if(color==0)
		cvShowImage("hsv_license_blue",hsv);
	else
		cvShowImage("hsv_license_yel",hsv);
	cvReleaseImage( &timg );  
	cvReleaseImage( &hsv );  
	cvReleaseImage( &gray );  
	return t != 0;
}