Code example #1
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{   /* Create contours: */
    IplImage*       pIB = NULL;
    CvSeq*          cnt = NULL;
    CvSeq*          cnt_list = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSeq*), storage );
    CvSeq*          clasters = NULL;
    int             claster_cur, claster_num;

    pIB = cvCloneImage(pFG);
    cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
    cvFindContours(pIB,storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL);
    cvReleaseImage(&pIB);

    /* Create cnt_list.      */
    /* Process each contour: */
    for(; cnt; cnt=cnt->h_next)
    {
        cvSeqPush( cnt_list, &cnt);
    }

    claster_num = cvSeqPartition( cnt_list, storage, &clasters, CompareContour, NULL );

    for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
    {
        int         cnt_cur;
        CvBlob      NewBlob;
        double      M00,X,Y,XX,YY; /* image moments */
        CvMoments   m;
        CvRect      rect_res = cvRect(-1,-1,-1,-1);
        CvMat       mat;

        for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
        {
            CvRect  rect;
            CvSeq*  cnt;
            int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
            if(k!=claster_cur) continue;
            cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
            rect = ((CvContour*)cnt)->rect;

            if(rect_res.height<0)
            {
                rect_res = rect;
            }
            else
            {   /* Unite rects: */
                int x0,x1,y0,y1;
                x0 = MIN(rect_res.x,rect.x);
                y0 = MIN(rect_res.y,rect.y);
                x1 = MAX(rect_res.x+rect_res.width,rect.x+rect.width);
                y1 = MAX(rect_res.y+rect_res.height,rect.y+rect.height);
                rect_res.x = x0;
                rect_res.y = y0;
                rect_res.width = x1-x0;
                rect_res.height = y1-y0;
            }
        }

        if(rect_res.height < 1 || rect_res.width < 1)
        {
            X = 0;
            Y = 0;
            XX = 0;
            YY = 0;
        }
        else
        {
            cvMoments( cvGetSubRect(pFG,&mat,rect_res), &m, 0 );
            M00 = cvGetSpatialMoment( &m, 0, 0 );
            if(M00 <= 0 ) continue;
            X = cvGetSpatialMoment( &m, 1, 0 )/M00;
            Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
            XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
            YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
        }
        NewBlob = cvBlob(rect_res.x+(float)X,rect_res.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
        pBlobs->AddBlob(&NewBlob);

    }   /* Next cluster. */

    #if 0
    {   // Debug info:
        IplImage* pI = cvCreateImage(cvSize(pFG->width,pFG->height),IPL_DEPTH_8U,3);
        cvZero(pI);
        for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
        {
            int         cnt_cur;
            CvScalar    color = CV_RGB(rand()%256,rand()%256,rand()%256);

            for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
            {
                CvSeq*  cnt;
                int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
                if(k!=claster_cur) continue;
                cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
                cvDrawContours( pI, cnt, color, color, 0, 1, 8);
            }

            CvBlob* pB = pBlobs->GetBlob(claster_cur);
            int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB));
            cvEllipse( pI,
                cvPointFrom32f(CV_BLOB_CENTER(pB)),
                cvSize(MAX(1,x), MAX(1,y)),
                0, 0, 360,
                color, 1 );
        }

        cvNamedWindow( "Clusters", 0);
        cvShowImage( "Clusters",pI );

        cvReleaseImage(&pI);

    }   /* Debug info. */
    #endif

}   /* cvFindBlobsByCCClasters */
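The clustering call above relies on a CompareContour predicate that this listing does not show. As a rough illustration only, here is a minimal sketch assuming contours are grouped by bounding-box proximity; the margin value and the rule itself are hypothetical, not the project's actual definition. cvSeqPartition passes pointers to the stored elements (CvSeq** here) and expects a nonzero return when two elements belong to the same cluster.

/* Hypothetical CompareContour sketch; the real predicate is defined
   elsewhere in the project. */
static int CompareContour(const void* a, const void* b, void* userdata)
{
    CvRect ra = ((CvContour*)(*(CvSeq**)a))->rect;
    CvRect rb = ((CvContour*)(*(CvSeq**)b))->rect;
    const int margin = 10; /* hypothetical proximity margin, in pixels */
    /* cluster two contours when their expanded bounding boxes overlap */
    return !(ra.x > rb.x + rb.width  + margin ||
             rb.x > ra.x + ra.width  + margin ||
             ra.y > rb.y + rb.height + margin ||
             rb.y > ra.y + ra.height + margin);
}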
Code example #2
File: vecfacetracking.cpp  Project: 353/viewercv
void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, int dMinSize) {
    CvSeq* seq;
    CvRect roi = m_rROI;
    Extend(roi, 1);
    cvSetImageROI(img, roi);
    cvSetImageROI(thresh, roi);
    // layers
    int colors[MAX_LAYERS] = {0};
    int iMinLevel = 0, iMaxLevel = 255;
    float step, power;
    ThresholdingParam(img, nLayers / 2, iMinLevel, iMaxLevel, step, power, 4);
    int iMinLevelPrev = iMinLevel;
    int iMaxLevelPrev = iMinLevel;
    if (m_trPrev.iColor != 0) {
        iMinLevelPrev = m_trPrev.iColor - nLayers / 2;
        iMaxLevelPrev = m_trPrev.iColor + nLayers / 2;
    }
    if (iMinLevelPrev < iMinLevel) {
        iMaxLevelPrev += iMinLevel - iMinLevelPrev;
        iMinLevelPrev = iMinLevel;
    }
    if (iMaxLevelPrev > iMaxLevel) {
        iMinLevelPrev -= iMaxLevelPrev - iMaxLevel;
        if (iMinLevelPrev < iMinLevel) {
            iMinLevelPrev = iMinLevel;
        }
        iMaxLevelPrev = iMaxLevel;
    }
    int n = nLayers;
    n -= (iMaxLevelPrev - iMinLevelPrev + 1) / 2;
    step = float(iMinLevelPrev - iMinLevel + iMaxLevel - iMaxLevelPrev) / float(n);
    int j = 0;
    float level;
    for (level = (float)iMinLevel; level < iMinLevelPrev && j < nLayers; level += step, j++) {
        colors[j] = int(level + 0.5);
    }
    for (level = (float)iMinLevelPrev; level < iMaxLevelPrev && j < nLayers; level += 2.0, j++) {
        colors[j] = int(level + 0.5);
    }
    for (level = (float)iMaxLevelPrev; level < iMaxLevel && j < nLayers; level += step, j++) {
        colors[j] = int(level + 0.5);
    }
    //
    for (int i = 0; i < nLayers; i++) {
        cvThreshold(img, thresh, colors[i], 255.0, CV_THRESH_BINARY);
        if (cvFindContours(thresh, m_mstgRects, &seq, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE)) {
            CvTrackingRect cr;
            for (CvSeq* external = seq; external; external = external->h_next) {
                cr.r = cvContourBoundingRect(external);
                Move(cr.r, roi.x, roi.y);
                if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize) {
                    cr.ptCenter = Center(cr.r);
                    cr.iColor = colors[i];
                    cvSeqPush(m_seqRects, &cr);
                }
                for (CvSeq* internal = external->v_next; internal; internal = internal->h_next) {
                    cr.r = cvContourBoundingRect(internal);
                    Move(cr.r, roi.x, roi.y);
                    if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize) {
                        cr.ptCenter = Center(cr.r);
                        cr.iColor = colors[i];
                        cvSeqPush(m_seqRects, &cr);
                    }
                }
            }
            cvClearSeq(seq);
        }
    }
    cvResetImageROI(img);
    cvResetImageROI(thresh);
}//void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, int dMinSize)
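FindContours leans on four small rect helpers (Extend, Move, RectInRect, Center) that are defined elsewhere in vecfacetracking.cpp. A minimal sketch of plausible definitions, for orientation only; the project's own versions may differ:

// Illustrative helper sketches, not the project's actual definitions.
inline void Extend(CvRect& r, int d)        // grow r by d pixels on each side
{ r.x -= d; r.y -= d; r.width += 2 * d; r.height += 2 * d; }

inline void Move(CvRect& r, int dx, int dy) // shift r by (dx, dy)
{ r.x += dx; r.y += dy; }

inline bool RectInRect(const CvRect& r1, const CvRect& r2) // is r1 inside r2?
{
    return r1.x >= r2.x && r1.y >= r2.y &&
           r1.x + r1.width <= r2.x + r2.width &&
           r1.y + r1.height <= r2.y + r2.height;
}

inline CvPoint Center(const CvRect& r)      // center point of r
{ return cvPoint(r.x + r.width / 2, r.y + r.height / 2); }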
Code example #3
// Returns a sequence of squares detected on the image.
// The sequence is stored in the specified memory storage.
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize( img->width & -2, img->height & -2 );
    IplImage* timg = cvCloneImage( img ); // make a copy of the input image
    IplImage* gray = cvCreateImage( sz, 8, 1 );
    IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    IplImage* tgray;
    CvSeq* result;
    double s, t;
    // create an empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );

    // select the maximum ROI in the image
    // with width and height divisible by 2
    cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));

    // down-scale and upscale the image to filter out the noise
    cvPyrDown( timg, pyr, 7 );
    cvPyrUp( pyr, timg, 7 );
    tgray = cvCreateImage( sz, 8, 1 );

    // find squares in every color plane of the image
    for( c = 0; c < 3; c++ )
    {
        // extract the c-th color plane
        cvSetImageCOI( timg, c+1 );
        cvCopy( timg, tgray, 0 );

        // try several threshold levels
        for( l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading.
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from the slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                // dilate the Canny output to remove potential
                // holes between edge segments
                cvDilate( gray, gray, 0, 1 );
            }
            else
            {
                // apply threshold if l!=0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
            }

            // find contours and store them all as a list
            cvFindContours( gray, storage, &contours, sizeof(CvContour),
                            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            // test each contour
            while( contours )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                result = cvApproxPoly( contours, sizeof(CvContour), storage,
                                       CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                // square contours should have 4 vertices after approximation,
                // a relatively large area (to filter out noisy contours),
                // and be convex.
                // Note: the absolute value of the area is used because
                // the area may be positive or negative, in accordance with
                // the contour orientation.
                if( result->total == 4 &&
                    fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
                    cvCheckContourConvexity(result) )
                {
                    s = 0;
                    for( i = 0; i < 5; i++ )
                    {
                        // find the minimum angle between joint
                        // edges (maximum of cosine)
                        if( i >= 2 )
                        {
                            t = fabs(angle(
                                (CvPoint*)cvGetSeqElem( result, i ),
                                (CvPoint*)cvGetSeqElem( result, i-2 ),
                                (CvPoint*)cvGetSeqElem( result, i-1 )));
                            s = s > t ? s : t;
                        }
                    }
                    // if the cosines of all angles are small
                    // (all angles are ~90 degrees) then write the quadrangle
                    // vertices to the resultant sequence
                    if( s < 0.3 )
                        for( i = 0; i < 4; i++ )
                            cvSeqPush( squares, (CvPoint*)cvGetSeqElem( result, i ));
                }
                // take the next contour
                contours = contours->h_next;
            }
        }
    }

    // release all the temporary images
    cvReleaseImage( &gray );
    cvReleaseImage( &pyr );
    cvReleaseImage( &tgray );
    cvReleaseImage( &timg );
    return squares;
}
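findSquares4 also depends on a global thresh (the Canny upper-threshold slider value) and an angle() helper. The helper below is the one shipped with the classic OpenCV squares.c sample, reproduced for completeness; it returns the cosine of the angle between the vectors pt0->pt1 and pt0->pt2:

// helper function:
// finds the cosine of the angle between vectors from pt0->pt1 and from pt0->pt2
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}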
Code example #4
File: movement.c  Project: juampi092/ExamProject
int main(int argc,char **argv)
{
    if(argc != 2)
    {
        printf("usage: %s <mode>\n0 - integrate webcam\n1 - external webcam\n",argv[0]);
        exit(-1);
    }
    else
    {
        int web=atoi(argv[1]);
        if(web >= 0 && web <= 1)
        {
            CvCapture *cam = cvCaptureFromCAM(web);
    	    cvSetCaptureProperty(cam,CV_CAP_PROP_FRAME_WIDTH,640);
            cvSetCaptureProperty(cam,CV_CAP_PROP_FRAME_HEIGHT,480);
            IplImage *img = cvQueryFrame(cam);
            IplImage *copia = cvCreateImage(cvGetSize(img),8,3);
            IplImage *prima = NULL;
            IplImage *binary = cvCreateImage(cvGetSize(img),8,1);
            IplImage *ris = cvCreateImage(cvGetSize(img),8,3);

            cvNamedWindow(NOME,1);
            //Variables to get the current time and date
            time_t tempo;
            struct tm *timeobj;
            time(&tempo);
            timeobj = localtime(&tempo);

            char nome[25];
            long int num=0;
            //Write the time data into nome
            strftime(nome,24,"%H-%M-%S_%F.avi",timeobj);
            //Create the writer that saves the captured frames as a DivX-compressed video
            CvVideoWriter *video = cvCreateVideoWriter(nome,CV_FOURCC('D','I','V','X'),15,cvSize(640,480),1);
            //Initialize the fonts
            CvFont scritta,info;
            cvInitFont(&scritta,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0,0,5,CV_AA);
            cvInitFont(&info,CV_FONT_HERSHEY_SIMPLEX,.6,.6,0,1,6);

            char tasto;
            int i,j,trovato=0,scelta,step = binary->widthStep/sizeof(uchar);
            uchar *target = (uchar*)binary->imageData;
            //Choose between dynamic and static mode
            do
            {
                printf("-- Mode selection --\n1)Dynamic -- react to variations between consecutive frames\n2)Static -- react to variations between a chosen reference frame and the current frame\nChoice: ");
                scanf("%1d",&scelta);
            }while(scelta < 1 || scelta > 2);

            while(img)
            {
                //Flip the image horizontally
                cvFlip(img,img,1);
                //Get the current time information
                time(&tempo);
                timeobj = localtime(&tempo);
                strftime(nome,24,"%H:%M:%S %F",timeobj);
                //Draw the info on screen
                cvPutText(img,nome,cvPoint(415,475),&info,CV_RGB(0,255,255));
                //Copy the frame
                cvCopy(img,copia);

                riduciNoise(img,img);
                //Dynamic mode
                if(scelta == 1)
                {
                    //If this is the first captured frame
                    if(prima == NULL)
                    {
                        prima = cvCreateImage(cvGetSize(img),8,3);
                        //Copy img into prima
                        cvCopy(img,prima);
                    }
                    else
                    {
                        //Not the first frame: check for differences
                        cvAbsDiff(img,prima,ris);
                        //From color to grayscale
                        cvCvtColor(ris,binary,CV_BGR2GRAY);
                        //Threshold the image
                        cvThreshold(binary,binary,62,255,CV_THRESH_BINARY);
                        riduciNoise(binary,binary);
                        cvCopy(img,prima);
                    }

                }
                //Static mode
                else
                {
                    //If the reference frame to monitor has been captured
                    if(prima != NULL)
                    {
                        cvAbsDiff(img,prima,ris);
                        cvCvtColor(ris,binary,CV_BGR2GRAY);
                        cvThreshold(binary,binary,62,255,CV_THRESH_BINARY);
                        riduciNoise(binary,binary);

                    }

                }

                //Scan the image pixel by pixel
                for(i=0; i < binary->height; i++)
                {
                    for(j=0; j < binary->width; j++)
                    {
                        if(target[i*step+j] == 255)
                            trovato = 1;
                    }
                }

                //If a change was detected
                if(trovato)
                {
                    num++;
                    //Draw "REC" and a red dot on the image
                    cvPutText(copia,"REC",cvPoint(10,25),&scritta,CV_RGB(255,0,0));
                    cvCircle(copia,cvPoint(100,15),5,CV_RGB(255,0,0),20,8);
                    //Save the detected frame
                    cvWriteFrame(video,copia);
                    trovato = 0;
                }
                //Show the image
                cvShowImage(NOME,copia);

                tasto = cvWaitKey(15);

                if(tasto == 'q')
                    break;
                //Pressing 'v' stores the reference frame to monitor
                else if(tasto == 'v' && scelta == 2)
                {
                    if(prima != NULL)
                        cvReleaseImage(&prima); //avoid leaking the previous reference frame
                    prima = cvCreateImage(cvGetSize(img),8,3);
                    cvCopy(img,prima);
                }

                img = cvQueryFrame(cam);
            }
            //If any frames were recorded
            if(num != 0)
            {
                //Finalize and write the video file
                cvReleaseVideoWriter(&video);
                printf("Video %s saved\n",nome);
            }
        }
        else
            puts("webcam not found");
    }
    return 0;
}
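main() calls a riduciNoise() ("reduce noise") helper that is not part of this listing. A plausible minimal sketch, assuming it is a simple morphological open; the actual movement.c implementation may differ:

//Hypothetical riduciNoise() sketch; the real helper is not shown above.
void riduciNoise(IplImage *src, IplImage *dst)
{
    cvErode(src, dst, NULL, 1);  //remove isolated bright specks
    cvDilate(dst, dst, NULL, 1); //restore the size of surviving regions
}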
Code example #5
int main()
{ 
	//freopen ("myfile.txt","w",stdout);
	IplImage* background=NULL; //intel image processing libraray
	IplImage* foreground=NULL; 
	IplImage* subtract=NULL; 
	IplConvKernel* element=NULL;
	CvCapture* capture=NULL; //info for reading frames from a camera or a video file
	int counter=0;	
	//show image on screen

	//cvNamedWindow("Video",CV_WINDOW_AUTOSIZE); //CV_WINDOW_AUTOSIZE is the default video size
	cvNamedWindow("Subtract",CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("Contour",CV_WINDOW_AUTOSIZE);
	IplImage* background_gry ;
	IplImage* foreground_gry;
	IplImage* foreground_skipped;
	IplImage* eroded;
	IplImage* dilated;
	IplImage* frame;
	IplImage* audio;

	CvRect rect;
	CvSeq* contours=0;
	CvMemStorage *storage = cvCreateMemStorage(0);
	//float ar;
	//capture = cvCaptureFromCAM(0);
	capture = cvCaptureFromFile("4.avi"); //takes camera ID as argument

	/* if( !capture ) 
	{ 
	printf("Cannot open camera\n"); 
	return -1; 
	}*/
	if(!background) 
	{ 
		for(int i=0;i<50;i++)
		{ 
			background=cvQueryFrame(capture); 
			//takes as its argument a pointer to CvCapture structure
			//grabs next video frame into memory		


			background_gry = cvCreateImage(cvGetSize(background), 8,1); 
			// parameters are size, depth and channels
			//size is image height and width
			//depth - bit depth of image elements
			//channel - no of channels per pixel, supports 1-4 channels
			eroded = cvCreateImage(cvGetSize(background), 8, 1);
			dilated = cvCreateImage(cvGetSize(background), 8, 1);
			frame = cvCreateImage(cvGetSize(background), 8, 1);
			cvCvtColor(background, background_gry, CV_RGB2GRAY); 
			// converts one color space(no of channels) to another
			//parameters - input image, output image, color space conversion code
		} 

	} 
	cvWaitKey(150); //wait and stop for a key stroke
	subtract= cvCreateImage( cvGetSize(background), 8, 1);
	IplImage* subtract_gry = cvCreateImage( cvGetSize(background), 8, 1);
	IplImage* subtract_final = cvCreateImage( cvGetSize(background), 8, 1);

while(1) 
{ 
	foreground=cvQueryFrame(capture); 
	if( !foreground ) break; //stop when the stream ends
	IplImage* foreground_skipped = NULL; //alias for every 5th frame; no separate allocation needed

	if((counter%5)==0)
	{	
		foreground_skipped = foreground;
		//cvNamedWindow("frame", CV_WINDOW_AUTOSIZE);
		//cvShowImage("frame", foreground);
		//cvWaitKey(100);

		foreground_gry= cvCreateImage(cvGetSize(foreground), 8,1);
		cvCvtColor(foreground, foreground_gry, CV_RGB2GRAY);
		//cvShowImage( "Video",foreground_gry); 
		char c = cvWaitKey(30); 
		// for(int j=0;c!=27;j++){ 
		//cvSub(foreground,background,subtract,NULL); 

		cvAbsDiff(foreground_gry,background_gry,subtract); 
		//cvCvtColor(subtract, subtract_gry, CV_RGB2GRAY);
		background_gry = foreground_gry;
		//cvShowImage( "Background", background_gry );
		//double sub_pix= cvGetReal2D(subtract_gry, 150, 100);
		//printf("\n sub= %lf", sub_pix);
		cvThreshold(subtract, subtract_final, 15.0, 255.0, CV_THRESH_BINARY);
		cvErode(subtract_final, eroded, element, 3);
		cvDilate(eroded, dilated, element, 40);

		//define a color for drawing contours
		CvScalar ext_color = CV_RGB(rand()&255, rand()&255, rand()&255);
		//find contours in the subtracted image 
		cvFindContours(dilated, storage, &contours, sizeof(CvContour),CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

		//drawing the contours
//		for (; contours != 0; contours = contours->h_next)
		if (contours != 0)
		{
			cvDrawContours(dilated, contours, ext_color, CV_RGB(4,2,1), -1, 2, 8);
			rect = cvBoundingRect(contours, 0); //extract bounding box for current contour
			//drawing rectangle

			if( (rect.height * rect.width) < 10000)
			{
				contours = contours->h_next;
			}	
			else
			{
				cvRectangle(background, cvPoint(rect.x, rect.y), cvPoint(rect.x+rect.width, rect.y+rect.height), cvScalar(0, 50, 255, 0), 10, 4, 0); 
				//float ar = rect.x/rect.y;	
				float ar = (rect.width-rect.x)/(rect.height-rect.y);
				printf("\n%2f",ar);
				printf("\t\t");

				//float er = 786432/(((rect.x+rect.width))*((rect.y+rect.height)));
				//printf("%2f\n",er);
				//printf("\t\t\t");


				float w,v,y,d;
				float dif=200,cvr;
				v=0.5;
				d = pow(rect.width,2.0)+pow(rect.height,2.0);
				w=pow(d,v);
				y=w/2;
				
				cvr=dif-y;
				printf("%f\t\t%f\n",y,cvr);
				dif = y;


				for(int i=0;i<3;i++)
				{
					if(ar<-15)
				  	{
						if(cvr<-75)
						{
							//if(rect.width<rect.height)
							//{
								printf("Fall detected \n");
								
								//system("CSmtp.exe");
								
								//PlaySound(TEXT("ring.avi"), NULL, SND_SYNC);
								
								
							//}
						}
					}
				}
			}
		
		//cvBoundingRect(contours,0);
		cvShowImage("Subtract",background); 
		
		//cvShowImage("video",background);
		cvWaitKey(2);
		// } 
		if( c == 27 ) break; 
	}
	}
counter++;
}

cvReleaseCapture( &capture ); 
//cvDestroyWindow("Background"); 
cvDestroyWindow("Video"); 
cvDestroyWindow("Subtract"); 
cvReleaseMemStorage(&storage);
return 0;
}
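Note that element is left NULL throughout, which makes cvErode/cvDilate fall back to a default 3x3 rectangular kernel. If a custom kernel were wanted, it could be created like this (illustrative only):

IplConvKernel* element =
    cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);
/* ... pass to cvErode()/cvDilate() in place of NULL ... */
cvReleaseStructuringElement(&element);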
Code example #6
static GstFlowReturn
gst_skin_detect_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img, GstBuffer * outbuf, IplImage * outimg)
{
  GstSkinDetect *filter = GST_SKIN_DETECT (base);

  filter->cvRGB->imageData = (char *) img->imageData;
  filter->cvSkin->imageData = (char *) outimg->imageData;

  /* SKIN COLOUR BLOB DETECTION */
  if (HSV == filter->method) {
    cvCvtColor (filter->cvRGB, filter->cvHSV, CV_RGB2HSV);
    cvSplit (filter->cvHSV, filter->cvH, filter->cvS, filter->cvV, 0);  /*  Extract the 3 color components. */

    /*  Detect which pixels in each of the H, S and V channels are probably skin pixels.
       Assume that skin has a Hue between 0 to 18 (out of 180), and Saturation above 50, and Brightness above 80. */
    cvThreshold (filter->cvH, filter->cvH2, 10, UCHAR_MAX, CV_THRESH_BINARY);   /* (hue > 10) */
    cvThreshold (filter->cvH, filter->cvH, 20, UCHAR_MAX, CV_THRESH_BINARY_INV);        /* (hue < 20) */
    cvThreshold (filter->cvS, filter->cvS, 48, UCHAR_MAX, CV_THRESH_BINARY);    /* (sat > 48) */
    cvThreshold (filter->cvV, filter->cvV, 80, UCHAR_MAX, CV_THRESH_BINARY);    /* (val > 80) */

    /*  erode the HUE to get rid of noise. */
    cvErode (filter->cvH, filter->cvH, NULL, 1);

    /*  Combine all 3 thresholded color components, so that an output pixel will only
       be white (255) if the H, S and V pixels were also white.
       imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where   ^ mean pixels-wise AND */
    cvAnd (filter->cvH, filter->cvS, filter->cvSkinPixels1, NULL);
    cvAnd (filter->cvSkinPixels1, filter->cvH2, filter->cvSkinPixels1, NULL);
    cvAnd (filter->cvSkinPixels1, filter->cvV, filter->cvSkinPixels1, NULL);

    cvCvtColor (filter->cvSkinPixels1, filter->cvRGB, CV_GRAY2RGB);
  } else if (RGB == filter->method) {
    cvSplit (filter->cvRGB, filter->cvR, filter->cvG, filter->cvB, 0);  /*  Extract the 3 color components. */
    cvAdd (filter->cvR, filter->cvG, filter->cvAll, NULL);
    cvAdd (filter->cvB, filter->cvAll, filter->cvAll, NULL);    /*  All = R + G + B */
    cvDiv (filter->cvR, filter->cvAll, filter->cvRp, 1.0);      /*  R' = R / ( R + G + B) */
    cvDiv (filter->cvG, filter->cvAll, filter->cvGp, 1.0);      /*  G' = G / ( R + G + B) */

    cvConvertScale (filter->cvR, filter->cvR2, 1.0, 0.0);
    cvCopy (filter->cvGp, filter->cvGp2, NULL);
    cvCopy (filter->cvRp, filter->cvRp2, NULL);

    cvThreshold (filter->cvR2, filter->cvR2, 60, UCHAR_MAX, CV_THRESH_BINARY);  /* (R > 60) */
    cvThreshold (filter->cvRp, filter->cvRp, 0.42, UCHAR_MAX, CV_THRESH_BINARY);        /* (R'> 0.42) */
    cvThreshold (filter->cvRp2, filter->cvRp2, 0.6, UCHAR_MAX, CV_THRESH_BINARY_INV);   /* (R'< 0.6) */
    cvThreshold (filter->cvGp, filter->cvGp, 0.28, UCHAR_MAX, CV_THRESH_BINARY);        /* (G'> 0.28) */
    cvThreshold (filter->cvGp2, filter->cvGp2, 0.4, UCHAR_MAX, CV_THRESH_BINARY_INV);   /* (G'< 0.4) */

    /*  Combine all the thresholded components, so that an output pixel will only
       be white (255) if the R, R' and G' masks were also white. */

    cvAnd (filter->cvR2, filter->cvRp, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvRp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvRp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvGp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvGp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);

    cvConvertScale (filter->cvSkinPixels2, filter->cvdraft, 1.0, 0.0);
    cvCvtColor (filter->cvdraft, filter->cvRGB, CV_GRAY2RGB);
  }

  /* After this we have a RGB Black and white image with the skin, in
     filter->cvRGB. We can postprocess by applying 1 erode-dilate and 1
     dilate-erode, or alternatively 1 opening-closing all together, with
     the goal of removing small (spurious) skin spots and creating large
     connected areas */
  if (filter->postprocess) {
    cvSplit (filter->cvRGB, filter->cvChA, NULL, NULL, NULL);

    cvErode (filter->cvChA, filter->cvChA,
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 1);
    cvDilate (filter->cvChA, filter->cvChA,
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 2);
    cvErode (filter->cvChA, filter->cvChA,
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 1);

    cvCvtColor (filter->cvChA, filter->cvRGB, CV_GRAY2RGB);
  }

  cvCopy (filter->cvRGB, filter->cvSkin, NULL);

  return GST_FLOW_OK;
}
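The filter->cv* buffers used above are allocated elsewhere in the element (during caps negotiation). A sketch of the allocations this function assumes, with sizes and depths inferred from how each buffer is used here; this is illustrative, not the element's actual setup code (the RGB branch needs analogous single-channel buffers):

/* size would come from the negotiated video caps */
filter->cvRGB  = cvCreateImageHeader (size, IPL_DEPTH_8U, 3); /* wraps input data */
filter->cvSkin = cvCreateImageHeader (size, IPL_DEPTH_8U, 3); /* wraps output data */
filter->cvHSV  = cvCreateImage (size, IPL_DEPTH_8U, 3);
filter->cvH    = cvCreateImage (size, IPL_DEPTH_8U, 1);
filter->cvH2   = cvCreateImage (size, IPL_DEPTH_8U, 1);
filter->cvS    = cvCreateImage (size, IPL_DEPTH_8U, 1);
filter->cvV    = cvCreateImage (size, IPL_DEPTH_8U, 1);
filter->cvSkinPixels1 = cvCreateImage (size, IPL_DEPTH_8U, 1);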
Code example #7
File: extract_front.cpp  Project: cfwin/macbsvpmss
void ExtraFront(IplImage* frame,IplImage* front_bin ,IplImage* background){

	// Create the matrix that stores the grayscale background and convert the initial background into matrix form
	CvMat* background_grayMat = cvCreateMat(background -> height, background -> width, CV_32FC1);
	cvConvert(background, background_grayMat);	
	// Create the background-update matrix that gets blended with the original background
	CvMat* background_renewMat = cvCreateMat(background -> height, background -> width, CV_32FC1);
	// Variables for the current frame and its grayscale version
	IplImage* frame_gray = NULL;
	// The median-filtered grayscale frame and its matrix form
	IplImage* frame_median = NULL;
	CvMat* frame_medianMat = NULL;
	// Variables holding the foreground
	IplImage* front = NULL;
	CvMat* frontMat = NULL;
	// Variable holding the binarized foreground
	CvMat* front_binMat = NULL;
	// (windows displaying the input and output video would be created here)

	if (!frame)
		return;
	// Convert the current frame to grayscale	
	if (frame_gray == NULL)
		frame_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
	cvCvtColor(frame, frame_gray, CV_BGR2GRAY);
	// Median-filter the current frame
	if (frame_median == NULL)
	{
		frame_median = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
		frame_medianMat = cvCreateMat(frame -> height, frame -> width, CV_32FC1);
	}
	CvSize size = cvSize(frame->width,frame->height); // get the current frame size
	cvSmooth(frame_gray, frame_median, CV_MEDIAN);	// default 3x3 kernel
	/*float k[9] = {0,-1,0,-1,5,-1,0,-1,0};
	CvMat km = cvMat(3,3,CV_32FC1,k);
	cvFilter2D(frame_median,frame_median,&km);*/
	IplImage*pyr= cvCreateImage( cvSize((size.width & -2)/2, (size.height & -2)/2), 8, 1 );
	cvPyrDown(frame_median, pyr, CV_GAUSSIAN_5x5 );// downsample to suppress noise; the result is a quarter of the original
	//cvDilate( pyr, pyr, 0, 1 ); // dilate to close discontinuous holes in the target
	cvPyrUp( pyr, frame_median, CV_GAUSSIAN_5x5 );// upsample back to the original size

	cvConvert(frame_median, frame_medianMat);
	// Background subtraction to obtain the foreground
	if (front == NULL)
	{
		front = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
		frontMat = cvCreateMat(frame -> height, frame -> width, CV_32FC1);
	}
	cvAbsDiff(frame_medianMat, background_grayMat, frontMat);
	cvConvert(frontMat, front);
	// Binarize the foreground; the algorithm is an improved OTSU
	if (front_binMat == NULL)
	{
		front_binMat = cvCreateMat(frame -> height, frame -> width, CV_32FC1);
	}
	int threshold = Otsu(front);
	//printf("\n*threshold:%d*\n",threshold);
	cvThreshold(front, front_bin, threshold, 255, CV_THRESH_BINARY); 
	// Morphological opening of the binarized foreground
	cvErode(front_bin, front_bin);	// erode, 1 iteration
	cvSmooth( front_bin, front_bin, CV_MEDIAN, 3, 0, 0, 0 ); 
	cvConvert(front_bin, front_binMat);
	cvReleaseMat(&background_grayMat);
	cvReleaseImage(&pyr);	// release the pyramid buffer as well
	cvReleaseMat(&background_renewMat);
	cvReleaseImage(&frame_gray);
	cvReleaseImage(&frame_median);
	cvReleaseMat(&frame_medianMat);
	cvReleaseImage(&front);
	cvReleaseMat(&frontMat);
	cvReleaseMat(&front_binMat);
}
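ExtraFront calls an Otsu(front) helper described in the comments as an "improved OTSU"; its definition is not shown. For reference, a sketch of the textbook Otsu threshold over an 8-bit single-channel IplImage (the project's improved variant may differ):

int Otsu(IplImage* img)
{
	int hist[256] = {0};
	for (int y = 0; y < img->height; y++) {
		uchar* row = (uchar*)(img->imageData + y * img->widthStep);
		for (int x = 0; x < img->width; x++)
			hist[row[x]]++;
	}
	double total = (double)img->width * img->height;
	double sum = 0;
	for (int i = 0; i < 256; i++)
		sum += i * (double)hist[i];
	double sumB = 0, wB = 0, maxVar = 0;
	int threshold = 0;
	for (int t = 0; t < 256; t++) {
		wB += hist[t];                 // background weight
		if (wB == 0) continue;
		double wF = total - wB;        // foreground weight
		if (wF == 0) break;
		sumB += t * (double)hist[t];
		double mB = sumB / wB;         // background mean
		double mF = (sum - sumB) / wF; // foreground mean
		double between = wB * wF * (mB - mF) * (mB - mF);
		if (between > maxVar) { maxVar = between; threshold = t; }
	}
	return threshold;
}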
Code example #8
char operateImage(Userdata *userdata) {
	if (!userdata) {
		return 0;
	}

	IplImage *image1 = userdata->input[0];
	IplImage *image2 = userdata->input[1];
	IplImage *imageOut = userdata->output[0];
	IplImage *imageOut2 = userdata->output[1];

	static int color_mode = 4;
	static int smooth_mode = 0;
	static int otsu_mode = 0;
	static int close_mode = 0;
	static int canny_mode = 0;
	static int contour_mode = 0;
	static int hsv_mode = 0;
	static int save_img = 0;
	static int history_mode = 0;

	int key = userdata->key;
	switch (key) {
	case 'g':
		color_mode++;
		color_mode %= 5;
		break;
	case 's':
		smooth_mode = !smooth_mode;
		break;
	case 'o':
		otsu_mode = !otsu_mode;
		break;
	case 'e':
		close_mode = !close_mode;
		break;
	case 'c':
		canny_mode = !canny_mode;
		break;
	case 'b':
		contour_mode = !contour_mode;
		break;
	case 'h':
		hsv_mode = !hsv_mode;
		break;
	case 'H':
		history_mode = !history_mode;
		break;
	case 'S':
		save_img = 1;
		break;
	default:
		//cout << key << "\n";
		break;
	}

	int value = userdata->value;
	int kernelSize = userdata->kernelSize;
	kernelSize += 1 - (kernelSize % 2);
	int lowThreshold = userdata->lowThreshold;
	int highThreshold = userdata->highThreshold;
	CvScalar minScalar = cvScalar(userdata->minScalar0, userdata->minScalar1, userdata->minScalar2);
	CvScalar maxScalar = cvScalar(userdata->maxScalar0, userdata->maxScalar1, userdata->maxScalar2);

	static IplImage *tmp1d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 1);
	static IplImage *tmp3d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);
	static IplImage *tmp3d2 = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);

	static IplImage *backgroundAcc = cvCreateImage(cvGetSize(image1), IPL_DEPTH_32F, 3);
	static IplImage *background = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);

	COND_PRINTF("                                                                                                 \r");

	char img_full_channel = 0;
	switch (color_mode) {
	case 0:
		COND_PRINTF("Gray");
		cvCvtColor(image1, tmp1d, CV_BGR2GRAY);
		break;
	case 1: // Hue mode
		COND_PRINTF("Hue");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, tmp1d, NULL, NULL, NULL);
		break;
	case 2: // Saturation mode
		COND_PRINTF("Saturation");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, tmp1d, NULL, NULL);
		break;
	case 3: // Brightness mode
		COND_PRINTF("Brightness");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, NULL, tmp1d, NULL);
		break;
	case 4: // Full color (pass the image through unchanged)
		COND_PRINTF("Color");
		img_full_channel = 1;
		break;
	}

	//filterByVolume(tmp1d, tmp1d, value);
	if (img_full_channel) { // Image has 3 channel
#if 0
		cvRunningAvg(image1, backgroundAcc, (double)userdata->accValue / 1024, NULL);
		cvConvertScale(backgroundAcc, background, 1, 0);
		cvNamedWindow(CONTROL_WINDOW "41", 0);
		cvResizeWindow(CONTROL_WINDOW "41", 640 / 2, 480 / 2);
		cvShowImage(CONTROL_WINDOW "41", background);
		cvCreateTrackbar("accValue", CONTROL_WINDOW "41", &(userdata->accValue), 1024, trackbarCallback);

#endif
		filterByHSV(image1, minScalar, maxScalar, tmp3d);
		if (history_mode) {
			cvCopy(image1, tmp3d, NULL);
			cvCopy(image1, tmp3d2, NULL);
			//cvCvtColor(image1, tmp3d, CV_BGR2HSV);

			//CvRect rect = cvRect(userdata->size.width * 3 / 4 - 40, userdata->size.height / 2 - 40, 80, 80);
			//CvRect rect = cvRect(userdata->size.width * 1 / 4 - 40, userdata->size.height / 2 - 40, userdata->size.width * 3 / 4, 80);
			CvRect rect = cvRect(userdata->square.origin.x, userdata->square.origin.y, userdata->square.size.width, userdata->square.size.height);
			cvSetImageROI(tmp3d, rect);
			GraficarHistograma(tmp3d, tmp3d2);
			cvResetImageROI(tmp3d);

			cvCopy(tmp3d2, tmp3d, NULL);
		}
		else {
			cvCopy(image1, tmp3d, NULL);
		}
	}
	else { // Image has 1 channel

		cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);

		if (otsu_mode) { // Apply Otsu's method
			COND_PRINTF(", Otsu");
			cvThreshold(tmp1d, tmp1d, 0, 255, CV_THRESH_OTSU);
		}

		if (smooth_mode) { // Apply Gaussian smoothing
			COND_PRINTF(", Gauss");
			cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);
		}

		if (close_mode) {
			COND_PRINTF(", closE");
			int n = kernelSize;
			cvErode(tmp1d, tmp1d, NULL, n);
			cvDilate(tmp1d, tmp1d, NULL, n);
		}

		if (canny_mode) { // Apply Canny's method
			COND_PRINTF(", Canny");
			cvCanny(tmp1d, tmp1d, lowThreshold, highThreshold, 3);
			cvDilate(tmp1d, tmp1d, NULL, 1);
			cvErode(tmp1d, tmp1d, NULL, 1);
		}

		cvMerge(tmp1d, tmp1d, tmp1d, NULL, tmp3d);

		if (contour_mode) {
			COND_PRINTF(", contours(b)");
			CvMemStorage *storage = cvCreateMemStorage(0);
			CvSeq *contours = NULL;
			int n = cvFindContours(tmp1d, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));
			//COND_PRINTF(", (" << n <<","<< contours->total <<")contours");
			for (int i = 0; contours != NULL; contours = contours->h_next, i++) {
				int cc = (int)((float)(255 * i) / contours->total);
				CvScalar colorpx = CV_RGB((cc) % 256, (cc + 256 / 3) % 256, (cc + 256 * 2 / 3) % 256);
				cvDrawContours(tmp3d, contours, colorpx, CV_RGB(0, 0, 0), -1, CV_FILLED, 8, cvPoint(0, 0));
			}
		}

	}

	COND_PRINTF("\r");

	cvCopy(image1, image2, NULL);
	cvCopy(imageOut, imageOut2, NULL);
	cvCopy(tmp3d, imageOut, NULL);

	//cvReleaseImage(&tmp1d);
	//cvReleaseImage(&tmp3d);
	//cvReleaseImage(&tmp3d2);

	afterProcess(userdata);

	if (save_img) {
		save_img = 0;
		cvSaveImage(RESOURCES "output.png", imageOut);
	}

	return 0;
}
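operateImage calls filterByHSV(), which is defined elsewhere in the project. An illustrative sketch under the assumption that it keeps only the pixels whose HSV values fall inside [minScalar, maxScalar]:

// Illustrative filterByHSV sketch; the project's own helper may differ.
static void filterByHSV(IplImage* src, CvScalar minScalar, CvScalar maxScalar,
                        IplImage* dst)
{
	static IplImage* hsv  = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
	static IplImage* mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	cvCvtColor(src, hsv, CV_BGR2HSV);
	cvInRangeS(hsv, minScalar, maxScalar, mask); // 255 where in range
	cvZero(dst);
	cvCopy(src, dst, mask);                      // keep only matching pixels
}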
Code example #9
// Update Motion History Image: Calculate motion features and orientation.
void motionDetection(IplImage* image, IplImage* destination_image, MotionInfo* motionInfo)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize image_size = cvSize(image->width, image->height); // get current frame image_size
    int previous_frame_index = last_index, current_frame_index;
    
    initialize_images(image_size);
    
    cvCvtColor(image, image_buffer[last_index], CV_BGR2GRAY); // convert frame to grayscale
    
    current_frame_index = (last_index + 1) % N; // index of (last_index - (N-1))th frame
    last_index = current_frame_index;
    
    silhouette = image_buffer[current_frame_index];
    
    cvAbsDiff(image_buffer[previous_frame_index], image_buffer[current_frame_index], silhouette); // Get difference between frames
    cvThreshold(silhouette, silhouette, DIFFERENCE_THRESHOLD, 1, CV_THRESH_BINARY); // Add threshold
    //cvDilate(silhouette, silhouette, 0, 18);
    //cvErode(silhouette, silhouette, 0, 10);
    
    cvUpdateMotionHistory(silhouette, mhi, timestamp, MHI_DURATION); // Update MHI
    
    // Convert MHI to blue 8U image
    cvCvtScale(mhi, orientation_mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION);
    
    if (destination_image) {
      cvZero(destination_image);
      cvCvtPlaneToPix(orientation_mask, 0, 0, 0, destination_image);
    }
    
    // Calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient(mhi, orientation_mask, orientation, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
    
    // motion_feature_sequence = extract_motion_features();
    if(!storage)
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);
    
    CvSeq* motion_feature_sequence = cvSegmentMotion(mhi, segment_mask, storage, timestamp, MAX_TIME_DELTA);
    
    int SEGMENT_WIDTH = image_size.width / MAX_SEGMENTS_X;
    int SEGMENT_HEIGHT = image_size.height / MAX_SEGMENTS_Y;
    
    // Global motion
    CvRect global_motion_segment = cvRect(0, 0, image_size.width, image_size.height);
    motionInfo->global_angle = calculate_orientation(global_motion_segment, silhouette);
    
    if (destination_image)
      draw_orientation(destination_image, &global_motion_segment, motionInfo->global_angle, 100, CV_RGB(0, 255, 0), true);
    
    long area = 0;
    long totalArea = 0;
    int totalMovingSegments = 0;
    bool hasValidMovement = false;
    CvRect segmentRect;
    
    // Segmented motion
    for(int x = 0; x < MAX_SEGMENTS_X; x++)
    {
      for(int y = 0; y < MAX_SEGMENTS_Y; y++)
      {
        segmentRect = cvRect(x * SEGMENT_WIDTH, y * SEGMENT_HEIGHT, SEGMENT_WIDTH, SEGMENT_HEIGHT);
        area = calculate_motion(&segmentRect, motion_feature_sequence);
        hasValidMovement = (area > MIN_MOTION_FEATURE_AREA);
        
        motionInfo->segment_motion_areas[x][y] = area;
        motionInfo->segment_movements[x][y] = hasValidMovement;
        motionInfo->segment_angles[x][y] = calculate_orientation(segmentRect, silhouette);
        
        totalArea += area;
        totalMovingSegments += (area > MIN_MOTION_FEATURE_AREA);
        
        //printf("%i, ", area);
        //fflush(stdout);
        
        if (hasValidMovement)
          if (destination_image)
            draw_orientation(destination_image, &segmentRect, motionInfo->segment_angles[x][y], 20, CV_RGB(255, 0, 0), true);
      }
    }
    motionInfo->total_motion_area = totalArea;
    motionInfo->total_segments_with_movements = totalMovingSegments;
    motionInfo->SEGMENTS_X = MAX_SEGMENTS_X;
    motionInfo->SEGMENTS_Y = MAX_SEGMENTS_Y;
    
    printf("%i, %f\n", totalArea, (float)totalArea / (float)(image_size.width*image_size.height));
    //fflush(stdout);
}
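calculate_orientation() and calculate_motion() are defined elsewhere. As a plausible sketch, the dominant motion direction of a segment can be obtained with cvCalcGlobalOrientation over the ROI, using the globals (mhi, orientation, orientation_mask) this listing already maintains; the real helper may differ:

double calculate_orientation(CvRect roi, IplImage* silh)
{
    double angle = 0;
    cvSetImageROI(mhi, roi);
    cvSetImageROI(orientation, roi);
    cvSetImageROI(orientation_mask, roi);
    cvSetImageROI(silh, roi);
    if (cvNorm(silh, 0, CV_L1, 0) > 0) // any moving pixels in this segment?
        angle = cvCalcGlobalOrientation(orientation, orientation_mask, mhi,
                                        (double)clock()/CLOCKS_PER_SEC,
                                        MHI_DURATION);
    cvResetImageROI(mhi);
    cvResetImageROI(orientation);
    cvResetImageROI(orientation_mask);
    cvResetImageROI(silh);
    return angle;
}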
Code example #10
float thresholdSegmentation(Rect r, ntk::RGBDImage* current_frame, Mat& dst){
	Mat depth = current_frame->depth();
	Rect& rr = r;
	Mat depthROI = depth(rr), maskROI;
	Mat& rDepthROI = depthROI, &rMaskROI = maskROI;
	double var = 0.3;

	// maskROI for nonZero values in the Face Region
	inRange(depthROI, Scalar::all(0.001), Scalar::all(255), maskROI);
	// Mean depth of Face Region
	Scalar mFace = cv::mean(rDepthROI, rMaskROI);
	//mFace[0]  = mFace[0] - mFace[0] * var;
	inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	mFace = cv::mean(rDepthROI, rMaskROI);
	//inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	//mFace = cv::mean(rDepthROI, rMaskROI);

	// Mask for nearer than the mean of face.
	inRange(depth, Scalar::all(0.001), mFace, dst);
	Mat rgbImage = current_frame->rgb();
	Mat outFrame = cvCreateMat(rgbImage.rows, rgbImage.cols, CV_32FC3);
	rgbImage.copyTo(outFrame, dst);
	Mat outFrameROI;
	outFrameROI = outFrame(rr);
	//cvCopy(&rgbImage, &outFrame, &dst);
	//rgbImageROI = rgbImageROI(rr);
	
	imshow("ROI", outFrameROI);
	//imshow("thresholdSeg", dst);

	// For debug of cvblobslib
	// Display the color image	

	//imshow("faceRIO", maskROI);
	imshow("faceRIO", outFrameROI);
	bool iswrite;
	const int nchannel = 1;
	vector<Rect> faces;
	//iswrite = imwrite("faceROI.png", maskROI);
	iswrite = imwrite("faceROI.png", outFrameROI);
	//iswrite = cvSaveImage("faceROI.jpeg", pOutFrame, &nchannel);

	// ---- blob segmentation on maskROI by using cvblobslib ----
	// ---		Third Trial	---
	//visualizeBlobs("faceROI.png", "faceRIO");

	// ---		First Trial Not Successful		---
	//Mat maskROIThr=cvCreateMat(maskROI.rows, maskROI.cols, CV_8UC1);	
	//maskROIThr = maskROI;
	//IplImage imgMaskROIThr = maskROIThr;
	//IplImage* pImgMaskROIThr = &imgMaskROIThr;
	//cvThreshold(pImgMaskROIThr, pImgMaskROIThr, 0.1, 255, CV_THRESH_BINARY_INV);

	// ---		Second Trial	---
	IplImage* original = cvLoadImage("faceROI.png", 0);
	IplImage* originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
	IplImage* displayBiggestBlob = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 3);
	CBlobResult blobs;
	CBlob biggestBlob;
	//IplImage source = maskROIThr;	IplImage* pSource = &source;
	//blobs = CBlobResult(
	cvThreshold(original, originalThr, 0.1, 255, CV_THRESH_BINARY_INV);
	blobs =  CBlobResult( originalThr, NULL, 255);
	printf("%d blobs \n", blobs.GetNumBlobs());
	blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);
	biggestBlob.FillBlob(displayBiggestBlob, CV_RGB(255, 0, 0));

	// Draw the ellipse and rect on the blob
	Mat mat(displayBiggestBlob);

	cv::RotatedRect blobEllipseContour;
	cv::Rect blobRectContour;
	//RotatedRect blobEllipseContour;
	blobEllipseContour = biggestBlob.GetEllipse();
	blobRectContour = biggestBlob.GetBoundingBox();
	//cv::ellipse(
	cv::ellipse(mat, blobEllipseContour, cv::Scalar(0,255, 0), 3, CV_AA);
	cv::rectangle(mat, blobRectContour, cv::Scalar(255, 0, 0), 3, CV_AA);
	//cv::ellipse(mat, blobEllipseContour);
	float headOritation = blobEllipseContour.angle;
	if (headOritation <= 180)
		headOritation = headOritation - 90;
	else
		headOritation = headOritation - 270;
	cv::putText(mat,
			cv::format("%f degree", headOritation),
			Point(10,20), 0, 0.5, Scalar(255,0,0,255));

	cv::imshow("faceRIO", mat);
	return(headOritation);
}
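Since this pass keeps only the largest blob, small speckles do not matter here; if they did, cvblobslib's Filter() facility could drop them before the selection (illustrative):

// Optional: discard blobs smaller than ~100 px before picking the largest.
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 100);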
Code example #11
File: distrans.c  Project: Ikem/opencv-1.1.0
// threshold trackbar callback
void on_trackbar( int dummy )
{
    static const uchar colors[][3] = 
    {
        {0,0,0},
        {255,0,0},
        {255,128,0},
        {255,255,0},
        {0,255,0},
        {0,128,255},
        {0,255,255},
        {0,0,255},
        {255,0,255}
    };
    
    int msize = mask_size;
    int _dist_type = build_voronoi ? CV_DIST_L2 : dist_type;

    cvThreshold( gray, edge, (float)edge_thresh, (float)edge_thresh, CV_THRESH_BINARY );

    if( build_voronoi )
        msize = CV_DIST_MASK_5;

    if( _dist_type == CV_DIST_L1 )
    {
        cvDistTransform( edge, edge, _dist_type, msize, NULL, NULL );
        cvConvert( edge, dist );
    }
    else
        cvDistTransform( edge, dist, _dist_type, msize, NULL, build_voronoi ? labels : NULL );

    if( !build_voronoi )
    {
        // begin "painting" the distance transform result
        cvConvertScale( dist, dist, 5000.0, 0 );
        cvPow( dist, dist, 0.5 );
    
        cvConvertScale( dist, dist32s, 1.0, 0.5 );
        cvAndS( dist32s, cvScalarAll(255), dist32s, 0 );
        cvConvertScale( dist32s, dist8u1, 1, 0 );
        cvConvertScale( dist32s, dist32s, -1, 0 );
        cvAddS( dist32s, cvScalarAll(255), dist32s, 0 );
        cvConvertScale( dist32s, dist8u2, 1, 0 );
        cvMerge( dist8u1, dist8u2, dist8u2, 0, dist8u );
        // end "painting" the distance transform result
    }
    else
    {
        int i, j;
        for( i = 0; i < labels->height; i++ )
        {
            int* ll = (int*)(labels->imageData + i*labels->widthStep);
            float* dd = (float*)(dist->imageData + i*dist->widthStep);
            uchar* d = (uchar*)(dist8u->imageData + i*dist8u->widthStep);
            for( j = 0; j < labels->width; j++ )
            {
                int idx = ll[j] == 0 || dd[j] == 0 ? 0 : (ll[j]-1)%8 + 1;
                int b = cvRound(colors[idx][0]);
                int g = cvRound(colors[idx][1]);
                int r = cvRound(colors[idx][2]);
                d[j*3] = (uchar)b;
                d[j*3+1] = (uchar)g;
                d[j*3+2] = (uchar)r;
            }
        }
    }
    
    cvShowImage( wndname, dist8u );
}
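on_trackbar() reads several globals (gray, edge, dist, dist8u, labels, ...) that the sample's main() prepares. A trimmed sketch of the wiring, following the structure of the original distrans.c demo (wndname and tbarname are the sample's window and trackbar name globals):

cvNamedWindow( wndname, 1 );
cvCreateTrackbar( tbarname, wndname, &edge_thresh, 255, on_trackbar );
on_trackbar( 0 );   /* render once with the initial threshold */
cvWaitKey( 0 );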
Code example #12
File: ImageProcess.cpp  Project: shengdewu/repository
void CImageProcess::test(string path)
{
	IplImage * src=cvLoadImage(path.c_str(),0);  
	//  cvSmooth(src,src,CV_BLUR,3,3,0,0);  
	cvThreshold(src,src,175,255,CV_THRESH_BINARY);  
	IplImage* paintx=cvCreateImage( cvGetSize(src),IPL_DEPTH_8U, 1 );  
	IplImage* painty=cvCreateImage( cvGetSize(src),IPL_DEPTH_8U, 1 );  
	cvZero(paintx);  
	cvZero(painty);  
	int* v=new int[src->width];
	int* h=new int[src->height];
	memset(v,0,src->width*sizeof(int));
	memset(h,0,src->height*sizeof(int));

	int x,y;  
	CvScalar s,t;  
	for(x=0;x<src->width;x++)  
	{  
		for(y=0;y<src->height;y++)  
		{  
			s=cvGet2D(src,y,x);           
			if(s.val[0]==255)  
				v[x]++;                   
		}         
	}  

	for(x=0;x<src->width;x++)  
	{  
		for(y=0;y<v[x];y++)  
		{         
			t.val[0]=255;  
			cvSet2D(paintx,y,x,t);        
		}         
	}  

	for(y=0;y<src->height;y++)  
	{  
		for(x=0;x<src->width;x++)  
		{  
			s=cvGet2D(src,y,x);           
			if(s.val[0]==255)  
				h[y]++;       
		}     
	}  
	for(y=0;y<src->height;y++)  
	{  
		for(x=0;x<h[y];x++)  
		{             
			t.val[0]=255;  
			cvSet2D(painty,y,x,t);            
		}         
	}  
	cvNamedWindow("二值图像",1);  
	cvNamedWindow("垂直积分投影",1);  
	cvNamedWindow("水平积分投影",1);  
	cvShowImage("二值图像",src);  
	cvShowImage("垂直积分投影",paintx);  
	cvShowImage("水平积分投影",painty);  
	cvWaitKey(0);  
	cvDestroyAllWindows();  
	cvReleaseImage(&src);  
	cvReleaseImage(&paintx);  
	cvReleaseImage(&painty);  
}
Code example #13
File: ImageProcess.cpp  Project: shengdewu/repository
void CImageProcess::ProcessImage()
{
	IplImage*	pOSource = cvLoadImage(m_sName.c_str());
	m_mapImage.insert(std::pair<string, IplImage*>(m_sName, pOSource));
	//OpenCV grayscale conversion
	IplImage* pOGray = cvCreateImage(cvGetSize(pOSource), IPL_DEPTH_8U, 1);
	m_mapImage.insert(std::pair<string, IplImage*>("gray", pOGray));

	cvCvtColor(pOSource, pOGray, CV_BGR2GRAY);

	//OpenCV binarization
	IplImage*	pOBinary = cvCreateImage(cvGetSize(pOGray), IPL_DEPTH_8U, 1);
	m_mapImage.insert(std::pair<string, IplImage*>("binary", pOBinary));
	cvThreshold(pOGray, pOBinary, 175, 255, CV_THRESH_BINARY);

	//pSwellTemp = LevelSwell(pOBinary->imageData, pOBinary->widthStep, pOBinary->height,8);
	//VerticalSwell(pOBinary->imageData, pOBinary->widthStep, pOBinary->height, 0);
	//OpencvSwell(pOBinary, pOBinary);

	//Projection method: segment the image
	unsigned int *pVHist = new unsigned int[pOBinary->widthStep];
	VerticalProjection(pOBinary->imageData, pOBinary->widthStep, pOBinary->height, pVHist);
	unsigned int *pHHist = new unsigned int[pOBinary->height];
	HorizontalProjection(pOBinary->imageData, pOBinary->widthStep, pOBinary->height, pHHist);

	vector<Location> v_lc = Projection_Location(pVHist,pOBinary->widthStep, pOBinary->height);
	vector<Location> h_lc = Projection_Location(pHHist,pOBinary->height, pOBinary->widthStep);

	//Crop the sub-images
	Image image;
	image._height = pOBinary->height;
	image._width = pOBinary->widthStep;
	image._pData = new unsigned char[image._height * image._width];
	memset(image._pData, 0 , image._height * image._width);
	memcpy_s(image._pData, image._height * image._width, pOBinary->imageData, image._height * image._width);
	vector<Image> dst;
	CutImage(image, v_lc, h_lc, &dst);
	delete []image._pData;

	//Scale
	//Location l = FindMaxLocation(v_lc, h_lc);
	Location l;
	l._start = 16;
	l._end = 16;
	vector<Image> zImage;
	ZoomImage(&dst,&zImage, l);

	//Extract features
	for(int i=0; i<zImage.size(); i++){
		char c[10];
		itoa(i, c, 10);
		IplImage *img = CreateImage(zImage.at(i));
		//m_mapImage.insert(std::pair<string, IplImage*>(c, img));
		SIFTInstance(img, c);
		ReleaseUserImage(&img);
	}
	
/*
	char*		cOTitle = {"opencv"};
	cvNamedWindow(cOTitle, CV_WINDOW_AUTOSIZE);
	cvShowImage(cOTitle,pOBinary);*/

	//test();
	cvWaitKey();

	cvDestroyAllWindows();
}
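VerticalProjection() and HorizontalProjection() are declared elsewhere; from the call sites, each fills a histogram of foreground pixels per column (or per row) of the binarized image. A sketch of the vertical variant under that assumption; the real helper may differ:

// Illustrative sketch: pHist[x] counts the 255-valued pixels in column x.
// pData/widthStep/height match the call site above.
void VerticalProjection(const char* pData, int widthStep, int height,
                        unsigned int* pHist)
{
	memset(pHist, 0, widthStep * sizeof(unsigned int));
	for (int y = 0; y < height; y++)
		for (int x = 0; x < widthStep; x++)
			if ((unsigned char)pData[y * widthStep + x] == 255)
				pHist[x]++;
}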
Code example #14
/* Main tracking function - gets called by MT_TrackerFrameBase every
 * time step when the application is not paused. */
void DanceTracker::doTracking(IplImage* frame)
{
    /* time-keeping, if necessary
     * NOTE this is not necessary for keeping track of frame rate */
    static double t_prev = MT_getTimeSec();
    double t_now = MT_getTimeSec();
    m_dDt = t_now - t_prev;
    t_prev = t_now;

    /* keeping track of the frame number, if necessary */
    m_iFrameCounter++;

    /* This checks every time step to see if the UKF parameters have
       changed and modifies the UKF structures accordingly.  This will
       also get called the first time through b/c the "Prev" values get
       set to zero initially.   There may be a more efficient way to do
       this, but because the values need to be embedded into the CvMat
       objects I'm not sure how else to do it. */ 
    if(
        m_dSigmaPosition != m_dPrevSigmaPosition ||
        m_dSigmaSpeed != m_dPrevSigmaSpeed ||
        m_dSigmaPositionMeas != m_dPrevSigmaPositionMeas
        )
    {
        /* these are the diagonal entries of the "Q" matrix, which
           represents the variances of the process noise.  They're
           modeled here as being independent and uncorrelated. */
        cvSetReal2D(m_pQ, 0, 0, m_dSigmaPosition*m_dSigmaPosition);
        cvSetReal2D(m_pQ, 1, 1, m_dSigmaPosition*m_dSigmaPosition);
        cvSetReal2D(m_pQ, 2, 2, m_dSigmaHeading*m_dSigmaSpeed);
        cvSetReal2D(m_pQ, 3, 3, m_dSigmaSpeed*m_dSigmaSpeed);        

        /* these are the diagonal entries of the "R" matrix, also
           assumed to be uncorrelated. */
        cvSetReal2D(m_pR, 0, 0, m_dSigmaPositionMeas*m_dSigmaPositionMeas);
        cvSetReal2D(m_pR, 1, 1, m_dSigmaPositionMeas*m_dSigmaPositionMeas);

        /* this step actually copies the Q and R matrices to the UKF
           and makes sure that its internals are properly initialized -
           it's set up to handle the fact that the sizes of these
           matrices could have changed. */
        for(unsigned int i = 0; i < m_iNObj; i++)
        {
            MT_UKFCopyQR(m_vpUKF[i], m_pQ, m_pR);
        }
    }


    HSVSplit(frame);

    cvThreshold(m_pHFrame, m_pThreshFrame, m_iHThresh_Low, 255, CV_THRESH_BINARY);
    cvThreshold(m_pHFrame, m_pTempFrame1, m_iHThresh_High, 255, CV_THRESH_BINARY_INV);
    cvAnd(m_pThreshFrame, m_pTempFrame1, m_pThreshFrame);

    cvThreshold(m_pSFrame, m_pTempFrame1, m_iSThresh_Low, 255, CV_THRESH_BINARY);
    cvThreshold(m_pSFrame, m_pTempFrame2, m_iSThresh_High, 255, CV_THRESH_BINARY_INV);
    cvAnd(m_pTempFrame1, m_pTempFrame2, m_pTempFrame1);
    cvAnd(m_pThreshFrame, m_pTempFrame1, m_pThreshFrame);

    cvThreshold(m_pVFrame, m_pTempFrame1, m_iVThresh_Low, 255, CV_THRESH_BINARY);
    cvThreshold(m_pVFrame, m_pTempFrame2, m_iVThresh_High, 255, CV_THRESH_BINARY_INV);
    cvAnd(m_pTempFrame1, m_pTempFrame2, m_pTempFrame1);
    cvAnd(m_pThreshFrame, m_pTempFrame1, m_pTempFrame1);

    cvSub(BG_frame, m_pVFrame, m_pTempFrame2);
    cvThreshold(m_pTempFrame2, m_pTempFrame2, m_iBGThresh, 255, CV_THRESH_BINARY);

    cvOr(m_pTempFrame1, m_pTempFrame2, m_pThreshFrame);
    cvSmooth(m_pThreshFrame, m_pThreshFrame, CV_MEDIAN, 3);
    
    if(ROI_frame)
    {
        cvAnd(m_pThreshFrame, ROI_frame, m_pThreshFrame);
    }
                

/*    std::vector<YABlob> yblobs = m_YABlobber.FindBlobs(m_pThreshFrame,
                                                       5,
                                                       m_iBlobAreaThreshLow,
                                                       NO_MAX,
                                                       m_iBlobAreaThreshHigh);


    int ny = yblobs.size();
    m_vdBlobs_X.resize(ny);
    m_vdBlobs_Y.resize(ny);
    m_vdBlobs_Orientation.resize(ny);
    for(unsigned int i = 0; i < yblobs.size(); i++)
    {
        m_vdBlobs_X[i] = yblobs[i].COMx;
        m_vdBlobs_Y[i] = yblobs[i].COMy;
        m_vdBlobs_Orientation[i] = 0;
        }*/

    m_vbNoMeasurement.assign(m_iNObj, false);
    
    Dance_Segmenter segmenter(this);
    segmenter.setDebugFile(stdout);
    segmenter.m_iMinBlobPerimeter = 1;
    segmenter.m_iMinBlobArea = m_iBlobAreaThreshLow;
    segmenter.m_iMaxBlobArea = m_iBlobAreaThreshHigh;
    segmenter.m_dOverlapFactor = m_dOverlapFactor;

    if(m_iFrameCounter <= 1)
    {
        std::ifstream in_file;
        in_file.open("initials.dat");
        double x, y;

        m_vBlobs.resize(0);
        m_vBlobs = MT_readDSGYABlobsFromFile("initials.dat");

        m_vInitBlobs.resize(0);
        m_viAssignments.resize(0);

/*        m_vBlobs = segmenter.segmentFirstFrame(m_pThreshFrame,
          m_iNObj); */
    }
    else
    {
        MT_writeDSGYABlobsToFile(m_vBlobs, "blobs-in.dat");
        MT_writeDSGYABlobsToFile(m_vPredictedBlobs, "predicted-in.dat");
        
        bool use_prediction = true;
        m_vBlobs = segmenter.doSegmentation(m_pThreshFrame,
                                            use_prediction ? m_vPredictedBlobs : m_vBlobs);

        m_viAssignments = segmenter.getAssignmentVector(&m_iAssignmentRows, &m_iAssignmentCols);
        m_vInitBlobs = segmenter.getInitialBlobs();
    }

    /* prediction is done below - this makes sure the predicted blobs
       are OK no matter what */
    m_vPredictedBlobs = m_vBlobs;
    
    unsigned int sc = 0;
    bool same_frame = false;
    for(unsigned int i = 0; i < m_vBlobs.size(); i++)
    {
        if(m_vdBlobs_X[i] == m_vBlobs[i].m_dXCenter)
        {
            sc++;
        }
        m_vdBlobs_X[i] = m_vBlobs[i].m_dXCenter;
        m_vdBlobs_Y[i] = m_vBlobs[i].m_dYCenter;
        m_vdBlobs_Orientation[i] = m_vBlobs[i].m_dOrientation;
    }

    same_frame = (sc >= m_iNObj - 2);
    if(same_frame)
    {
        return;
    }

    /* Tracking / UKF / State Estimation
     *
     * Now that we've got the mapping of which measurement goes with
     * which object, we need to feed the measurements into the UKF in
     * order to obtain a state estimate.
     *
     * This is a loop over each object we're tracking. 
     */

    for(unsigned int i = 0; i< m_iNObj; i++)
    {
    
        /* we could throw out a measurement and use the blob
           state as an estimate for various reasons.  On the first
           frame we want to set the initial state, so we flag the
           measurement as invalid */
        bool invalid_meas =  m_vbNoMeasurement[i];
        bool need_state = m_iFrameCounter == 1;
        
        /* if any state is NaN, reset the UKF
         * This shouldn't happen anymore, but it's a decent safety
         * check.  It could probably be omitted if we want to
         * optimize for speed... */
        if(m_iFrameCounter > 1 &&
           (!CvMatIsOk(m_vpUKF[i]->x) ||
            !CvMatIsOk(m_vpUKF[i]->P)))
        {
            MT_UKFFree(&(m_vpUKF[i]));
            m_vpUKF[i] = MT_UKFInit(4, 2, 0.1);
            MT_UKFCopyQR(m_vpUKF[i], m_pQ, m_pR);
            need_state = true;
        }

        if(need_state)
        {
            cvSetReal2D(m_px0, 0, 0, m_vdBlobs_X[i]);
            cvSetReal2D(m_px0, 1, 0, m_vdBlobs_Y[i]);
            cvSetReal2D(m_px0, 2, 0, 0);
            cvSetReal2D(m_px0, 3, 0, 0);
            MT_UKFSetState(m_vpUKF[i], m_px0);
        }
    
        /* if we're going to accept this measurement */
        if(!invalid_meas)
        {
            /* UKF prediction step, note we use function pointers to
               the fish_dynamics and fish_measurement functions defined
               above.  The final parameter would be for the control input
               vector, which we don't use here so we pass a NULL pointer */
            MT_UKFPredict(m_vpUKF[i],
                          &dance_dynamics,
                          &dance_measurement,
                          NULL);
    
            /* finally, set the measurement vector z */
            cvSetReal2D(m_pz, 0, 0, m_vdBlobs_X[i]);
            cvSetReal2D(m_pz, 1, 0, m_vdBlobs_Y[i]);

            MT_UKFSetMeasurement(m_vpUKF[i], m_pz);
    
            /* then do the UKF correction step, which accounts for the
               measurement */
            MT_UKFCorrect(m_vpUKF[i]);
    
    
        }
        else  
        {
            /* use the predicted state */
            CvMat* xp = m_vpUKF[i]->x1;
            MT_UKFSetState(m_vpUKF[i], xp);
        }
        
        /* then constrain the state if necessary - see function
         * definition above */
        constrain_state(m_vpUKF[i]->x, m_vpUKF[i]->x1, frame);            
    
        /* grab the state estimate and store it in variables that will
           make it convenient to save it to a file. */
        CvMat* x = m_vpUKF[i]->x;
    
        m_vdTracked_X[i] = cvGetReal2D(x, 0, 0);
        m_vdTracked_Y[i] = cvGetReal2D(x, 1, 0);
        m_vdTracked_Vx[i] = cvGetReal2D(x, 2, 0);
        m_vdTracked_Vy[i] = cvGetReal2D(x, 3, 0);

        /* take the tracked positions as the blob centers */
        m_vBlobs[i].m_dXCenter = m_vdTracked_X[i];
        m_vBlobs[i].m_dYCenter = m_vdTracked_Y[i];

        /* predict blob locations */
        CvMat* xp = m_vpUKF[i]->x1;
        m_vPredictedBlobs[i].m_dXCenter = cvGetReal2D(xp, 0, 0);
        m_vPredictedBlobs[i].m_dYCenter = cvGetReal2D(xp, 1, 0);        
    
        /* If we wanted the predicted state, this would be how to get
           it */
        /* CvMat* xp = m_vpUKF[i]->x1; */
    }

    MT_writeDSGYABlobsToFile(m_vBlobs, "blobs-out.dat");
    MT_writeDSGYABlobsToFile(m_vPredictedBlobs, "predicted-out.dat");
    
    /* write data to file */
    writeData();

}
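HSVSplit(frame) is a member function defined elsewhere; from the way m_pHFrame/m_pSFrame/m_pVFrame are consumed above, it presumably converts the frame to HSV and splits the channels. A sketch under that assumption (m_pHSVFrame is a hypothetical 3-channel scratch buffer, not a name confirmed by this listing):

void DanceTracker::HSVSplit(IplImage* frame)
{
    /* m_pHSVFrame: hypothetical 3-channel scratch image, same size as frame */
    cvCvtColor(frame, m_pHSVFrame, CV_BGR2HSV);
    cvSplit(m_pHSVFrame, m_pHFrame, m_pSFrame, m_pVFrame, NULL);
}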
Code example #15
CV_IMPL void
cvGoodFeaturesToTrack( const void* image, void* eigImage, void* tempImage,
                       CvPoint2D32f* corners, int *corner_count,
                       double quality_level, double min_distance,
                       const void* maskImage, int block_size,
                       int use_harris, double harris_k )
{
    CvMat* _eigImg = 0;
    CvMat* _tmpImg = 0;

    CV_FUNCNAME( "cvGoodFeaturesToTrack" );

    __BEGIN__;

    double max_val = 0;
    int max_count = 0;
    int count = 0;
    int x, y, i, k = 0;
    int min_dist;

    /* when selecting points, use integer coordinates */
    CvPoint *ptr = (CvPoint *) corners;

    /* process floating-point images using integer arithmetic */
    int *eig_data = 0;
    int *tmp_data = 0;
    int **ptr_data = 0;
    uchar *mask_data = 0;
    int  mask_step = 0;
    CvSize size;

    int    coi1 = 0, coi2 = 0, coi3 = 0;
    CvMat  stub, *img = (CvMat*)image;
    CvMat  eig_stub, *eig = (CvMat*)eigImage;
    CvMat  tmp_stub, *tmp = (CvMat*)tempImage;
    CvMat  mask_stub, *mask = (CvMat*)maskImage;

    if( corner_count )
    {
        max_count = *corner_count;
        *corner_count = 0;
    }

    CV_CALL( img = cvGetMat( img, &stub, &coi1 ));
    if( eig )
    {
        CV_CALL( eig = cvGetMat( eig, &eig_stub, &coi2 ));
    }
    else
    {
        CV_CALL( _eigImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));
        eig = _eigImg;
    }

    if( tmp )
    {
        CV_CALL( tmp = cvGetMat( tmp, &tmp_stub, &coi3 ));
    }
    else
    {
        CV_CALL( _tmpImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));
        tmp = _tmpImg;
    }

    if( mask )
    {
        CV_CALL( mask = cvGetMat( mask, &mask_stub ));
        if( !CV_IS_MASK_ARR( mask ))
        {
            CV_ERROR( CV_StsBadMask, "" );
        }
    }

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( CV_MAT_CN(img->type) != 1 ||
            CV_MAT_CN(eig->type) != 1 ||
            CV_MAT_CN(tmp->type) != 1 )
        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );

    if( CV_MAT_DEPTH(tmp->type) != CV_32F ||
            CV_MAT_DEPTH(eig->type) != CV_32F )
        CV_ERROR( CV_BadDepth, cvUnsupportedFormat );

    if( !corners || !corner_count )
        CV_ERROR( CV_StsNullPtr, "" );

    if( max_count <= 0 )
        CV_ERROR( CV_StsBadArg, "maximal corners number is non positive" );

    if( quality_level <= 0 || min_distance < 0 )
        CV_ERROR( CV_StsBadArg, "quality level or min distance are non positive" );

    if( use_harris )
    {
        CV_CALL( cvCornerHarris( img, eig, block_size, 3, harris_k ));
    }
    else
    {
        CV_CALL( cvCornerMinEigenVal( img, eig, block_size, 3 ));
    }
    CV_CALL( cvMinMaxLoc( eig, 0, &max_val, 0, 0, mask ));
    CV_CALL( cvThreshold( eig, eig, max_val * quality_level,
                          0, CV_THRESH_TOZERO ));
    CV_CALL( cvDilate( eig, tmp ));

    min_dist = cvRound( min_distance * min_distance );

    size = cvGetMatSize( img );
    ptr_data = (int**)(tmp->data.ptr);
    eig_data = (int*)(eig->data.ptr);
    tmp_data = (int*)(tmp->data.ptr);
    if( mask )
    {
        mask_data = (uchar*)(mask->data.ptr);
        mask_step = mask->step;
    }

    /* collect list of pointers to features - put them into temporary image */
    for( y = 1, k = 0; y < size.height - 1; y++ )
    {
        (char*&)eig_data += eig->step;
        (char*&)tmp_data += tmp->step;
        mask_data += mask_step;

        for( x = 1; x < size.width - 1; x++ )
        {
            int val = eig_data[x];
            if( val != 0 && val == tmp_data[x] && (!mask || mask_data[x]) )
                ptr_data[k++] = eig_data + x;
        }
    }

    icvSortFeatures( ptr_data, k, 0 );

    /* select the strongest features */
    for( i = 0; i < k; i++ )
    {
        int j = count, ofs = (int)((uchar*)(ptr_data[i]) - eig->data.ptr);
        y = ofs / eig->step;
        x = (ofs - y * eig->step)/sizeof(float);

        if( min_dist != 0 )
        {
            for( j = 0; j < count; j++ )
            {
                int dx = x - ptr[j].x;
                int dy = y - ptr[j].y;
                int dist = dx * dx + dy * dy;

                if( dist < min_dist )
                    break;
            }
        }

        if( j == count )
        {
            ptr[count].x = x;
            ptr[count].y = y;
            if( ++count >= max_count )
                break;
        }
    }

    /* convert points to floating-point format */
    for( i = 0; i < count; i++ )
    {
        assert( (unsigned)ptr[i].x < (unsigned)size.width &&
                (unsigned)ptr[i].y < (unsigned)size.height );

        corners[i].x = (float)ptr[i].x;
        corners[i].y = (float)ptr[i].y;
    }

    *corner_count = count;

    __END__;

    cvReleaseMat( &_eigImg );
    cvReleaseMat( &_tmpImg );
}
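
A minimal calling sketch for the routine above (the file name and parameter values are illustrative, not from the original project):

/* Illustrative usage: find up to 100 corners in a grayscale image. */
IplImage* gray = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* eig  = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);
IplImage* tmp  = cvCreateImage(cvGetSize(gray), IPL_DEPTH_32F, 1);
CvPoint2D32f corners[100];
int corner_count = 100;

cvGoodFeaturesToTrack(gray, eig, tmp, corners, &corner_count,
                      0.01,  /* quality_level: fraction of the strongest response */
                      10.0,  /* min_distance between accepted corners */
                      NULL,  /* no mask */
                      3,     /* block_size */
                      0,     /* use_harris = 0 -> min-eigenvalue measure */
                      0.04); /* harris_k, unused when use_harris == 0 */

cvReleaseImage(&tmp);
cvReleaseImage(&eig);
cvReleaseImage(&gray);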
コード例 #16
0
IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq)
{
    CvTestSeq_*     pTS = (CvTestSeq_*)pTestSeq;
    CvTestSeqElem*  p = pTS->pElemList;
    IplImage*       pImg = pTS->pImg;
    IplImage*       pImgAdd = cvCloneImage(pTS->pImg);
    IplImage*       pImgAddG = cvCreateImage(cvSize(pImgAdd->width,pImgAdd->height),IPL_DEPTH_8U,1);
    IplImage*       pImgMask = pTS->pImgMask;
    IplImage*       pImgMaskAdd = cvCloneImage(pTS->pImgMask);
    CvMat*          pT = cvCreateMat(2,3,CV_32F);

    if(pTS->CurFrame >= pTS->FrameNum) return NULL;
    cvZero(pImg);
    cvZero(pImgMask);

    for(p=pTS->pElemList; p; p=p->next)
    {
        int             DirectCopy = FALSE;
        int             frame = pTS->CurFrame - p->FrameBegin;
        //float           t = p->FrameNum>1?((float)frame/(p->FrameNum-1)):0;
        CvTSTrans*      pTrans = p->pTrans + frame%p->TransNum;

        assert(pTrans);

        if( p->FrameNum > 0 && (frame < 0 || frame >= p->FrameNum) )
        {   /* Current frame is out of range: */
            //if(p->pAVI)cvReleaseCapture(&p->pAVI);
            p->pAVI = NULL;
            continue;
        }

        cvZero(pImgAdd);
        cvZero(pImgAddG);
        cvZero(pImgMaskAdd);

        if(p->noise_type == CV_NOISE_NONE)
        {   /* Not a noise element:  */
            /* Get next frame: */
            icvTestSeqQureyFrameElem(p, frame);
            if(p->pImg == NULL) continue;

#if 1 /* transform using the T field in Trans */
            {   /* Calculate transform matrix: */
                float   W = (float)(pImgAdd->width-1);
                float   H = (float)(pImgAdd->height-1);
                float   W0 = (float)(p->pImg->width-1);
                float   H0 = (float)(p->pImg->height-1);
                cvZero(pT);
                {   /* Calculate inverse matrix: */
                    CvMat   mat = cvMat(2,3,CV_32F, pTrans->T);
                    mat.width--;
                    pT->width--;
                    cvInvert(&mat, pT);
                    pT->width++;
                }

                CV_MAT_ELEM(pT[0], float, 0, 2) =
                    CV_MAT_ELEM(pT[0], float, 0, 0)*(W0/2-pTrans->T[2])+
                    CV_MAT_ELEM(pT[0], float, 0, 1)*(H0/2-pTrans->T[5]);

                CV_MAT_ELEM(pT[0], float, 1, 2) =
                    CV_MAT_ELEM(pT[0], float, 1, 0)*(W0/2-pTrans->T[2])+
                    CV_MAT_ELEM(pT[0], float, 1, 1)*(H0/2-pTrans->T[5]);

                CV_MAT_ELEM(pT[0], float, 0, 0) *= W0/W;
                CV_MAT_ELEM(pT[0], float, 0, 1) *= H0/H;
                CV_MAT_ELEM(pT[0], float, 1, 0) *= W0/W;
                CV_MAT_ELEM(pT[0], float, 1, 1) *= H0/H;

            }   /* Calculate transform matrix. */
#else
            {   /* Calculate transform matrix: */
                float   SX = (float)(p->pImg->width-1)/((pImgAdd->width-1)*pTrans->Scale.x);
                float   SY = (float)(p->pImg->height-1)/((pImgAdd->height-1)*pTrans->Scale.y);
                float   DX = pTrans->Shift.x;
                float   DY = pTrans->Shift.y;
                cvZero(pT);
                ((float*)(pT->data.ptr+pT->step*0))[0]=SX;
                ((float*)(pT->data.ptr+pT->step*1))[1]=SY;
                ((float*)(pT->data.ptr+pT->step*0))[2]=SX*(pImgAdd->width-1)*(0.5f-DX);
                ((float*)(pT->data.ptr+pT->step*1))[2]=SY*(pImgAdd->height-1)*(0.5f-DY);
            }   /* Calculate transform matrix. */
#endif


            {   /* Check for direct copy (identity scale/rotation, centered shift): */
                DirectCopy = TRUE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,0)-1) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,0)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,1)) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,1)-1) > 0.00001) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,0,2)-(pImg->width-1)*0.5) > 0.5) DirectCopy = FALSE;
                if( fabs(CV_MAT_ELEM(pT[0],float,1,2)-(pImg->height-1)*0.5) > 0.5) DirectCopy = FALSE;
            }

            /* Extract image and mask: */
            if(p->pImg->nChannels == 1)
            {
                if(DirectCopy)
                {
                    cvCvtColor( p->pImg,pImgAdd,CV_GRAY2BGR);
                }
                else
                {
                    cvGetQuadrangleSubPix( p->pImg, pImgAddG, pT);
                    cvCvtColor( pImgAddG,pImgAdd,CV_GRAY2BGR);
                }
            }

            if(p->pImg->nChannels == 3)
            {
                if(DirectCopy)
                    cvCopyImage(p->pImg, pImgAdd);
                else
                    cvGetQuadrangleSubPix( p->pImg, pImgAdd, pT);
            }

            if(p->pImgMask)
            {
                if(DirectCopy)
                    cvCopyImage(p->pImgMask, pImgMaskAdd);
                else
                    cvGetQuadrangleSubPix( p->pImgMask, pImgMaskAdd, pT);

                cvThreshold(pImgMaskAdd,pImgMaskAdd,128,255,CV_THRESH_BINARY);
            }

            if(pTrans->C != 1 || pTrans->I != 0)
            {   /* Intensity transformation: */
                cvScale(pImgAdd, pImgAdd, pTrans->C,pTrans->I);
            }   /* Intensity transformation. */

            if(pTrans->GN > 0)
            {   /* Add noise: */
                IplImage* pImgN = cvCloneImage(pImgAdd);
                cvRandSetRange( &p->rnd_state, pTrans->GN, 0, -1 );
                cvRand(&p->rnd_state, pImgN);
                cvAdd(pImgN,pImgAdd,pImgAdd);
                cvReleaseImage(&pImgN);
            }   /* Add noise. */

            if(p->Mask)
            {   /* Update only mask: */
                cvOr(pImgMaskAdd, pImgMask, pImgMask);
            }
            else
            {   /* Add image and mask to existing main image and mask: */
                if(p->BG)
                {   /* If image is background: */
                    cvCopy( pImgAdd, pImg, NULL);
                }
                else
                {   /* If image is foreground: */
                    cvCopy( pImgAdd, pImg, pImgMaskAdd);
                    if(p->ObjID>=0)
                        cvOr(pImgMaskAdd, pImgMask, pImgMask);
                }
            }   /* Not mask. */
        }   /*  Not a noise element. */
        else
        {   /* Process noise video: */

            if( p->noise_type == CV_NOISE_GAUSSIAN ||
コード例 #17
0
    virtual void    Process(IplImage* pImg, IplImage* pImgFG = NULL)
    {
        CvSeq*      cnts;
        CvSeq*      cnt;
        int i;

        m_pImg = pImg;
        m_pImgFG = pImgFG;

        if(m_BlobList.GetBlobNum() <= 0 ) return;

        /* Clear bloblist for new blobs: */
        m_BlobListNew.Clear();

        assert(m_pMem);
        cvClearMemStorage(m_pMem);
        assert(pImgFG);


        /* Find CC: */
#if 0
        {   // By contour clustering:
            cvFindBlobsByCCClasters(pImgFG, &m_BlobListNew, m_pMem);
        }
#else
        {   /* One contour - one blob: */
            IplImage* pBin = cvCloneImage(pImgFG);
            assert(pBin);
            cvThreshold(pBin,pBin,128,255,CV_THRESH_BINARY);
            cvFindContours(pBin, m_pMem, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: */
            for(cnt = cnts; cnt; cnt=cnt->h_next)
            {
                CvBlob  NewBlob;

                /* Image moments: */
                double      M00,X,Y,XX,YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;
                if(r.height < 3 || r.width < 3) continue;
                cvMoments( cvGetSubRect(pImgFG,&mat,r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );
                if(M00 <= 0 ) continue;
                X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
                XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
                NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
                m_BlobListNew.AddBlob(&NewBlob);
            }   /* Next contour. */

            cvReleaseImage(&pBin);
        }
#endif
        for(i=m_BlobList.GetBlobNum(); i>0; --i)
        {   /* Predict new blob position: */
            CvBlob*         pB=NULL;
            DefBlobTracker* pBT = (DefBlobTracker*)m_BlobList.GetBlob(i-1);

            /* Update predictor by previous value of blob: */
            pBT->pPredictor->Update(&(pBT->blob));

            /* Predict current position: */
            pB = pBT->pPredictor->Predict();

            if(pB)
            {
                pBT->BlobPredict = pB[0];
            }
            else
            {
                pBT->BlobPredict = pBT->blob;
            }
        }   /* Predict new blob position. */

        if(m_Collision)
        for(i=m_BlobList.GetBlobNum(); i>0; --i)
        {   /* Predict collision. */
            int             Collision = 0;
            int             j;
            DefBlobTracker* pF = (DefBlobTracker*)m_BlobList.GetBlob(i-1);

            for(j=m_BlobList.GetBlobNum(); j>0; --j)
            {   /* Predict collision: */
                CvBlob* pB1;
                CvBlob* pB2;
                DefBlobTracker* pF2 = (DefBlobTracker*)m_BlobList.GetBlob(j-1);
                if(i==j) continue;
                pB1 = &pF->BlobPredict;
                pB2 = &pF2->BlobPredict;

                if( fabs(pB1->x-pB2->x)<0.6*(pB1->w+pB2->w) &&
                    fabs(pB1->y-pB2->y)<0.6*(pB1->h+pB2->h) ) Collision = 1;

                pB1 = &pF->blob;
                pB2 = &pF2->blob;

                if( fabs(pB1->x-pB2->x)<0.6*(pB1->w+pB2->w) &&
                    fabs(pB1->y-pB2->y)<0.6*(pB1->h+pB2->h) ) Collision = 1;

                if(Collision) break;

            }   /* Check next blob to cross current. */

            pF->Collision = Collision;

        }   /* Predict collision. */

        for(i=m_BlobList.GetBlobNum(); i>0; --i)
        {   /* Find a neighbour on current frame
             * for each blob from previous frame:
             */
            CvBlob*         pB = m_BlobList.GetBlob(i-1);
            DefBlobTracker* pBT = (DefBlobTracker*)pB;
            //int             BlobID = CV_BLOB_ID(pB);
            //CvBlob*         pBBest = NULL;
            //double          DistBest = -1;
            //int j;

            if(pBT->pBlobHyp->GetBlobNum()>0)
            {   /* Track all hypotheses: */
                int h,hN = pBT->pBlobHyp->GetBlobNum();
                for(h=0; h<hN; ++h)
                {
                    int         j, jN = m_BlobListNew.GetBlobNum();
                    CvBlob*     pB = pBT->pBlobHyp->GetBlob(h);
                    int         BlobID = CV_BLOB_ID(pB);
                    CvBlob*     pBBest = NULL;
                    double      DistBest = -1;
                    for(j=0; j<jN; j++)
                    {   /* Find best CC: */
                        double  Dist = -1;
                        CvBlob* pBNew = m_BlobListNew.GetBlob(j);
                        double  dx = fabs(CV_BLOB_X(pB)-CV_BLOB_X(pBNew));
                        double  dy = fabs(CV_BLOB_Y(pB)-CV_BLOB_Y(pBNew));
                        if(dx > 2*CV_BLOB_WX(pB) || dy > 2*CV_BLOB_WY(pB)) continue;

                        Dist = sqrt(dx*dx+dy*dy);
                        if(Dist < DistBest || pBBest == NULL)
                        {
                            DistBest = Dist;
                            pBBest = pBNew;
                        }
                    }   /* Find best CC. */

                    if(pBBest)
                    {
                        pB[0] = pBBest[0];
                        CV_BLOB_ID(pB) = BlobID;
                    }
                    else
                    {   /* Delete this hypothesis. */
                        pBT->pBlobHyp->DelBlob(h);
                        h--;
                        hN--;
                    }
                }   /* Next hypothesis. */
            }   /*  Track all hypotheses. */
        }   /*  Track next blob. */

        m_ClearHyp = 1;

    } /* Process. */
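
The collision test above flags two blobs whenever their centers are closer than 0.6 times the sum of their sizes on both axes. Factored out as a standalone predicate, the check might read (a sketch; the original keeps it inline):

#include <math.h>
#include "cvaux.h"   /* CvBlob */

/* Sketch of the proximity predicate used twice above: two blobs are
 * treated as colliding when their centers are closer than 0.6 times
 * the sum of their extents on both axes. */
static int BlobsOverlap(const CvBlob* pB1, const CvBlob* pB2)
{
    return fabs(pB1->x - pB2->x) < 0.6*(pB1->w + pB2->w) &&
           fabs(pB1->y - pB2->y) < 0.6*(pB1->h + pB2->h);
}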
コード例 #18
0
File: ch10_motempl.cpp Project: Halo9Pan/tyro
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
    
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);
    
    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
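
A driver for update_mhi() in the spirit of the original motempl sample might look like the following; it assumes the globals used above (buf, last, mhi, orient, segmask, mask, storage, N, MHI_DURATION) are declared as in that sample, and the camera index is illustrative:

#include "cv.h"
#include "highgui.h"

int main(int argc, char** argv)
{
    IplImage* motion = NULL;
    CvCapture* capture = cvCaptureFromCAM(0);   /* camera index is illustrative */
    if(!capture) return -1;

    cvNamedWindow("Motion", CV_WINDOW_AUTOSIZE);
    for(;;)
    {
        IplImage* image = cvQueryFrame(capture);
        if(!image) break;
        if(!motion)
        {
            motion = cvCreateImage(cvSize(image->width, image->height), 8, 3);
            cvZero(motion);
            motion->origin = image->origin;
        }
        update_mhi(image, motion, 30);          /* diff_threshold = 30 */
        cvShowImage("Motion", motion);
        if(cvWaitKey(10) >= 0) break;
    }
    cvReleaseCapture(&capture);
    cvDestroyWindow("Motion");
    return 0;
}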
コード例 #19
0
File: main.c Project: ntavish/tri
void thresh(IplImage *in, IplImage *out)
{
	cvThreshold(in, out, lowthresh, 0, CV_THRESH_TOZERO);
	cvThreshold(out, out, highthresh, 0, CV_THRESH_TOZERO_INV);
}
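
lowthresh and highthresh are globals defined elsewhere in this project (presumably set from trackbars). Assuming they hold 8-bit values, the two calls keep only pixels inside the [lowthresh, highthresh] band; a hypothetical call site:

/* Hypothetical call site: keep only pixels whose intensity lies in
 * [lowthresh, highthresh]; everything outside the band becomes 0. */
IplImage* gray = cvLoadImage("frame.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* band = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
lowthresh  = 50;     /* globals from this project, normally set via trackbars */
highthresh = 200;
thresh(gray, band);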
コード例 #20
0
File: split.c Project: amnosuperman/LPRS
char* char_ext(IplImage* imagen,basicOCR ocr  )
{
	
	//cvNamedWindow("temp");
	//cvShowImage("temp",imagen);
	//cvWaitKey(0);
	//char* plate=NULL;
	char* no=(char*)malloc(20*sizeof(char));
//------------------------------------------------------------------------------------
    //NUMBER ISOLATION

	//Create needed images
	smooth= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	threshold= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	open_morf= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	
	//Init variables for countours
	contour = 0;
	contourLow = 0;
	//Create storage needed for contour detection
	CvMemStorage* storage = cvCreateMemStorage(0);
	//Smooth image
	cvSmooth(imagen, smooth, CV_GAUSSIAN, 3, 0, 0, 0);
	
	CvScalar avg;
	CvScalar avgStd;
	cvAvgSdv(smooth, &avg, &avgStd, NULL);
	//printf("Avg: %f\nStd: %f\n", avg.val[0], avgStd.val[0]);
	//threshold image
	cvThreshold(smooth, threshold, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY_INV);
	//Morfologic filters
	cvErode(threshold, open_morf, NULL,1); 
	cvDilate(open_morf, open_morf, NULL,1); 
	//Duplicate image for countour
	img_contornos=cvCloneImage(open_morf);
	
	//Search countours in preprocesed image
	cvFindContours( img_contornos, storage, &contour, sizeof(CvContour),
			CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0) );
	//Optimize contours, reduce points
	contourLow=cvApproxPoly(contour, sizeof(CvContour), storage,CV_POLY_APPROX_DP,1,1);
//-----------------------------------------------------------------------------------------------------------
//-----------------------------------------------------------------------------------------------------------
//NUMBER RECOGNITION
	CvRect rect;
	int carea=0,area=0;
	int count=0;
    int match;	
    int w,h;
    w=imagen->width;
    h=imagen->height;
	area=(w)*(h);
//	printf("area : %d, %d %d\n",area,w,h);
	//printf("\n%d\n",area/26);
	
	char name[16];	/* room for "char" plus the digits of count */
	//static int width;
	for( ; contourLow != 0; contourLow = contourLow->h_next )
	{		

		rect=cvBoundingRect(contourLow,0);
		cvSetImageROI(smooth,rect);
		IplImage *temp22=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
		IplImage *temp23=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
		cvCopy(smooth,temp22,NULL);
		carea=rect.width*rect.height;

		/*if((rect.width>rect.height)||(carea>(area/6))||(carea<(area/25)))
		{	
		    cvReleaseImage(&temp22);
		    continue;
		}*/
		
		if((carea<(area/4))&&(carea>(area/25)))
		{
			static int width = temp22->width;
			sprintf(name,"char%d",count);
			cvNamedWindow(name);
			cvMoveWindow(name,840 - count*3*width,10);
			cvThreshold(temp22, temp23, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY);

			cvShowImage(name,temp23);
			cvWaitKey(500);
			match=ocr.classify(temp23,0);
			if(match<=25)
			    no[count]=97+match;
			else
			    no[count]=48+match-26;

			count++;

		}

		cvReleaseImage(&temp22);
		cvReleaseImage(&temp23);		
		cvResetImageROI(smooth);
	}
	cvWaitKey(0);

	no[count]='\0';
	rev(no,count);
	//strcpy(plate,no);
	//printf("\n%d\n",count);
//-------------------------------------------------------------------------------------------------------------------------------------
	//cvReleaseImage(&imagen_color);
	cvReleaseImage(&imagen);
	cvReleaseImage(&open_morf);
	cvReleaseImage(&img_contornos);	
	return no;
	free(no);
}
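
Note that char_ext() releases the input image itself and returns a malloc'd string, so a caller would be expected to free the result; a hypothetical call (image path and OCR setup are illustrative):

/* Hypothetical caller; basicOCR training and the image path are illustrative. */
basicOCR ocr;                              /* assumed: ready-to-use OCR instance */
IplImage* plate = cvLoadImage("plate.png", CV_LOAD_IMAGE_GRAYSCALE);
char* text = char_ext(plate, ocr);         /* char_ext releases `plate` itself */
printf("recognized: %s\n", text);
free(text);                                /* the returned buffer is malloc'd */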
コード例 #21
0
int bw_detect_blobs(Tracker *tracker, struct StaticData *data)
{

    /* circular kernel for dilation */
    IplConvKernel *kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_ELLIPSE);

    /* temporary image to hold thresholded camera frame */
    IplImage *thresh = cvCreateImage(cvGetSize(tracker->frame),IPL_DEPTH_8U,1);

    /* variables for contour finding */
    CvMemStorage *mem = cvCreateMemStorage(0);
    CvSeq *contour;
    CvMoments moments;
    int it;


    /**
     * preprocessing 
    **/
    /* threshold image, reasonably stable since frame is highly underexposed and LEDs are very bright */
    cvThreshold(tracker->frame,thresh,180,255,CV_THRESH_BINARY);

    /* Dilate image to increase size of responses from thresholding, gives more stable result in contour finding*/
    cvDilate(thresh,thresh,kernel,2);


//  cvShowImage("thresh",thresh);


    /**
     * blob extraction (connected component finding)
    **/
    /* find contours in image, should give one contour for each marker */
    int nc = cvFindContours(thresh,mem,&contour,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);

//    printf("nc = %d\n",nc);

    it = 0;
    /* if NUM_OF_MARKERS contours detected, compute mean position of each contour */
    if(nc==data->NUM_OF_MARKERS)
    {
        if(contour)
        {
//            cvDrawContours(thresh,contour,cvScalarAll(255),cvScalarAll(0),100);
            CvSeq *c;
            for(c=contour; c!=NULL; c=c->h_next)
            {
                /* compute moments for each contour */
                cvContourMoments(c,&moments);
                /* make sure the contour encloses some area */
                if(moments.m00>0.0)
                {
                    /* compute center of mass -> mean blob position */
                    /* even though the computed position is stored in the marker structs, it doesn't necessarily correspond to that specific marker */
                    tracker->marker[it]->blob_pos.x = moments.m10/moments.m00;
                    tracker->marker[it]->blob_pos.y = moments.m01/moments.m00;
//                    printf("(%f %f)\n",tracker->marker[it]->blob_pos.x,tracker->marker[it]->blob_pos.y);
                }
                else
                {
                    /* for stable marker recognition all markers must have been detected */
                    tracker->state = OFF_TRACK;
                    break;
                }
                it++;

            }
        }
    }
    else
    {
        tracker->state = OFF_TRACK;
        for(int nm=0; nm<data->NUM_OF_MARKERS; ++nm)
        {
            tracker->marker[nm]->pos_is_set = 0;
            tracker->marker[nm]->blob_pos.x = 0;
            tracker->marker[nm]->blob_pos.y = 0;
        } 
    }

    /* clean up memory */
    cvReleaseStructuringElement(&kernel);
    cvReleaseMemStorage(&mem);
    cvReleaseImage(&thresh);


    return nc;
}
コード例 #22
0
File: pathobject.cpp Project: shening/Robotics_Club
IplImage* PathObject::GetMask(const IplImage * imgIn, IplImage * debugOut) const
{
    if(imgIn == NULL) return NULL;
    IplImage* colormask = NULL;
    IplImage* gvcolormask = NULL;
    //IplImage* shapemask = ShapeMask(imgIn);
    IplImage* segmentationmask = NULL;
    IplImage* histogrammask = NULL;
    //IplImage* edgemask = EdgeMask(imgIn);
    IplImage* templatemask = NULL;
    IplImage* channelmask = NULL;
    IplImage* channelmask2 = NULL;

//        if(colormask == NULL  || shapemask == NULL ||
//           segmentationmask== NULL || histogrammask == NULL ||
//           edgemask == NULL ) return NULL;

    //cvShowImage("colormask", colormask);
    //cvShowImage("channelmask", channelmask);
    IplImage * imgOut = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    IplImage * threshold = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    cvZero(imgOut);
    if(mEnableHist)
    {
        histogrammask = HistogramMask(imgIn);
    }
    if(mEnableColor)
    {
        colormask = ColorMask(imgIn);
    }
    if(mEnableSegment)
    {
        segmentationmask = SegmentationMask(imgIn);
    }
    if(mEnableGVColor)
    {
        gvcolormask = GVColorMask(imgIn);
    }
    int count = 1;
    if(VisionUtils::CombineMasks(imgOut,histogrammask,imgOut,count,mHistWeight))
    {
        count++;
    }
    if(VisionUtils::CombineMasks(imgOut,colormask,imgOut, count, mColorWeight))
    {
        count++;
    }
    if(VisionUtils::CombineMasks(imgOut,segmentationmask,imgOut,count,mSegmentWeight))
    {
        count++;
    }
    if(VisionUtils::CombineMasks(imgOut,gvcolormask,imgOut,count,mGVColorWeight))
    {
        count++;
    }
    for(unsigned int i = 0; i < mMaskOptions.size(); i++)
    {
        if(mMaskOptions[i].mEnabledFlag)
        {
            channelmask = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
            if(VisionUtils::ConvertAndGetSingleColorChannel(imgIn,channelmask,mMaskOptions[i].mCvColorConversionName, mMaskOptions[i].mChannelIndex))
            {
                if(mMaskOptions[i].mInvertFlag)
                {
                    VisionUtils::Invert(channelmask,channelmask);
                }
                if(VisionUtils::CombineMasks(imgOut,channelmask,imgOut,count,mMaskOptions[i].mWeight))
                {
                    count++;
                }
            }
            if(mDebug)
            {
                cvShowImage("channelmask", channelmask);
                cvWaitKey(0);
            }
            cvReleaseImage(&channelmask);
        }
    }
//    if(VisionUtils::CombineMasks(imgOut,channelmask,imgOut,count,2))
//    {
//        count++;
//    }
//    if(VisionUtils::CombineMasks(imgOut,channelmask2,imgOut,count,2))
//    {
//        count++;
//    }


    //VisionUtils::CombineMasks(imgOut,edgemask,imgOut,2,1);
    //VisionUtils::CombineMasks(imgOut,histogrammask,imgOut,2,1);
    cvNormalize(imgOut,imgOut,255,0,CV_MINMAX);
    if(mDebug)
    {
        cvShowImage("combined", imgOut);
    }
    //if(debugOut) cvConvertImage(imgOut,debugOut);
    cvThreshold(imgOut,threshold,mMainThreshold,255,CV_THRESH_BINARY );
    std::list<CvBox2D> blobList;
    blobList = Zebulon::Vision::VisionUtils::GetBlobBoxes(threshold,0,mMinNoiseSizePercent);
    for(std::list<CvBox2D>::iterator it = blobList.begin(); it != blobList.end(); it++)
    {
        CvPoint2D32f boxCorners32[4];
        CvPoint boxCorners[4];
        cvBoxPoints(*it,boxCorners32);
        for(int i = 0; i < 4; i ++)
        {
            boxCorners[i] = cvPointFrom32f(boxCorners32[i]);
        }
        cvFillConvexPoly(threshold,boxCorners,4,cvScalar(0,0,0),4);
        //Zebulon::Vision::VisionUtils::DrawSquare(imgOut,*it);
    }
    if(debugOut) cvSet(debugOut,mNearColor,threshold);
    //shapemask = FindCircles(imgOut);
    IplImage * tempImage  = TemplateMask(imgOut, threshold, mTemplate);
    cvCopy(tempImage,imgOut);
    cvReleaseImage(&tempImage);
    //VisionUtils::CombineMasks(imgOut,templatemask,imgOut);
    if(mDebug)
    {
        cvShowImage("clean", threshold);
        cvShowImage("final", imgOut);
        cvShowImage("color", colormask);
        cvShowImage("hist", histogrammask);
        cvShowImage("segment", segmentationmask);
        cvShowImage("template", templatemask);
        cvShowImage("gvcolor", gvcolormask);
    }
    cvReleaseImage(&colormask);
    cvReleaseImage(&segmentationmask);
    //cvReleaseImage(&segmentationmask2);
    cvReleaseImage(&histogrammask);
    cvReleaseImage(&gvcolormask);
    cvReleaseImage(&channelmask);
    cvReleaseImage(&templatemask);
    cvReleaseImage(&threshold);
    return imgOut;
}
コード例 #23
0
//--------------------------------------------------------------
void testApp::update(){
	
	
	/************ UPDATE BALL ***********************/

	//Update ball position
	ballPositionX += ballVelocityX;
	ballPositionY += ballVelocityY;
	
	if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
		ballVelocityX *= -1;
	}
	
	if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
		ballVelocityY *= -1;
	}
	
	/************ UPDATE KINECT ***********************/

	kinect.update();
	
	// get color pixels
	colorImageRGB			= kinect.getPixels();
	
	// get depth pixels
	depthOrig = kinect.getDepthPixels();
	
	// save original depth, and do some preprocessing
	depthProcessed = depthOrig;
	if(invert) depthProcessed.invert();
	if(mirror) {
		depthOrig.mirror(false, true);
		depthProcessed.mirror(false, true);
		colorImageRGB.mirror(false, true);
	}
	if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR , preBlur*2+1);
	if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);
	if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);
	if(dilateBeforeErode) {
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
	} else {
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
	}
	depthProcessed.flagImageChanged();
	
	// find contours
	depthContours.findContours(depthProcessed,
							   minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxNumBlobs, findHoles, useApproximation);
	
	
	// Clear old attraction points
	attractPts.clear();
	
	// Find centroid point for each blob area and add an attraction force to it
	for (int i=0; i<depthContours.blobs.size(); i++) {
		
		attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
		//printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
	}
	
	// if one blob found, find nearest point in blob area
	static ofxVec3f newPoint;
	if(depthContours.blobs.size() == 1) {
		ofxCvBlob &blob = depthContours.blobs[0];
		
		depthOrig.setROI(blob.boundingRect);
		
		double minValue, maxValue;
		CvPoint minLoc, maxLoc;
		cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);
		
		depthOrig.resetROI();
		
		newPoint.x = maxLoc.x + blob.boundingRect.x;
		newPoint.y = maxLoc.y + blob.boundingRect.y;
		//		newPoint.z = (maxValue + offset) * depthScale;	// read from depth map
		//printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);
		
		// read directly from distance (in cm)
		// this doesn't seem to work, need to look into it
		newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;
		
		// apply kalman filtering
		if(doKalman) {
			newPoint.x = updateKalman(0, newPoint.x);
			newPoint.y = updateKalman(1, newPoint.y);
			newPoint.z = updateKalman(2, newPoint.z);
		}

	} else {
		clearKalman(0);
		clearKalman(1);
		clearKalman(2);
	}
	
	pointHead = (pointHead + 1) % kNumPoints;
	curPoint += (newPoint - curPoint) * lerpSpeed;
	points[pointHead] = curPoint;
	
}
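
updateKalman() and clearKalman() are helpers defined elsewhere in this app. Purely as an illustration of the kind of per-axis smoother they might wrap (an assumption; the real app may use a full Kalman filter), a scalar version could look like:

// Hypothetical per-axis smoother in the spirit of updateKalman()/clearKalman();
// the real app may use a proper Kalman filter with its own gain computation.
static const int kNumAxes = 3;
static float sEstimate[kNumAxes];
static bool  sInitialized[kNumAxes];

float updateKalmanSketch(int axis, float measurement) {
    const float gain = 0.25f;               // assumed fixed blend factor
    if(!sInitialized[axis]) {
        sEstimate[axis] = measurement;      // seed with the first measurement
        sInitialized[axis] = true;
    } else {
        sEstimate[axis] += gain * (measurement - sEstimate[axis]);
    }
    return sEstimate[axis];
}

void clearKalmanSketch(int axis) {
    sInitialized[axis] = false;             // the next update re-seeds the state
}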
コード例 #24
0
int main( int argc, char **argv ){ 
	int key;							//	variable for key input
	CvCapture *capture = NULL;			//	structure for camera capture
	IplImage *frameImage;				//	IplImage for the captured frame
	IplImage *frameImage2;				//	IplImage for the drawing output

	//	create the working images
	IplImage *backgroundImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		//background image
	IplImage *grayImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				//grayscale image
	IplImage *differenceImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		//difference image

	IplImage *hsvImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );				//HSV image
	IplImage *hueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				//hue (H) channel
	IplImage *saturationImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		//saturation (S) channel
	IplImage *valueImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				//value (V) channel
	IplImage *thresholdImage1 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		//region whose value exceeds THRESH_BOTTOM
//	IplImage *thresholdImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		//region whose value is at most THRESH_TOP
//	IplImage *thresholdImage3 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );		//AND of thresholdImage1 and thresholdImage2
	IplImage *lightImage = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1 );				//extracted bright (lit) region
	
	char windowNameCapture[] = "Capture"; 			//window that shows the captured frame
	char windowNameLight[] = "Light";				//window that shows the bright region
	char windowNameCapture2[] = "Capture2"; 		//window that shows the drawing output
	char windowNameThreshold[] = "Threshold";		//window that shows thresholdImage1

	CvMoments moment;
	double m_00;
	double m_10;
	double m_01;
	int gravityX;
	int gravityY;
	unsigned char r,g,b;

	int x, y;
	int m,d,ss;
	uchar r0,g0,b0,r1,g1,b1,r2,g2,b2,r3,g3,b3,s;
	int rr,gg,bb;




	//	initialize the camera
	if ( ( capture = cvCreateCameraCapture( 0 ) ) == NULL ) {
		//	no camera was found
		printf( "Camera not found\n" );
		return -1;
	}

	//	create the windows
	cvNamedWindow( windowNameCapture, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameLight, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameCapture2, CV_WINDOW_AUTOSIZE );
	cvNamedWindow( windowNameThreshold, CV_WINDOW_AUTOSIZE );

	//	grab one frame from the camera to use as the initial background
	frameImage = cvQueryFrame( capture );
	//	convert frameImage to grayscale and use it as the background image
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
	frameImage2 = cvCreateImage( cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3 );
	cvSet (frameImage2, CV_RGB(0,0,0));  //fill with black
	
	rr=0;
	gg=150;
	bb=0;
	m=0;
	d=0;
	ss=0;
	//	main loop
	while( 1 ) {
		frameImage = cvQueryFrame( capture );

		/* an example of manipulating pixel values directly:
		   sample a 2x2 patch around (160,120), then overwrite it */
		x = 160;
		y = 120;

		b0 = frameImage ->imageData[frameImage ->widthStep * y + x * 3];        // B
		g0 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 1];    // G
		r0 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 2];    // R

		frameImage->imageData[frameImage->widthStep * y + x * 3] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		x = 161;
		y = 120;
		b1 = frameImage ->imageData[frameImage ->widthStep * y + x * 3];        // B
		g1 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 1];    // G
		r1 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 2];    // R

		frameImage->imageData[frameImage->widthStep * y + x * 3] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		x = 160;
		y = 121;
		b2 = frameImage ->imageData[frameImage ->widthStep * y + x * 3];        // B
		g2 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 1];    // G
		r2 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 2];    // R

		frameImage->imageData[frameImage->widthStep * y + x * 3] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		x = 161;
		y = 121;
		b3 = frameImage ->imageData[frameImage ->widthStep * y + x * 3];        // B
		g3 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 1];    // G
		r3 = frameImage ->imageData[frameImage ->widthStep * y + x * 3 + 2];    // R

		frameImage->imageData[frameImage->widthStep * y + x * 3] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 1] = 200;
		frameImage->imageData[frameImage->widthStep * y + x * 3 + 2] = 200;

		s=(r0+g0+b0+r1+g1+b1+r2+g2+b2+r3+g3+b3)/12;	// average of the 12 sampled channel values

		if (s<200) {
			
			if(m==0)
				ss=1;
		
			if(ss)
				m=m+1;	
			printf("0 m= %d,d=%02X  \n",m,d);
		}
		else {
//			printf("%d \n",m);			
	
			if(ss){
				d=d+(1<<(m-1));
				m=m+1;
			}
			printf("1 m= %d,d=%02X  \n",m,d);
		
		}

		if(m>8){
		printf("コード d= %c   \n",d);
		
			if(d==97){
				rr=0;
				gg=0;
				bb=150;
			}

			if(d==98){
				rr=150;
				gg=0;
				bb=0;
			}

		m=0;
		d=0;
		ss=0;
		}

		//	store the input frame from capture in frameImage
//		frameImage = cvQueryFrame( capture );
		//	store a grayscale version of frameImage in grayImage
		cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
		//	take the difference between grayImage and the background image
		cvAbsDiff( grayImage, backgroundImage, differenceImage );
		
		//	convert frameImage from BGR to HSV
		cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
		//	split the HSV image into H, S and V planes
		cvSplit( hsvImage, hueImage, saturationImage, valueImage, NULL );
		//	extract the bright-value region
		cvThreshold( valueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
//		cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
//		cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, NULL );
		
		//	AND the background difference with the bright region
		cvAnd( differenceImage, thresholdImage1, lightImage, NULL );
		
		//	compute the centroid of the lit region
		cvMoments( lightImage, &moment, 0 );
		m_00 = cvGetSpatialMoment( &moment, 0, 0 );
		m_10 = cvGetSpatialMoment( &moment, 1, 0 );
		m_01 = cvGetSpatialMoment( &moment, 0, 1 );
		gravityX = m_10 / m_00;
		gravityY = m_01 / m_00;

		if (0<gravityX){
			b = frameImage ->imageData[frameImage ->widthStep * (gravityY) + gravityX * 3 ];     // B
			g = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 1];    // G
			r = frameImage ->imageData[frameImage ->widthStep * gravityY + gravityX * 3 + 2];    // R

//			printf ("x= %d ,y= %d v= %d,s= %d,h= %d  \n" ,gravityX,gravityY,v,s,h);

		//	画像上に円を描画する
			if (g>200){
			cvCircle( frameImage2, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS,
			 CV_RGB( rr, gg, bb ), LINE_THICKNESS, LINE_TYPE, 0 );


			}
		}

		//	display the images
		cvShowImage( windowNameCapture, frameImage );
		cvShowImage( windowNameLight, lightImage );
		cvShowImage( windowNameCapture2, frameImage2 );
		cvShowImage( windowNameThreshold, thresholdImage1 );

		//	handle key input
		key = cvWaitKey( 10 );
		if( key == 'q' ) 
			//	exit the loop when 'q' is pressed
			break;
		else if( key == 'b' ) {
			//	when 'b' is pressed, use the current frame as the new background
			frameImage = cvQueryFrame( capture );
			cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
		}
		else if(key == 'c') {
			//	when 'c' is pressed, save the images
			cvSaveImage( "image/frame.bmp", frameImage );
			cvSaveImage( "image/light.bmp", lightImage );
		}
	}


	//	release the capture
	cvReleaseCapture( &capture );
	//	release the images
	cvReleaseImage( &backgroundImage );
	cvReleaseImage( &grayImage );
	cvReleaseImage( &differenceImage );
	cvReleaseImage( &hsvImage );
	cvReleaseImage( &hueImage );
	cvReleaseImage( &saturationImage );
	cvReleaseImage( &valueImage );
	cvReleaseImage( &thresholdImage1 );
//	cvReleaseImage( &thresholdImage2 );
//	cvReleaseImage( &thresholdImage3 );
	cvReleaseImage( &lightImage );
	//	destroy the windows
	cvDestroyWindow( windowNameCapture );
	cvDestroyWindow( windowNameLight );
	cvDestroyWindow( windowNameThreshold );
	cvDestroyWindow( windowNameCapture2 );

	return 0;
} 
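
The loop above decodes one byte from a blinking light: a dark 2x2 patch (s<200) arms reception, each following frame contributes one bit (LSB first, bright = 1), and after the start bit plus eight data bits d is interpreted as an ASCII code. The same accumulator, condensed into a standalone sketch (function and variable names are mine, not the original's):

#include <stdio.h>

/* Condensed sketch of the decoder above. Call once per captured frame
 * with bright = (s >= 200). */
static int m = 0, d = 0, armed = 0;

void feedSample(int bright)
{
    if(!bright) {                /* dark frame */
        if(m == 0) armed = 1;    /* first dark frame is the start bit */
        if(armed) m++;           /* a dark data frame leaves bit m-1 at 0 */
    } else if(armed) {
        d |= 1 << (m - 1);       /* bright data frame: set bit m-1 */
        m++;
    }
    if(m > 8) {                  /* start bit + 8 data bits received */
        printf("decoded byte: %c\n", d);
        m = 0; d = 0; armed = 0;
    }
}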
コード例 #25
0
File: algo.cpp Project: lblsa/roombara
// initialize the main function
int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        printf("Usage: %s <img.jpg>\n", argv[0]);
        return 1;
    }
    IplImage* picture = cvLoadImage(argv[1]);
    IplImage* greyImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    IplImage* cannyImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    IplImage* drawnImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 3);
    IplImage* contrastImg = cvCreateImage(cvGetSize(picture), IPL_DEPTH_8U, 1);
    
    cvNamedWindow("Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Canny", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Threshold", CV_WINDOW_NORMAL);
    
    cvCvtColor(picture, greyImg, CV_BGR2GRAY);
    cvEqualizeHist(greyImg, greyImg);
    
    CvMemStorage* storage = cvCreateMemStorage(0); 
    
    while (1) {
        
        cvCopy(picture, drawnImg); // picture to be displayed
        
        // Create trackbars
        cvCreateTrackbar( "min_dist", "Image", &min_dist_switch_value, 49, switch_min_dist );
        cvCreateTrackbar( "dp", "Image", &dp_switch_value, 9, switch_dp );
        cvCreateTrackbar( "High", "Canny", &high_switch_value, 499, switch_callback_h );
        cvCreateTrackbar( "Low", "Canny", &low_switch_value, 499, switch_callback_l );
        cvCreateTrackbar( "Threshold", "Threshold", &threshold_switch_value, 199, switch_threshold );
        cvCreateTrackbar( "Max", "Threshold", &threshold_max_switch_value, 500, switch_threshold_max );
        
        int N = 7;
        
        double dp = dpInt+1;
        double min_dist = min_distInt+1;
        double lowThresh = lowInt + 1;
        double highThresh = highInt + 1;
        double threshold = thresholdInt+1;
        double threshold_max = threshold_maxInt+1;
        
        
        cvThreshold(greyImg, contrastImg, threshold, threshold_max, CV_THRESH_TOZERO_INV);
        cvCanny(contrastImg, cannyImg, lowThresh*N*N, highThresh*N*N, N);
        
        //        CvSeq* circles =cvHoughCircles(greyImg, storage, CV_HOUGH_GRADIENT, 35, 25);
        CvSeq* circles =cvHoughCircles(cannyImg, storage, CV_HOUGH_GRADIENT, dp, min_dist); 
        // dp is image resolution
        // min_dist is the minimum distance between circles
        
        for (int i = 0; i < (circles ? circles->total : 0); i++) 
        { 
            float* p = (float*)cvGetSeqElem( circles, i ); 
            cvCircle( drawnImg, cvPoint(cvRound(p[0]),cvRound(p[1])),3, CV_RGB(0,255,0), -1, 8, 0 ); 
        } 
        
        cvShowImage("Image", drawnImg);
        cvShowImage("Canny", cannyImg);
        cvShowImage("Threshold", contrastImg);
        
        char b = 0;
        
        while (b != 'b') {   /* wait until 'b' is pressed before the next pass */
            b = cvWaitKey(1);
        }
    }
}  
コード例 #26
0
IplImage* 
ComputeSaliency(IplImage* image, int thresh, int scale) {
  //given a one-channel image
  unsigned int size = floor(pow(2,scale)); //the size at which to compute the saliency

  IplImage* bw_im = cvCreateImage(cvSize(size,size), 
				  IPL_DEPTH_8U,1);
  cvResize(image, bw_im);
  IplImage* realInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 1);
  
  IplImage* imaginaryInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 1);
  IplImage* complexInput = cvCreateImage( cvGetSize(bw_im), IPL_DEPTH_32F, 2);

  cvScale(bw_im, realInput, 1.0/255.0);
  cvZero(imaginaryInput);
  cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);
  CvMat* dft_A = cvCreateMat( size, size, CV_32FC2 );

  // copy A to dft_A and pad dft_A with zeros
  CvMat tmp;
  cvGetSubRect( dft_A, &tmp, cvRect(0,0, size,size));
  cvCopy( complexInput, &tmp );
  //  cvZero(&tmp);

  cvDFT( dft_A, dft_A, CV_DXT_FORWARD, size );
  cvSplit( dft_A, realInput, imaginaryInput, NULL, NULL );
  // Compute the phase angle 
  IplImage* image_Mag = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
  IplImage* image_Phase = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
    

  //compute the phase of the spectrum
  cvCartToPolar(realInput, imaginaryInput, image_Mag, image_Phase, 0);
  
  IplImage* log_mag = cvCreateImage(cvSize(size, size), IPL_DEPTH_32F, 1);
  cvLog(image_Mag, log_mag);
  //Box filter the magnitude, then take the difference

  IplImage* log_mag_Filt = cvCreateImage(cvSize(size, size), 
					   IPL_DEPTH_32F, 1);
  CvMat* filt = cvCreateMat(3,3, CV_32FC1);
  cvSet(filt,cvScalarAll(1.0/9.0));
  cvFilter2D(log_mag, log_mag_Filt, filt);
  cvReleaseMat(&filt);

  cvSub(log_mag, log_mag_Filt, log_mag);
  
  cvExp(log_mag, image_Mag);
   
  cvPolarToCart(image_Mag, image_Phase, realInput, imaginaryInput,0);

  cvMerge(realInput, imaginaryInput, NULL, NULL, dft_A);
  cvDFT( dft_A, dft_A, CV_DXT_INV_SCALE, size);

  cvAbs(dft_A, dft_A);
  cvMul(dft_A,dft_A, dft_A);
  cvGetSubRect( dft_A, &tmp,  cvRect(0,0, size,size));
  cvCopy( &tmp, complexInput);
  cvSplit(complexInput, realInput, imaginaryInput, NULL,NULL);

  IplImage* result_image = cvCreateImage(cvGetSize(image),IPL_DEPTH_32F, 1);
  double minv, maxv;
  CvPoint minl, maxl;
  cvSmooth(realInput,realInput);
  cvSmooth(realInput,realInput);
  cvMinMaxLoc(realInput,&minv,&maxv,&minl,&maxl);
  printf("Max value %lf, min %lf\n", maxv,minv);
  cvScale(realInput, realInput, 1.0/(maxv-minv), 1.0*(-minv)/(maxv-minv));
  cvResize(realInput, result_image);
  double threshold = thresh/100.0*cvAvg(realInput).val[0];
  cvThreshold(result_image, result_image, threshold, 1.0, CV_THRESH_BINARY);
  IplImage* final_result = cvCreateImage(cvGetSize(image),IPL_DEPTH_8U, 1);
  cvScale(result_image, final_result, 255.0, 0.0);
  cvReleaseImage(&result_image);
  //cvReleaseImage(&realInput);
  cvReleaseImage(&imaginaryInput);
  cvReleaseImage(&complexInput);
  cvReleaseMat(&dft_A);
  cvReleaseImage(&bw_im);

  cvReleaseImage(&image_Mag);
  cvReleaseImage(&image_Phase);

  cvReleaseImage(&log_mag);
  cvReleaseImage(&log_mag_Filt);
  cvReleaseImage(&bw_im);
  return final_result;
  //return bw_im;
}
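
This is the spectral-residual saliency construction: the log-magnitude spectrum minus its 3x3 box-filtered version is recombined with the original phase, and the inverse transform is squared, smoothed, normalized and thresholded. A hypothetical call (file names and parameter values are illustrative):

/* Hypothetical usage: saliency at 64x64 (scale = 6 -> 2^6), binarized at
 * 120% of the mean saliency (thresh is interpreted as a percentage). */
IplImage* gray = cvLoadImage("scene.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* saliency = ComputeSaliency(gray, 120, 6);
cvSaveImage("saliency.png", saliency);
cvReleaseImage(&saliency);
cvReleaseImage(&gray);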
コード例 #27
0
File: motempl.c Project: runaway/OpenCV1.1
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds

    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if (!mhi || mhi->width != size.width || mhi->height != size.height) 
    {
        if (buf == 0) 
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );

        cvZero( mhi ); // clear MHI at the beginning

		// create the other working images at the size of img
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];

    // get difference between frames (current frame minus previous), stored in silh
    cvAbsDiff(buf[idx1], buf[idx2], silh);

    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY); // and threshold it

    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI from the silhouette

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components;
    // segmask is the marked motion-components map (not used further)
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

    // iterate through the motion components;
    // one more iteration (i == -1) corresponds to the whole image (global motion)
    for (i = -1; i < seq->total; i++) 
    {

        if (i < 0) 
        { 
            // 全局运动事件
            // case of the whole image
            // 获取当前帧的范围
            comp_rect = cvRect( 0, 0, size.width, size.height );

			// 设置颜色为白色
			color = CV_RGB(255,255,255);

			// 设置放大倍数为100
            magnitude = 100;
        }
        else 
        { 
            // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            // draw in red with magnitude 30
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate the global motion orientation of the selected region;
        // each component's orientation comes from its extracted mask (via cvCmp)
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);

        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if (count < comp_rect.width*comp_rect.height * 0.05)
        {
            continue;
        }

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
コード例 #28
0
std::list<Garbage*> 
GarbageRecognition::garbageList(IplImage * src, IplImage * model){

    clock_t start = clock();

                    
	std::list<Garbage*> garbageList;
	std::vector<int> centroid(2);
	
	//~ utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	//~ CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	//gets a frame for setting image size
	CvSize srcSize = cvGetSize(src);
	CvRect srcRect = cvRect(0,0,srcSize.width,srcSize.height);
	
	//images for HSV conversion
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* h_plane2;		//assigned below via cvCloneImage
	IplImage* h_planeV;		//assigned below via cvCloneImage
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );

	//Image for filtering
	IplImage * andImage=cvCreateImage(srcSize,8,1);	
	IplImage * andImageV=cvCreateImage(srcSize,8,1);	
	//Image for thresholding
	IplImage * threshImage=cvCreateImage(srcSize,8,1);
	IplImage * threshImageV=cvCreateImage(srcSize,8,1);

	//image for Morphing operations(Dilate-erode)
	IplImage * morphImage=cvCreateImage(srcSize,8,1);
	IplImage * morphImageV=cvCreateImage(srcSize,8,1);
	
	//image for drawing the found contours
	IplImage * contourImage=cvCreateImage(srcSize,8,3);
	
	clock_t  create=clock();
	printf("Time elapsed creating images: %f\n", ((double)create - start) / CLOCKS_PER_SEC);
	int frameCounter=1;
	int cont_index=0;

	//convolution kernel for morph operations
	IplConvKernel* element;

	CvRect boundingRect;

	//contours
	CvSeq * contours;
	CvSeq * contoursCopy;

	// (this snippet processes a single frame; originally the body of the per-frame main loop)

	//convert image to hsv
	cvCvtColor( src, hsv, CV_BGR2HSV );
	clock_t  conv=clock();
	printf("Time elapsed creating images - converting to HSV: %f\n", ((double)conv - create) / CLOCKS_PER_SEC);
	
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );
	h_planeV=cvCloneImage(h_plane);
	cvCopy(h_plane, h_plane2, 0);	// h_plane2 was already allocated above; re-cloning it here would leak
	
	CvScalar vasosL1 = cvScalar (0, 0, 170);
	CvScalar vasosU1 = cvScalar (20, 255, 255);
	CvScalar vasosL = cvScalar (40, 0, 170);
	CvScalar vasosU = cvScalar (255, 255, 255);
	CvScalar colillasL = cvScalar (20, 60, 0);
	CvScalar colillasU = cvScalar (40, 255,255);
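	// Two hue bands are used for the cups ("vasos") because the target hue
	// range wraps around the hue axis; the two masks are OR-ed below. The
	// 20-40 hue band with saturation > 60 selects cigarette butts ("colillas").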

	clock_t  inrange=clock();
	//~ cvInRangeSalt( hsv,vasosL,vasosU, vasosL1, vasosU1,h_plane );
	cvInRangeS( hsv, vasosL1, vasosU1, h_plane );
	cvInRangeS( hsv, vasosL, vasosU, h_plane2 );
	cvOr(h_plane,h_plane2,h_plane);
	printf("inRange  %f\n", ((double)clock() - inrange) / CLOCKS_PER_SEC);

	cvInRangeS( hsv, colillasL,colillasU,h_planeV);
	cvShowImage("inrange vasos",h_plane);
	//~ cvShowImage("inrange colillas",h_planeV);
	//~ for(int x=0;x<srcSize.width;x++){
		//~ for(int y=0;y<srcSize.height;y++){
			//~ uchar * hue=&((uchar*) (h_plane->imageData+h_plane->widthStep*y))[x];
			//~ uchar * hueV=&((uchar*) (h_planeV->imageData+h_plane->widthStep*y))[x];
			//~ uchar * sat=&((uchar*) (s_plane->imageData+s_plane->widthStep*y))[x];
			//~ uchar * val=&((uchar*) (v_plane->imageData+v_plane->widthStep*y))[x];
			
			//~ if((*val>170) && (( (*hue)<20 || (*hue)>40) ))
				//~ *hue=255;
			//~ else
				//~ *hue=0;
			
			//filter for cigar filters
			//~ if((*hueV>20 && *hueV<40 && *sat>60))
				//~ *hueV=255;
			//~ else
				//~ *hueV=0;
		//~ }
	//~ }
	
	clock_t  color=clock();
	printf("Time elapsed converting to HSV - color filter: %f\n", ((double)color - conv) / CLOCKS_PER_SEC);
	
	//--first pipeline
	//apply morphologic operations
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);
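	// cvCreateStructuringElementEx builds a (2k+1)x(2k+1) rectangular kernel
	// anchored at its centre, with k = MORPH_KERNEL_SIZE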

	cvDilate(h_plane,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);
	
	cvThreshold(morphImage,threshImage,100,255,CV_THRESH_BINARY);
	
	clock_t  pipe1=clock();
	printf("Time elapsed color filter - first pipeline: %f\n", ((double)pipe1 - color) / CLOCKS_PER_SEC);
	
	//-- end first pipeline
	//-- start 2nd pipeline-----
	
	cvAnd(h_planeV, v_plane, andImageV);
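	// ANDing the hue mask with the V plane keeps only bright pixels inside
	// the colillas hue band; the threshold below re-binarizes the result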
	//apply morphologic operations

	cvDilate(andImageV,morphImageV,element,MORPH_DILATE_ITER);
	cvErode(morphImageV,morphImageV,element,MORPH_ERODE_ITER);
	
	cvThreshold(morphImageV,threshImageV,100,255,CV_THRESH_BINARY);

	//--end second pipeline--
	clock_t  pipe2=clock();
	printf("Time elapsed first pipeline - second pipeline: %f\n", ((double)pipe2 - pipe1) / CLOCKS_PER_SEC);
	//get all contours
	contours=myFindContours(threshImage);
	contoursCopy=contours;
	
	cont_index=0;
	
	//image to write contours on
	cvCopy(src,contourImage,0);
	
	
	//contours for dishes and glasses
	while(contours!=NULL){

		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct;
		
		if(this->window==NULL)
			ct = new Contours(aContour);
		else
			ct = new Contours(aContour,this->window->window);
		
		//apply filters for vasos

		if( ct->perimeterFilter(100,10000) && 
			ct->areaFilter(1000,100000) &&
			ct->vasoFilter()
			){	
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
						
				
				//contour passed the filters: draw it
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);
								
				centroid=ct->getCentroid();
				
				//build garbage List
				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);

				utils::Garbage * aGarbage = new utils::Garbage(r,centroid,ct);
				//benchmark purposes
				aGarbage->isVisualized=true;
				aGarbage->isPredicted=false;
				aGarbage->isFocused=false;
				

				garbageList.push_back(aGarbage);
			}else if( ct->perimeterFilter(100,10000) && 
			ct->areaFilter(1000,100000) &&
			ct->platoFilter()
			){	
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
							
				//contour passed the filters: draw it
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);
								
				centroid=ct->getCentroid();
				
				//build garbage List
				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);

				utils::Garbage * aGarbage = new utils::Garbage(r,centroid,ct);
				//benchmark purposes
				aGarbage->isVisualized=true;
				aGarbage->isPredicted=false;
				aGarbage->isFocused=false;
				

				garbageList.push_back(aGarbage);
			}

		// NOTE: ct is not deleted here because, when a filter chain passes,
		// it is stored inside aGarbage; contours that fail every filter are
		// leaked by this snippet
		//delete ct;
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}
	clock_t  vasoyplato=clock();
	printf("Time elapsed second pipeline - vasos y platos: %f\n", ((double)vasoyplato - pipe2) / CLOCKS_PER_SEC);

	//2nd pipeline: release the contour storage from the first pass and
	//find contours on the second (colillas) mask
	if(contoursCopy!=NULL)
		cvReleaseMemStorage( &contoursCopy->storage );

	contours=myFindContours(threshImageV);
	contoursCopy=contours;
	cont_index=0;

		
	while(contours!=NULL){

		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct;
		
		if(this->window==NULL)
			ct = new Contours(aContour);
		else
			ct = new Contours(aContour,this->window->window);
		
		//apply filters for colillas

		if( ct->perimeterFilter(10,800) && 
			ct->areaFilter(50,800) &&
			//ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) && 
			ct->boxAreaFilter(BOXFILTER_TOLERANCE) && 	
			//ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN)&&
			1){	// the trailing 1 keeps the condition valid while filters are commented out
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
										
				//contour passed the filters: draw it
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);
								
				centroid=ct->getCentroid();
				
				//build garbage List
				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);

				utils::Garbage * aGarbage = new utils::Garbage(r,centroid,ct);
				//benchmark purposes
				aGarbage->isVisualized=true;
				aGarbage->isPredicted=false;
				aGarbage->isFocused=false;

				garbageList.push_back(aGarbage);
			}

		delete ct;	// NOTE: unlike the first loop, ct is deleted unconditionally
				// here even though aGarbage may still reference it
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}
	clock_t  colillas=clock();
	printf("Time elapsed vasosyplatos - colillas: %f\n", ((double)colillas - vasoyplato) / CLOCKS_PER_SEC);

	//display found contours
    //~ cvShowImage("drawContours",contourImage);
	
	
	//release temp images and data
	if(contoursCopy!=NULL)
		cvReleaseMemStorage( &contoursCopy->storage );
	
	
	cvReleaseStructuringElement(&element);
	cvReleaseImage(&threshImage);
	cvReleaseImage(&threshImageV);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&morphImageV);
	cvReleaseImage(&contourImage);
	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&h_plane2);	// was leaked in the original listing
	cvReleaseImage(&h_planeV);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);
	cvReleaseImage(&andImageV);
	cvReleaseImage(&andImage);
	
	clock_t  total=clock();
	printf("total: %f\n", ((double)total - start) / CLOCKS_PER_SEC);
	
	return garbageList;
}
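
A minimal sketch (an assumption, not part of the original source) of a per-frame driver for garbageList(); it presumes GarbageRecognition is default-constructible, that the caller owns the returned Garbage pointers, and that "model.jpg" stands in for the real model image:

int main()
{
    CvCapture* capture = cvCaptureFromCAM(0);
    IplImage* model = cvLoadImage("model.jpg");   // hypothetical model image
    GarbageRecognition recognizer;
    IplImage* frame;

    while( (frame = cvQueryFrame(capture)) != NULL )
    {
        std::list<Garbage*> found = recognizer.garbageList(frame, model);

        /* ... consume 'found' ... */

        for( std::list<Garbage*>::iterator it = found.begin(); it != found.end(); ++it )
            delete *it;   // assumes the caller owns the detected objects

        if( (cvWaitKey(10) & 255) == 27 ) break;   // Esc quits
    }
    cvReleaseImage(&model);
    cvReleaseCapture(&capture);
    return 0;
}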
コード例 #29
0
void defense::ImgToContours(IplImage* TheInput,int PlaneNumber, int CMode){
    
    
    CvMemStorage* G_storage = cvCreateMemStorage(0);
    CvSeq* contour = 0;
    IplImage * Maska;
    Maska = cvCreateImage( cvGetSize(TheInput),IPL_DEPTH_8U,1); 
    int TotalEllip=0;
    CvMat* TempMat;
    TempMat = cvCreateMat(FFTSize, 1, CV_32FC2);
    vector <ofVec2f> TempVec;
    ArrayOfContours.clear();
    ContourGray.clear();
    for (int k=0;k<PlaneNumber;k++){
        if (CMode==0) {
            cvInRangeS(TheInput,cvScalarAll((k-1)*255/(float)PlaneNumber),cvScalarAll(k*255/(float)PlaneNumber),Maska);
        }
        else {
            cvThreshold(TheInput, Maska, k*255/(float)PlaneNumber, 255, CV_THRESH_BINARY_INV);
        }
        cvSmooth(Maska,Maska,CV_MEDIAN,3);  
        cvFindContours( Maska, G_storage, &contour, sizeof(CvContour),
                        CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
        // (CV_CHAIN_APPROX_TC89_L1 is an alternative approximation mode)
        for( ; contour != 0; contour = contour->h_next )
        {
            
            if ((contour->total > 10 )&&(TotalEllip<MaxEllip)){
                
                CvMat* CountArray;
                CvBox2D Ellipdesc;
                CvPoint2D32f * OtroArray;
                OtroArray = new CvPoint2D32f[contour->total];
                for(int q=0;q<contour->total;q++){
                    CvPoint* p = (CvPoint*)cvGetSeqElem( contour, q );
                    OtroArray[q].x = (float)p->x;
                    OtroArray[q].y=(float)p->y;
                }
                CountArray=cvCreateMatHeader(contour->total,1,CV_32FC2);
                cvInitMatHeader(CountArray,contour->total,1,CV_32FC2,OtroArray);
                cvResize(CountArray, TempMat);	
                cvDFT(TempMat, TempMat, CV_DXT_FORWARD);
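                // Low-pass filtering in the Fourier-descriptor domain: after
                // cvResize the contour has FFTSize complex samples, and the
                // highest frequencies of its spectrum sit around index
                // FFTSize/2. Zeroing a band centred there and inverting the
                // DFT yields a smoothed contour; Slider1 (0..127) sets the
                // width of the removed band.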
                int TheLimit = (int)((FFTSize/2)*Slider1/127.0) -1;
                
                for (int q = 0; q < TheLimit; q++) {
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)-q)))[0] = 0.0;
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)-q)))[1] = 0.0;
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)+q)))[0] = 0.0;
                    ((float*)(TempMat->data.ptr + TempMat->step*((FFTSize/2)+q)))[1] = 0.0;
                }
                cvDFT(TempMat, TempMat, CV_DXT_INV_SCALE);
                TempVec.clear();
                for (int q=0; q < FFTSize;q++){
                    TempVec.push_back(ofVec2f (((const float*)(TempMat->data.ptr + TempMat->step*q))[0],
                    ((const float*)(TempMat->data.ptr + TempMat->step*q))[1]));
                }
                ArrayOfContours.push_back(TempVec);
                ContourGray.push_back(k*255/(float)PlaneNumber);
                
                TotalEllip++;
                delete [] OtroArray;
                cvReleaseMat(&CountArray);  
            } // end of if contour-> total
            
            
        } // end of for contours
        
        
    } // end for planenumber
    
    cvReleaseMat(&TempMat);
    cvReleaseImage(&Maska);
    
    // releasing mem storages (contour is NULL here, since the loop above
    // walked it to the end; releasing G_storage frees all contour data)
    if (contour!=NULL){cvClearSeq(contour);}
    if (G_storage!=NULL){cvReleaseMemStorage(&G_storage);}
  
}
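
A minimal sketch (an assumption, not from the original source) of how the smoothed contours stored in ArrayOfContours might be rendered with openFrameworks; the method name is hypothetical, the drawing calls are standard openFrameworks:

void defense::DrawContoursSketch(){
    for (size_t c = 0; c < ArrayOfContours.size(); c++){
        ofSetColor(ContourGray[c]);            // grey level of the source plane
        ofBeginShape();
        for (size_t q = 0; q < ArrayOfContours[c].size(); q++)
            ofVertex(ArrayOfContours[c][q].x, ArrayOfContours[c][q].y);
        ofEndShape(true);                      // close the contour
    }
}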
コード例 #30
0
/* cvDetectNewBlobs
 * Returns 1 and adds the blob (with its parameters)
 * to pNewBlobList if a new blob is detected:
 */
int CvBlobDetectorCC::DetectNewBlob(IplImage* pImg /* used only with USE_OBJECT_DETECTOR */, IplImage* pFGMask, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList)
{
    int         result = 0;
    CvSize      S = cvSize(pFGMask->width,pFGMask->height);

    /* Shift blob list: */
    {
        int     i;
        if(m_pBlobLists[SEQ_SIZE-1]) delete m_pBlobLists[SEQ_SIZE-1];

        for(i=SEQ_SIZE-1; i>0; --i)  m_pBlobLists[i] = m_pBlobLists[i-1];

        m_pBlobLists[0] = new CvBlobSeq;

    }   /* Shift blob list. */

    /* Create contours and add new blobs to blob list: */
    {   /* Create blobs: */
        CvBlobSeq       Blobs;
        CvMemStorage*   storage = cvCreateMemStorage();

        if(m_Clastering)
        {   /* Glue contours: */
            cvFindBlobsByCCClasters(pFGMask, &Blobs, storage );
        }   /* Glue contours. */
        else
        {   /* One contour - one blob: */
            IplImage*       pIB = cvCloneImage(pFGMask);
            CvSeq*          cnts = NULL;
            CvSeq*          cnt = NULL;
            cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
            cvFindContours(pIB,storage, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: */
            for(cnt = cnts; cnt; cnt=cnt->h_next)
            {
                CvBlob  NewBlob;
                /* Image moments: */
                double      M00,X,Y,XX,YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;
                if(r.height < S.height*m_HMin || r.width < S.width*m_WMin) continue;
                cvMoments( cvGetSubRect(pFGMask,&mat,r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );
                if(M00 <= 0 ) continue;
                X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
                XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
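                /* The blob centre is the centroid (first-order moments over
                 * M00); XX and YY are the per-axis variances, so 4*sqrt(.)
                 * spans +/- 2 standard deviations, used as width/height. */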
                NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
                Blobs.AddBlob(&NewBlob);

            }   /* Next contour. */

            cvReleaseImage(&pIB);

        }   /* One contour - one blob. */

        {   /* Delete small and intersected blobs: */
            int i;
            for(i=Blobs.GetBlobNum(); i>0; i--)
            {
                CvBlob* pB = Blobs.GetBlob(i-1);

                if(pB->h < S.height*m_HMin || pB->w < S.width*m_WMin)
                {
                    Blobs.DelBlob(i-1);
                    continue;
                }

                if(pOldBlobList)
                {
                    int j;
                    for(j=pOldBlobList->GetBlobNum(); j>0; j--)
                    {
                        CvBlob* pBOld = pOldBlobList->GetBlob(j-1);
                        if((fabs(pBOld->x-pB->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pB))) &&
                           (fabs(pBOld->y-pB->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pB))))
                        {   /* Intersection detected, delete blob from list: */
                            Blobs.DelBlob(i-1);
                            break;
                        }
                    }   /* Check next old blob. */
                }   /*  if pOldBlobList. */
            }   /*  Check next blob. */
        }   /*  Delete small and intersected blobs. */

        {   /* Bubble-sort blobs by size: */
            int N = Blobs.GetBlobNum();
            int i,j;
            for(i=1; i<N; ++i)
            {
                for(j=i; j>0; --j)
                {
                    CvBlob  temp;
                    float   AreaP, AreaN;
                    CvBlob* pP = Blobs.GetBlob(j-1);
                    CvBlob* pN = Blobs.GetBlob(j);
                    AreaP = CV_BLOB_WX(pP)*CV_BLOB_WY(pP);
                    AreaN = CV_BLOB_WX(pN)*CV_BLOB_WY(pN);
                    if(AreaN < AreaP)break;
                    temp = pN[0];
                    pN[0] = pP[0];
                    pP[0] = temp;
                }
            }

            /* Copy only first 10 blobs: */
            for(i=0; i<MIN(N,10); ++i)
            {
                m_pBlobLists[0]->AddBlob(Blobs.GetBlob(i));
            }

        }   /* Sort blobs by size. */

        cvReleaseMemStorage(&storage);

    }   /* Create blobs. */

    {   /* Shift each track: */
        int j;
        for(j=0; j<m_TrackNum; ++j)
        {
            int     i;
            DefSeq* pTrack = m_TrackSeq+j;

            for(i=SEQ_SIZE-1; i>0; --i)
                pTrack->pBlobs[i] = pTrack->pBlobs[i-1];

            pTrack->pBlobs[0] = NULL;
            if(pTrack->size == SEQ_SIZE)pTrack->size--;
        }
    }   /* Shift each track. */

    /* Analyze blob list to find best blob trajectory: */
    {
        double      BestError = -1;
        int         BestTrack = -1;
        CvBlobSeq*  pNewBlobs = m_pBlobLists[0];
        int         i;
        int         NewTrackNum = 0;
        for(i=pNewBlobs->GetBlobNum(); i>0; --i)
        {
            CvBlob* pBNew = pNewBlobs->GetBlob(i-1);
            int     j;
            int     AsignedTrack = 0;
            for(j=0; j<m_TrackNum; ++j)
            {
                double  dx,dy;
                DefSeq* pTrack = m_TrackSeq+j;
                CvBlob* pLastBlob = pTrack->size>0?pTrack->pBlobs[1]:NULL; /* slot 1 holds the newest blob after the shift */
                if(pLastBlob == NULL) continue;
                dx = fabs(CV_BLOB_X(pLastBlob)-CV_BLOB_X(pBNew));
                dy = fabs(CV_BLOB_Y(pLastBlob)-CV_BLOB_Y(pBNew));
                if(dx > 2*CV_BLOB_WX(pLastBlob) || dy > 2*CV_BLOB_WY(pLastBlob)) continue;
                AsignedTrack++;

                if(pTrack->pBlobs[0]==NULL)
                {   /* Fill existing track: */
                    pTrack->pBlobs[0] = pBNew;
                    pTrack->size++;
                }
                else if((m_TrackNum+NewTrackNum)<SEQ_NUM)
                {   /* Duplicate existing track: */
                    m_TrackSeq[m_TrackNum+NewTrackNum] = pTrack[0];
                    m_TrackSeq[m_TrackNum+NewTrackNum].pBlobs[0] = pBNew;
                    NewTrackNum++;
                }
            }   /* Next track. */

            if(AsignedTrack==0 && (m_TrackNum+NewTrackNum)<SEQ_NUM )
            {   /* Initialize new track: */
                m_TrackSeq[m_TrackNum+NewTrackNum].size = 1;
                m_TrackSeq[m_TrackNum+NewTrackNum].pBlobs[0] = pBNew;
                NewTrackNum++;
            }
        }   /* Next new blob. */

        m_TrackNum += NewTrackNum;

        /* Check each track: */
        for(i=0; i<m_TrackNum; ++i)
        {
            int     Good = 1;
            DefSeq* pTrack = m_TrackSeq+i;
            CvBlob* pBNew = pTrack->pBlobs[0];
            if(pTrack->size != SEQ_SIZE) continue;
            if(pBNew == NULL ) continue;

            /* Check intersection of the newest blob with existing blobs: */
            if(Good && pOldBlobList)
            {
                int k;
                for(k=pOldBlobList->GetBlobNum(); k>0; --k)
                {
                    CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
                    if((fabs(pBOld->x-pBNew->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pBNew))) &&
                       (fabs(pBOld->y-pBNew->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pBNew))))
                        Good = 0;
                }
            }   /* Check intersection with existing blobs. */

            /* Check distance to image border: */
            if(Good)
            {   /* Check distance to image border: */
                float    dx = MIN(pBNew->x,S.width-pBNew->x)/CV_BLOB_RX(pBNew);
                float    dy = MIN(pBNew->y,S.height-pBNew->y)/CV_BLOB_RY(pBNew);
                if(dx < m_MinDistToBorder || dy < m_MinDistToBorder) Good = 0;
            }   /* Check distance to image border. */

            /* Check uniform motion: */
            if(Good)
            {   /* Check uniform motion: */
                double      Error = 0;
                int         N = pTrack->size;
                CvBlob**    pBL = pTrack->pBlobs;
                float       sum[2] = {0,0};
                float       jsum[2] = {0,0};
                float       a[2],b[2]; /* estimated parameters of moving x(t) = a*t+b*/
                int         j;

                for(j=0; j<N; ++j)
                {
                    float   x = pBL[j]->x;
                    float   y = pBL[j]->y;
                    sum[0] += x;
                    jsum[0] += j*x;
                    sum[1] += y;
                    jsum[1] += j*y;
                }

                a[0] = 6*((1-N)*sum[0]+2*jsum[0])/(N*(N*N-1));
                b[0] = -2*((1-2*N)*sum[0]+3*jsum[0])/(N*(N+1));
                a[1] = 6*((1-N)*sum[1]+2*jsum[1])/(N*(N*N-1));
                b[1] = -2*((1-2*N)*sum[1]+3*jsum[1])/(N*(N+1));
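                /* These are the closed-form least-squares estimates for
                 * x(t) = a*t + b over t = 0..N-1: with S = sum(x) and
                 * J = sum(t*x), and using sum(t) = N(N-1)/2 and
                 * sum(t^2) = N(N-1)(2N-1)/6, the normal equations reduce to
                 *   a = 6*((1-N)*S + 2*J) / (N*(N*N-1))
                 *   b = -2*((1-2*N)*S + 3*J) / (N*(N+1))
                 */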

                for(j=0; j<N; ++j)
                {
                    Error +=
                        pow(a[0]*j+b[0]-pBL[j]->x,2)+
                        pow(a[1]*j+b[1]-pBL[j]->y,2);
                }

                Error = sqrt(Error/N);

                if( Error > S.width*0.01 ||
                    fabs(a[0])>S.width*0.1 ||
                    fabs(a[1])>S.height*0.1)
                    Good = 0;

                /* New best trajectory: */
                if(Good && (BestError == -1 || BestError > Error))
                {   /* New best trajectory: */
                    BestTrack = i;
                    BestError = Error;
                }   /* New best trajectory. */
            }   /*  Check uniform motion. */
        }   /*  Next track. */

        #if 0
        {   /**/
            printf("BlobDetector configurations = %d [",m_TrackNum);
            int i;
            for(i=0; i<SEQ_SIZE; ++i)
            {
                printf("%d,",m_pBlobLists[i]?m_pBlobLists[i]->GetBlobNum():0);
            }
            printf("]\n");
        }
        #endif

        if(BestTrack >= 0)
        {   /* Put new blob to output and delete from blob list: */
            assert(m_TrackSeq[BestTrack].size == SEQ_SIZE);
            assert(m_TrackSeq[BestTrack].pBlobs[0]);
            pNewBlobList->AddBlob(m_TrackSeq[BestTrack].pBlobs[0]);
            m_TrackSeq[BestTrack].pBlobs[0] = NULL;
            m_TrackSeq[BestTrack].size--;
            result = 1;
        }   /* Put new blob to output and mark in blob list to delete. */
    }   /*  Analyze blob list to find best blob trajectory. */

    {   /* Delete dead tracks (no current blob): */
        int i;
        for(i=m_TrackNum-1; i>=0; --i)
        {
            if(m_TrackSeq[i].pBlobs[0]) continue;
            if(m_TrackNum>0)
                m_TrackSeq[i] = m_TrackSeq[--m_TrackNum];
        }   /* Next track. */
    }

#ifdef USE_OBJECT_DETECTOR
    if( m_split_detector && pNewBlobList->GetBlobNum() > 0 )
    {
        int num_new_blobs = pNewBlobList->GetBlobNum();
        int i = 0;

        if( m_roi_seq ) cvClearSeq( m_roi_seq );
        m_debug_blob_seq.Clear();
        for( i = 0; i < num_new_blobs; ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvMat roi_stub;
            CvMat* roi_mat = 0;
            CvMat* scaled_roi_mat = 0;

            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 0 );
            m_debug_blob_seq.AddBlob(&d_b);

            float scale = m_param_roi_scale * m_min_window_size.height / CV_BLOB_WY(b);

            float b_width =   MAX(CV_BLOB_WX(b), m_min_window_size.width / scale)
                            + (m_param_roi_scale - 1.0F) * (m_min_window_size.width / scale)
                            + 2.0F * m_max_border / scale;
            float b_height = CV_BLOB_WY(b) * m_param_roi_scale + 2.0F * m_max_border / scale;

            CvRect roi = cvRectIntersection( cvRect( cvFloor(CV_BLOB_X(b) - 0.5F*b_width),
                                                     cvFloor(CV_BLOB_Y(b) - 0.5F*b_height),
                                                     cvCeil(b_width), cvCeil(b_height) ),
                                             cvRect( 0, 0, pImg->width, pImg->height ) );
            if( roi.width <= 0 || roi.height <= 0 )
                continue;

            if( m_roi_seq ) cvSeqPush( m_roi_seq, &roi );

            roi_mat = cvGetSubRect( pImg, &roi_stub, roi );
            scaled_roi_mat = cvCreateMat( cvCeil(scale*roi.height), cvCeil(scale*roi.width), CV_8UC3 );
            cvResize( roi_mat, scaled_roi_mat );

            m_detected_blob_seq.Clear();
            m_split_detector->Detect( scaled_roi_mat, &m_detected_blob_seq );
            cvReleaseMat( &scaled_roi_mat );

            for( int k = 0; k < m_detected_blob_seq.GetBlobNum(); ++k )
            {
                CvDetectedBlob* b = (CvDetectedBlob*) m_detected_blob_seq.GetBlob(k);

                /* scale and shift each detected blob back to the original image coordinates */
                CV_BLOB_X(b) = CV_BLOB_X(b) / scale + roi.x;
                CV_BLOB_Y(b) = CV_BLOB_Y(b) / scale + roi.y;
                CV_BLOB_WX(b) /= scale;
                CV_BLOB_WY(b) /= scale;

                CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 1,
                        b->response );
                m_debug_blob_seq.AddBlob(&d_b);
            }

            if( m_detected_blob_seq.GetBlobNum() > 1 )
            {
                /*
                 * Split blob.
                 * The original blob is replaced by the first detected blob,
                 * remaining detected blobs are added to the end of the sequence:
                 */
                CvBlob* first_b = m_detected_blob_seq.GetBlob(0);
                CV_BLOB_X(b)  = CV_BLOB_X(first_b);  CV_BLOB_Y(b)  = CV_BLOB_Y(first_b);
                CV_BLOB_WX(b) = CV_BLOB_WX(first_b); CV_BLOB_WY(b) = CV_BLOB_WY(first_b);

                for( int j = 1; j < m_detected_blob_seq.GetBlobNum(); ++j )
                {
                    CvBlob* detected_b = m_detected_blob_seq.GetBlob(j);
                    pNewBlobList->AddBlob(detected_b);
                }
            }
        }   /* For each new blob. */

        for( i = 0; i < pNewBlobList->GetBlobNum(); ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 2 );
            m_debug_blob_seq.AddBlob(&d_b);
        }
    }   // if( m_split_detector )
#endif

    return result;

}   /* cvDetectNewBlob */
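
A minimal sketch (an assumption, not from the original source) of driving this detector through the legacy blob-tracking API; cvCreateBlobDetectorCC() is the factory for CvBlobDetectorCC, and the foreground mask is presumed to come from an FG/BG model elsewhere:

void run_detector_once( IplImage* pImg, IplImage* pFGMask )
{
    static CvBlobDetector* pDetector = cvCreateBlobDetectorCC();
    static CvBlobSeq OldBlobs;   /* blobs already handed to a tracker */
    CvBlobSeq NewBlobs;

    if( pDetector->DetectNewBlob(pImg, pFGMask, &NewBlobs, &OldBlobs) )
    {
        CvBlob* pB = NewBlobs.GetBlob(0);
        printf( "new blob at (%.1f,%.1f), size %.1fx%.1f\n",
                CV_BLOB_X(pB), CV_BLOB_Y(pB), CV_BLOB_WX(pB), CV_BLOB_WY(pB) );
        OldBlobs.AddBlob(pB);    /* hand over for tracking (assumed policy) */
    }
}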