Example No. 1
int main(int argc, char** argv)
{
	pthread_t 	thread_s;
	int			key = 0;

	if (argc == 2) {
		capture = cvCaptureFromFile(argv[1]);
	} else {
		capture = cvCaptureFromCAM(0);
	}

	if (!capture) {
		quit("cvCapture failed", 1);
	}

	img0 = cvQueryFrame(capture);
	img1 = cvCreateImage(cvGetSize(img0), IPL_DEPTH_8U, 1);

	cvZero(img1);
	cvNamedWindow("stream_server", CV_WINDOW_AUTOSIZE);

	/* print the width and height of the frame, needed by the client */
	fprintf(stdout, "width:  %d\nheight: %d\n\n", img0->width, img0->height);
	fprintf(stdout, "Press 'q' to quit.\n\n");

	/* run the streaming server as a separate thread */
	if (pthread_create(&thread_s, NULL, streamServer, NULL)) {
		quit("pthread_create failed.", 1);
	}

	while(key != 'q') {
		/* get a frame from camera */
		img0 = cvQueryFrame(capture);
		if (!img0) break;

		img0->origin = 0;
		cvFlip(img0, img0, -1);

		/**
		 * convert to grayscale 
		 * note that the grayscaled image is the image to be sent to the client 
		 * so we enclose it with pthread_mutex_lock to make it thread safe 
		 */
		pthread_mutex_lock(&mutex);
		cvCvtColor(img0, img1, CV_BGR2GRAY);
		is_data_ready = 1;
		pthread_mutex_unlock(&mutex);

		/* also display the video here on server */
		cvShowImage("stream_server", img0);
		key = cvWaitKey(30);
	}

	/* user has pressed 'q', terminate the streaming server */
	if (pthread_cancel(thread_s)) {
		quit("pthread_cancel failed.", 1);
	}

	/* free memory */
	cvDestroyWindow("stream_server");
	quit(NULL, 0);
}
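
/* The helpers quit() and streamServer(), and the globals (capture, img0,
   img1, mutex, is_data_ready), live elsewhere in the original program.
   A minimal, hypothetical sketch of quit() consistent with how it is
   called above -- not the author's actual implementation: */
/*
void quit(char* msg, int retval)
{
	if (retval != 0 && msg != NULL)
		fprintf(stderr, "%s\n", msg);
	if (capture != NULL)
		cvReleaseCapture(&capture);
	pthread_mutex_destroy(&mutex);
	exit(retval);
}
*/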
Example No. 2
 int main() {
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);
  rectROIbot=cvRect(0,50,80,10);
  CvPoint b_cir_center,t_cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int moddiv=2,seq=0,seqdiv=2;
  int release=0, rmax=100;
  int modfheight, modfwidth;
  unsigned char sendBuf;
  /*
  int serial;
  serial = openSerial("/dev/ttyACM0");
  if (serial == -1)
  serial = openSerial("/dev/ttyACM1");
  if (serial == -1)
  serial = openSerial("/dev/ttyACM2"); 
  if (serial == -1)
  serial = openSerial("/dev/ttyACM3");	
  if (serial == -1)
  serial = openSerial("/dev/ttyACM4");	
  if (serial == -1)
  serial = openSerial("/dev/ttyACM5");
  if (serial == -1)
  serial = openSerial("/dev/ttyACM6"); 
  if (serial == -1)
  serial = openSerial("/dev/ttyACM7");	
  if (serial == -1)
  serial = openSerial("/dev/ttyACM8");	
if( serial == -1 ) {
return -1;
}*/
   //CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   CvCapture* capture = cvCaptureFromCAM( 1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 160x120 capture
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
 // cvSetCaptureProperty(capture, CV_CAP_PROP_FPS,10);
//  cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,5);  
 // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
      
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)

      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(70, 180, 40),cvScalar(100, 230, 90),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
    if (seq==0) {
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,

   //  tempwidth=cvGetSize(dilframebot).width;
   //  tempheight=cvGetSize(dilframebot).height;
   //  printf("dilframe: %d, %d \n",tempwidth,tempheight);
     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;
     cvReleaseImage(&dilframebot); // release the per-pass dilated image so it is not leaked every cycle
    }
//////////////////////////////////////////////////////////////////////////////////////////
    if(seq==seqdiv){
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame,
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;
     cvReleaseImage(&dilframetop); // release the per-pass dilated image so it is not leaked every cycle
    }
//////////////////////////////////////////////////////////////////////////////////////
   if(seq==seqdiv+2) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    }

    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
     }
     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("%d, %d, %d, %d\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     }
		
}
    seq++;
    seq=seq%(seqdiv+4);
     cvShowImage( "mywindow", frame); // show output image
     cvShowImage( "bot", threshframebot);
     cvShowImage( "top", threshframetop);

   //  cvShowImage("croped",cropped);
     //cvShowImage( "mywindow3", dilframeROI);
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;

 }
Example No. 3
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{

    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }
    else
    {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
        /*input_name = argc > 1 ? argv[1] : 0;*/
    }

    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report an error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
    
    // Allocate the memory storage
    storage = cvCreateMemStorage(0);
    
    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name ); 

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );

    // Find if the capture is loaded successfully or not.

    // If loaded successfully, then:
    if( capture )
    {
        // Capture from the camera.
        for(;;)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;
            
            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy. 
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
                cvFlip( frame, frame_copy, 0 );
            
            // Call the function to detect and draw the face
            detect_and_draw( frame_copy );

            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded successfully, then:
    else
    {
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );

        // If the image is loaded successfully, then:
        if( image )
        {
            // Detect and draw the face
            detect_and_draw( image );

            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded successfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
                        detect_and_draw( image );
                        
                        // Wait for the user input, and release the memory
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }
    
    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");

    // return 0 to indicate successful execution of the program
    return 0;
}
Example No. 4
int color_cluster(char *filename)
{
	IplImage* originimg=cvLoadImage(filename);

	int i,j;
	CvMat *samples=cvCreateMat((originimg->width)*(originimg->height),1,CV_32FC3);//create the sample matrix; CV_32FC3 means 32-bit float, 3 channels (color image)
	CvMat *clusters=cvCreateMat((originimg->width)*(originimg->height),1,CV_32SC1);//create the cluster-label matrix; CV_32SC1 means 32-bit integer, 1 channel

	int k=0;
	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			CvScalar s;
			//get the three channel values (BGR) of each pixel
			s.val[0]=(float)cvGet2D(originimg,j,i).val[0];//B
			s.val[1]=(float)cvGet2D(originimg,j,i).val[1];//G
			s.val[2]=(float)cvGet2D(originimg,j,i).val[2];//R
			cvSet2D(samples,k++,0,s);//store each pixel's three channel values into the sample matrix in order
		}
	}

	int nCluster=2;//number of clusters; the cluster count could later be learned automatically
	cvKMeans2(samples,nCluster,clusters,cvTermCriteria(CV_TERMCRIT_ITER,100,1.0));//run k-means: at most 100 iterations, accuracy 1.0

	//create the image that shows the overall clustering result
	IplImage *clusterimg=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	
	//create images for displaying each cluster separately
	IplImage *cluster_img0=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img1=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img2=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);



	k=0;
	int val=0;
	float step=255.0f/(nCluster-1);
	CvScalar bg={223,124,124,0};//background fill value
	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			cvSet2D(cluster_img0,j,i,bg);
			cvSet2D(cluster_img1,j,i,bg);
			cvSet2D(cluster_img2,j,i,bg);
		}
	}

	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			val=(int)clusters->data.i[k++];
			CvScalar s;
			s.val[0]=255-val*step;//map each cluster label to a distinct gray level
			cvSet2D(clusterimg,j,i,s);	//store the clustered image

			//separate each cluster into its own image
			switch(val)
			{
				case 0:
					cvSet2D(cluster_img0,j,i,s);break;//white cluster
				case 1:
					cvSet2D(cluster_img1,j,i,s);break;//gray cluster
				case 2:
					cvSet2D(cluster_img2,j,i,s);break;//black cluster
				default:
					break;
			}	
		
		}
    }


	//cvSaveImage("PicVideo//cluster_img0.png",cluster_img0);
	//cvSaveImage("PicVideo//cluster_img1.png",cluster_img1);
	//cvSaveImage("PicVideo//cluster_img2.png",cluster_img2);


	cvNamedWindow( "原始图像", 1 ); 
	cvNamedWindow( "聚类图像", 1 );

	cvShowImage( "原始图像", originimg  );
	cvShowImage( "聚类图像", clusterimg  );
	cvSaveImage("clusterimg.png",clusterimg);//结果保存
	
	cvWaitKey(0); 

	cvDestroyWindow( "原始图像" );
	cvDestroyWindow( "聚类图像" );

	cvReleaseImage( &originimg ); 
	cvReleaseImage( &clusterimg );
	cvReleaseImage(&cluster_img0);
	cvReleaseImage(&cluster_img1);
	cvReleaseImage(&cluster_img2);

	return 0;

}
Example No. 5
int main( int argc, char** argv )
{
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY        
    CvMemStorage* storage = cvCreateMemStorage(0);
#endif

    cvNamedWindow( "rect & circle", 1 );
        
    for(;;)
    {
        char key;
        int i, count = rand()%100 + 1;
        CvPoint pt0, pt;
        CvBox2D box;
        CvPoint2D32f box_vtx[4];
        CvPoint2D32f center;
        CvPoint icenter;
        float radius;
#if !ARRAY            
        CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                     sizeof(CvPoint), storage );
        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            cvSeqPush( ptseq, &pt0 );
        }
#ifndef _EiC /* unfortunately, here EiC crashes */
        box = cvMinAreaRect2( ptseq, 0 );
#endif
        cvMinEnclosingCircle( ptseq, &center, &radius );
#else
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );

        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            points[i] = pt0;
        }
#ifndef _EiC
        box = cvMinAreaRect2( &pointMat, 0 );
#endif
        cvMinEnclosingCircle( &pointMat, &center, &radius );
#endif
        cvBoxPoints( box, box_vtx );
        cvZero( img );
        for( i = 0; i < count; i++ )
        {
#if !ARRAY                
            pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
            pt0 = points[i];
#endif
            cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
        }

#ifndef _EiC
        pt0.x = cvRound(box_vtx[3].x);
        pt0.y = cvRound(box_vtx[3].y);
        for( i = 0; i < 4; i++ )
        {
            pt.x = cvRound(box_vtx[i].x);
            pt.y = cvRound(box_vtx[i].y);
            cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0);
            pt0 = pt;
        }
#endif
        icenter.x = cvRound(center.x);
        icenter.y = cvRound(center.y);
        cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 );

        cvShowImage( "rect & circle", img );

        key = (char) cvWaitKey(0);
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;

#if !ARRAY
        cvClearMemStorage( storage );
#else
        free( points );
#endif
    }
    
    cvDestroyWindow( "rect & circle" );
    return 0;
}
Example No. 6
int main()
{
//	IplImage* img = cvLoadImage("PictureSource1.jpg");
//	IplImage* temp = 0;
//	/*---------------------------------------------------------------------------------------------------*/
//	cvNamedWindow("Testing");
//	temp = dopyrdownIpl(img);
//	cvShowImage("Testing",temp);
	/*---------------------------------------------------------------------------------------------------*/
	CvMat* mattesting = cvLoadImageM("PictureSource1.jpg");
	CvMat* matpyr = 0;
	matpyr = dopydownMat(mattesting);
	cvNamedWindow("MatPYR", CV_WINDOW_AUTOSIZE);
	cvShowImage("MatPYR", matpyr);
	/*---------------------------------------------------------------------------------------------------*/
#if TEST==RECTANGLE
	CvMat* Mattemp = 0;
	Mattemp = cvCloneMat(matpyr);
	cvRectangle(Mattemp, cvPoint(5, 10), cvPoint(255, 255), cvScalar(255, 255, 255));
	cvNamedWindow("MatClone", CV_WINDOW_AUTOSIZE);
	cvShowImage("MatClone", Mattemp);
	cvWaitKey(0);
	cvDestroyWindow("MatClone");
	cvReleaseMat(&Mattemp);
#endif
	/*---------------------------------------------------------------------------------------------------*/
#if TEST==CANNY
	CvMat* MatCannyIn = cvCloneMat(matpyr);
	CvMat* MatCannyOut = docannyMat(MatCannyIn, 10, 100); // docannyMat allocates and returns the output matrix

	cvNamedWindow("MatCanny");
	cvShowImage("MatCanny",MatCannyOut);
	cvWaitKey(0);
	cvDestroyWindow("MatCanny");
	cvReleaseMat(&MatCannyIn);
	cvReleaseMat(&MatCannyOut);
#endif
	/*---------------------------------------------------------------------------------------------------*/ 
#if TEST==MAT_ELEM
	CvMat* MatELEM = cvCreateMat(matpyr->rows, matpyr->cols, matpyr->type);
	float element_test = CV_MAT_ELEM(*MatELEM, float, matpyr->rows-2, matpyr->cols-3);
	printf("element_test = %f\r\n", element_test);
	cvWaitKey(0);
	cvNamedWindow("MatELEM");
	cvShowImage("MatELEM", MatELEM);
	cvWaitKey(0);
 	cvDestroyWindow("MatELEM");
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
#if TEST==GETMATPointData
	float val[] = { 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0 };
	float* Point = 0;
	float SUMResult = 0;
	CvMat* MatSUM = cvCreateMat(5, 3, CV_32FC1);
	cvInitMatHeader(MatSUM,MatSUM->rows,MatSUM->cols,MatSUM->type,val);
	for (size_t row = 0; row < MatSUM->rows; row++)
	{
		Point = (float*)(MatSUM->data.ptr + row*MatSUM->step);
		for (size_t col = 0; col < MatSUM->cols; col++)
		{
			printf("%f\t",*Point); 
			SUMResult += *(Point);
			Point++;
		}
		printf("\r\n");
	}
	printf("TheSUMResult=%f\r\n",SUMResult);
	printf("MatSUM->rows=%d\r\n", MatSUM->rows);
	printf("MatSUM->cols=%d\r\n ", MatSUM->cols);
	
	cvWaitKey(0);
	cvReleaseMat(&MatSUM);
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
#if TEST==ROITEST
	/*
		Using the ROI to come true 
	*/
	IplImage* MatToImage = cvCreateImage(cvGetSize(matpyr), IPL_DEPTH_8U, 3);
	cvGetImage(matpyr, MatToImage);
	cvNamedWindow("MatToImage");
	cvShowImage("MatToImage", MatToImage);
	cvWaitKey(0);

	cvSetImageROI(MatToImage, cvRect(10, 10, matpyr->rows - 30, matpyr->cols - 60));
	cvAddS(MatToImage, cvScalar(200), MatToImage);
	cvResetImageROI(MatToImage);
	
	cvNamedWindow("ROITEST");
	cvShowImage("ROITEST", MatToImage);
	cvWaitKey(0);
	cvDestroyWindow("ROITEST");
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
#if TEST==WIDTHSTEPTEST
	/*
		Using WidthStep to come true
	*/
	//The picture source is matpyr
	
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
	printf("\tEND\t \r\n");
	cvWaitKey(0);
//	cvReleaseImage(&img); 
//	cvReleaseImage(&temp);
	cvReleaseMat(&matpyr);
	cvReleaseMat(&mattesting);
	cvDestroyAllWindows();
}
Example No. 7
// ----- Main
int main( int argc, char** argv ) {
    if(argc != 2) {
        printf("Usage: ./motion <video name>\n");
        return 0;
    }

    cvNamedWindow( "Example Video", CV_WINDOW_AUTOSIZE );
    g_capture = cvCreateFileCapture( argv[1] );
    int frames = (int) cvGetCaptureProperty(
                     g_capture,
                     CV_CAP_PROP_FRAME_COUNT
                 );
    if( frames != 0 ) {
        cvCreateTrackbar(
            "Position",
            "Example Video",
            &g_slider_position,
            frames,
            onTrackbarSlide
        );
    }

    // Keep track of frames (prev_frame, pyrA and pyrB are assumed to be
    // allocated elsewhere when OPTICAL_FLOW is defined)
    IplImage *prev_frame = NULL;
    IplImage *cur_frame = cvQueryFrame( g_capture ); // read first frame
    CvSize img_sz = cvGetSize( cur_frame );

    IplImage* imgA = cvCreateImage( img_sz, IPL_DEPTH_8U, 1 );
    IplImage* imgB = cvCreateImage( img_sz, IPL_DEPTH_8U, 1 );
    cvConvertImage( cur_frame, imgB ); // convert first frame

    IplImage* imgC = cvCreateImage( img_sz, cur_frame->depth, cur_frame->nChannels );

    while(1) {
        // Scroll to next frame and read
#ifdef OPTICAL_FLOW
        if( pyrB )
            cvCopy( pyrB, pyrA );
        if( imgB )
            cvCopy( imgB, imgA );
        if( cur_frame )
            cvCopy( cur_frame, prev_frame );
        /*
        pyrA = pyrB;
        imgA = imgB;
        prev_frame = cur_frame;
        */
#endif
        cur_frame = cvQueryFrame( g_capture );
        if( !cur_frame )
            break;

#ifdef OPTICAL_FLOW
        // Convert frames to 8U single channel
        cvConvertImage( cur_frame, imgB );
        cvCopyImage( cur_frame, imgC );
        calcOpticalFlowAndMark( imgA, imgB, imgC );
        cvShowImage( "Example Video", imgC );
#else
        cvShowImage( "Example Video", cur_frame );
#endif

        char c = cvWaitKey( 33 ); // ms to wait
        if( c == 27 ) // ESC key
            break;
    }
    cvReleaseCapture( &g_capture );
    cvDestroyWindow( "Example Video" );

    return 0;
}
Example No. 8
int main( int argc, char** argv )
{

    CvCapture *capture;
    int key = 0;

    capture = cvCaptureFromCAM( 0 );
    if( !capture ) return 1;
 
    faceTrack.frame = cvQueryFrame( capture );
    if ( !faceTrack.frame ) return 1;
   
    /* create template image */
	faceTrack.tmplLeftEye = cvCreateImage( cvSize( TPL_WIDTH, TPL_HEIGHT ),
                         faceTrack.frame->depth, faceTrack.frame->nChannels );

	faceTrack.tmplRightEye = cvCreateImage( cvSize( TPL_WIDTH, TPL_HEIGHT ),
                         faceTrack.frame->depth, faceTrack.frame->nChannels );

    faceTrack.tmplLeftMouth = cvCreateImage( cvSize( TPL_WIDTH, TPL_HEIGHT ),
                         faceTrack.frame->depth, faceTrack.frame->nChannels );

	faceTrack.tmplRightMouth = cvCreateImage( cvSize( TPL_WIDTH, TPL_HEIGHT ),
                         faceTrack.frame->depth, faceTrack.frame->nChannels );
	
	faceTrack.tmplNose = cvCreateImage( cvSize( TPL_WIDTH, TPL_HEIGHT ),
                         faceTrack.frame->depth, faceTrack.frame->nChannels );

   
    /* create image for template matching result */
    faceTrack.tmLeftEye = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,  
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );

	faceTrack.tmRightEye = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );

	faceTrack.tmLeftMouth = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,  
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );

	faceTrack.tmRightMouth = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,  
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );

	faceTrack.tmNose = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,  
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );
   
  
    cvNamedWindow( "video", CV_WINDOW_NORMAL | CV_WINDOW_FREERATIO);
    cvSetMouseCallback( "video", mouseHandler, NULL );


   
    while( key != 'q' ) {
        faceTrack.frame = cvQueryFrame( capture );
        if( !faceTrack.frame ) break;
 
       
        /* perform tracking if template is available */
        if( faceTrack.left_eye_tracking ) faceTrack.trackLeftEye();
		if (faceTrack.right_eye_tracking) faceTrack.trackRightEye();
		if (faceTrack.left_mouth_tracking) faceTrack.trackLeftMouth();
		if (faceTrack.right_mouth_tracking) faceTrack.trackRightMouth();
		if (faceTrack.nose_tracking) faceTrack.trackNose();

		/*if the user hits the space bar, capture neutral face data*/
		if (key == 32)
		{  faceTrack.captureNeutralFace(); }

		/*if the user hits the escape key, reset the data*/
		if (key == 27)
		{	faceTrack.resetData(); }

		/*once we have neutral face data, begin capturing facial expressions*/
		if (faceTrack.neutralDataCaptured)
		{	faceTrack.getFaceData(); }

		
		/*if the user hits the c key, display an image based on the current facial expression*/
		if (key == 'c')
		{
			if (faceTrack.smile == true && faceTrack.eyeRaised == true)
			{
				filter.expressionFilter(3.0,100);
			}
			else if (faceTrack.smile == true && faceTrack.eyeFurrow == true)
			{
				filter.expressionFilter(1.0,100);
			}
			else if (faceTrack.frown == true && faceTrack.eyeRaised == true)
			{
				filter.expressionFilter(3.0,20);
			}
			else if (faceTrack.frown == true && faceTrack.eyeFurrow == true)
			{
				filter.expressionFilter(1.0,20);
			}
			else if (faceTrack.smile == true)
			{
				filter.expressionFilter(1.5,100);
			}
			else if (faceTrack.frown == true)
			{
				filter.expressionFilter(1.5,20);
			}
			else if (faceTrack.eyeRaised == true)
			{
				filter.expressionFilter(3.0,50);
			}
			else if (faceTrack.eyeFurrow == true)
			{
				filter.expressionFilter(1.0,50);
			}
			else
				filter.expressionFilter(1.5,150);
		}

        cvShowImage( "video", faceTrack.frame );
        key = cvWaitKey( 1 );
   
	}

    /* free window */
    cvDestroyWindow( "video" );
   
    return 0;
}
Example No. 9
int main()
{             
    CvVideoWriter *writer;
    
    //capture =cvCreateFileCapture("hand4.avi") ;
    //
    capture = cvCaptureFromCAM(0) ;
    cvNamedWindow("Webcam",0);
    //cvNamedWindow("Virtual hand",0);
    writer = cvCreateVideoWriter("palm_output2.avi",CV_FOURCC('M','J','P','G'),15,cvSize(640,480),1);
          
    while(1)
    {
        frame = cvQueryFrame(capture);
        //cvWriteFrame(writer,frame);
        cvCvtColor(frame,frame,CV_BGR2HSV); 
      
      // IMPORTANT!!
      // The following FOR loop generates a binary image which contains ONLY the arm.
      // Please replace the following FOR loop with your own method to generate the ideal output image,
      // because my method almost certainly won't work for you.
      //
      for(int i=0;i<frame->height;i++) //REPLACE ME
      {
        for(int j=0;j<frame->width;j++)
        {
        //if(frame->imageData[i*frame->widthStep+(j*3)+2] < 90 && frame->imageData[i*frame->widthStep+(j*3)+2] > 0 && frame->imageData[i*frame->widthStep+(j*3)+1] < 0) 
          if(frame->imageData[i*frame->widthStep+(j*3)] < 50 || frame->imageData[i*frame->widthStep+(j*3)+2] > 170) 
             { mask->imageData[i*mask->widthStep+j] = 255;}
          else mask->imageData[i*mask->widthStep+j] = 0;
        }
      }
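      /* A hedged alternative sketch: instead of the hand-rolled loop above,
         OpenCV's cvInRangeS can do the per-pixel HSV thresholding in one call
         (frame is already in HSV here; "mask" is assumed to be an 8-bit,
         single-channel image of the same size). The bounds below are
         illustrative skin-tone ranges, not the author's tuned values. */
      // cvInRangeS(frame, cvScalar(0, 30, 60, 0), cvScalar(20, 150, 255, 0), mask);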
        
        cvCvtColor(frame,frame,CV_HSV2BGR);
        cvCopy(frame,frame2);
        //cvErode(mask,mask,0,2);
        
        cvErode(mask,mask,0,1); //ERODE first then DILATE to eliminate the noises.
        cvDilate(mask,mask,0,1);


        cvFindContours( mask, storage, &contours, sizeof(CvContour),
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cvPoint(0,0) );

        // We choose the first contour in the list which is longer than 650.
        // You might want to change the threshold to which works the best for you.
        while(contours && contours->total <= 650)
        {
          contours = contours->h_next;
        }

    cvDrawContours( frame, contours, CV_RGB(100,100,100), CV_RGB(0,255,0), 1, 2, CV_AA, cvPoint(0,0) );

        //
        // Use a rectangle to cover up the contour.
        // Find the center of the rectangle (armcenter). Fingertip() needs it.
        //
        if(contours)
        {
          contourcenter =  cvMinAreaRect2(contours,0);
          armcenter.x = cvRound(contourcenter.center.x);
          armcenter.y = cvRound(contourcenter.center.y);
          //cvCircle(frame,armcenter,10,CV_RGB(255,255,255),-1,8,0);
          getconvexhull();
          fingertip();
          hand();
        }


        cvShowImage("Webcam",frame);
        
        //cvShowImage("Virtual hand",virtualhand);
        
        if(savepic)
        {
           int framenum = (int)cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);
           char name[32];
           sprintf(name,"%dfix4.jpg",framenum); // e.g. "123fix4.jpg"
           //printf("%s\n",name);
           //cvSaveImage(name,frame);
           savepic = false;           
        }
        
        //printf("FPS:%d\n",(int)cvGetCaptureProperty(capture,CV_CAP_PROP_FPS));

       // cvZero(virtualhand);
        
        if(cvWaitKey(1)>=0 || !frame)
        {
              //cvSaveImage("normal.jpg",frame2);
              break;
        }
    }       
    cvReleaseCapture(&capture);
    cvDestroyWindow("Webcam");
    //cvDestroyWindow("Virtual hand");
    cvReleaseVideoWriter(&writer);
}
Example No. 10
int main(int argc, char** argv)
{
	CvMemStorage* mstrg = cvCreateMemStorage();
	CvSeq* contours = 0; 
	CvSeq* contours2 = 0; 

	const char* filename = 0;
	IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method
	IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
	CvCapture* capture = 0;		

	int c, n, nframes = 0;
	int nframesToLearnBG = 300;

	model = cvCreateBGCodeBookModel();

	//Set color thresholds to default values
	model->modMin[0] = 3;
	model->modMin[1] = model->modMin[2] = 3;
	model->modMax[0] = 10;
	model->modMax[1] = model->modMax[2] = 10;
	model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

	bool pause = false;
	bool singlestep = false;

	printf("Capturando de la camara...\n");
	capture = cvCaptureFromCAM( 0 );

	if( !capture )
	{
		printf( "No se pudo inicializar la captura de video\n\n" );
		return -1;
	}

	while (true)
	{

		rawImage = cvQueryFrame( capture );
		++nframes;
		if(!rawImage) 
			break;


		//First time:
		if( nframes == 1 && rawImage )
		{
			borde = cvLoadImage("Borde.png",0);

			// CODEBOOK METHOD ALLOCATION
			yuvImage = cvCloneImage(rawImage);

			int w = yuvImage->width;
			cvSetImageROI(yuvImage, cvRect(w-250,0,250,250));
			IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels);
			cvCopy(yuvImage, tmp, NULL);
			cvResetImageROI(yuvImage);
			cvReleaseImage(&yuvImage); // drop the full-size clone
			yuvImage = tmp;            // keep only the 250x250 crop

			ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
			ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );

			cvSet(ImaskCodeBook,cvScalar(255));

			cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);

			printf (">>Aprendiendo fondo\n");
		}

		// If we've got a rawImage and are good to go:                
		if( rawImage )
		{
			cvFlip(rawImage, NULL, 1);
			int w = rawImage->width;

			cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);

			//Draw the border lines
			cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			//
			if(nframes - 1 < nframesToLearnBG)
			{
				char buffer [33];
				_itoa (nframesToLearnBG - nframes,buffer,10);
				CvFont font2;
				cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
				cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
			}

			cvSetImageROI(rawImage, cvRect(w-250,0,250,250));

			cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
			//YCrCb for the codebook method

			//Build the background model
			if(nframes-1 < nframesToLearnBG  )
				cvBGCodeBookUpdate( model, yuvImage );


			if( nframes-1 == nframesToLearnBG  )
			{
				cvBGCodeBookClearStale( model, model->t/2 );
				printf (">>Fondo aprendido\n");
			}

			//Find foreground objects with the codebook method
			if( nframes-1 >= nframesToLearnBG  )
			{
				cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );

				cvCopy(ImaskCodeBook,ImaskCodeBookCC);	
				cvSegmentFGMask( ImaskCodeBookCC );

				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

				//run detection on the image
				detect(ImaskCodeBookCC,rawImage);

				//template outline for drawing the hand
				if(contours)
					cvDrawContours(rawImage,contours, cvScalar(255, 0, 0, 0), cvScalarAll(128), 1 );


			}
			//Display
			cvResetImageROI(rawImage);
			cvShowImage( "CapturaCam", rawImage );
			cvShowImage( "ForegroundCodeBook",ImaskCodeBook);

		}

		// User input:
		c = cvWaitKey(10)&0xFF;
		c = tolower(c);
		// End processing on ESC, q or Q
		if(c == 27 || c == 'q')
			break;
		//Else check for user input
		switch( c )
		{
		case 'c':
			saveLength = true;
			break;        
		case ' ':
			cvBGCodeBookClearStale( model, 0 );
			nframes = 0;
			break;            
		}

		if (c != 'c')
			saveLength=false;
	}		

	cvReleaseCapture( &capture );
	cvReleaseMemStorage(&mstrg);
	cvDestroyWindow( "CapturaCam" );
	cvDestroyWindow( "ForegroundCodeBook");
	cvDestroyWindow( "CodeBook_ConnectComp");
	return 0;
}
Example No. 11
int main(int arguments_size, char * arguments[]) {
    CvCapture * capture = NULL;
    String_Const capture_base_name = "video_capture";

    if (arguments_size <= 1) {
        // No arguments; let the user know the usage:
        File__format(stderr,
          "Usage: Video_Capture camera_number [capture_base_name]\n");
        return 1;
    } else {
        // Grab the arguments:
        String argument1 = arguments[1];
        if (arguments_size > 2) {
            capture_base_name = arguments[2];
        }

        // Figure out whether to open a video file or a camera:
        if (isdigit(argument1[0])) {
            // Open the camera:
            unsigned int camera_number = String__to_unsigned(argument1);
            int camera_flags = CV_CAP_ANY + (int)camera_number;
            capture = cvCreateCameraCapture(camera_flags);
            if (capture == NULL) {
                File__format(stderr,
                  "Could not open camara %d\n", camera_number);
                return 1;
            }

            // Set the frame size:
            cvSetCaptureProperty(capture,
              CV_CAP_PROP_FRAME_WIDTH, (double)640);
            cvSetCaptureProperty(capture,
              CV_CAP_PROP_FRAME_HEIGHT, (double)480);
        } else {
            // Open a video file:
            capture = cvCreateFileCapture(argument1);
            if (capture == NULL) {
                File__format(stderr,
                  "Could not open video file '%s'\n", argument1);
                return 1;
            }
        }
    }
    // We should not be able to get here without an open *capture*:
    assert(capture != NULL);

    // Create the window to display the video into:
    String_Const window_name = "Video_Capture";
    cvNamedWindow(window_name, CV__window_auto_size);

    // Do a video loop:
    unsigned int capture_number = 0;
    while (1) {
        // Grab a frame from the video source:
        CV_Image frame = cvQueryFrame(capture);
        if (frame == (CV_Image)0) {
            // When *frame* is null, the video source is at end-of-file
            // or disconnected:
            break;
        }
        
        // Show the image:
        cvShowImage(window_name, frame);

        // Deal with key character:
        char character = cvWaitKey(33);
        if (character == '\033') {
            // [Esc] key causes program to escape:
            break;
        } else if (character == ' ') {
            // Write the image out to the file system as a .pnm file:
            String file_name =
              String__format("%s-%02d.pnm", capture_base_name, capture_number);
            CV_Image__pnm_write(frame, file_name);
            File__format(stderr, "Wrote frame out to file '%s'\n", file_name);
            capture_number += 1;
            String__free(file_name);
        }
    }

    // Clean up and leave:
    cvReleaseCapture(&capture);
    cvDestroyWindow(window_name);

    return 0;
}
Example No. 12
/**
 * @brief Lets the user interactively select objects to track
 * @param regions Stores the rectangles that define each object
 * @param MAX_OBJECTS Maximum number of objects allowed to be tracked
 * @param argv The video name, used to read the corresponding default regions file
 * @return The number of objects selected by the user (<= MAX_OBJECTS)
 */
int get_regions(CvRect **regions, int MAX_OBJECTS, char *argv ) {
	
	FILE *fich;
	char name[50], *p1, *p2;
	params p;
	CvRect* r;
	int x1, y1, x2, y2, w, h;
	
	// If the regions have to be read from a file...
	if(MAX_OBJECTS > 0) {
		p.n = MAX_OBJECTS;

		strcpy( name, REGION_IN);
		p1 = strrchr( &argv[1], '/' );
		p2 = strrchr( &argv[1], '.' );
		strncat( name, (++p1), p2-p1 );
		strcat( name, "txt" );
		fich = fopen( name, "r" );
		if( ! fich ) {
			strcpy( name, REGION_IN);
			p1 = strrchr( &argv[1], '/' );
			p2 = strrchr( &argv[1], '.' );
			strncat( name, (++p1), (++p2)-p1 );
			strcat( name, "txt" );
			fich = fopen( name, "r" );
			if( ! fich ) {
				printf("Error leyendo las regiones iniciales\n");
				exit (-1);
			}
		}

		p.loc1 = std::vector<CvPoint>(MAX_OBJECTS);
		p.loc2 = std::vector<CvPoint>(MAX_OBJECTS);
		for( int i = 0; i < MAX_OBJECTS; ++i ) {
			int leidos = fscanf(fich, "%d", &p.loc1[i].x);
			leidos = fscanf(fich, "%d", &p.loc1[i].y);
			leidos = fscanf(fich, "%d", &p.loc2[i].x);
			leidos = fscanf(fich, "%d", &p.loc2[i].y);
		}
		fclose( fich );
	}

	// If they have to be selected with the mouse...
	else {
		fprintf( stderr, "Select the region to track\n" );
		p.n = 0;
		cvNamedWindow( win_name, CV_WINDOW_AUTOSIZE );
		cvShowImage( win_name, first_frame );
		cvSetMouseCallback( win_name, &mouse, &p );
		cvWaitKey( 0 );
		cvDestroyWindow( win_name );
		if( p.n == 0 )
			return 0;
	}
	
	// Allocate space for the list of regions
	r = (CvRect*) malloc( p.n * sizeof( CvRect ) );

	for( int i = 0; i < p.n; ++i ) {
		x1 = MIN( p.loc1[i].x, p.loc2[i].x );
		x2 = MAX( p.loc1[i].x, p.loc2[i].x );
		y1 = MIN( p.loc1[i].y, p.loc2[i].y );
		y2 = MAX( p.loc1[i].y, p.loc2[i].y );
		w = x2 - x1;
		h = y2 - y1;
		
		//printf("%d %d %d %d ", x1, y1, x2, y2);
		// Me aseguro que la altura y anchura es par
		w = ( w % 2 )? w : w+1;
		h = ( h % 2 )? h : h+1;
		r[i] = cvRect( x1, y1, w, h );
	}
	*regions = r;
	return p.n;
}
Example No. 13
File: hiutil.c Project: vabc3/KarCv
void fin(const char *name,IplImage **img)
{
	cvDestroyWindow(name);
	cvReleaseImage(img);
}
Example No. 14
/** void DestroyWindow();
 ***********************************************************
 * Date		: 2012/03/29
 * Author	: Kohei Kojima
 * Note		: Destroy Window
 ***********************************************************/	
void CGraphRendering::DestroyWindow()
{
	cvDestroyWindow( WINDOW_NAME );
}
Example No. 15
int main( int argc, char** argv ) 
{ 
     
    FILE *ptr; 
    ptr=fopen("dataerr.dat","w+"); 
    CvCapture* capture = 0; 
 
    int counter1=0; 
 
    IplImage* image2 = 0; 
 
    float sumX=0; 
    float sumY=0; 
 
    float err_X; 
    float err_Y; 
 
    int XX=0; 
    int YY=0; 
 
    CvPoint ipt1; 
 
    int tempxx1=0; 
    int tempyy1=0; 
    int tempxx2=0; 
    int tempyy2=0; 
 
     
 
    char *imgFmt="pgm"; 
    char str1[100]; 
 
    /* Initialize the error array */ 
    for(int kk=0;kk<400;kk++) 
    { 
        optical_flow_error[0][kk]=0; 
        optical_flow_errorP[0][kk]=0; 
        optical_flow_error[1][kk]=0; 
        optical_flow_errorP[1][kk]=0; 
    } 
 
    //capturing frame from video 
    capture = cvCaptureFromAVI("soccer_track.mpeg"); 
 
    cvNamedWindow( "KLT-Tracking Group_R", 0 ); 
    cvSetMouseCallback( "KLT-Tracking Group_R", on_mouse, 0 ); 
 
    if(add_remove_pt==1) 
    { 
        flagg=1; 
    } 
 
    for(;;) 
    { 
        IplImage* frame = 0; 
         
        int i, k, c; 
 
        //creating file name 
        sprintf(str1,"%d.%s",counter1,imgFmt); 
        err_X=0; 
        err_Y=0; 
        sumX=0; 
        sumY=0; 
 
        //decompress the grabbed image 
 
        frame = cvQueryFrame( capture ); 
 
     
        if( !frame ) 
            break; 
 
        if( !image ) 
        { 
            //The first frame: allocate some memory and do some initialization work 
            // allocate all the image buffers  
            image = cvCreateImage( cvGetSize(frame), 8, 3 ); 
            image->origin = frame->origin; 
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );//make it grey 
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );//the previous frame in grey mode 
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//pyramid frame 
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );//previous pyramid frame 
            /* Define two pointers */ 
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0])); 
            status = (char*)cvAlloc(MAX_COUNT); 
            flags = 0; 
        } 
 
        cvCopy( frame, image, 0 );//frame->image 
 
        //converting the image into gray scale for further computation 
        cvCvtColor( image, grey, CV_BGR2GRAY ); 
         
        if( need_to_init ) 
        { 
             
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 ); 
            double quality = 0.01; 
            double min_distance = 10; 
         
 
            //using good features to track 
            count = MAX_COUNT; 
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count, 
                                   quality, min_distance, 0, 3, 0, 0.04 ); 
            cvFindCornerSubPix( grey, points[1], count, 
            cvSize(win_size,win_size), cvSize(-1,-1), 
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            cvReleaseImage( &eig ); 
            cvReleaseImage( &temp ); 
 
 
 
            add_remove_pt = 0; 
        } 
        else if( count > 0 ) 
        { 
            //using pyramidal optical flow method 
            cvCalcOpticalFlowPyrLK(  
                    prev_grey, grey,  
                    prev_pyramid, pyramid, 
                    points[0], points[1],  
                    count, cvSize(win_size,win_size),  
                    5, status,0, 
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags ); 
             
            flags |= CV_LKFLOW_PYR_A_READY|CV_LKFLOW_PYR_B_READY; 
 
            for( i = k = 0; i < count; i++ ) 
            { 
                /* When need to add or remove the point */ 
                if( add_remove_pt ) 
                { 
 
                    double dx = pt.x - points[1][i].x; 
                    double dy = pt.y - points[1][i].y; 
                    /* Calculate the distance between the point you selected and the tracked point; 
                       if they are closer than 5 pixels, cancel the add/remove action 
                    */ 
                    if( dx*dx + dy*dy <= 25 ) 
                    { 
                        add_remove_pt = 0; 
                        continue; 
                    } 
                } 
                 
                if( !status[i] )//if the point is not tracked correctly, skip it 
                    continue; 
                
                points[1][k++] = points[1][i]; 
 
                ipt1=cvPointFrom32f(points[1][i]);//get a point 
                 
            //calculate the error here; update the error arrays 
                optical_flow_error[0][i]=ipt1.x; 
                optical_flow_error[1][i]=ipt1.y; 
 
 
            } 
            //taking average error for moving the window 
 
            for(int zz=0; zz<count;zz++) 
                { 
                    errX[zz]=optical_flow_error[0][zz]- optical_flow_errorP[0][zz]; 
                    errY[zz]=optical_flow_error[1][zz]- optical_flow_errorP[1][zz]; 
 
                    sumX=sumX+errX[zz]; 
                    sumY=sumY+errY[zz]; 
 
                    optical_flow_errorP[0][zz]=optical_flow_error[0][zz]; 
                    optical_flow_errorP[1][zz]=optical_flow_error[1][zz]; 
 
                } 
 
                fprintf(ptr,"%d\n",count); 
                 
                err_X=sumX/count; 
                err_Y=sumY/count; 
 
            if(flagg==1) 
            { 
                static int startonce=0; 

                if(startonce==0) 
                { 
                    tempxx1=pt.x-20; 
                    tempyy1=pt.y-20; 

                    tempxx2=pt.x+20; 
                    tempyy2=pt.y+20; 

                    XX=pt.x; 
                    YY=pt.y; 

                    startonce=1; 
                } 
                if(err_X<3) 
                { 
                    tempxx1=tempxx1+err_X; 
                    tempyy1=tempyy1+err_Y; 
                    tempxx2=tempxx2+err_X; 
                    tempyy2=tempyy2+err_Y; 

                    XX=XX+err_X; 
                    YY=YY+err_Y; 
                    fprintf(ptr,"%f %f\n",err_X,err_Y); 
                } 

                printf("\n%f",err_X); 

                //moving window 
                cvRectangle(image, cvPoint(tempxx1,tempyy1), cvPoint(tempxx2,tempyy2), cvScalar(255,0,0), 1); 
                cvCircle(image, cvPoint(XX,YY), 3, cvScalar(0,0,255), 1); 
            } 
            count = k; 
        } 
 
 
        if( add_remove_pt && count < MAX_COUNT ) 
        { 
            points[1][count++] = cvPointTo32f(pt); 
            cvFindCornerSubPix( grey, points[1] + count - 1, 1, 
                cvSize(win_size,win_size), cvSize(-1,-1), 
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            add_remove_pt = 0; 
        } 
 
        CV_SWAP( prev_grey, grey, swap_temp ); 
        CV_SWAP( prev_pyramid, pyramid, swap_temp ); 
        CV_SWAP( points[0], points[1], swap_points ); 
        need_to_init = 0; 
 
       
        //writing image file to the file 
        //if(!cvSaveImage(str1,image)) printf("Could not save: %s\n",str1); 
        //storing in a video also 
  
         
        cvShowImage( "KLT-Tracking Group_R", image ); 
 
        c = cvWaitKey(100); 
        if( (char)c == 27 ) 
            break; 
        switch( (char) c ) 
        { 
        case 's': 
            need_to_init = 1; 
            break; 
        } 
 
        counter1++; 
    } 
 
    cvReleaseCapture( &capture ); 
    cvDestroyWindow("KLT-Tracking Group_R"); 
 
    fclose(ptr); 
     
    return 0; 
} 
Example No. 16
int main(int argc, char *argv[])
{
	char k;
	
	// CAMERA
	/*CvCapture *capture = cvCreateCameraCapture(1);
	
	frame = cvQueryFrame(capture);*/
	
	// IMAGE
	const char *imageFile = "./137cm.jpg";
	
	frame = cvLoadImage(imageFile,CV_LOAD_IMAGE_COLOR);
	
	imageFiltree = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageHSV = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageBinaire = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageErodee = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageDilatee = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageDilateeFiltree = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageObjectHSV = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageObjectRGB = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageFinale = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	
	storage = cvCreateMemStorage(0);
	
	for (;;)
	{
		// IMAGE
		frame = cvLoadImage(imageFile,CV_LOAD_IMAGE_COLOR);
		
		/*// CAM
		 frame = cvQueryFrame(capture);*/
		
		//cvSmooth(frame, imageFiltree, CV_BLUR,seuilFiltre,seuilFiltre,0.0,0.0);
		
		if (!frame)
			break;
		
		
		
		callback(0);
		cvCreateTrackbar("seuilFiltre", myWindow,&seuilFiltre, 11, callback);
		cvCreateTrackbar("H min", myWindow, &hmin,180, callback);
		cvCreateTrackbar("H max", myWindow, &hmax,180, callback);
		cvCreateTrackbar("S min", myWindow, &smin,255, callback);
		cvCreateTrackbar("S max", myWindow, &smax,255, callback);
		cvCreateTrackbar("V min", myWindow, &vmin,255, callback);
		cvCreateTrackbar("V max", myWindow, &vmax,255, callback);
		cvCreateTrackbar("nbDilatations", myWindow, &nbDilatations,10, callback);
		cvCreateTrackbar("nbErosions", myWindow, &nbErosions,10, callback);
		
		
		
		int delay = 10;
		k=cvWaitKey(delay);
		
		
		if(k=='s'){
			printf("sauvegarde\n");
			cvSaveImage("CaptureContours.jpg", frame,0);
			cvSaveImage("CapturePointDetecteHSV.jpg", imageObjectHSV,0);
			
		}
		if (k=='q'){
			printf("goodbye Kate\n");
			break;
		}
		cvReleaseImage(&frame); // the image is reloaded each iteration; release it to avoid a leak
	}
	cvDestroyWindow(myWindowObjectHSV);
	cvDestroyWindow(myWindowObjectRGB);
	cvDestroyWindow(myWindow);
	//CAM cvReleaseCapture(&capture);
	cvReleaseImage(&imageFiltree);
	cvReleaseImage(&imageBinaire);
	cvReleaseImage(&imageErodee);
	cvReleaseImage(&imageDilatee);
	cvReleaseImage(&imageDilateeFiltree);
	cvReleaseImage(&imageObjectHSV);
	cvReleaseImage(&imageObjectRGB);
	cvReleaseImage(&imageFinale);
	cvReleaseMemStorage(&storage);
}
Example No. 17
int main(int argc, char** argv )
{
	IplImage *img, *filterMask = NULL;
	CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
	ASDFrameSequencer *sequencer;
	CvFont base_font;
	char caption[2048], s[256], windowName[256];
	long int clockTotal = 0, numFrames = 0;
	std::clock_t clock;

	if (argc < 4)
	{
		help(argv);
		sequencer = new ASDFrameSequencerWebCam();
		(dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);

		if (! sequencer->isOpen())
		{
			std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;
		}
	}
	else
	{
		sequencer = new ASDFrameSequencerImageFile();
		(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // a sequence of images captured from a video source is read from here

	}
	std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");

	cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
	cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);

	// Usage:
	//		c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000

	std::cout << "Press ESC to stop." << std::endl << std::endl;
	while ((img = sequencer->getNextImage()) != 0)
	{
		numFrames++;

		if (filterMask == NULL)
		{
			filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);
		}
		clock = std::clock();
		filter.process(img, filterMask);	// DETECT SKIN
		clockTotal += (std::clock() - clock);

		displayBuffer(img, filterMask, 0, 255, 0);

		sequencer->getFrameCaption(caption);
		std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);
		putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);

		std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);
		putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);

		cvShowImage (windowName, img);
		cvReleaseImage(&img);

		if (cvWaitKey(1) == 27)
			break;
	}

	sequencer->close();
	delete sequencer;

	cvReleaseImage(&filterMask);

	cvDestroyWindow(windowName);

	std::cout << "Finished, " << numFrames << " frames processed." << std::endl;

	return 0;
}
Example No. 18
int main(){
	//connected cars
	int num = 1;
	char* ip[] = {"0.0.0.0"};

	//set up the sockets
	WSADATA wsaData;
    WSAStartup(MAKEWORD(2, 2), &wsaData);
	int port_in = 8888;
	int port_out = 4444;
	sockaddr_in addr;
	addr.sin_family = AF_INET;
	SOCKET *sct_in = new SOCKET[num];
	SOCKET *sct_out = new SOCKET[num];
	for(int i = 0; i<num; i++){
		addr.sin_addr.s_addr = inet_addr(ip[i]);
		addr.sin_port = htons(port_in);
		sct_in[i] = socket(AF_INET, SOCK_STREAM, 0);
		connect(sct_in[i],(SOCKADDR *) &addr, sizeof(addr));
		addr.sin_port = htons(port_out);
		sct_out[i] = socket(AF_INET, SOCK_STREAM, 0);
		connect(sct_out[i], (SOCKADDR *) &addr, sizeof(addr));
	}
	char *buf = new char[12]; // one extra byte for the null terminator
	std::string s = "keepalive";
	int n = 0;

	//get the video stream
	CvCapture *capture = cvCreateFileCapture("http://192.168.77.1:8080/?action=stream.mjpg");
	assert(capture);
	cvNamedWindow("capture", CV_WINDOW_AUTOSIZE);
	IplImage *frame = 0;
	IplImage *image = 0;
	while(true){
		frame = cvQueryFrame(capture);
		image = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
		cvConvertImage(frame, image, CV_YUV2BGR);
		cvConvertImage(image, image, CV_BGR2GRAY);
		cvSmooth(image, image, CV_GAUSSIAN, 3);
		cvThreshold(image, image, 120, 255, CV_THRESH_BINARY);
		cvErode(image, image);
		cvShowImage("capture", image);
		/* check each car for new messages */
		for(int i = 0; i<num; i++){
			send(sct_in[i], s.c_str(), (int)s.size(), 0);
			n = recv(sct_in[i], buf, 11, 0);
			if(n <= 0){
				std::cout << "error" << std::endl;
				continue;
			}
			buf[n] = '\0';
			if(strcmp(buf, "9:keepalive") != 0){	// compare message contents ("9:keepalive" is the idle reply)
				std::cout << buf << std::endl;
				n = getBestPlace(getAvailablePlaces(image));
				std::cout << n << std::endl;
				sendAnswer(sct_out[i], n);
			}
		}
		/* end of message check */
		cvReleaseImage(&image);	// release the per-frame grayscale copy
		if(cvWaitKey(50) == 32){
			break;
		}
	}
	cvReleaseCapture(&capture);
	cvDestroyWindow("capture");

	// close the sockets
	for(int i = 0; i<num; i++){
		closesocket(sct_in[i]);
		closesocket(sct_out[i]);
	}
	delete[] sct_in;	// allocated with new[]
	delete[] sct_out;
	delete[] buf;
	WSACleanup();
	return 0;
}
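The helpers getBestPlace, getAvailablePlaces and sendAnswer are defined elsewhere. A hypothetical sketch of sendAnswer, assuming the same "<length>:<payload>" framing seen in the "9:keepalive" reply:

void sendAnswer(SOCKET s, int place){
	char payload[16];
	int plen = sprintf(payload, "%d", place);	// payload is the chosen place number
	char framed[24];
	int flen = sprintf(framed, "%d:%s", plen, payload);	// prefix the payload with its length
	send(s, framed, flen, 0);
}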
Esempio n. 19
0
/*
Allows the user to view an array of images as a video.  Keyboard controls
are as follows:

<ul>
<li>Space - start and pause playback</li>
<li>Page Down - skip forward 10 frames</li>
<li>Page Up - jump back 10 frames</li>
<li>Right Arrow - skip forward 1 frame</li>
<li>Left Arrow - jump back 1 frame</li>
<li>Backspace - jump back to beginning</li>
<li>Esc - exit playback</li>
<li>Closing the window also exits playback</li>
</ul>

@param imgs an array of images
@param n number of images in \a imgs
@param win_name name of window in which images are displayed
*/
void vid_view( IplImage** imgs, int n, char* win_name )
{
	int k, i = 0, playing = 0;

	cvNamedWindow( win_name, 1 );
	cvShowImage( win_name, imgs[i] );
	while( ! win_closed( win_name ) )
	{
		/* if already playing, advance frame and check for pause */
		if( playing )
		{
			i = MIN( i + 1, n - 1 );
			cvNamedWindow( win_name, 1 );
			cvShowImage( win_name, imgs[i] );
			k = cvWaitKey( 33 );
			if( k == ' '  ||  i == n - 1 )
				playing = 0;
		}

		else
		{
			k = cvWaitKey( 0 );
			switch( k )
			{
				/* space */
			case ' ':
				playing = 1;
				break;

				/* esc */
			case 27:
			case 1048603:
				cvDestroyWindow( win_name );
				break;

				/* backspace */
			case '\b':
				i = 0;
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* left arrow */
			case 65288:
			case 1113937:
				i = MAX( i - 1, 0 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* right arrow */
			case 65363:
			case 1113939:
				i = MIN( i + 1, n - 1 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* page up */
			case 65365:
			case 1113941:
				i = MAX( i - 10, 0 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* page down */
			case 65366:
			case 1113942:
				i = MIN( i + 10, n - 1 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;
			}
		}
	}
}
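A usage sketch (with a hypothetical file layout): load a numbered frame sequence with cvLoadImage and hand it to vid_view for playback with the controls listed above.

	IplImage* imgs[100];
	int n = 0;
	char name[64];
	for( ; n < 100; n++ )
	{
		sprintf( name, "frames/frame_%03d.png", n );	/* hypothetical path pattern */
		imgs[n] = cvLoadImage( name, CV_LOAD_IMAGE_COLOR );
		if( ! imgs[n] )	/* stop at the first missing frame */
			break;
	}
	vid_view( imgs, n, "playback" );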
Esempio n. 20
0
int text_encode(char *ip,char *hide)
{
	IplImage *input;  //cover image
	IplImage *output; // stego image
	
	uchar *data;
	uchar *inputdata;
	
	int msglen=strlen(hide);
	int i=0,j=0,dataPos=20;
	int height,width,nchannels;
	
	input=cvLoadImage(ip,-1);
	
	height=input->height;
	width=input->width;
	nchannels=input->nChannels;

	if((nchannels*height*width) < (msglen+20))
	{
		printf("cover image should be of larger size");
		exit(1);
	}
	

	//now we will make an empty image of same size as original
	output=cvCreateImage(cvSize(width,height),IPL_DEPTH_8U,nchannels);
	data=(uchar *)output->imageData;
	inputdata=(uchar *)input->imageData;
	//store the message length first: its 20 bits go into the LSBs of the first 20 bytes

	for (i=0;i<20;i++)
	{
		data[i]=inputdata[i] & 254;
		data[i]=data[i]+getBit(msglen,i);
	}
	// now store the message bits, starting at dataPos = 20
	for ( i=0;i<msglen;i++)
	{
		for ( j=0;j<8;j++)
		{
			data[dataPos]=inputdata[dataPos] & 254;
			data[dataPos]=data[dataPos]+getBit(hide[i],j);
			dataPos++;
		}
		//printf("%d\t",data[dataPos]);
	}
	//message embedding complete
	//the rest of the output image is copied unchanged from the input
	for(j=dataPos;j<height*width*nchannels;j++)
	{
		data[j]=inputdata[j];
	}
	
	//save and show the stego image
	cvSaveImage("textstego_simple.png",output,0);
	cvNamedWindow("Hidden Message Image", CV_WINDOW_AUTOSIZE);
	cvShowImage("Hidden Message Image", output);

	cvWaitKey(0);

	cvReleaseImage(&input);
	cvReleaseImage(&output);
	cvDestroyWindow("Original Image");
	cvDestroyWindow("Hidden Message Image");
	
	return 0;
}
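A matching decoder is not part of the snippet; a minimal sketch, assuming the same layout (20-bit message length in the LSBs of the first 20 bytes, message bits from dataPos = 20 onwards) and that getBit(v, j) in the encoder returns bit j of its argument:

char *text_decode(const char *stegofile)
{
	IplImage *img = cvLoadImage(stegofile, -1);
	uchar *data = (uchar *)img->imageData;
	int msglen = 0, dataPos = 20;

	// recover the 20-bit message length from the first 20 LSBs
	for (int i = 0; i < 20; i++)
		msglen |= (data[i] & 1) << i;

	// rebuild each character from 8 consecutive LSBs
	char *msg = (char *)malloc(msglen + 1);
	for (int i = 0; i < msglen; i++)
	{
		msg[i] = 0;
		for (int j = 0; j < 8; j++)
			msg[i] |= (data[dataPos++] & 1) << j;
	}
	msg[msglen] = '\0';

	cvReleaseImage(&img);
	return msg;	// caller frees
}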
Esempio n. 21
0
////////////////////////////////////////////////////////////////////////////
// MAIN ////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
///
/// Main program.
///
int main( int argc, char *argv[] )
{

    CvCapture *video;
    CvVideoWriter *writer;
    IplImage *frame;
    char *win_name = "Source Frame";
    char key = 0;
    tcp_connection *con;
    int frame_count = 0, result;

    // register signal handler for SIGINT and SIGPIPE
    // the latter occurs if the server terminates the connection
    /*
    DEBUG_PRINT( DEBUG_NOTE, "registering signal" )
        if ( signal( SIGINT, sig_handler ) == SIG_ERR ||
             signal( SIGPIPE, sig_handler ) == SIG_ERR ) {
        fprintf( stderr, "failed to register signal handler.\n" );
        exit( 1 );
    }
    */
    parse_args( argc, argv );

    // open the capture source
    switch ( source ) {
    case SOURCE_FILE:
        video = cvCreateFileCapture( infilename );
        break;
    case SOURCE_CAM:
        video = cvCreateCameraCapture( camera );
        break;
    default:
        fprintf( stderr, "strange source\n" );
        exit( 1 );
    }

    if ( !video ) {
        fprintf( stderr, "unable to capture source\n" );
        exit( 1 );
    }
    // connect to remote host
    con = tcp_connection_create( host, port );
    if ( !con ) {
        fprintf( stderr, "unable to connect to %s, port %d\n", host,
                 port );
        exit( 1 );
    }
    printf( "Connected to %s, port %d.\n", host, port );

    frame = cvQueryFrame( video );
    if ( !frame ) {
        fprintf( stderr, "unable to capture video.\n" );
        exit( 1 );
    }

    if ( netimage_send_header( con, frame ) <= 0 ) {
        fprintf( stderr, "unable to send header information.\n" );
        exit( 1 );
    }

    printf
        ( "Sending image stream (%d x %d, depth %u, %d channels (size: %d bytes)).\n"
          "Press 'q' to abort.\n", frame->width, frame->height,
          frame->depth, frame->nChannels, frame->imageSize );

    // open capture file, if desired
    if ( output ) {

        strncat( outfilename, ".mpg", MAX_FILENAMELEN - strlen( outfilename ) - 1 );	// append at most the space remaining in the buffer

        writer =
            cvCreateVideoWriter( outfilename, atofourcc( fourcc ), fps,
                                 cvSize( frame->width, frame->height ),
                                 frame->nChannels > 1 ? 1 : 0 );
        if ( writer == NULL ) {
            fprintf( stderr, "unable to create output file '%s'\n",
                     outfilename );
/*             exit (1);*/
        } else
            printf( "Writing to output file '%s'.\n", outfilename );
    }

    // for fps measurement
    struct timeval current, last;
    unsigned int diff;	// time difference in usecs
	
    gettimeofday(&last, NULL);
    
    // get video data and send/store it
    while ( ( frame = cvQueryFrame( video ) ) && ( char ) key != 'q'
            && !quit ) {
        result = tcp_send( con, frame->imageData, frame->imageSize );

        if ( result > 0 ) {
            if ( !quiet ) {
                cvNamedWindow( win_name, 1 );
                cvShowImage( win_name, frame );
                key = cvWaitKey( 5 );
            }
            if ( output )
                cvWriteFrame( writer, frame );
        } else {
            printf( "connection lost.\n" );
            break;
        }
        gettimeofday(&current, NULL);
        diff = (current.tv_sec - last.tv_sec) * 1000000;
        diff += (current.tv_usec - last.tv_usec);
	
        fprintf(stderr, "FPS: %.2f\r", 1000000.0 / diff);
	
        last.tv_sec = current.tv_sec;
        last.tv_usec = current.tv_usec;
    }

    // clean up
    cvDestroyWindow( win_name );
    cvReleaseCapture( &video );
    if ( output )
        cvReleaseVideoWriter( &writer );
    tcp_connection_destroy( con );

    return 0;
}
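atofourcc is not shown; a plausible sketch, assuming it simply packs the four characters of the codec string (e.g. "MJPG") into the integer code cvCreateVideoWriter expects:

static int atofourcc( const char *s )
{
    // equivalent to the CV_FOURCC macro applied to the four characters
    return CV_FOURCC( s[0], s[1], s[2], s[3] );
}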
Esempio n. 22
0
void cv::destroyWindow( const String& winname )
{
    cvDestroyWindow( winname.c_str() );
}
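A usage sketch of the C++ HighGUI wrappers around the same C calls (window name chosen here for illustration):

    cv::Mat canvas = cv::Mat::zeros(240, 320, CV_8UC3);  // dummy image
    cv::namedWindow("demo", CV_WINDOW_AUTOSIZE);
    cv::imshow("demo", canvas);
    cv::waitKey(0);
    cv::destroyWindow("demo");                           // forwards to cvDestroyWindow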
Esempio n. 23
0
int main(int argc, char* argv[]) {
	cvNamedWindow( "capture", 1 );
	IplImage* img_8uc1 = cvLoadImage( "/home/eugene/hand_gest/TestingData/10-2.png", CV_LOAD_IMAGE_GRAYSCALE );
	IplImage* img_edge = cvCreateImage( cvGetSize(img_8uc1), 8, 1 );
	IplImage* img_8uc3 = cvCreateImage( cvGetSize(img_8uc1), 8, 3 );
	cvThreshold( img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY );
	CvMemStorage* storage = cvCreateMemStorage();
	CvSeq* first_contour = NULL;
	int Nc = cvFindContours(
				img_edge,
				storage,
				&first_contour,
				sizeof(CvContour),
				CV_RETR_EXTERNAL // Try all four values and see what happens
				);
	int i;
	int n=0;
	int best=0;
	int current=0;
	int N=8;
	int n2;
	double Scale;
	double Features[N];
	fftw_complex *contour;
	fftw_complex *FD;
	fftw_plan plan_forward;
	printf( "Total Contours Detected: %d\n", Nc );
	//Find the largest contour
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) {
		if(c->total>current){	// remember the contour with the most points
			current=c->total;
			best=n;
		}
		n++;
	}
	fprintf(stderr,"best is %d\n",best);
	n=0;
	CvSeq* bestc=NULL;	// keep the chosen contour in scope after the loop
	for( CvSeq* c=first_contour; c!=NULL; c=c->h_next ) {
		if(n==best){
			bestc=c;
			cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR );
			cvDrawContours(
				img_8uc3,
				c,
				CVX_RED,
				CVX_BLUE,
				0, // Try different values of max_level, and see what happens
				2,
				8
				);
			printf("Contour #%d\n", n );
			cvShowImage("capture", img_8uc3 );
			printf("%d elements:\n", c->total );
			contour= fftw_malloc(sizeof(fftw_complex)*c->total);

			for( int i=0; i<c->total; ++i ) {
				CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i );
			//	printf("(%d,%d)\n", p->x, p->y );
				//assemble complex representation here
				contour[i][0]=p->x;
				contour[i][1]=p->y;
			}
			//do fft
			//cvCvtSeqToArray(c
			cvWaitKey(0);
		}
		n++;
	}
	//try downsampling later
	FD=fftw_malloc(sizeof(fftw_complex)*bestc->total);
	plan_forward=fftw_plan_dft_1d(bestc->total,contour,FD,FFTW_FORWARD,FFTW_ESTIMATE);
	fftw_execute(plan_forward);

	n2=bestc->total/2;
	Scale=(double)sqrt(pow(FD[1][0],2)+pow(FD[1][1],2));
	//reduce to N coefficients
	//normalize
	if(N+2>=bestc->total)
	{
		fprintf(stderr,"Contour Is too small");
		exit(1);
	}
	//positive frequency components
	for(i=0;i<N/2;i++)
	{
		//fftshift stuff
		Features[i]=(double)sqrt(pow(FD[i+2][0],2)+pow(FD[i+2][1],2))/Scale;
	}
	// negative frequency components, taken from the mirrored end of the spectrum
	for(i=0;i<N/2;i++)
	{
		Features[i+N/2]=(double)sqrt(pow(FD[bestc->total-1-i][0],2)+pow(FD[bestc->total-1-i][1],2))/Scale;
	}

	printf("Finished all contours.\n");
	cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR );
	cvShowImage( "capture", img_8uc3 );
	cvWaitKey(0);
	cvDestroyWindow( "capture" );
	cvReleaseImage( &img_8uc1 );
	cvReleaseImage( &img_8uc3 );
	cvReleaseImage( &img_edge );
	return 0;
}
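The Features vector is computed but not compared against anything here. One hedged sketch of how such Fourier-descriptor features are typically matched against stored gesture templates (not part of the original program):

double feature_distance(const double *a, const double *b, int n)
{
	// plain Euclidean distance between two descriptor vectors;
	// the stored template with the smallest distance wins
	double d = 0.0;
	for (int i = 0; i < n; i++)
		d += (a[i] - b[i]) * (a[i] - b[i]);
	return sqrt(d);
}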
Esempio n. 24
0
//
//USAGE:  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
int main(int argc, char** argv)
{
    const char* filename = 0;
    IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
    IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

    int c, n, nframes = 0;
    int nframesToLearnBG = 300;

    model = cvCreateBGCodeBookModel();

    //Set color thresholds to default values
    model->modMin[0] = 3;
    model->modMin[1] = model->modMin[2] = 3;
    model->modMax[0] = 10;
    model->modMax[1] = model->modMax[2] = 10;
    model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

    bool pause = false;
    bool singlestep = false;

    for( n = 1; n < argc; n++ )
    {
        static const char* nframesOpt = "--nframes=";
        if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 )
        {
            if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 )
            {
                help();
                return -1;
            }
        }
        else
            filename = argv[n];
    }

    if( !filename )
    {
        printf("Capture from camera\n");
        capture = cvCaptureFromCAM( 0 );
    }
    else
    {
        printf("Capture from file %s\n",filename);
        capture = cvCreateFileCapture( filename );
    }

    if( !capture )
    {
        printf( "Can not initialize video capturing\n\n" );
        help();
        return -1;
    }

    //MAIN PROCESSING LOOP:
    for(;;)
    {
        if( !pause )
        {
            rawImage = cvQueryFrame( capture );
            ++nframes;
            if(!rawImage)
                break;
        }
        if( singlestep )
            pause = true;

        //First time:
        if( nframes == 1 && rawImage )
        {
            // CODEBOOK METHOD ALLOCATION
            yuvImage = cvCloneImage(rawImage);
            ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
            ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
            cvSet(ImaskCodeBook,cvScalar(255));

            cvNamedWindow( "Raw", 1 );
            cvNamedWindow( "ForegroundCodeBook",1);
            cvNamedWindow( "CodeBook_ConnectComp",1);
        }

        // If we've got an rawImage and are good to go:
        if( rawImage )
        {
            cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
            //This is where we build our background model
            if( !pause && nframes-1 < nframesToLearnBG  )
                cvBGCodeBookUpdate( model, yuvImage );

            if( nframes-1 == nframesToLearnBG  )
                cvBGCodeBookClearStale( model, model->t/2 );

            //Find the foreground if any
            if( nframes-1 >= nframesToLearnBG  )
            {
                // Find foreground by codebook method
                cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );
                // This part just to visualize bounding boxes and centers if desired
                cvCopy(ImaskCodeBook,ImaskCodeBookCC);
                cvSegmentFGMask( ImaskCodeBookCC );
                //bwareaopen_(ImaskCodeBookCC,100);
                cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);
                detect(ImaskCodeBookCC,rawImage);

            }
            //Display
            cvShowImage( "Raw", rawImage );
            cvShowImage( "ForegroundCodeBook",ImaskCodeBook);

        }

        // User input:
        c = cvWaitKey(10)&0xFF;
        c = tolower(c);
        // End processing on ESC, q or Q
        if(c == 27 || c == 'q')
            break;
        //Else check for user input
        switch( c )
        {
        case 'h':
            help();
            break;
        case 'p':
            pause = !pause;
            break;
        case 's':
            singlestep = !singlestep;
            pause = false;
            break;
        case 'r':
            pause = false;
            singlestep = false;
            break;
        case ' ':
            cvBGCodeBookClearStale( model, 0 );
            nframes = 0;
            break;
            //CODEBOOK PARAMS
        case 'y': case '0':
        case 'u': case '1':
        case 'v': case '2':
        case 'a': case '3':
        case 'b':
            ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';
            ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';
            ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';
            printf("CodeBook YUV Channels active: %d, %d, %d\n", ch[0], ch[1], ch[2] );
            break;
        case 'i': //modify max classification bounds (max bound goes higher)
        case 'o': //modify max classification bounds (max bound goes lower)
        case 'k': //modify min classification bounds (min bound goes lower)
        case 'l': //modify min classification bounds (min bound goes higher)
            {
            uchar* ptr = c == 'i' || c == 'o' ? model->modMax : model->modMin;
            for(n=0; n<NCHANNELS; n++)
            {
                if( ch[n] )
                {
                    int v = ptr[n] + (c == 'i' || c == 'l' ? 1 : -1);
                    ptr[n] = CV_CAST_8U(v);
                }
                printf("%d,", ptr[n]);
            }
            printf(" CodeBook %s Side\n", c == 'i' || c == 'o' ? "High" : "Low" );
            }
            break;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow( "Raw" );
    cvDestroyWindow( "ForegroundCodeBook");
    cvDestroyWindow( "CodeBook_ConnectComp");
    return 0;
}
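This sample assumes several globals that the snippet does not declare; a sketch matching the conventions of the OpenCV bgfg_codebook demo it is based on:

#define NCHANNELS 3
CvBGCodeBookModel* model = 0;            // the codebook background model
bool ch[NCHANNELS] = {true, true, true}; // which YUV channels the keyboard commands adjust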
Esempio n. 25
0
LRESULT CALLBACK CamDlgHandler(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
	static bool init;
	CAMOBJ * st;
	int tmp;
	
	st = (CAMOBJ *) actobject;
    if ((st==NULL)||(st->type!=OB_CAM)) return(FALSE);	

	switch( message )
	{
		case WM_INITDIALOG:
				SetDlgItemInt(hDlg,IDC_UPDATERATE,update_rate,0);
				SetDlgItemText(hDlg,IDC_CAMSTATUS,"0");
				SetDlgItemInt(hDlg,IDC_ERROR_DIST,(int)dist_threshold,0);
				SetDlgItemInt(hDlg,IDC_ERROR_ANGLE,(int)angle_threshold,0);
				SetDlgItemInt(hDlg,IDC_THRESHOLD_TIME,threshold_time,0);
				SetDlgItemInt(hDlg,IDC_PT1X,(int)(PT1_xpos*100.0f),1);
				SetDlgItemInt(hDlg,IDC_PT1Y,(int)(PT1_ypos*100.0f),1);
				SetDlgItemInt(hDlg,IDC_PT2X,(int)(PT2_xpos*100.0f),1);
				SetDlgItemInt(hDlg,IDC_PT2Y,(int)(PT2_ypos*100.0f),1);
				
				CheckDlgButton(hDlg,IDC_AUTORESTORE,autorestore);
				CheckDlgButton(hDlg,IDC_SHOWLIVE,st->showlive);
				CheckDlgButton(hDlg,IDC_ENABLE_TRACKING,st->enable_tracking);
				CheckDlgButton(hDlg,IDC_TRACKFACE,st->trackface);

				if (st->mode==0) CheckDlgButton(hDlg,IDC_NOARCHIVE,TRUE); else
					if (st->mode==1) CheckDlgButton(hDlg,IDC_RECORDARCHIVE,TRUE); else
						if (st->mode==2) CheckDlgButton(hDlg,IDC_PLAYARCHIVE,TRUE);
				SetDlgItemText(hDlg,IDC_ARCHIVEFILE,st->videofilename);
				break;		
		case WM_CLOSE:
			    EndDialog(hDlg, LOWORD(wParam));
				return TRUE;
			break;
		case WM_COMMAND:
			switch (LOWORD(wParam)) 
			{
				case IDC_INITCAM:
					lk_init(st);
					break;
				case IDC_EXITCAM:
					lk_exit();
                    break;
				case IDC_RESET:
		            count = 0;
					break;

				case IDC_NOARCHIVE:
					  lk_exit();
					  st->mode=0;
					  lk_init(st);
					break;
				case IDC_RECORDARCHIVE:
					  lk_exit();
					  st->mode=1;

					  if (!strcmp(st->videofilename,"none"))
					  {
						 strcpy(st->videofilename,GLOBAL.resourcepath); 
						 strcat(st->videofilename,"MOVIES\\*.avi");
					  }

					  if (!open_file_dlg(hDlg,st->videofilename, FT_AVI, OPEN_SAVE))
					     strcpy(st->videofilename,"none");
					  SetDlgItemText(hDlg, IDC_ARCHIVEFILE,st->videofilename);
					  lk_init(st);

					break;
				case IDC_PLAYARCHIVE:
					  lk_exit();
					  st->mode=2;
					  if (!strcmp(st->videofilename,"none"))
					  {
						 strcpy(st->videofilename,GLOBAL.resourcepath); 
						 strcat(st->videofilename,"MOVIES\\*.avi");
					  }
					  if (!open_file_dlg(hDlg,st->videofilename, FT_AVI, OPEN_LOAD))
					     strcpy(st->videofilename,"none");
					  SetDlgItemText(hDlg, IDC_ARCHIVEFILE,st->videofilename);
					  lk_init(st);

					break;
				
				case IDC_SHOWLIVE:
					st->showlive=IsDlgButtonChecked(hDlg,IDC_SHOWLIVE);
					if (!st->showlive) cvDestroyWindow("Camera");
					else
					{ 
					  cvNamedWindow( "Camera", 1 );
					  cvSetMouseCallback( "Camera", on_mouse, 0 );
					}
                    break;
				case IDC_UPDATERATE:
					tmp=GetDlgItemInt(hDlg,IDC_UPDATERATE,NULL,0);
					if ((tmp>10)&&(tmp<1000)) update_rate=tmp;
                    break;
				case IDC_AUTORESTORE:
					autorestore=IsDlgButtonChecked(hDlg,IDC_AUTORESTORE);
                    break;
				case IDC_TRACKFACE:
					st->trackface=IsDlgButtonChecked(hDlg,IDC_TRACKFACE);
					if (st->trackface) MAX_COUNT=2; else MAX_COUNT=1;
                    break;
				case IDC_ERROR_DIST:
					dist_threshold=(float)GetDlgItemInt(hDlg, IDC_ERROR_DIST,0,0);
                    break;
				case IDC_ERROR_ANGLE:
					angle_threshold=(float)GetDlgItemInt(hDlg, IDC_ERROR_ANGLE,0,0);
                    break;
				case IDC_THRESHOLD_TIME:
					threshold_time=GetDlgItemInt(hDlg, IDC_THRESHOLD_TIME,0,0);
                    break;
				case IDC_PT1X:
					PT1_xpos=(float) GetDlgItemInt(hDlg, IDC_PT1X,0,1) / 100.0f;
					need_to_init=1;
					break;
				case IDC_PT1Y:
					PT1_ypos=(float)GetDlgItemInt(hDlg, IDC_PT1Y,0,1)/ 100.0f;
					need_to_init=1;
					break;
				case IDC_PT2X:
					PT2_xpos=(float)GetDlgItemInt(hDlg, IDC_PT2X,0,1)/ 100.0f;
					need_to_init=1;
					break;
				case IDC_PT2Y:
					PT2_ypos=(float)GetDlgItemInt(hDlg, IDC_PT2Y,0,1)/ 100.0f;
					need_to_init=1;
					break;
//				case IDC_NIGHTMODE:
//		            night_mode ^= 1;
//					break;
				case IDC_AUTOINIT:
					 need_to_init=1;
					break;
				case IDC_ENABLE_TRACKING:
					st->enable_tracking=IsDlgButtonChecked(hDlg,IDC_ENABLE_TRACKING);
					break;
				case IDC_SETTINGS:
					{
						int ncams = cvcamGetCamerasCount( ); // init cvcam
						if (ncams==0) report("no cam");
						cvcamSetProperty(0, CVCAM_PROP_ENABLE, CVCAMTRUE); //Selects the 1-st found camera
						cvcamInit();
						cvcamGetProperty(0, CVCAM_CAMERAPROPS, NULL);
						//	cvcamGetProperty(0, CVCAM_VIDEOFORMAT, NULL);
						cvcamExit();
					}
					break;
            }
			return TRUE;
			break;
		case WM_SIZE:
		case WM_MOVE:
			update_toolbox_position(hDlg);
			break;
	}
	return FALSE;
}
Esempio n. 26
0
int main(int argc, char* argv[]) {

	// Declarations
	CvHaarClassifierCascade *pCascadeFrontal = 0, *pCascadeProfile = 0;	// the face detectors
	CvMemStorage *pStorage = 0;		// expandable memory buffer
	CvSeq *pFaceRectSeq;			// list of detected faces
	int i;

	/*/ Webcam capture
	CvCapture *capture;
	capture = cvCreateCameraCapture(CV_CAP_ANY);
	pInpImg = cvQueryFrame(capture);*/

	// Initialisation
	//IplImage* pInpImg = (argc > 1) ? cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR) : 0;
	IplImage *pInpImg = cvLoadImage("D:/测试/test6/3.jpg", CV_LOAD_IMAGE_COLOR);
	pStorage = cvCreateMemStorage(0);

	pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_frontalface_default.xml",0,0,0);
	//pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_frontalface_alt_tree.xml",0,0,0);
	pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_profileface.xml",0,0,0);
	//pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_lowerbody.xml",0,0,0);

	// Check that everything was initialised correctly
	if (!pInpImg || !pStorage || !pCascadeFrontal || !pCascadeProfile) {
		printf("Initialisation failed");
		exit(-1);
	}

	// Open a window to display the detected faces
	cvNamedWindow("Fenetre de Haar", CV_WINDOW_NORMAL);
	cvShowImage("Fenetre de Haar", pInpImg);
	cvWaitKey(50);

	// FRONTAL face detection in the image
	pFaceRectSeq = cvHaarDetectObjects
		(pInpImg, pCascadeFrontal, pStorage,
		1.1,	// grow the search scale by 10% each pass [1.0-1.4]: larger is faster but coarser
		3,	// discard groups with fewer than 3 neighbouring detections [0-4]: smaller yields more hits
		/*0,*/ CV_HAAR_DO_CANNY_PRUNING,	// [0]: scan everything; CV_HAAR_DO_CANNY_PRUNING: skip regions unlikely to contain a face
		cvSize(0, 0));	// use the default XML parameters (24, 24) for the smallest search scale

	// Draw a rectangle around each detected face
	for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) {
		CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i);
		CvPoint pt1 = { r->x, r->y };
		CvPoint pt2 = { r->x + r->width, r->y + r->height };
		cvRectangle(pInpImg, pt1, pt2, CV_RGB(0,255,0), 3, 4, 0);
		
		// Blur the detected face region
		cvSetImageROI(pInpImg, *r);
		cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3);
		cvResetImageROI(pInpImg);
	}
	cvShowImage("Fenetre de Haar", pInpImg);
	cvWaitKey(1);

	// PROFILE face detection in the image
	pFaceRectSeq = cvHaarDetectObjects
		(pInpImg, pCascadeProfile, pStorage,
		1.4,	// grow the search scale by 40% each pass: larger is faster but coarser
		3,	// discard groups with fewer than 3 neighbouring detections
		/*0,*/ CV_HAAR_DO_CANNY_PRUNING,	// skip regions unlikely to contain a face
		cvSize(0, 0));	// use the default XML parameters (24, 24) for the smallest search scale

	// Draw a rectangle around each detected face
	for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) {
		CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i);
		CvPoint pt1 = { r->x, r->y };
		CvPoint pt2 = { r->x + r->width, r->y + r->height };
		cvRectangle(pInpImg, pt1, pt2, CV_RGB(255,165,0), 3, 4, 0);
		
		// Blur the detected face region
		cvSetImageROI(pInpImg, *r);
		cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3);
		cvResetImageROI(pInpImg);
	}

	// Show the final detection result
	cvShowImage("Fenetre de Haar", pInpImg);
	cvWaitKey(0);
	cvDestroyWindow("Fenetre de Haar");

	// Release resources
	//cvReleaseCapture(&capture); // webcam capture
	cvReleaseImage(&pInpImg);
	if (pCascadeFrontal) cvReleaseHaarClassifierCascade(&pCascadeFrontal);
	if (pCascadeProfile) cvReleaseHaarClassifierCascade(&pCascadeProfile);
	if (pStorage) cvReleaseMemStorage(&pStorage);

	return 0;
}
Esempio n. 27
0
	~Filter()
	{
		cvDestroyWindow("Image window");
	}
void OpenCVDisplay::releaseImage() {
	if(image)
		cvReleaseImage(&image);
	cvDestroyWindow("image");
}
int main( int argc, char** argv )
{
    	IplImage* img;
		IplImage* mask2; // IplImage pointer declarations

		CvRect r;
 
        // load the input image
        img = cvLoadImage( "tennis_input.jpg",1);

		cvNamedWindow( "originalImage", 1 );	// create a window
        cvShowImage( "originalImage", img );	// show the image
   
		CvSize size = cvGetSize(img);
		IplImage *hsv = cvCreateImage(size, IPL_DEPTH_8U, 3);
		cvCvtColor(img, hsv, CV_BGR2HSV);  


		CvMat *mask = cvCreateMat(size.height, size.width, CV_8UC1);
		mask2	 = cvCreateImage(size, IPL_DEPTH_8U,3);
		
		cvInRangeS(hsv, cvScalar(0.11*256, 0.60*256, 0.20*256, 0), cvScalar(0.14*256, 1.00*256, 1.00*256, 0), mask);
		cvReleaseImage(&hsv);

		IplConvKernel *se21 = cvCreateStructuringElementEx(21, 21, 10, 10, CV_SHAPE_RECT, NULL);
		IplConvKernel *se11 = cvCreateStructuringElementEx(11, 11, 5,  5,  CV_SHAPE_RECT, NULL);

		cvNamedWindow( "Mask before", 1 );//´´½¨´°¿Ú
        cvShowImage( "Mask before", mask );//ÏÔʾͼÏñ
		//cvClose(mask, mask, se21);  See completed example for cvClose definition
		cvDilate(mask, mask, se21,1);
		cvErode(mask, mask, se21,1);	
		//cvOpen(mask, mask, se11);  See completed example for cvOpen  definition
		cvErode(mask, mask, se11,1);
		cvDilate(mask, mask, se11,1);	
		cvReleaseStructuringElement(&se21);
		cvReleaseStructuringElement(&se11);

		/* Copy mask into a grayscale image */
		IplImage *hough_in = cvCreateImage(size, 8, 1);
		cvCopy(mask, hough_in, NULL);
		cvCvtColor( mask, mask2, CV_GRAY2BGR );
		
        cvSmooth(hough_in, hough_in, CV_GAUSSIAN, 15, 15, 0, 0);

		/* Find candidate regions via contours (the Hough-circle call below is left commented out) */
		CvMemStorage *storage = cvCreateMemStorage(0);
		//CvSeq *circles = cvHoughCircles(hough_in, storage, CV_HOUGH_GRADIENT, 4, size.height/10, 50, 150, 0, 0);
		CvSeq *contour;
		cvFindContours( hough_in, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );


		for(;contour;contour=contour->h_next)
		{
			    r=((CvContour*)contour)->rect;
	
				// draw the bounding rectangle on both images
				cvRectangle(img,cvPoint(r.x,r.y),cvPoint(r.x+r.width,r.y+r.height),CV_RGB(255,0,0),3,CV_AA,0);
				cvRectangle(mask2,cvPoint(r.x,r.y),cvPoint(r.x+r.width,r.y+r.height),CV_RGB(255,0,0),3,CV_AA,0);

		}

		cvReleaseMemStorage(&storage);
	


		cvNamedWindow( "Image", 1 );//´´½¨´°¿Ú
        cvShowImage( "Image", img );//ÏÔʾͼÏñ
		cvNamedWindow( "Mask", 1 );//´´½¨´°¿Ú
        cvShowImage( "Mask", mask );//ÏÔʾͼÏñ

		cvNamedWindow( "Mask2", 1 );//´´½¨´°¿Ú
        cvShowImage( "Mask2", mask2 );//ÏÔʾͼÏñ



	    cvWaitKey(0); // wait for a key press
        cvDestroyWindow( "Image" );	// destroy the windows
        cvReleaseImage( &img );		// release the image
		cvDestroyWindow( "Mask" );
		cvDestroyWindow( "Mask before" );
		cvDestroyWindow( "originalImage" );	// the name the window was created under
		cvDestroyWindow( "Mask2" );
		cvReleaseMat( &mask );
		cvReleaseImage( &mask2 );
		cvReleaseImage( &hough_in );
        return 0;
}
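The cvOpen and cvClose helpers referenced in the commented-out lines are not defined in the snippet; a sketch built from the same erode/dilate pairs the code applies inline:

void cvOpen(const CvArr *src, CvArr *dst, IplConvKernel *element)
{
	cvErode (src, dst, element, 1);	// opening: erode then dilate, removes small specks
	cvDilate(dst, dst, element, 1);
}

void cvClose(const CvArr *src, CvArr *dst, IplConvKernel *element)
{
	cvDilate(src, dst, element, 1);	// closing: dilate then erode, fills small holes
	cvErode (dst, dst, element, 1);
}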
Esempio n. 30
0
int mainMatch(void)
{
  // Initialise capture device
  CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
  if(!capture) error("No Capture");

  // Declare Ipoints and other stuff
  IpPairVec matches;
  IpVec ipts, ref_ipts;

  // This is the reference object we wish to find in the video frame;
  // point the path below at an image of the planar object to be located
  IplImage *img = cvLoadImage("imgs/object.jpg");
  if (img == NULL) error("Need to load reference image in order to run matching procedure");
  CvPoint src_corners[4] = {{0,0}, {img->width,0}, {img->width, img->height}, {0, img->height}};
  CvPoint dst_corners[4];

  // Extract reference object Ipoints
  surfDetDes(img, ref_ipts, false, 3, 4, 3, 0.004f);
  drawIpoints(img, ref_ipts);
  showImage(img);

  // Create a window
  cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );

  // Main capture loop
  while( true )
  {
    // Grab frame from the capture source
    img = cvQueryFrame(capture);

    // Detect and describe interest points in the frame
    surfDetDes(img, ipts, false, 3, 4, 3, 0.004f);

    // Fill match vector
    getMatches(ipts,ref_ipts,matches);

    // This call finds where the object corners should be in the frame
    if (translateCorners(matches, src_corners, dst_corners))
    {
      // Draw box around object
      for(int i = 0; i < 4; i++ )
      {
        CvPoint r1 = dst_corners[i%4];
        CvPoint r2 = dst_corners[(i+1)%4];
        cvLine( img, cvPoint(r1.x, r1.y),
          cvPoint(r2.x, r2.y), cvScalar(255,255,255), 3 );
      }

      for (unsigned int i = 0; i < matches.size(); ++i)
        drawIpoint(img, matches[i].first);
    }

    // Draw the FPS figure
    drawFPS(img);

    // Display the result
    cvShowImage("OpenSURF", img);

    // If ESC key pressed exit loop
    if( (cvWaitKey(10) & 255) == 27 ) break;
  }

  // Release the capture device
  cvReleaseCapture( &capture );
  cvDestroyWindow( "OpenSURF" );
  return 0;
}
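The error helper used for the bail-outs above is not shown; a minimal sketch, assuming it just reports the message and exits:

void error(const char *msg)
{
  fprintf(stderr, "Error: %s\n", msg);
  exit(1);
}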