/* ===== Example 1 ===== */
/*
 * Motion-history demo (example 1).
 *
 * Opens a camera (no argument, or a single-digit index) or a video file
 * (any other single argument), then shows the motion-history image
 * produced by update_mhi() in a "Motion" window until any key is pressed
 * or the stream ends.
 */
int main(int argc, char** argv)
{
    IplImage* motion = 0;
    CvCapture* capture = 0;

    /* Single-digit argument -> camera index; otherwise treat as a file. */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );

    /* Only run the loop if a capture source was opened. */
    if (capture)
    {
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image;
            if( !cvGrabFrame( capture ))
                break;
            image = cvRetrieveFrame( capture );

            /* FIX: the original fell through and called update_mhi() even
             * when no frame was decoded — with image == NULL and, on the
             * first iteration, motion == NULL as well. Skip such frames. */
            if( !image )
                continue;

            if( !motion )
            {
                /* Lazily allocate the motion-history visualization once
                 * the frame size is known. */
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin; /* match top/bottom row order */
            }

            /* Update and display the motion-history image. */
            update_mhi( image, motion, 30 );
            cvShowImage( "Motion", motion );

            if( cvWaitKey(10) >= 0 )
                break;
        }

        cvReleaseImage( &motion );   /* FIX: motion buffer was leaked */
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }

    return 0;
}
/*
 * Motion-history + Bayes-classifier demo (example 2).
 *
 * Trains a normal Bayes classifier from "./train-data.txt", opens a
 * camera or video file, and for each frame updates the motion-history
 * image, computes feature vectors and classifies them, displaying both
 * the original frame and the motion image.
 */
int main(int argc, char** argv){
	IplImage* motion = 0;
	CvCapture* capture = 0;
	short frameCount = 0;

	// Classifier setup: build the normal Bayes classifier from the
	// default training-data file.
	CvNormalBayesClassifier *nbayes;
	char default_data_filename[] = "./train-data.txt";
	char* data_filename = default_data_filename;
	build_nbayes_classifier(data_filename, &nbayes);

	// Single-digit argument -> camera index; otherwise a video file path.
	if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
		capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
	else if( argc == 2 )
		capture = cvCaptureFromFile( argv[1] );

	if( !capture )
		return 0;

	double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	std::cout << "FPS : " <<  fps << std::endl;

	cvNamedWindow( "Original", 1 );
	cvNamedWindow( "Motion", 1 );

	while( 1 ){
		IplImage* image = cvQueryFrame( capture );
		if( !image ){
			break;
		}

		// Lazily allocate the motion visualization once the frame
		// size is known.
		if( !motion ){
			motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
			cvZero( motion );
			motion->origin = image->origin;
		}

		++frameCount;
		// NOTE(review): the "|| 1" forces this branch on every frame
		// (so frameCount is reset each iteration and update_mhi always
		// receives 0). Looks like a debugging leftover of a process-
		// every-8th-frame throttle; kept as-is to preserve behavior.
		if(frameCount > 7 || 1){
			frameCount = 0;
			initMatrix(relevanceVector,51);
			update_mhi( image, motion, MOTION_HISTORY_SENSITIVITY, frameCount);
			computeVectors(image, motion, 128, 96);
			classify(nbayes);
		}

		cvShowImage( "Original", image);
		cvShowImage( "Motion", motion );

		if( cvWaitKey(10) >= 0 )
			break;
	}

	cvReleaseCapture( &capture );
	cvDestroyWindow( "Original" );
	cvDestroyWindow( "Motion" );
	return 0;
}
/* ===== Example 3 ===== */
/*
 * Motion-history demo (example 3).
 *
 * Same as example 1 but prints usage help on startup and reports a
 * failure to open the camera or file.
 */
int main(int argc, char** argv)
{
    IplImage* motion = 0;
    CvCapture* capture = 0;

    /* Single-digit argument -> camera index; otherwise treat as a file. */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );

    if( capture )
    {
        help();
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image;
            if( !cvGrabFrame( capture ))
                break;
            image = cvRetrieveFrame( capture );

            /* FIX: the original called update_mhi() even when no frame
             * was decoded — with image == NULL and, on the first
             * iteration, motion == NULL as well. Skip such frames. */
            if( !image )
                continue;

            if( !motion )
            {
                /* Lazily allocate the motion-history visualization once
                 * the frame size is known. */
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin; /* match row order */
            }

            update_mhi( image, motion, 30 );
            cvShowImage( "Motion", motion );

            if( cvWaitKey(10) >= 0 )
                break;
        }

        cvReleaseImage( &motion );   /* FIX: motion buffer was leaked */
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }
    else
    {
        printf("\nFailed to open camera or file\n");
        help();
    }

    return 0;
}
/* ===== Example 4 ===== */
/*
 * Motion-history demo (example 4).
 *
 * Cleanest variant: cvQueryFrame() combines grab+retrieve, the loop
 * breaks on a NULL frame, and only ESC (27) exits via the keyboard.
 */
int main(int argc, char** argv)
{
    IplImage* motion = 0;
    CvCapture* capture = 0;

    /* Single-digit argument -> camera index; otherwise treat as a file. */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromFile( argv[1] );

    if( capture )
    {
        cvNamedWindow( "Motion", 1 );

        for(;;)
        {
            IplImage* image = cvQueryFrame( capture );
            if( !image )
                break;

            if( !motion )
            {
                /* Lazily allocate the motion-history visualization once
                 * the frame size is known. */
                motion = cvCreateImage( cvSize(image->width,image->height), 8, 3 );
                cvZero( motion );
                motion->origin = image->origin; /* match row order */
            }

            update_mhi( image, motion, 30 );
            cvShowImage( "Motion", motion );

            /* ESC quits. */
            if( cvWaitKey(10) == 27 )
                break;
        }

        cvReleaseImage( &motion );   /* FIX: motion buffer was leaked */
        cvReleaseCapture( &capture );
        cvDestroyWindow( "Motion" );
    }

    return 0;
}
/* ===== Example 5 ===== */
/*************************************************************
  Video device and display device initialization + preview loop.

  Opens /dev/video0 via V4L2 (mmap streaming), converts each YUYV
  frame to RGB888, hands it to OpenCV for motion detection
  (update_mhi) and on-screen display, and — when a target is
  caught (catchflag) — restarts the device at full resolution
  (X_F x Y_F), crops around the target and tracks it until it is
  lost, then switches back to the small (X x Y) mode.

  Returns: 0 on normal exit, 1 if the camera cannot be opened,
  2 on any V4L2 configuration/streaming error.
*************************************************************/
int video_fb_init_preview()
{
	
	// temporary counter (frame count / reused as loop index)
	int tmpcount=0;

	// serial-port related variables (unused here) --------------
	char buff[512];
	int nread=0;
	int FrameDone=0;// end-of-frame flag
	int FrameCosunt=0;// records frame length
	int j=0;
	int key=0;// on/off flag
	int stat=0;// video-device status flag
	//-------------------------------------------
	
	int numBufs;

	//--------------------------------------------
	//SDL yuv
	SDL_Surface      *pscreen;
	SDL_Overlay      *overlay;
	SDL_Rect         drect;
	SDL_Event        sdlevent;
	SDL_mutex        *affmutex;
	unsigned char    *p = NULL;
	unsigned char    frmrate;
	unsigned int     currtime;
	unsigned int     lasttime;
	char* status = NULL;

	//SDL RGB
	unsigned int     rmask;
	unsigned int     gmask;
	unsigned int     bmask;
	unsigned int     amask;	
	int              bpp;
	int 		 pitch;
	int 		 pixels_num;
	unsigned char    *pixels;
	unsigned char    *p_RGB = NULL;	
	SDL_Surface      *pscreen_RGB;
	SDL_Surface      *display_RGB;
	printf("USB Camera Test\n");



	//++++++++++++++++++++++++ SDL init and setup: start +++++++++++++++++++++++++++++++
	/*if(SDL_Init(SDL_INIT_VIDEO) < 0)
	{
		printf("SDL Init failed.\n");
		exit(1);
	}
	//SDL setup: YUV output
	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;

	//SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	//rmask = 0x000000ff;
	//gmask = 0x0000ff00;
	//bmask = 0x00ff0000;
	//amask = 0x00000000;
	//bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, 
					//fmt.fmt.pix.width, 
					//fmt.fmt.pix.height, 
					//bpp, 
					//pitch, 
					//rmask, 
					//gmask, 
					//bmask, 
					//amask);
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();*/
	//++++++++++++++++++++++++ SDL init and setup: end +++++++++++++++++++++++++++++++
	
	//++++++++++++++++++++++++ OpenCV setup: start +++++++++++++++++++++++++++++++
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(X,Y), IPL_DEPTH_8U, 3);//image header only, no pixel buffer allocated
	IplImage*      imggray = cvCreateImage(cvSize(X,Y), IPL_DEPTH_8U, 1);//image with its own pixel buffer
	IplImage*      motion  = cvCreateImage(cvSize(X,Y), IPL_DEPTH_8U, 3);
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,X*Y*3*sizeof(unsigned char));
	//++++++++++++++++++++++++ OpenCV setup: end +++++++++++++++++++++++++++++++++



	video_fd = open("/dev/video0", O_RDWR, 0);//open the camera device (blocking mode)
	if (video_fd<0)
	{
		printf("open error\n");
		return  1;
	}

	/************* query the driver for supported video formats: start *************/
	struct v4l2_fmtdesc fmt0;
	int ret0;
	memset(&fmt0,0,sizeof(fmt0));
	fmt0.index = 0;
	fmt0.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	// NOTE(review): '==' binds tighter than '=', so ret0 receives the result
	// of the comparison, not the ioctl return value. It still works as a
	// loop condition (loops while ioctl succeeds), but ret0 is misleading.
	while((ret0 = ioctl(video_fd,VIDIOC_ENUM_FMT,&fmt0) == 0))
	{
		fmt0.index++;
		printf("%d> pixelformat =%c%c%c%c,description =%s\n",
			fmt0.index,fmt0.pixelformat&0xff,
			(fmt0.pixelformat>>8)&0xff,
			(fmt0.pixelformat>>16)&0xff,
			(fmt0.pixelformat>>24)&0xff,
			fmt0.description);
	}
	/**************************END***************************/
	
	//--------------------- set the capture format ----------------//
	struct v4l2_format fmt;	
	memset( &fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	     //stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; //source format: JPEG, YUV 4:2:2 or RGB
	fmt.fmt.pix.width = X;                     //capture width
	fmt.fmt.pix.height = Y;                    //capture height
	//fmt.fmt.pix.field=V4L2_FIELD_INTERLACED;
	//fmt.fmt.pix.colorspace=8;
	//printf("color: %d \n",fmt.fmt.pix.colorspace);
	if (ioctl(video_fd, VIDIOC_S_FMT, &fmt) < 0) //apply the configuration
	{
		printf("set format failed\n");
		return 2;
	}
	//-------------------------------------------------------//
	

	//++++++++++++++++ request frame buffers from the video driver: start +++++++++
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	                                   //number of buffers, i.e. how many frames can be stored
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	           //stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	                   //memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1)   //apply the configuration
	{
		perror("request buffer error \n");
		return 2;
	}
	//++++++++++++++++ request frame buffers from the video driver: end +++++++++
	
	//+++++++++ map the buffers from VIDIOC_REQBUFS into user space: start +++++++++
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		
		//memory type: V4L2_MEMORY_MMAP (memory mapped) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)        //apply the configuration
		{
			printf("VIDIOC_QUERYBUF error\n");
			return 2;
		}
		//printf("buf len is %d\n", sizeof(buf));
		buffers[numBufs].length = buf.length;
		buffers[numBufs].offset = (size_t) buf.m.offset;
		
		//use mmap to turn the driver buffer offset into an absolute user-space address
		buffers[numBufs].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			MAP_SHARED, video_fd, buf.m.offset);	
		if (buffers[numBufs].start == MAP_FAILED)
		{
			perror("buffers error\n");
			return 2;
		}
		if (ioctl(video_fd, VIDIOC_QBUF, &buf) < 0)           //enqueue the buffer
		{
			printf("VIDIOC_QBUF error\n");
			return 2;
		}

	}
	//+++++++++ map the buffers from VIDIOC_REQBUFS into user space: end +++++++++
	
	//++++++++++++++++++++++++++++++ start the video stream: start +++++++++++++++++++++++++
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(video_fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("VIDIOC_STREAMON error\n");
		return 2;
	}
	//++++++++++++++++++++++++++++++ start the video stream: end +++++++++++++++++++++++++
	
	//--------------------- read back the negotiated format ---------------------//	
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;				
	if (ioctl(video_fd, VIDIOC_G_FMT, &fmt) < 0)	
	{
		printf("get format failed\n");
		return 2 ;
	}
	else
	{
		printf("Picture:Width = %d   Height = %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
		
	}
	//-------------------------------------------------------//
	
	int i=0;	

	//++++++++++++++++ display directly on the framebuffer device: start ++++++++++++++++++
	//fb-device variables, currently unused ---------------------------
	/*FILE * fd_y_file = 0;
	int a=0;
	int k = 0;
	int i=0;
	//configure the graphics card framebuffer ------------------------------------
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	FILE *infile;//handle of the JPEG file
	unsigned char *buffer;
	char *fb_device;
	unsigned int x;
	unsigned int y;
	//open the framebuffer device ------------------------------------------------
	if ((fb = open("/dev/fb0", O_RDWR)) < 0)
	{
		perror(__func__);
		return 2;
	}

	//query the framebuffer state -----------------------------------------
	fb_set(fb);//set display-memory parameters	
	fb_stat(fb);//get width, height and bit depth from the display driver
	
	printf("frame buffer: %dx%d,  %dbpp, 0x%xbyte= %d,graylevels= %d \n", 
		fbdev.fb_width, fbdev.fb_height, fbdev.fb_bpp, fbdev.fb_size, fbdev.fb_size,fbdev.fb_gray);

	//map the framebuffer into user space ----------------------------------
	fbdev.fb_mem = mmap (NULL, fbdev.fb_size, PROT_READ|PROT_WRITE,MAP_SHARED,fb,0);
	fbdev.fb = fb;
	*/
	//++++++++++++++++ display directly on the framebuffer device: end ++++++++++++++++++
		
	//preview captured frames (a capture/save feature could be added here) -------------------
	// NOTE(review): sdl_quit is a global flag — presumably initialized to a
	// nonzero value elsewhere; the loop runs until SDL_QUIT or a key clears it.
	while (sdl_quit)
	{
		
		fd_set fds;//descriptor set for the select() mechanism
		struct timeval tv;
		int ret1;
		//++++++++++++++++++++ IO select: start ++++++++++++++++++++++++++
		FD_ZERO(&fds);//clear the descriptor set
		FD_SET(video_fd,&fds);//add the video device descriptor to the set
		
		//wait timeout for readiness; could also block indefinitely -------
		tv.tv_sec =5;
		tv.tv_usec=0;
		//wait until the video device is ready ---------------------------
		ret1=select(video_fd+1,&fds,NULL,NULL,&tv);
		if(-1==ret1)
		{
			if(EINTR==errno)
				continue;
			printf("select error. \n");
			exit(EXIT_FAILURE);
		}
		if(0==ret1)
		{
			printf("select timeout. \n");
			continue;
		}
		//++++++++++++++++++++ IO select: end ++++++++++++++++++++++++++
	
		while(sdl_quit)		
		{
					 
			//poll for a quit event
			while(SDL_PollEvent(&sdlevent))
			{
				if(sdlevent.type == SDL_QUIT)
				{
					sdl_quit = 0;
					break;
				}
			}
			// NOTE(review): lasttime is read here before it is ever set —
			// its initialization (SDL_GetTicks) is inside the commented-out
			// SDL block above, so the first frmrate value is garbage.
			currtime = SDL_GetTicks();
			if(currtime - lasttime >0)
				frmrate = 1000/(currtime-lasttime);
			lasttime = currtime;

			//fetch a ready frame from the driver FIFO -----------------------		
			memset(&buf ,0,sizeof(buf));
			buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory=V4L2_MEMORY_MMAP;
			//dequeue the filled buffer --------------------------------------
			ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}	
			//frame fetched from the FIFO ------------------------------------

	
			//user-space address of the current frame, used for format conversion
			unsigned char *ptcur=buffers[buf.index].start;
			
			//++++++++++++++++++++++++++++++++++++++++
			//algorithm section
			//+++++++++++++++++++++++++++++++++++++++++
			//grayscale conversion
			/*
			unsigned char *pgray = NULL;
			pgray = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*2*sizeof(unsigned char));//over-allocate to avoid a segfault
			yuv2gray(ptcur,pgray,fmt.fmt.pix.width, fmt.fmt.pix.height);
			*/
			//YUYV -> RGB (24-bit) conversion
			YUYVToRGB888(ptcur, pRGB, fmt.fmt.pix.width, fmt.fmt.pix.height);
			
			cvSetData(img, pRGB, fmt.fmt.pix.width*3);     //attach the pRGB pixel data to img
			//motion-detection joint test
			update_mhi(img,motion,30);
			/*//OpenCV: detect faces
			cvCvtColor(img, imggray, CV_RGB2GRAY);         //convert img to grayscale into imggray for the detector
			CvHaarClassifierCascade *cascade=(CvHaarClassifierCascade*)cvLoad("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml", storage,0,0);
			cvClearMemStorage(storage);
			cvEqualizeHist(imggray, imggray);
			CvSeq* objects = cvHaarDetectObjects(imggray, cascade, storage, 1.1, 2, 0, cvSize(30,30),cvSize(500,500));
			//OpenCV: mark the detected faces
			CvScalar colors[] = {{{255,0,0}},{{0,0,0}}};
			int faces=0;
			printf("%d\n",objects->total);
			for(faces=0; faces < (objects ? objects->total:0); faces++)
			{
				CvRect* r = (CvRect *)cvGetSeqElem(objects,faces);
				cvRectangle(img, cvPoint(r->x, r->y), cvPoint(r->x+r->width, r->y+r->height),colors[0],2,8,0 );
			}*/
			//tweak the OpenCV img pixel data
			/*CvScalar s;
			int imgi=0,imgj=0,sdlcount=0;
			for(imgi=0;imgi<img->height;imgi++)
			{
				for(imgj=0; imgj<img->width; imgj++)
				{
					s=cvGet2D(img,imgi,imgj);
					pRGB[sdlcount++]=0xff;//s.val[0];//B
					pRGB[sdlcount++]=0xff;//s.val[1];//G
					pRGB[sdlcount++]=0xff;//s.val[2];//R
					//cvSet2D(img,imgi,imgj,s);
				}
			}
			*/
			//OpenCV: show the image	
			cvShowImage("image", img);
			tmpcount++;
			if(cvWaitKey(10)>0)
				sdl_quit=0;
			
			//+++++++++++++++++++++++ SDL display: start +++++++++++++++++++++++++++++++
			//load YUV into SDL
			/*
			SDL_LockYUVOverlay(overlay);
			memcpy(p, pgray,pscreen->w*(pscreen->h)*2);
			SDL_UnlockYUVOverlay(overlay);
			SDL_DisplayYUVOverlay(overlay, &drect);
			*/

			//load RGB into SDL
			//memcpy(pixels, pRGB, pscreen_RGB->w*(pscreen_RGB->h)*3);
			//SDL_BlitSurface(pscreen_RGB, NULL, display_RGB, NULL);
			//SDL_Flip(display_RGB);

			//frame-rate statistics
			//status = (char *)calloc(1,20*sizeof(char));
			//sprintf(status, "Fps:%d",frmrate);
			//SDL_WM_SetCaption(status, NULL);
			//SDL_Delay(10);
			//++++++++++++++++++++++ SDL display: end ++++++++++++++++++++++++++++++

			//re-enqueue the used buffer --------------------------------------
			ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}
			if( catchflag ==1)
			{
				printf("S:Target:(%d,%d)",tx,ty);
				//map coordinates from the small image to the full-size image
				tx = tx*X_F/X;
				ty = Y_F - ty*Y_F/Y;
				lx = tx;
				ly = ty;
				printf("Zooming...\n");
				if((ret1=restartdev(&video_fd,&buffers,&req,&buf,X_F,Y_F)) != 0 )
					return ret1;
				
        			for( tmpcount = 0; tmpcount < N; tmpcount++ ) 
				{
            				cvReleaseImage( &mbuf[tmpcount] );
            				mbuf[tmpcount] = cvCreateImage( cvSize(X,Y), IPL_DEPTH_8U, 1 );
            				cvZero( mbuf[tmpcount] );
        			}
				//fetch a ready frame from the driver FIFO -----------------------		
		                delayN = DELAYN;	
				while( catchflag == 1 ||delayN !=0)
				{
					catchflag = 0;
					memset(&buf ,0,sizeof(buf));
					buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
					buf.memory=V4L2_MEMORY_MMAP;
					//dequeue the filled buffer --------------------------------------
					ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
					if(ret1!=0)
					{					
						printf("Lost the video \n");					
					}	
					//frame fetched from the FIFO ------------------------------------

	
					printf("...\n");
					//user-space address of the current frame, used for format conversion
					unsigned char *tmpptcur=buffers[buf.index].start;
				
					unsigned char *pCUT = NULL;
					pCUT= (unsigned char *)calloc(1,X*Y*3*sizeof(unsigned char));
					if(cutinterest(tmpptcur,pCUT,tx,ty,X_F,Y_F,X,Y)==1)
					{
						printf("Not the Force field.\n");
					}
					else
					{
					
						//YUYV -> RGB (24-bit) conversion
						YUYVToRGB888(pCUT, pRGB, X, Y);
						cvSetData(img, pRGB, X*3);     //attach the pRGB pixel data to img
						update_mhi(img,motion,30);
						printf("catchflag:%d.\n",catchflag);
						if(catchflag == 1)
						{
							delayN=DELAYN;
							if(tx<X/2-DX || tx>X/2+DX || ty<Y/2-DY || ty>Y/2+DY)//did the target leave the monitoring window?
							{
								catchflag = 1;
							}
							else
							{
								catchflag = 0;
							}
						}
						cvShowImage("image",img);
						//re-enqueue the used buffer --------------------------------------
						ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
						if(ret1!=0)
						{					
							printf("Lost the video \n");					
						}
						if(cvWaitKey(2)>0)
							sdl_quit=0;
						printf("NOW:catchflag:%d\n",catchflag);
						if(catchflag == 1)
						{	
							ax = tx;
							ay = ty;
							//map the target coordinates from the cropped image back to the full image
							tx = lx-X/2+tx;
							ty = ly-Y/2+ty;
							lx = tx;
							ly = ty;
							printf("L:New taget:(%d,%d)",tx,ty);
						}
						else if (catchflag == 0 && delayN !=1)
						{
							delayN--;
							tx = lx;
							ty = ly;
						}
						else 
						{
							printf("out Zoom...\n");
							//switch back to small-image mode
							if((ret1=restartdev(&video_fd,&buffers,&req,&buf,X,Y)) != 0 )
								return ret1;
        						for( tmpcount = 0; tmpcount < N; tmpcount++ ) 
							{
            							cvReleaseImage( &mbuf[tmpcount] );
            							mbuf[tmpcount] = cvCreateImage( cvSize(X,Y), IPL_DEPTH_8U, 1 );
            							cvZero( mbuf[tmpcount] );
        						}
							delayN = 0;
						}printf("delayN:%d.\n",delayN);
					}
				}
			
			
			}
		}	
	}	

	//fb_munmap(fbdev.fb_mem, fbdev.fb_size);	//unmap the framebuffer
	//close(fb);                                    //close the framebuffer device
	for(i=0;i<req.count;i++)
	{
		if(-1==munmap(buffers[i].start,buffers[i].length))
			printf("munmap error:%d \n",i);
	}

	cvDestroyWindow("image");
	close(video_fd);					
	// NOTE(review): affmutex is never created (SDL_CreateMutex is inside the
	// commented-out block), so this destroys an uninitialized pointer — and
	// status is still NULL here (free(NULL) is a harmless no-op). Confirm.
	SDL_DestroyMutex(affmutex);
	//SDL_FreeYUVOverlay(overlay);
	cvReleaseImage(&img);
	cvReleaseImage(&imggray);
	free(status);
	free(buffers);
	SDL_Quit();
	return 0;

}