Example #1
// Video-device and display-device initialization and preview (with device-status checks) --------------------------------
int video_fb_init_preview()
{
	// Serial-port related variables -------------------------------
	char buff[512];
	int nread=0;
	int FrameDone=0;   // end-of-frame flag
	int FrameCount=0;  // frame length counter
	int j=0;
	int key=0;         // on/off flag
	int stat=0;        // video-device status flag
	//-------------------------------------------
	
	int numBufs;

	//--------------------------------------------
	//SDL yuv
	SDL_Surface      *pscreen;
	SDL_Overlay      *overlay;
	SDL_Rect         drect;
	SDL_Event        sdlevent;
	SDL_mutex        *affmutex = NULL;
	unsigned char    *p = NULL;
	unsigned char    frmrate;
	unsigned int     currtime;
	unsigned int     lasttime;
	char* status = NULL;

	//SDL RGB
	unsigned int     rmask;
	unsigned int     gmask;
	unsigned int     bmask;
	unsigned int     amask;	
	int              bpp;
	int 		 pitch;
	int 		 pixels_num;
	unsigned char    *pixels;
	unsigned char    *p_RGB = NULL;	
	SDL_Surface      *pscreen_RGB;
	SDL_Surface      *display_RGB;
	printf("USB Camera Test\n");

	video_fd = open("/dev/video0", O_RDWR, 0);// open the camera device in blocking mode
	if (video_fd<0)
	{
		printf("open error\n");
		return  1;
	}

	/************* First ask the driver for the device's supported video formats: start *************/
	struct v4l2_fmtdesc fmt0;
	int ret0;
	memset(&fmt0,0,sizeof(fmt0));
	fmt0.index = 0;
	fmt0.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while((ret0 = ioctl(video_fd,VIDIOC_ENUM_FMT,&fmt0)) == 0)
	{
		printf("%d> pixelformat =%c%c%c%c,description =%s\n",fmt0.index,fmt0.pixelformat&0xff,(fmt0.pixelformat>>8)&0xff,(fmt0.pixelformat>>16)&0xff,(fmt0.pixelformat>>24)&0xff,fmt0.description);
		fmt0.index++;
	}
	/**************************END***************************/
	
	//--------------------- Set the capture format ----------------//
	struct v4l2_format fmt;	
	memset( &fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	// stream type; always V4L2_BUF_TYPE_VIDEO_CAPTURE for capture
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;// source format: e.g. MJPEG, YUV 4:2:2 (YUYV) or RGB
	fmt.fmt.pix.width = 640;// frame width
	fmt.fmt.pix.height = 480;// frame height
	//fmt.fmt.pix.field=V4L2_FIELD_INTERLACED;
	//fmt.fmt.pix.colorspace=8;
	//printf("color: %d \n",fmt.fmt.pix.colorspace);
	if (ioctl(video_fd, VIDIOC_S_FMT, &fmt) < 0)// apply the configuration
	{
		printf("set format failed\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//+++++++++++++++++++++++++++++++++++++++++++++++++++++++
	//if(SDL_Init(SDL_INIT_VIDEO) < 0)
	//{
	//	printf("SDL Init failed.\n");
	//	exit(1);
	//}
	
	//SDL setup: YUV output
	/*
 	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;
	*/

	//SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	rmask = 0x000000ff;
	gmask = 0x0000ff00;
	bmask = 0x00ff0000;
	amask = 0x00000000;
	bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, fmt.fmt.pix.width, fmt.fmt.pix.height, bpp, pitch, rmask, gmask, bmask, amask);

	
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();
	//SDL setup end
	
	//OpenCV setup
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 3);// image header only; no pixel buffer allocated
	IplImage*      imggray = cvCreateImage(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 1);// image with its own pixel buffer
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*3*sizeof(unsigned char));
	//OpenCV setup end

	//------------------------ Request frame buffers ---------------------//
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	// number of buffers, i.e. how many frames can be held
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	// stream type; always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	// memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1)// apply the configuration
	{
		perror("request buffer error");
		return 2;
	}
	//-------------------------------------------------------//
	
	//-------- Map the buffers obtained via VIDIOC_REQBUFS into user space -------------//
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		// memory type: V4L2_MEMORY_MMAP (memory mapped) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)// query the buffer's length and offset
		{
			printf("VIDIOC_QUERYBUF error\n");
			return 2;
		}
		//printf("buf len is %d\n", sizeof(buf));
		buffers[numBufs].length = buf.length;
		buffers[numBufs].offset = (size_t) buf.m.offset;
		// use mmap to map the driver's buffer into the application's address space ------
		buffers[numBufs].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			MAP_SHARED, video_fd, buf.m.offset);	
		if (buffers[numBufs].start == MAP_FAILED)
		{
			perror("buffers error\n");
			return 2;
		}
		if (ioctl(video_fd, VIDIOC_QBUF, &buf) < 0)// enqueue the buffer
		{
			printf("VIDIOC_QBUF error\n");
			return 2;
		}

	}
	//-------------------------------------------------------//
	
	//---------------------- Start streaming ----------------------//
	enum v4l2_buf_type type;
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(video_fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("VIDIOC_STREAMON error\n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//--------------------- Read back the negotiated format ---------------------//	
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;				
	if (ioctl(video_fd, VIDIOC_G_FMT, &fmt) < 0)	
	{
		printf("get format failed\n");
		return 2 ;
	}
	else
	{
		printf("Picture:Width = %d   Height = %d\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
		
	}
	//-------------------------------------------------------//
	int i=0;	
	// Some framebuffer-related / unused variables ---------------------------
	/*FILE * fd_y_file = 0;
	int a=0;
	int k = 0;
	int i=0;
	// Set up the graphics card framebuffer ------------------------------------
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	FILE *infile;// handle of the JPEG file
	unsigned char *buffer;
	char *fb_device;
	unsigned int x;
	unsigned int y;
	// Open the framebuffer device ------------------------------------------------
	if ((fb = open("/dev/fb0", O_RDWR)) < 0)
	{
		perror(__func__);
		return 2;
	}

	// Query the framebuffer state -----------------------------------------
	fb_set(fb);// set the display memory parameters
	fb_stat(fb);// get width, height and bit depth from the display driver
	
	printf("frame buffer: %dx%d,  %dbpp, 0x%xbyte= %d,graylevels= %d \n", 
		fbdev.fb_width, fbdev.fb_height, fbdev.fb_bpp, fbdev.fb_size, fbdev.fb_size,fbdev.fb_gray);

	// Map the framebuffer into user space ----------------------------------
	fbdev.fb_mem = mmap (NULL, fbdev.fb_size, PROT_READ|PROT_WRITE,MAP_SHARED,fb,0);
	fbdev.fb = fb;
	*/
		
	// Preview the captured images (a capture/save feature could be added if needed) -------------------
	while (sdl_quit)
	{
		
		fd_set fds;// file-descriptor set; we use the select() mechanism
		struct timeval tv;
		int ret1;
		
		FD_ZERO(&fds);// clear the descriptor set
		FD_SET(video_fd,&fds);// add the video device's descriptor to the set
		
		// Wait with a timeout; this could also block indefinitely ---------
		tv.tv_sec =2;
		tv.tv_usec=0;
		// Wait until the video device is ready --------------------------
		ret1=select(video_fd+1,&fds,NULL,NULL,&tv);
		if(-1==ret1)
		{
			if(EINTR==errno)
				continue;
			printf("select error. \n");
			exit(EXIT_FAILURE);
		}
		if(0==ret1)
		{
			printf("select timeout. \n");
			continue;
		}		
		while(sdl_quit)	// note: this inner loop only exits on quit, so the select() above runs just once
		{
					 
			// Check for quit events
			while(SDL_PollEvent(&sdlevent))
			{
				if(sdlevent.type == SDL_QUIT)
				{
					sdl_quit = 0;
					break;
				}
			}
			currtime = SDL_GetTicks();
			if(currtime - lasttime >0)
				frmrate = 1000/(currtime-lasttime);
			lasttime = currtime;

			// Fetch a frame that is ready in the driver FIFO -----------------------		
			memset(&buf ,0,sizeof(buf));
			buf.type=V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory=V4L2_MEMORY_MMAP;
			// Dequeue the ready buffer --------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_DQBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}	
	
			// User-space start address of the current frame, used for format conversion ------------------
			unsigned char *ptcur=buffers[buf.index].start;
			//++++++++++++++++++++++++++++++++++++++++
			// Algorithm area
			//+++++++++++++++++++++++++++++++++++++++++
			// Grayscale conversion
			/*
			unsigned char *pgray = NULL;
			pgray = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*2*sizeof(unsigned char));// avoid a segmentation fault
			yuv2gray(ptcur,pgray,fmt.fmt.pix.width, fmt.fmt.pix.height);
			*/

			// YUV to RGB (24-bit) conversion
			YUYVToRGB888(ptcur, pRGB, fmt.fmt.pix.width, fmt.fmt.pix.height);
			
			//OpenCV face detection
			cvSetData(img, pRGB, fmt.fmt.pix.width*3);// attach the pRGB data to img
			cvCvtColor(img, imggray, CV_RGB2GRAY);// convert img to grayscale into imggray for detection
			CvHaarClassifierCascade *cascade=(CvHaarClassifierCascade*)cvLoad("/usr/share/opencv-2.4.6.1/data/haarcascades/haarcascade_frontalface_alt2.xml", storage,0,0);// (re)loaded every frame; hoisting this out of the loop would be much cheaper
			cvClearMemStorage(storage);
			cvEqualizeHist(imggray, imggray);
			CvSeq* objects = cvHaarDetectObjects(imggray, cascade, storage, 1.1, 2, 0, cvSize(30,30),cvSize(30,30));
			
			//OpenCV: mark the detected faces
			CvScalar colors[] = {{{255,0,0}},{{0,0,0}}};
			int faces=0;
			for(faces=0; faces < (objects ? objects->total:0); faces++)
			{
				CvRect* r = (CvRect *)cvGetSeqElem(objects,faces);
				cvRectangle(img, cvPoint(r->x, r->y), cvPoint(r->x+r->width, r->y+r->height),colors[0],2,8,0 );// draw a box on the original image
			}
			

			// Adjust the OpenCV img image data
			/*CvScalar s;
			int imgi=0,imgj=0,sdlcount=0;
			for(imgi=0;imgi<img->height;imgi++)
			{
				for(imgj=0; imgj<img->width; imgj++)
				{
					s=cvGet2D(img,imgi,imgj);
					pRGB[sdlcount++]=0xff;//s.val[0];//B
					pRGB[sdlcount++]=0xff;//s.val[1];//G
					pRGB[sdlcount++]=0xff;//s.val[2];//R
					//cvSet2D(img,imgi,imgj,s);
				}
			}
			*/
			//OpenCV: display the image	
			cvShowImage("image", img);
			char c = cvWaitKey(1);
			printf("%d\n",c);
			if(c==27)
				sdl_quit=0;
			
			
			// Load YUV into SDL
			/*
			SDL_LockYUVOverlay(overlay);
			memcpy(p, pgray,pscreen->w*(pscreen->h)*2);
			SDL_UnlockYUVOverlay(overlay);
			SDL_DisplayYUVOverlay(overlay, &drect);
			*/

			// Load RGB into SDL
			//memcpy(pixels, pRGB, pscreen_RGB->w*(pscreen_RGB->h)*3);
			//SDL_BlitSurface(pscreen_RGB, NULL, display_RGB, NULL);
			//SDL_Flip(display_RGB);

			// Frame-rate statistics
			//status = (char *)calloc(1,20*sizeof(char));
			//sprintf(status, "Fps:%d",frmrate);
			//SDL_WM_SetCaption(status, NULL);
			//SDL_Delay(10);
			// Re-queue the used buffer --------------------------------------------
			ret1=ioctl (video_fd,VIDIOC_QBUF,&buf);
			if(ret1!=0)
			{					
				printf("Lost the video \n");					
			}
			
		}	
	}	

	//fb_munmap(fbdev.fb_mem, fbdev.fb_size);	// release the framebuffer mapping
	//close(fb);// close the framebuffer device
	for(i=0;i<req.count;i++)
	{
		if(-1==munmap(buffers[i].start,buffers[i].length))
			printf("munmap error:%d \n",i);
	}

	cvDestroyWindow("image");
	close(video_fd);					
	if (affmutex) SDL_DestroyMutex(affmutex);	// the mutex is only created in the commented-out SDL path
	//SDL_FreeYUVOverlay(overlay);
	cvReleaseImageHeader(&img);	// img borrows pRGB via cvSetData, so only the header is released
	cvReleaseImage(&imggray);
	free(status);
	free(buffers);
	//free(pRGB);
	SDL_Quit();
	return 0;

}
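The YUYVToRGB888() conversion used above is not shown in this example. A minimal sketch of such a routine, assuming packed YUYV input and fixed-point ITU-R BT.601 coefficients (the original implementation may differ):

/* Sketch: convert a packed YUYV (YUV 4:2:2) frame to RGB888.
   Every 4-byte group Y0 U Y1 V produces two RGB pixels. */
static unsigned char clamp_u8(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

void YUYVToRGB888(const unsigned char *yuyv, unsigned char *rgb, int width, int height)
{
	int i;
	for (i = 0; i < width * height / 2; i++) {
		int y0 = yuyv[4*i + 0], u = yuyv[4*i + 1] - 128;
		int y1 = yuyv[4*i + 2], v = yuyv[4*i + 3] - 128;

		/* fixed-point BT.601: R = Y + 1.402 V, G = Y - 0.344 U - 0.714 V, B = Y + 1.772 U */
		int rv  = ( 91881 * v) >> 16;
		int guv = ( 22554 * u + 46802 * v) >> 16;
		int bu  = (116130 * u) >> 16;

		rgb[6*i + 0] = clamp_u8(y0 + rv);
		rgb[6*i + 1] = clamp_u8(y0 - guv);
		rgb[6*i + 2] = clamp_u8(y0 + bu);
		rgb[6*i + 3] = clamp_u8(y1 + rv);
		rgb[6*i + 4] = clamp_u8(y1 - guv);
		rgb[6*i + 5] = clamp_u8(y1 + bu);
	}
}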
Example #2
	int getPegthresholdFromUser(IplImage *img, Gui *gui, string message, int pegThreshVal, Rect r, cv::Mat &fgMaskPeg)
	{
		cv::Mat element[1];
		int count = 0;

		element[0] = getStructuringElement(MORPH_ELLIPSE, Size(8, 8), Point(0, 0));
		
		window_name = gui->windowName();
		cvDestroyWindow(window_name.c_str());
		cvNamedWindow(window_name.c_str(), CV_WINDOW_AUTOSIZE);
		cvMoveWindow(window_name.c_str(), 100, 100);

		img0 = (IplImage *)cvClone(img);
		char TrackbarName[50];
		sprintf(TrackbarName, "thresh x %d", slider_max);

		slider_val = pegThreshVal;
		createTrackbar(TrackbarName, window_name, &slider_val, slider_max, 0);

		

		Mat src, im1, im3;
		src = Mat(img0);

		im1 = Mat::zeros(src.size(), src.type());
		cvtColor(src, im3, CV_BGR2HSV);
		vector<vector<Point> > pegsI;
		while (1)
		{
			pegsI.clear();
			Mat channel[3];
			split(im3, channel);


			//Mat fgMaskRing;
			inRange(channel[2], slider_val, 255, fgMaskPeg);
			// ROI
			for (int y = 0; y < fgMaskPeg.rows; y++)
			{
				for (int x = 0; x < fgMaskPeg.cols; x++)
				{
					if (!(x >= r.tl().x && x <= r.br().x && y >= r.tl().y && y <= r.br().y))
					{
						fgMaskPeg.at<uchar>(Point(x, y)) = 0;
					}
				}
			}
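			// erode+dilate below is a morphological opening; applying the
			// pair twice removes speckle noise while roughly keeping peg shape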
			erode(fgMaskPeg, fgMaskPeg, element[0]);
			dilate(fgMaskPeg, fgMaskPeg, element[0]);
			erode(fgMaskPeg, fgMaskPeg, element[0]);
			dilate(fgMaskPeg, fgMaskPeg, element[0]);

			//p.copyTo(p, fgMaskPeg);
			for (int y = 0; y < src.rows; y++)
			{
				for (int x = 0; x < src.cols; x++)
				{
					if (fgMaskPeg.at<uchar>(Point(x, y)))
					{
						im1.at<Vec3b>(Point(x, y)) = src.at<Vec3b>(Point(x, y));
					}
					else
					{
						im1.at<Vec3b>(Point(x, y)) = Vec3b(0,0,0);
					}
				}
			}

			Mat mask = fgMaskPeg.clone();
			vector<Vec4i> hierarchy_ring;

			//imshow("Initial mask", initial_ring_mask);
			findContours(mask, pegsI, hierarchy_ring, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
			count = pegsI.size();



			cout << "count Pegs->" << count << endl;
			cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8);
			putText(im1, message.c_str(), cvPoint(0, 60), CV_FONT_HERSHEY_SIMPLEX, .7, Scalar(255, 255, 0), 1);

			imshow(window_name.c_str(), im1);
			char key = cvWaitKey(40);
			if (key == '\r' || key == '\n')
			{
				if (count == 12)	// accept only once exactly 12 pegs are detected
				{
					break;
				}
			}
			count = 0;
		} 
		cvReleaseImage(&img0);
		return slider_val;
	}
Example #3
bool VisionPipeLine::init()
{
    // query the first frame
    _currentFrame = _input->retrieveFrame();
    if (!_currentFrame) return false;

    _currentframeSz.height = _currentFrame->height;
    _currentframeSz.width = _currentFrame->width;

    if (_currentframeSz.height != SRC_IMG_HEIGHT || _currentframeSz.width != SRC_IMG_WIDTH)
    {
        printf("Please set your camera to the resolution %dx%d\n", SRC_IMG_WIDTH, SRC_IMG_HEIGHT);
        return false;
    }

    std::string filepath_cam = getPlatformConfigPrefix();
    std::string filepath_distort = getPlatformConfigPrefix();
    filepath_cam+=FILEPATH_CAMERA_INTRINSICS;
    filepath_distort+=FILEPATH_CAMERA_DISTORT;
    try {
        
        if (!_undistortor.loadCalibrationfromFile(filepath_cam.c_str(), filepath_distort.c_str()))
        {
            // no user defined calib data found, load predefined
            filepath_cam = getPlatformResPrefix();
            filepath_distort = getPlatformResPrefix();

            filepath_cam+= FILEPATH_RESOURCE_PREDEFINE_FOLDER;
            filepath_distort+= FILEPATH_RESOURCE_PREDEFINE_FOLDER;

            filepath_cam+= RELEASE_VENDOR_TYPE;
            filepath_distort+= RELEASE_VENDOR_TYPE;

            filepath_cam+= FILEPATH_PREDEFINE_CAMERA_INTRINSICS;
            filepath_distort+= FILEPATH_PREDEFINE_CAMERA_DISTORT;
            if (!_undistortor.loadCalibrationfromFile(filepath_cam.c_str(), filepath_distort.c_str()))
            {
                printf("warning, no camera calibration file found.\n");
                _noLenCalibration = true;
            }
        }
    } 
    catch(...){
        printf("warning, unexpected error happens during loading the camera calibration files.\n");
        _noLenCalibration = true;
    }

    _grayFrame = cvCreateImage(cvSize(RECTIFIED_IMG_W,RECTIFIED_IMG_H), 8, 1);
    _rectified = cvCreateImage(cvSize(RECTIFIED_IMG_W,RECTIFIED_IMG_H), 8, 3);
    _projected = cvCreateImage(cvSize(PROJECTED_WIDTH,PROJECTED_HEIGHT), 8, 3);
    
    
    _final_disp = cvCreateImage(cvSize(PROJECTED_WIDTH + SRC_IMG_WIDTH, SRC_IMG_HEIGHT + KBD_IMG_HEIGHT ), 8, 3);

    //cvNamedWindow(WINDOWNAME_RECTIFIED);
    //cvNamedWindow(WINDOWNAME_ORIGINAL);
    //cvNamedWindow(WINDOWNAME_PROJECTED);
    cvNamedWindow(WINDOWNAME_STATUS);

    cvSetMouseCallback(WINDOWNAME_STATUS, &VisionPipeLine::s_onMouse, this);
   
    
    _ui_banner_frame.setImage(g_resouce_mgr.getImageRes("main_banner.png"));
    _ui_btn_keyboard_mode.setNormalImage(g_resouce_mgr.getImageRes("btn.keyboard.png"));
    _ui_btn_keyboard_mode.setActiveImage(g_resouce_mgr.getImageRes("btn.keyboard.active.png"));

    _ui_btn_touchpad_mode.setNormalImage(g_resouce_mgr.getImageRes("btn.pad.png"));
    _ui_btn_touchpad_mode.setActiveImage(g_resouce_mgr.getImageRes("btn.pad.active.png"));

    _ui_btn_calib_mode.setNormalImage(g_resouce_mgr.getImageRes("btn.calibration.png"));
    _ui_btn_calib_mode.setActiveImage(g_resouce_mgr.getImageRes("btn.calibration.active.png"));

    _ui_btn_keyboard_mode.moveTo((_final_disp->width-(_ui_btn_keyboard_mode.getWidth()+5)*3)/2,
        (_ui_banner_frame.getHeight() - _ui_btn_keyboard_mode.getHeight())/2);

    _ui_btn_touchpad_mode.moveTo(_ui_btn_keyboard_mode.getRight()+5, _ui_btn_keyboard_mode.getY());
    _ui_btn_calib_mode.moveTo(_ui_btn_touchpad_mode.getRight()+5, _ui_btn_keyboard_mode.getY());


    _ui_btn_rplogo.setNormalImage(g_resouce_mgr.getImageRes("logobtn.png"));
    _ui_btn_rplogo.setActiveImage(g_resouce_mgr.getImageRes("logobtn.active.png"));

    _ui_btn_upgrade.setNormalImage(g_resouce_mgr.getImageRes("btn.update.png"));
    _ui_btn_upgrade.setActiveImage(g_resouce_mgr.getImageRes("btn.update.active.png"));


    _ui_btn_rplogo.moveTo(_final_disp->width-_ui_btn_rplogo.getWidth(), 0);
    _ui_btn_upgrade.moveTo(0, 0);

    _ui_btn_upgrade.setVisible(false);

    _uicontainer.addRenderObject(&_ui_banner_frame);
    _uicontainer.addRenderObject(&_ui_btn_keyboard_mode);
    _uicontainer.addRenderObject(&_ui_btn_touchpad_mode);
    _uicontainer.addRenderObject(&_ui_btn_calib_mode);
    
    _uicontainer.addRenderObject(&_ui_btn_rplogo);
    _uicontainer.addRenderObject(&_ui_btn_upgrade);

    _ui_btn_keyboard_mode.setListener(&VisionPipeLine::s_on_keyboardmode, this);
    _ui_btn_touchpad_mode.setListener(&VisionPipeLine::s_on_touchpadmode, this);
    _ui_btn_calib_mode.setListener(&VisionPipeLine::s_on_calibrationmode, this);

    _ui_btn_rplogo.setListener(&VisionPipeLine::s_on_rplogo_clicked, this);
    _ui_btn_upgrade.setListener(&VisionPipeLine::s_on_upgrade_clicked, this);

    if (!loadLocalizationCalibrationData()) {
        setWorkingMode(MODE_CALIBRATION);
    }
    return true;
}
Example #4
int mainStaticMatchStrengths()
{
  bool matchGlobalOrientations = true;

  // Make images as Mats; convert to IplImage for OpenSURF library actions
  cv::Mat mimg1, mimg2;
  mimg1=cv::imread("OpenSURF/imgs/img1.jpg", CV_LOAD_IMAGE_COLOR);
  mimg2=cv::imread("OpenSURF/imgs/img2.jpg", CV_LOAD_IMAGE_COLOR);

  IplImage iimg1, iimg2;
  iimg1=mimg1;
  iimg2=mimg2;

  IplImage *img1, *img2;
  img1 = &iimg1;
  img2 = &iimg2;

  IpVec ipts1, ipts2;
  surfDetDes(img1,ipts1,false,4,4,2,0.0001f,matchGlobalOrientations);
  surfDetDes(img2,ipts2,false,4,4,2,0.0001f,matchGlobalOrientations);

  MatchVec matches;
  getMatchesSymmetric(ipts1,ipts2,matches);

  IpVec mpts1, mpts2;

  const int & w = img1->width;

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    float strengthOverThreshold = 1 - matches[i].second; // /MATCH_THRESHOLD;
    strengthOverThreshold*=255;
    CvScalar clr = cvScalar(strengthOverThreshold,strengthOverThreshold,strengthOverThreshold);
    clr = cvScalar(255,255,255); // overrides the strength-based gray with plain white
    
    //drawPoint(img1,matches[i].first.first,clr);
    //drawPoint(img2,matches[i].first.second,clr),
    mpts1.push_back(matches[i].first.first);
    mpts2.push_back(matches[i].first.second);
  
    cvLine(img1,cvPoint(matches[i].first.first.x,matches[i].first.first.y),cvPoint(matches[i].first.second.x+w,matches[i].first.second.y), clr,1);
    cvLine(img2,cvPoint(matches[i].first.first.x-w,matches[i].first.first.y),cvPoint(matches[i].first.second.x,matches[i].first.second.y), clr,1);
  }

  drawIpoints(img1,mpts1);
  drawIpoints(img2,mpts2);

  std::cout<< "Matches: " << matches.size() << std::endl;

  cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
  cvShowImage("1", img1);
  cvShowImage("2",img2);
  cvWaitKey(0);

  // NOW DO IT AGAIN!
  cv::Mat mimg3, mimg4;
  mimg3=cv::imread("OpenSURF/imgs/img1.jpg", CV_LOAD_IMAGE_COLOR);
  mimg4=cv::imread("OpenSURF/imgs/img2.jpg", CV_LOAD_IMAGE_COLOR);

  IplImage iimg3, iimg4;
  iimg3=mimg3;
  iimg4=mimg4;

  IplImage *img3, *img4;
  img3 = &iimg3;
  img4 = &iimg4;

  IpVec ipts3, ipts4;
  surfDetDes(img3,ipts3,false,4,4,2,0.0001f,!matchGlobalOrientations);
  surfDetDes(img4,ipts4,false,4,4,2,0.0001f,!matchGlobalOrientations);

  matches.clear();
  getMatchesSymmetric(ipts3,ipts4,matches);

  IpVec mpts3, mpts4;

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    float strengthOverThreshold = 1 - matches[i].second; // /MATCH_THRESHOLD;
    strengthOverThreshold*=255;
    CvScalar clr = cvScalar(strengthOverThreshold,strengthOverThreshold,strengthOverThreshold);
    clr = cvScalar(255,255,255); // overrides the strength-based gray with plain white
    
    //drawPoint(img1,matches[i].first.first,clr);
    //drawPoint(img2,matches[i].first.second,clr),
    mpts3.push_back(matches[i].first.first);
    mpts4.push_back(matches[i].first.second);
  
    cvLine(img3,cvPoint(matches[i].first.first.x,matches[i].first.first.y),cvPoint(matches[i].first.second.x+w,matches[i].first.second.y), clr,1);
    cvLine(img4,cvPoint(matches[i].first.first.x-w,matches[i].first.first.y),cvPoint(matches[i].first.second.x,matches[i].first.second.y), clr,1);
  }

  drawIpoints(img3,mpts3);
  drawIpoints(img4,mpts4);

  std::cout<< "Matches: " << matches.size() << std::endl;

  cvNamedWindow("3", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("4", CV_WINDOW_AUTOSIZE );
  cvShowImage("3", img3);
  cvShowImage("4",img4);
  cvWaitKey(0);


  return 0;
}
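The match-drawing loop above is duplicated verbatim for the second pass. A small helper could factor it out; a sketch against the OpenSURF types used here (IpVec, MatchVec and drawIpoints are the library's, the helper name is ours):

// Sketch: draw symmetric matches between two side-by-side images.
static void drawMatchLines(IplImage *imgA, IplImage *imgB, const MatchVec &matches)
{
  IpVec mptsA, mptsB;
  const int w = imgA->width;  // horizontal offset of the second image

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    mptsA.push_back(matches[i].first.first);
    mptsB.push_back(matches[i].first.second);

    // connect corresponding points across the shared image edge
    cvLine(imgA, cvPoint(matches[i].first.first.x, matches[i].first.first.y),
                 cvPoint(matches[i].first.second.x + w, matches[i].first.second.y),
                 cvScalar(255,255,255), 1);
    cvLine(imgB, cvPoint(matches[i].first.first.x - w, matches[i].first.first.y),
                 cvPoint(matches[i].first.second.x, matches[i].first.second.y),
                 cvScalar(255,255,255), 1);
  }
  drawIpoints(imgA, mptsA);
  drawIpoints(imgB, mptsB);
}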
Example #5
int main( int argc, char** argv )
{
    CvSize imgSize;                 
    imgSize.width = 320; 
    imgSize.height = 240; 
	
	int key= -1; 
	
	// set up opencv capture objects

    CvCapture* capture= cvCaptureFromCAM(0); 
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
	
    CvCapture* capture2= cvCaptureFromCAM(1); 
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_HEIGHT, 240);

    CvCapture* capture3= cvCaptureFromCAM(2); 
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_HEIGHT, 240);

    
	// allocate image storage (other createimage specifiers: IPL_DEPTH_32F, IPL_DEPTH_8U)
	
    IplImage* colourImage  = cvCloneImage(cvQueryFrame(capture)); 
    IplImage* greyImage    = cvCreateImage(cvGetSize(colourImage), IPL_DEPTH_8U, 1); 
    IplImage* hannImage    = cvCloneImage(greyImage); 
	IplImage *poc= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_64F, 1 );
	IplImage *pocdisp= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_8U, 1 );
	
	// set up opencv windows
	
    cvNamedWindow("hannImage", 1);
    cvNamedWindow("greyImage", 1); 
    cvNamedWindow("greyImage2", 1); 
    cvNamedWindow("greyImage3", 1); 
    cvNamedWindow("poc", 1);
	cvMoveWindow("greyImage", 40, 0);
	cvMoveWindow("hannImage", 40, 270);
	cvMoveWindow("poc", 365, 0);
	cvMoveWindow("greyImage2", 40, 540);
	cvMoveWindow("greyImage3", 365, 540);
	
	// set up storage for fftw
	
	fftw_complex *fftwSingleRow = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwSingleRow2 = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwStore = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * kFFTStoreSize );
		
	// loop
	
    while(key != 'q') 
	{ 

		//		double t = (double)cvGetTickCount();
		//		printf( "%g ms: start.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));

		// capture a frame, convert to greyscale, and show it
		
		cvCopyImage(cvQueryFrame(capture), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage",greyImage); 

        cvCopyImage(cvQueryFrame(capture2), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage2",greyImage); 

        cvCopyImage(cvQueryFrame(capture3), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage3",greyImage);

        
        key = cvWaitKey(3);

		// project and calculate hann window
		
		int i, j, k;
		uchar 	*inData= ( uchar* ) greyImage->imageData;
		uchar 	*hannImageData= ( uchar* ) hannImage->imageData;
		unsigned long acc;
		
		for( j = 0 ; j < greyImage->width ; j++) {
			
			// sum input column
			
			acc= 0;
			for( i = 0; i < greyImage->height ; i++ ) {
				acc+= inData[i * greyImage->widthStep + j];
			}
			
			// hann window and output
			
			double hannMultiplier = 0.5 * (1 - cos(2*M_PI*j/(greyImage->width-1)));  // hann coefficient depends only on column j
			for( i = 0; i < hannImage->height ; i++ ) {
				hannImageData[i * hannImage->widthStep + j]=  hannMultiplier * (acc/greyImage->height);
			}
			
		}

		cvShowImage("hannImage",hannImage); 

		// set up forward FFT into store plan
		
		fftw_plan fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, fftwSingleRow, &(fftwStore[kFFTWidth * pocline]), FFTW_FORWARD,  FFTW_ESTIMATE );
				
		// load data for fftw
		
		for( int j = 0 ; j < kFFTWidth ; j++) {
			fftwSingleRow[j][0] = ( double )hannImageData[j];
			fftwSingleRow[j][1] = 0.0;
		}
		
		// run and release plan
		
		fftw_execute( fft_plan );
		fftw_destroy_plan( fft_plan );

		// compare pocline against ALL OTHER IN STORE

		for( int j = 0 ; j < kFFTStoreSize ; j++) {
			
			fftw_complex *img1= &(fftwStore[kFFTWidth * pocline]);
			fftw_complex *img2= &(fftwStore[kFFTWidth * j]);
			
			// obtain the cross power spectrum
			for( int i = 0; i < kFFTWidth ; i++ ) {
				
				// complex multiply complex img2 by complex conjugate of complex img1
				
				fftwSingleRow[i][0] = ( img2[i][0] * img1[i][0] ) - ( img2[i][1] * ( -img1[i][1] ) );
				fftwSingleRow[i][1] = ( img2[i][0] * ( -img1[i][1] ) ) + ( img2[i][1] * img1[i][0] );
				
				// set tmp to (real) absolute value of complex number res[i]
				
				double tmp = sqrt( pow( fftwSingleRow[i][0], 2.0 ) + pow( fftwSingleRow[i][1], 2.0 ) );
				
				// complex divide res[i] by (real) absolute value of res[i]
				// (this is the normalization step)
				
				if(tmp == 0) {
					fftwSingleRow[i][0]= 0;
					fftwSingleRow[i][1]= 0;
				}
				else {
					fftwSingleRow[i][0] /= tmp;
					fftwSingleRow[i][1] /= tmp;
				}
			}
				
			// run inverse
			
			fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, fftwSingleRow, fftwSingleRow2, FFTW_BACKWARD,  FFTW_ESTIMATE );
			fftw_execute(fft_plan);
			fftw_destroy_plan( fft_plan );

			// normalize and copy to result image

			double 	*poc_data = ( double* )poc->imageData;
			
			for( int k = 0 ; k < kFFTWidth ; k++ ) {
				poc_data[k+(j*kFFTWidth)] = (fftwSingleRow2[k][0] / ( double )kFFTWidth);
			}
				
			
		}
		
		
		

		// inc pocline
		
		pocline++;
		if(pocline == kFFTStoreSize)
			pocline= 0;
		
		
		// display??
		

//		for(int i = 0 ; i < kFFTWidth ; i++ ) {
//			poc_data[i+(pocline*kFFTWidth)] = (fftwStore[(kFFTWidth * pocline)+i])[1];
//		}
		
		// find the maximum value and its location
		CvPoint minloc, maxloc;
		double  minval, maxval;
		cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
		
		// print it
//		printf( "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
		
//        cvConvertScale(dft_re,dft_orig,255,0); //255.0*(max-min),0);

        
        
		cvCvtScale(poc, pocdisp, (1.0/(maxval/2))*255, 0);
		
		cvShowImage("poc",pocdisp);
		
		
		// set up fftw plans
//		fftw_plan fft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, img2, img2, FFTW_FORWARD,  FFTW_ESTIMATE );
//		fftw_plan ifft_plan = fftw_plan_dft_2d( 1 , kFFTWidth, res,  res,  FFTW_BACKWARD, FFTW_ESTIMATE );
		
		
		
		// TODO FROM HERE
		
		/*
		
		if(key == 'r') {
			cvReleaseImage(&ref);
			ref= cvCloneImage(testOutImage);
			cvShowImage("ref",ref); 
		}
		
		
		
		{  // try phase correlating full img
			
			tpl= cvCloneImage(testOutImage);
			//				ref= cvCloneImage(testOutImage);
//				cvShowImage("tpl",tpl); 
//				cvShowImage("ref",ref); 
			
			
			if(ref == 0)
				continue;
			
			if( ( tpl->width != ref->width ) || ( tpl->height != ref->height ) ) {
				fprintf( stderr, "Both images must have equal width and height!\n" );
				continue
				;
			}
			
			// get phase correlation of input images
			
			phase_correlation( ref, tpl, poc );
			
			// find the maximum value and its location
			CvPoint minloc, maxloc;
			double  minval, maxval;
			cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
			
			// print it
			printf( "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
			
			cvCvtScale(poc, pocdisp, 1.0/(maxval/2), 0);
			
			cvShowImage("poc",pocdisp);
			
			cvReleaseImage(&tpl);
		
			
		}*/

//			cvReleaseImage(&ref);
//			ref= cvCloneImage(testOutImage);

//			printf( "%g ms: done.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));
			

	} 
	
	
	cvReleaseImage(&poc);

	
	return 0;
}
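The commented-out code near the end calls a phase_correlation() helper that is not included in this example. A minimal 1-D sketch of the same idea, a normalized cross-power spectrum followed by an inverse transform whose peak gives the shift, using the same FFTW calls as above:

#include <fftw3.h>
#include <math.h>

/* Sketch: 1-D phase correlation of two real signals a and b of length n.
   out receives the inverse transform of the normalized cross-power
   spectrum; the index of its maximum estimates the relative shift. */
static void phase_correlate_1d(const double *a, const double *b, double *out, int n)
{
	fftw_complex *fa = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n);
	fftw_complex *fb = (fftw_complex *)fftw_malloc(sizeof(fftw_complex) * n);

	for (int i = 0; i < n; i++) {
		fa[i][0] = a[i]; fa[i][1] = 0.0;
		fb[i][0] = b[i]; fb[i][1] = 0.0;
	}
	fftw_plan pa = fftw_plan_dft_1d(n, fa, fa, FFTW_FORWARD, FFTW_ESTIMATE);
	fftw_plan pb = fftw_plan_dft_1d(n, fb, fb, FFTW_FORWARD, FFTW_ESTIMATE);
	fftw_execute(pa); fftw_execute(pb);

	for (int i = 0; i < n; i++) {
		/* Fb * conj(Fa), normalized to unit magnitude */
		double re  = fb[i][0]*fa[i][0] + fb[i][1]*fa[i][1];
		double im  = fb[i][1]*fa[i][0] - fb[i][0]*fa[i][1];
		double mag = sqrt(re*re + im*im);
		fa[i][0] = (mag == 0.0) ? 0.0 : re / mag;
		fa[i][1] = (mag == 0.0) ? 0.0 : im / mag;
	}
	fftw_plan pinv = fftw_plan_dft_1d(n, fa, fb, FFTW_BACKWARD, FFTW_ESTIMATE);
	fftw_execute(pinv);
	for (int i = 0; i < n; i++) out[i] = fb[i][0] / n;  /* scale the unnormalized inverse */

	fftw_destroy_plan(pa); fftw_destroy_plan(pb); fftw_destroy_plan(pinv);
	fftw_free(fa); fftw_free(fb);
}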
Example #6
int main()
{
	// Load the image we'll work on
	IplImage* img = cvLoadImage("C:\\goal_arena.jpg");
	CvSize imgSize = cvGetSize(img);

	// This will hold the white parts of the image
	IplImage* detected = cvCreateImage(imgSize, 8, 1);

	// These hold the three channels of the loaded image
	IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
	IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
	IplImage* imgRed = cvCreateImage(imgSize, 8, 1);
	cvSplit(img, imgBlue, imgGreen, imgRed, NULL);

	// Extract white parts into detected
	cvAnd(imgGreen, imgBlue, detected);
	cvAnd(detected, imgRed, detected);

	// Morphological opening
	cvErode(detected, detected);
	cvDilate(detected, detected);

	// Thresholding (I knew you wouldn't catch this one... so I wrote a comment here.
	// I mean, the command can be so deceiving at times)
	cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);

	// Do the hough thingy
	CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
	cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);
	
	// The two endpoints for each boundary line
	CvPoint left1 = cvPoint(0, 0);
	CvPoint left2 = cvPoint(0, 0);
	CvPoint right1 = cvPoint(0, 0);
	CvPoint right2 = cvPoint(0, 0);
	CvPoint top1 = cvPoint(0, 0);
	CvPoint top2 = cvPoint(0, 0);
	CvPoint bottom1 = cvPoint(0, 0);
	CvPoint bottom2 = cvPoint(0, 0);

	// Some numbers we're interested in
	int numLines = lines->rows;
	int numTop = 0;
	int numBottom = 0;
	int numLeft = 0;
	int numRight = 0;

	// Iterate through each line
	for(int i=0;i<numLines;i++)
	{
		// Get the parameters for the current line
		CvScalar dat = cvGet1D(lines, i);
		double rho = dat.val[0];
		double theta = dat.val[1];
		
		if(theta==0.0)
		{
			// This is an obviously vertical line... and we can't approximate it... NEXT
			continue;
		}

		// Convert from radians to degrees
		double degrees = theta*180/CV_PI;
		
		// Generate two points on this line (one at x=0 and one at x=image's width)
		CvPoint pt1 = cvPoint(0, rho/sin(theta));
		CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));
		
		if(abs(rho)<50)		// Top + left
		{
			if(degrees>45 && degrees<135)	// Top
			{
				numTop++;

				// The line is horizontal and near the top
				top1.x+=pt1.x;
				top1.y+=pt1.y;
			
				top2.x+=pt2.x;
				top2.y+=pt2.y;
			}
			else	// left
			{
				numLeft++;

				//The line is vertical and near the left
				left1.x+=pt1.x;
				left1.y+=pt1.y;
			
				left2.x+=pt2.x;
				left2.y+=pt2.y;
			}
		}
		else // bottom+right
		{
			if(degrees>45 && degrees<135)	// Bottom
			{
				numBottom++;

				//The line is horizontal and near the bottom
				bottom1.x+=pt1.x;
				bottom1.y+=pt1.y;
			
				bottom2.x+=pt2.x;
				bottom2.y+=pt2.y;
			}
			else	// Right
			{
				numRight++;

				// The line is vertical and near the right
				right1.x+=pt1.x;
				right1.y+=pt1.y;
				
				right2.x+=pt2.x;
				right2.y+=pt2.y;
			}
		}
	}

	// we've done the adding... now the dividing to get the "averaged" point
	// (assumes at least one line fell into each group; otherwise this divides by zero)
	left1.x/=numLeft;
	left1.y/=numLeft;
	left2.x/=numLeft;
	left2.y/=numLeft;

	right1.x/=numRight;
	right1.y/=numRight;
	right2.x/=numRight;
	right2.y/=numRight;

	top1.x/=numTop;
	top1.y/=numTop;
	top2.x/=numTop;
	top2.y/=numTop;

	bottom1.x/=numBottom;
	bottom1.y/=numBottom;
	bottom2.x/=numBottom;
	bottom2.y/=numBottom;

	// Render these lines onto the image
	cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
	cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
	cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
	cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

	// Next, we need to figure out the four intersection points
	double leftA = left2.y-left1.y;
	double leftB = left1.x-left2.x;
	double leftC = leftA*left1.x + leftB*left1.y;

	double rightA = right2.y-right1.y;
	double rightB = right1.x-right2.x;
	double rightC = rightA*right1.x + rightB*right1.y;

	double topA = top2.y-top1.y;
	double topB = top1.x-top2.x;
	double topC = topA*top1.x + topB*top1.y;

	double bottomA = bottom2.y-bottom1.y;
	double bottomB = bottom1.x-bottom2.x;
	double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;
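
	// Each boundary line is stored in the form A*x + B*y = C. Two such lines
	// meet where  [A1 B1; A2 B2] [x; y] = [C1; C2],  so by Cramer's rule:
	//   x = (B2*C1 - B1*C2) / det,   y = (A1*C2 - A2*C1) / det,
	// with det = A1*B2 - A2*B1 (det == 0 means the lines are parallel).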

	// Intersection of left and top
	double detTopLeft = leftA*topB - leftB*topA;
	CvPoint ptTopLeft = cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft);

	// Intersection of top and right
	double detTopRight = rightA*topB - rightB*topA;
	CvPoint ptTopRight = cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight);

	// Intersection of right and bottom
	double detBottomRight = rightA*bottomB - rightB*bottomA;
	CvPoint ptBottomRight = cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight);

	// Intersection of bottom and left
	double detBottomLeft = leftA*bottomB-leftB*bottomA;
	CvPoint ptBottomLeft = cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft);

	// Render the points onto the image
	cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
	cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

	// Initialize a mask
	IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
	cvZero(imgMask);

	// Generate the mask
	CvPoint* pts = new CvPoint[4];
	pts[0] = ptTopLeft;
	pts[1] = ptTopRight;
	pts[2] = ptBottomRight;
	pts[3] = ptBottomLeft;
	cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));

	// Delete anything thats outside the mask
	cvAnd(img, imgMask, img);

	// Show all images in windows
	cvNamedWindow("Original");
	cvNamedWindow("Detected");

	cvShowImage("Original", img);
	cvShowImage("Detected", detected);

	cvWaitKey(0);

	return 0;
}
Example #7
/**
 * @brief Main entry point
 * @param argc Number of program arguments
 * @param argv Program argument strings
 * @return Nothing on success, or a negative number on error
 */
int main( int argc, char** argv ) {
	
	if( argc < 4 )
		return -1;

	// Variable declarations
	gsl_rng *rng;
	IplImage *frame, *hsv_frame;
	histogram **ref_histos, *histo_aux;
	CvCapture *video;
	particle **particles, **aux, **nuevas_particulas;
	CvScalar color_rojo = CV_RGB(255,0,0), color_azul = CV_RGB(0,0,255);
	CvRect *regions;
	int num_objects = 0;
	int i = 1, MAX_OBJECTS = atoi(argv[3]), PARTICLES = atoi(argv[2]);
	FILE *datos;
	char name[45], num[3], *p1, *p2;
	clock_t t_ini, t_fin;
	double ms;
	
	video = cvCaptureFromFile( argv[1] );
	if( !video ) {
		printf("No se pudo abrir el fichero de video %s\n", argv[1]);
		exit(-1);
	}

	first_frame = cvQueryFrame( video );
	num_objects = get_regions( &regions,  MAX_OBJECTS, argv[1] );
	if( num_objects == 0 )
		exit(-1);

	t_ini = clock();
	hsv_frame = bgr2hsv( first_frame );
	histo_aux = (histogram*) malloc( sizeof(histogram) );
	histo_aux->n = NH*NS + NV;
	nuevas_particulas = (particle**) malloc( num_objects * sizeof( particle* ) );
	for( int j = 0; j < num_objects; ++j )
		nuevas_particulas[j] = (particle*) malloc( PARTICLES * sizeof( particle ) );
			
	// Compute the reference histograms and spread the initial particles
	ref_histos = compute_ref_histos( hsv_frame, regions, num_objects );
	particles = init_distribution( regions, num_objects, PARTICLES );

	// Show the tracking
	if( show_tracking ) {

		// Show all particles
		if( show_all )
			for( int k = 0; k < num_objects; ++k )
				for( int j = 0; j < PARTICLES; ++j )
					display_particle( first_frame, particles[k][j], color_azul );

		// Draw the most promising particle of each object
		for( int k = 0; k < num_objects; ++k )
			display_particle( first_frame, particles[k][0], color_rojo );

		cvNamedWindow( "Video", 1 );
		cvShowImage( "Video", first_frame );
		cvWaitKey( 5 );
	}

	// Export the reference histograms and the frames
	if( exportar ) {
		export_ref_histos( ref_histos, num_objects );
		export_frame( first_frame, 1 );

		for( int k = 0; k < num_objects; ++k ) {
			sprintf( num, "%02d", k );
			strcpy( name, REGION_BASE);
			p1 = strrchr( argv[1], '/' );
			p2 = strrchr( argv[1], '.' );
			strncat( name, (++p1), p2-p1 );
			strcat( name, num );
			strcat( name, ".txt" );
			datos = fopen( name, "a+" );
			if( ! datos ) {
				printf("Error creando fichero para datos\n");
				return -1;
			}
			fprintf( datos, "%d\t%f\t%f\n", 0, particles[k][0].x, particles[k][0].y );
			fclose( datos );
		}
	}

	cvReleaseImage( &hsv_frame );
	
	// Initialize the random number generator
	gsl_rng_env_setup();
	rng = gsl_rng_alloc( gsl_rng_mt19937 );
	gsl_rng_set(rng, (unsigned long) time(NULL));

	// Note: frame must not be released, since cvQueryFrame owns the returned buffer
	while( (frame = cvQueryFrame( video )) ) {
		hsv_frame = bgr2hsv( frame );

		// Run the prediction and likelihood measurement for each particle
		for( int k = 0; k < num_objects; ++k )
			for( int j = 0; j < PARTICLES; ++j ) {
				transition( &particles[k][j], frame->width, frame->height, rng );
				particles[k][j].w = likelihood( hsv_frame, &particles[k][j], ref_histos[k], histo_aux );
			}
			
		// Normalize the weights and resample an unweighted particle set
		normalize_weights( particles, num_objects, PARTICLES );
		for (int k = 0; k < num_objects; ++k )
			resample( particles[k], PARTICLES, nuevas_particulas[k] );
		aux = particles;
		particles = nuevas_particulas;
		nuevas_particulas = aux;

		// Show the tracking
		if( show_tracking ) {

			// Show all particles
			if( show_all )
				for( int k = 0; k < num_objects; ++k )
					for( int j = 0; j < PARTICLES; ++j )
						display_particle( frame, particles[k][j], color_azul );
		
			// Draw the most promising particle of each object
			for( int k = 0; k < num_objects; ++k )
				display_particle( frame, particles[k][0], color_rojo );
			cvNamedWindow( "Video", 1 );
			cvShowImage( "Video", frame );
			cvWaitKey( 5 );
		}

		// Export the reference histograms and the frames
		if( exportar ) {
			export_frame( frame, i+1 );

			for( int k = 0; k < num_objects; ++k ) {
				sprintf( num, "%02d", k );
				strcpy( name, REGION_BASE);
				p1 = strrchr( argv[1], '/' );
				p2 = strrchr( argv[1], '.' );
				strncat( name, (++p1), p2-p1 );
				strcat( name, num );
				strcat( name, ".txt" );
				datos = fopen( name, "a+" );
				if( ! datos ) {
					printf("Error abriendo fichero para datos\n");
					return -1;
				}
				fprintf( datos, "%d\t%f\t%f\n", i, particles[k][0].x, particles[k][0].y );
				fclose( datos );
			}
		}

		cvReleaseImage( &hsv_frame );
		++i;
	}
	
	// Free all resources used (mallocs, gsl and frames)
	cvReleaseCapture( &video );
	gsl_rng_free( rng );
	free( histo_aux );
	free( regions );

	for( int i = 0; i < num_objects; ++i ) {
		free( ref_histos[i] );
		free( particles[i] );
		free( nuevas_particulas[i] );
	}

	free( particles );
	free( nuevas_particulas );

	t_fin = clock();
	ms = ((double)(t_fin - t_ini) / CLOCKS_PER_SEC) * 1000.0;
	printf("%d\t%d\t%.10g\n", PARTICLES, num_objects, ms);
	return 0;
}
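The resample() step above is not defined in this example. A common implementation for particle filters is systematic resampling; a minimal sketch using the particle type as it appears here (a float weight member w is assumed, matching the likelihood assignment above):

/* Sketch: systematic resampling of n weighted particles src into dst.
   Assumes the weights src[i].w have been normalized to sum to 1. */
void resample_systematic(particle *src, int n, particle *dst, gsl_rng *rng)
{
	double u = gsl_rng_uniform(rng) / n;	/* one random offset in [0, 1/n) */
	double cum = src[0].w;
	int i = 0;
	for (int j = 0; j < n; j++) {
		while (u > cum && i < n - 1)
			cum += src[++i].w;
		dst[j] = src[i];
		dst[j].w = 1.0 / n;	/* uniform weights after resampling */
		u += 1.0 / n;
	}
}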
Example #8
void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int frame_skip, char *prefix, float hier, int w, int h, int frames, int fullscreen)
{
    //skip = frame_skip;
    image **alphabet = load_alphabet();
    int delay = frame_skip;
    demo_names = names;
    demo_alphabet = alphabet;
    demo_classes = classes;
    demo_thresh = thresh;
    demo_hier = hier;
    printf("Demo\n");
    net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    set_batch_network(&net, 1);

    srand(2222222);

    if(filename){
        printf("video file: %s\n", filename);
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);

        if(w){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
        }
        if(h){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if(frames){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
        }
    }

    if(!cap) error("Couldn't connect to webcam.\n");

    layer l = net.layers[net.n-1];
    int j;

    avg = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) images[j] = make_image(1,1,3);

    boxes = (box *)calloc(l.w*l.h*l.n, sizeof(box));
    probs = (float **)calloc(l.w*l.h*l.n, sizeof(float *));
    for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float));

    pthread_t fetch_thread;
    pthread_t detect_thread;

    fetch_in_thread(0);
    det = in;
    det_s = in_s;

    fetch_in_thread(0);
    detect_in_thread(0);
    disp = det;
    det = in;
    det_s = in_s;

    for(j = 0; j < FRAMES/2; ++j){
        fetch_in_thread(0);
        detect_in_thread(0);
        disp = det;
        det = in;
        det_s = in_s;
    }
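    // The warm-up loop above pre-fills half of the FRAMES-deep prediction
    // buffer so the first displayed detections are already averaged over
    // several frames.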

    int count = 0;
    if(!prefix){
        cvNamedWindow("Demo", CV_WINDOW_NORMAL); 
        if(fullscreen){
            cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
        } else {
            cvMoveWindow("Demo", 0, 0);
            cvResizeWindow("Demo", 1352, 1013);
        }
    }

    double before = get_wall_time();

    while(1){
        ++count;
        if(1){
            if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
            if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");

            if(!prefix){
                show_image(disp, "Demo");
                int c = cvWaitKey(1);
                if (c != -1) c = c%256;
                if (c == 10){
                    if(frame_skip == 0) frame_skip = 60;
                    else if(frame_skip == 4) frame_skip = 0;
                    else if(frame_skip == 60) frame_skip = 4;   
                    else frame_skip = 0;
                } else if (c == 27) {
                    return;
                } else if (c == 82) {
                    demo_thresh += .02;
                } else if (c == 84) {
                    demo_thresh -= .02;
                    if(demo_thresh <= .02) demo_thresh = .02;
                } else if (c == 83) {
                    demo_hier += .02;
                } else if (c == 81) {
                    demo_hier -= .02;
                    if(demo_hier <= .0) demo_hier = .0;
                }
            }else{
                char buff[256];
                sprintf(buff, "%s_%08d", prefix, count);
                save_image(disp, buff);
            }

            pthread_join(fetch_thread, 0);
            pthread_join(detect_thread, 0);

            if(delay == 0){
                free_image(disp);
                disp  = det;
            }
            det   = in;
            det_s = in_s;
        }else {
            fetch_in_thread(0);
            det   = in;
            det_s = in_s;
            detect_in_thread(0);
            if(delay == 0) {
                free_image(disp);
                disp = det;
            }
            show_image(disp, "Demo");
            cvWaitKey(1);
        }
        --delay;
        if(delay < 0){
            delay = frame_skip;

            double after = get_wall_time();
            float curr = 1./(after - before);
            fps = curr;
            before = after;
        }
    }
}
Example #9
////////////////////////////////////////////////////////////////////////////////////	
// Display the tensor information at every scale as a color image
////////////////////////////////////////////////////////////////////////////////////
void Tensor::ShowTensorByColorImage()
{
	double ret_minr=0.0;
	double ret_maxr=0.0;
	double ret_ming=0.0;
	double ret_maxg=0.0;
	double ret_minb=0.0;
	double ret_maxb=0.0;
	int x,y,i;
	// texture features
	IplImage **pImg= new IplImage *[m_levels];
	for (i = 0;i < m_levels;i++)
	{
		pImg[i] = cvCreateImage( cvGetSize(m_img), m_img->depth, 3);
		cvZero(pImg[i]);
	}

	CString * ptitle=new CString [m_levels];

	for (i=0;i<m_levels;i++)
	{
		// find the per-channel min and max of each image
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				if((*m_pImageTensorRGB[i])(x,y).r>ret_maxr)
				{
					ret_maxr=(*m_pImageTensorRGB[i])(x,y).r;
				}
				if ((*m_pImageTensorRGB[i])(x,y).r<ret_minr)
				{
					ret_minr=(*m_pImageTensorRGB[i])(x,y).r;
				}

				if((*m_pImageTensorRGB[i])(x,y).g>ret_maxg)
				{
					ret_maxg=(*m_pImageTensorRGB[i])(x,y).g;
				}
				if ((*m_pImageTensorRGB[i])(x,y).g<ret_ming)
				{
					ret_ming=(*m_pImageTensorRGB[i])(x,y).g;
				}

				if((*m_pImageTensorRGB[i])(x,y).b>ret_maxb)
				{
					ret_maxb=(*m_pImageTensorRGB[i])(x,y).b;
				}
				if ((*m_pImageTensorRGB[i])(x,y).b<ret_minb)
				{
					ret_minb=(*m_pImageTensorRGB[i])(x,y).b;
				}

			}
		}
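		// Linear min-max normalization of each channel to the 8-bit range:
		// out = (v - min) / (max - min) * 255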
		uchar * dst=(uchar *)pImg[i]->imageData;
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				int temp=y*(pImg[i]->widthStep)+3*x;
				dst[temp+2]=(uchar)(((*m_pImageTensorRGB[i])(x,y).r-ret_minr)/(ret_maxr-ret_minr)*255);
				dst[temp+1]=(uchar)(((*m_pImageTensorRGB[i])(x,y).g-ret_ming)/(ret_maxg-ret_ming)*255);
				dst[temp+0]=(uchar)(((*m_pImageTensorRGB[i])(x,y).b-ret_minb)/(ret_maxb-ret_minb)*255);
			}
		}
		ptitle[i].Format(_T("Image Texture of Level %d"),i);
		cvNamedWindow((char *)(LPCTSTR)ptitle[i],CV_WINDOW_AUTOSIZE);
		cvShowImage((char *)(LPCTSTR)ptitle[i],pImg[i]);
	}
	if (pImg != NULL)
	{
		for (i=0;i<m_levels;i++)
		{
			cvReleaseImage(&pImg[i]);
		}
		delete [] pImg;
	}
}
Example #10
int _tmain(int argc, _TCHAR* argv[])
{
	location bot, target;
	bot.x = 300;		bot.y = 20;		bot.theta = 90.0;
	target.x = 300;	target.y = 450;	target.theta = 90.000;

	list *ol = NULL, *cl = NULL;
	elem e,vare;
	e.l = bot;	e.g = 0;	e.h = 0;	e.id = UNDEFINED;

	int n = 13;
	elem* np = loadPosData(n);
	
	while(1)
	{
		cl = append(cl, e);
		//printList(cl);
		if(isNear(e.l, target))
			break;
		ol = update(ol, e, target, np, n);
		//printList(ol);
		e = findMin(ol);
		printf("Min: (%.3f, %.3f, %.3f)\n", e.l.x, e.l.y, e.l.theta);
		ol = detach(ol, e);
		//printList(ol);
		//getchar();
	}
	//getchar();
	cvNamedWindow("hello",CV_WINDOW_AUTOSIZE);
	IplImage *img = cvCreateImage(cvSize(500, 500), IPL_DEPTH_8U, 3);
	cvCircle(img, cvPoint(300, 500-300), 45, CV_RGB(0, 15, 200), 1, CV_AA, 0);

	//list *t = cl;
	//while(t)
	//{
	//	cvLine(img,cvPoint(t->p.parent.x*40,500-(t->p.parent.y*40)),cvPoint(t->p.parent.x*40+2,500-(t->p.parent.y*40)-2),CV_RGB(255,255,0),2,CV_AA,0);
	//	//printf("(%.3f, %.3f) ", t->p.l.x, t->p.l.y);
	//	t=t->next;
	//}
	CvPoint a = cvPoint(target.x, 500 - (target.y));
	CvPoint b = cvPoint((target.x + 10*cos(target.theta*(CV_PI/180))), 500 - ((target.y+10*sin(target.theta*(CV_PI/180)))));
	cvLine(img, a, b, CV_RGB(0,255,0), 2, CV_AA, 0);

	a = cvPoint(bot.x, 500 - (bot.y));
	b = cvPoint((bot.x + 10*cos(bot.theta*(CV_PI/180))), 500 - ((bot.y+10*sin(bot.theta*(CV_PI/180)))));
	cvLine(img, a, b, CV_RGB(0,0,255), 2, CV_AA, 0);

	vare = e;
	a = cvPoint(vare.l.x, 500 - (vare.l.y));
	b = cvPoint((vare.l.x + 10*cos(vare.l.theta*(CV_PI/180))), 500 - ((vare.l.y+10*sin(vare.l.theta*(CV_PI/180)))));
	cvLine(img, a, b, CV_RGB(255,0,0), 2, CV_AA, 0);
	
	printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
	while(!((fabs(vare.l.x-bot.x) < 1.25) && (fabs(vare.l.y-bot.y) < 1.25)))
	{
		vare=searchforcoor(cl,vare.parent.x,vare.parent.y);
		if(vare.id != -1)
		{
			printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
			a = cvPoint(vare.l.x, 500 - (vare.l.y));
			b = cvPoint((vare.l.x + 10*cos(vare.l.theta*(CV_PI/180))), 500 - ((vare.l.y+10*sin(vare.l.theta*(CV_PI/180)))));
			cvLine(img, a, b, CV_RGB(255,0,0), 2, CV_AA, 0);
		}
	}

	cvShowImage("hello",img);
	cvWaitKey(0);
}
Example #11
void surf_match(IplImage* object_color, IplImage* object, IplImage* image,const CvSeq *objectKeypoints,const CvSeq *imageKeypoints,const CvSeq * objectDescriptors,const CvSeq * imageDescriptors, CvPoint val[4])
{
    cvNamedWindow("Object", 0);
    cvNamedWindow("Object Correspond", 0);

    static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}},
        {{255,255,255}}
    };

    int i;

	CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
    CvPoint dst_corners[4];
    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    cvCopy( object, correspond );
    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    cvCopy( image, correspond );
    cvResetImageROI( correspond );

#ifdef USE_FLANN
    printf("Using approximate nearest neighbor search\n");
#endif

    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,
        imageDescriptors, src_corners, dst_corners ))
    {
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),
                cvPoint(r2.x, r2.y+object->height ), colors[8] );
        }
    }
    vector<int> ptpairs;
#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
    for( i = 0; i < (int)ptpairs.size(); i += 2 )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );
        cvLine( correspond, cvPointFrom32f(r1->pt),
            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }

    cvShowImage( "Object Correspond", correspond );
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );

    cvWaitKey(0);

    cvDestroyWindow("Object");
    cvDestroyWindow("Object SURF");
    cvDestroyWindow("Object Correspond");

	//CvPoint val[4];
	for(int k=0;k<4;k++)
	{
//		printf("%d %d \n", dst_corners[k].x, dst_corners[k].y);
		val[k] = dst_corners[k]; 
	}

}
Example #12
int main()
{
	// Initialize capturing live feed from the camera
	CvCapture* capture = 0;
	capture = cvCaptureFromCAM(1);	 //depending on from which camera you are Capturing
	// Couldn't get a device? Throw an error and quit
	if(!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }

	// The two windows we'll be using
    cvNamedWindow("video");
	cvNamedWindow("thresh");

	// This image holds the "scribble" data...
	// the tracked positions of the ball
	IplImage* imgScribble = NULL;

	// An infinite loop
	while(true)
    {
		// Will hold a frame captured from the camera
		IplImage* frame = 0;
		frame = cvQueryFrame(capture);

		// If we couldn't grab a frame... quit
        if(!frame)
            break;
		
		// If this is the first frame, we need to initialize it
		if(imgScribble == NULL)
		{
			imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(frame);

		// Calculate the moments to estimate the position of the ball
		CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
		cvMoments(imgYellowThresh, moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(moments, 1, 0);
		double moment01 = cvGetSpatialMoment(moments, 0, 1);
		double area = cvGetCentralMoment(moments, 0, 0);
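		// Centroid of the thresholded blob from raw moments: (m10/m00, m01/m00);
		// cvGetCentralMoment(0,0) equals the spatial moment m00, i.e. the area.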

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		posX = moment10/area;
		posY = moment01/area;

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if its a valid position
		if(lastX>0 && lastY>0 && posX>0 && posY>0)
		{
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(frame, imgScribble, frame);
		cvShowImage("thresh", imgYellowThresh);
		cvShowImage("video", frame);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if(c!=-1)
		{
			// If pressed, break out of the loop
            break;
		}

		// Release the thresholded image... we need no memory leaks.. please
		cvReleaseImage(&imgYellowThresh);

		free(moments);	// allocated with malloc, so free() it (not delete)
    }

	// We're done using the camera. Other applications can now use it
	cvReleaseCapture(&capture);
    return 0;
}
Example #13
void basicOCR::printCvSeq(CvSeq* seq, IplImage* imgSrc, IplImage* img_gray, CvMemStorage* storage)
{
	CvSeq* si = seq;
	CvRect rcFirst = findFirstChar(seq, 0);
	if (rcFirst.x == 0)
	{
		printf("No words found...\n");
		return;
	}
	else
		printf("\nOCR of text:\n");
	CvRect rcNewFirst = rcFirst;
	cvDrawRect(imgSrc, cvPoint(rcFirst.x, rcFirst.y), cvPoint(rcFirst.x + rcFirst.width, rcFirst.y + rcFirst.height), CV_RGB(0, 0, 0));
	int printX = rcFirst.x - 1;
	int printY = rcFirst.y - 1;

	int idx = 0;
	char szName[56] = {0};
	int tempCount=0;

	while (true)
	{
		CvRect rc = findPrintRect(seq, printX, printY, rcFirst);
		cvDrawRect(imgSrc, cvPoint(rc.x, rc.y), cvPoint(rc.x + rc.width, rc.y + rc.height), CV_RGB(0, 0, 0));
		// skip useless parts (degenerate rectangles)
		/*if (rc.width <= 1 && rc.height <= 1)
		{
		continue;
		}*/

		if (printX < rc.x)
		{
			if ((rc.x - printX) >= (rcFirst.width / 2))
				printf(" ");
			printX = rc.x;
			//cvDrawRect(imgSrc, cvPoint(rc.x, rc.y), cvPoint(rc.x + rc.width, rc.y + rc.height), CV_RGB(255, 0, 0));
			IplImage* imgNo = cvCreateImage(cvSize(rc.width, rc.height), IPL_DEPTH_8U, 3);
			cvSetImageROI(imgSrc, rc);
			cvCopyImage(imgSrc, imgNo);
			cvResetImageROI(imgSrc);
			sprintf(szName, "wnd_%d", idx++);
			// optionally show the split character image
			cvNamedWindow(szName);
			cvShowImage(szName, imgNo);
			IplImage* imgDst = cvCreateImage(cvSize(rc.width, rc.height),IPL_DEPTH_8U,1);
			cvCvtColor(imgNo, imgDst, CV_RGB2GRAY);
			printf("%c", (char)classify(imgDst, 0));
			cvReleaseImage(&imgNo);
		}
		else if (printX == rc.x && printX < imgSrc->width)
		{
			printX += rc.width;
		}
		else
		{
			printf("\n");
			printY = rcNewFirst.y + rcNewFirst.height;
			rcNewFirst = findFirstChar(seq, printY);
			if (rcNewFirst.x == 0)
				break;
			cvDrawRect(imgSrc, cvPoint(rcNewFirst.x, rcNewFirst.y), cvPoint(rcNewFirst.x + rcNewFirst.width, rcNewFirst.y + rcNewFirst.height), CV_RGB(0, 0, 0));
			printX = rcNewFirst.x - 1;
			printY = rcNewFirst.y - 1;
		}
	}
	cvNamedWindow("src");
	cvShowImage("src", imgSrc);
	cvWaitKey(0);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&imgSrc);
	cvReleaseImage(&img_gray);
	cvDestroyAllWindows();

}
Example #14
int main(int argc, char **argv)
{
  //   void *p;

  //   ((int *)p)[5];
  //   (int *)p[5];

  //   argc--, argv++;
  //   if (argc > 0) { distFileName = *argv; argc--, argv++; }
  //   if (argc > 1) { argc--, argv++; distFileName = *argv; }
  //   if (argc > 1) { argc--, argv++; distFileName = *argv; }
  //   if (!distFileName) usage();
  
  std::vector <std::vector <int> > dists;
  int zlow = INT_MAX, zhigh = 0;
  int face_row = 0, face_col = 0;
  char from_stdin = (argc == 1) ? 1 : 0;
  char *distFileName;
  std::ifstream file;
  const int WIDTH = 176, HEIGHT = 144;

  if (argc > 1) {
    distFileName = argv[1];
    file.open(distFileName, std::ifstream::in);
  }

  while (1) {
    std::string line;

    if (from_stdin) {
      char cline[100];
      char *res = fgets(cline, 100, stdin);
      if (!res) break;
      line = cline;
    } else {
      if (file.eof()) break;
      getline(file, line);
    }

    if (line[0] == '#') {
      sscanf(line.c_str(), "#face:%d\t%d\n", &face_row, &face_col);
    } else {
      std::vector <int> vals = split(line, "\t");
      if (vals.size() >= 3 && vals[2] != -1) {
        dists.push_back(vals);
        zlow = std::min(zlow, vals[2]);
        zhigh = std::max(zhigh, vals[2]);
      }
    }
  }

  if (argc > 2 && argv[2]) face_row = atoi(argv[2]);
  if (argc > 3 && argv[3]) face_col = atoi(argv[3]);
  if (face_row < 0) face_row = 0;
  if (face_col < 0) face_col = 0;

  IplImage * img = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_16U, 1);
  int imgDepth = 16;
  int maxNum = (int)pow((double)2, (double)imgDepth);
  double ratio = (double)maxNum/(double)(zhigh - zlow);
  cvSetZero(img);

  for (int i=0; i<(int)dists.size(); i++)
    {
      int z = dists[i][2];
      z -= zlow;
      z = (int)(z*ratio);
      cvSet2D(img, dists[i][0], dists[i][1], cvScalarAll(z));
    }

  cvCircle(img, cvPoint(face_row, face_col), 5, CV_RGB(maxNum, maxNum, maxNum));

  cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
  cvShowImage("result", img);
  cvWaitKey(0);
  cvReleaseImage(&img);
  cvDestroyWindow("result");

  return 0;
}
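For reference, the loop above linearly remaps each measured depth z from the observed range [z_low, z_high] onto the 16-bit range of the display image:

z' = (z - z_{low}) \cdot \frac{2^{16}}{z_{high} - z_{low}}

so the nearest point maps to 0 and the farthest to 2^16; in practice the farthest point saturates at the largest representable value, 65535.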
Example #15
int main(int argc, const char **argv)
{
    //Variables
    int degrees,PosRelX,PosRelY;
    float radians,Dlaser,ODM_ang, ang;
    int width = 500, height = 500; //Create the size of the map here (in pixel)
    int centroX = (width / 2);
    int centroY = (height / 2);
    playerc_client_t *client;
    playerc_laser_t *laser;
    playerc_position2d_t *position2d;
    CvPoint pt,pt1,pt2;
    CvScalar cinzaE,preto,cinzaC;
    char window_name[] = "Map";

    IplImage* image = cvCreateImage( cvSize(width,height), 8, 3 );
    cvNamedWindow(window_name, 1 );
    preto = CV_RGB(0, 0, 0);        // black: obstacles
    cinzaE = CV_RGB(92, 92, 92);    // dark grey: unknown space
    cinzaC = CV_RGB(150, 150, 150); // light grey: free space

    client = playerc_client_create(NULL, "localhost", 6665);
    if (playerc_client_connect(client) != 0)
    return -1;

    laser = playerc_laser_create(client, 0);
    if (playerc_laser_subscribe(laser, PLAYERC_OPEN_MODE))
    return -1;

    position2d = playerc_position2d_create(client, 0);
    if (playerc_position2d_subscribe(position2d, PLAYERC_OPEN_MODE) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    if (playerc_client_datamode (client, PLAYERC_DATAMODE_PULL) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    if (playerc_client_set_replace_rule (client, -1, -1, PLAYER_MSGTYPE_DATA, -1, 1) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    playerc_position2d_enable(position2d, 1);  // initialise motors
    playerc_position2d_set_odom(position2d, 0, 0, 0);  // Set odometer to zero

    cvSet(image, cinzaE,0); // fill the map with the unknown (dark grey) colour
    pt.x = centroX;  // Zero coordinate for x
    pt.y = centroY;  // Zero coordinate for y


    while(1) {
        playerc_client_read(client);
        cvSaveImage("mapa.jpg",image);
        playerc_client_read(client);

        for (degrees = 2; degrees <= 360; degrees+=2) {
            Dlaser = laser->scan[degrees][0];
            if (Dlaser < 8) {
                radians = graus2rad (degrees/2);      // convert the laser angle to radians
                ODM_ang = position2d->pa;             // heading of the robot from odometry
                ang = ((1.5*PI)+radians+ODM_ang);     // convert to an angle in the world frame
                PosRelX = arredonda(position2d->px);  // robot x position
                PosRelY = arredonda(position2d->py);  // robot y position
                pt1.y = (centroY-PosRelY);            // global y coordinate of the robot
                pt1.x = (centroX+PosRelX);            // global x coordinate of the robot

                // convert polar coordinates to rectangular (global)
                pt.y = (int)(pt1.y-(sin(ang)*Dlaser*10));
                pt.x = (int)(pt1.x+(cos(ang)*Dlaser*10));

                // draw the free area as a line
                cvLine(image, pt1,pt,cinzaC, 1,4,0);

                // mark the obstacle on the map
                cvLine(image, pt,pt,preto, 1,4,0);

                // show the map on screen
                cvShowImage(window_name, image );
                cvWaitKey(10);
            }
        }
    }

    //Disconnect player
    playerc_laser_unsubscribe(laser);
    playerc_laser_destroy(laser);
    playerc_client_disconnect(client);
    playerc_client_destroy(client);

    // Release the image and destroy the OpenCV window
    cvReleaseImage(&image);
    cvDestroyWindow(window_name);
    return 0;
}
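For reference, the inner loop converts each laser return from polar to global pixel coordinates. With robot position (x_r, y_r) and odometry heading \theta_{odom}, beam angle \theta_{laser} and range D in metres, the code computes

\theta = \tfrac{3\pi}{2} + \theta_{laser} + \theta_{odom}, \qquad x = x_r + 10\,D\cos\theta, \qquad y = y_r - 10\,D\sin\theta,

where the factor 10 is the map scale (10 pixels per metre), the minus sign on y accounts for image rows growing downwards, and the 3\pi/2 offset aligns the laser's zero angle with the image axes.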
int main(int argc, char* argv[])
{
	IplImage *m_pPreImage = NULL;
	IplImage *m_pGrayImage = NULL;
	IplImage *m_pSmoothImage = NULL;
	IplImage *pPrev = NULL;
	IplImage *pCurr = NULL;
	IplImage *pDest = NULL;
	IplImage *pMask = NULL;
	IplImage *pMaskDest = NULL;
	IplImage *dst = NULL;
	CvMat *pPrevF = NULL;
	CvMat *pCurrF = NULL;
	CvSize imgSize;

    CvCapture *m_pCapture = NULL;
	CvVideoWriter *writer = 0;
	IplConvKernel* element = NULL;
	CvSeq* contour = 0;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvRect r;

	// IplConvKernel* element;

    cvNamedWindow( "VideoDisplay1", 1 );
	cvNamedWindow( "VideoDisplay2", 1 );
	cvNamedWindow( "VideoDisplay3", 1 );
	cvNamedWindow( "VideoDisplay4", 1 );
	
// Capture
	m_pCapture = cvCreateFileCapture("MVI_8833.AVI");
	contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),storage);
	

    if( !m_pCapture )
    {
        fprintf(stderr,"Could not initialize capturing! \n");
        return -1;
    }
// Display
    while ( (m_pPreImage = cvQueryFrame(m_pCapture)))
    {	
		imgSize = cvSize(m_pPreImage->width, m_pPreImage->height);
		if(!m_pGrayImage)
			m_pGrayImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		if(!pCurr)
			pCurr = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);	
		if(!m_pSmoothImage)
			m_pSmoothImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);

		// Image preprocessing
		cvCvtColor(m_pPreImage, m_pGrayImage, CV_BGR2GRAY);        // convert to grayscale
		cvSmooth(m_pGrayImage,m_pSmoothImage,CV_GAUSSIAN,3,0,0,0 );// Gaussian smoothing to remove noise
		cvEqualizeHist(m_pSmoothImage,pCurr );                     // histogram equalization


		 if(!pPrevF)
			pPrevF = cvCreateMat(imgSize.height, imgSize.width, CV_32FC1);  // CvMat takes (rows, cols)
		 if(!pCurrF)
			pCurrF = cvCreateMat(imgSize.height, imgSize.width, CV_32FC1);
		 if(!pPrev)
			pPrev = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pMask)
			pMask = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pMaskDest)
			pMaskDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!dst)
			dst = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pDest)
			{
				pDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
				
			}
	
		cvAbsDiff(pPrev, pCurr, pDest);   // frame difference
		cvCopy(pCurr, pPrev, NULL);       // store the current frame as the previous frame

		cvThreshold(pDest, pMask, 80, 255, CV_THRESH_BINARY);     // binarize
		if (!element)
			element = cvCreateStructuringElementEx( 9, 9, 3, 3, CV_SHAPE_RECT, NULL);
		cvMorphologyEx( pMask, pMaskDest, NULL, element, CV_MOP_CLOSE, 1); // morphological close

		// find and draw the blob contours
		cvFindContours( pMaskDest, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

		// draw the minimal rectangle enclosing each target
		for(;contour;contour=contour->h_next)
		{
			r=((CvContour*)contour)->rect;
			if(r.height*r.width>100)
			{
				cvRectangle(m_pPreImage,cvPoint(r.x,r.y),cvPoint(r.x+r.width,r.y+r.height),CV_RGB(255,0,0),1,CV_AA,0);
				
			}
		}


		cvShowImage( "VideoDisplay1", m_pPreImage );
		cvShowImage( "VideoDisplay2", pMask);
		cvShowImage( "VideoDisplay3", pMaskDest );
		cvShowImage( "VideoDisplay4", pPrev );

		if(cvWaitKey(50)>0)
			break;  // fall through to the cleanup below instead of returning and leaking everything
	}

	// Release
    cvReleaseImage( &m_pPreImage );
	cvReleaseImage( &m_pGrayImage );
	cvReleaseImage( &m_pSmoothImage );
	cvReleaseImage( &pCurr );
	cvReleaseImage( &pDest );
	cvReleaseImage( &pMask );
	cvReleaseImage( &pMaskDest );
	cvReleaseImage( &dst );
	cvReleaseMemStorage( &storage );
    cvDestroyWindow("VideoDisplay1");
	cvDestroyWindow("VideoDisplay2");
	cvDestroyWindow("VideoDisplay3");
	cvDestroyWindow("VideoDisplay4");
	cvReleaseStructuringElement( &element ); 

	return 0;
}
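The detector above is plain two-frame differencing: each mask pixel comes from thresholding the absolute difference between consecutive (smoothed and equalized) frames,

M_t(x,y) = \begin{cases} 255, & |I_t(x,y) - I_{t-1}(x,y)| > 80, \\ 0, & \text{otherwise,} \end{cases}

and the 9x9 morphological close then merges the fragmented motion pixels into solid blobs so that cvFindContours returns one rectangle per moving object.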
Example #17
int main(void)
{
  IplImage *src=NULL;
  if (0){  /* disabled branch: use a synthetic 4x4 test pattern instead of test.png */
    src = cvCreateImageHeader(cvSize(4,4),IPL_DEPTH_8U,1);
    char rawdata[4][4] = { {0, 0, 1, 1},
			   {0, 0, 1, 1},
			   {0, 2, 2, 2},
			   {2, 2, 3, 3}};
    src->imageData = (char*)(&rawdata);
  }else{
    src = cvLoadImage("test.png",0);
  }
  CvGLCM* glcm;
//  glcm = cvCreateGLCM(src, 1, NULL, 4, CV_GLCM_OPTIMIZATION_LUT);
  glcm = cvCreateGLCM(src, 1, NULL, 4, CV_GLCM_OPTIMIZATION_NONE);
  cvCreateGLCMDescriptors(glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST);
//#define CV_GLCMDESC_ENTROPY                         0
//#define CV_GLCMDESC_ENERGY                          1
//#define CV_GLCMDESC_HOMOGENITY                      2
//#define CV_GLCMDESC_CONTRAST                        3
//#define CV_GLCMDESC_CLUSTERTENDENCY                 4
//#define CV_GLCMDESC_CLUSTERSHADE                    5
//#define CV_GLCMDESC_CORRELATION                     6
//#define CV_GLCMDESC_CORRELATIONINFO1                7
//#define CV_GLCMDESC_CORRELATIONINFO2                8
//#define CV_GLCMDESC_MAXIMUMPROBABILITY              9

  for (int step=0; step<4; step++){ 
    for (int i=0; i<10; i++){
      printf("%.3f,", cvGetGLCMDescriptor(glcm, step, i));
    }
    printf("\n");
    
  }


  IplImage *d0org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,0),d0org,CV_INTER_NN);
  IplImage *d0 = cvCreateImage(cvGetSize(d0org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d0org,d0,255,0);
  cvNormalize(d0,d0,0,255,CV_MINMAX);
  cvSaveImage("d0.png",d0);

  IplImage *d1org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,1),d1org,CV_INTER_NN);
  IplImage *d1 = cvCreateImage(cvGetSize(d1org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d1org,d1,255,0);
  cvNormalize(d1,d1,0,255,CV_MINMAX);
  cvSaveImage("d1.png",d1);

  IplImage *d2org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,2),d2org,CV_INTER_NN);
  IplImage *d2 = cvCreateImage(cvGetSize(d2org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d2org,d2,255,0);
  cvNormalize(d2,d2,0,255,CV_MINMAX);
  cvSaveImage("d2.png",d2);

  IplImage *d3org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,3),d3org,CV_INTER_NN);
  IplImage *d3 = cvCreateImage(cvGetSize(d3org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d3org,d3,255,0);
  cvNormalize(d3,d3,0,255,CV_MINMAX);
  cvSaveImage("d3.png",d3);

  cvNamedWindow("D0",1);
  cvNamedWindow("D1",1);
  cvNamedWindow("D2",1);
  cvNamedWindow("D3",1);
  cvShowImage("D0",d0);
  cvShowImage("D1",d1);
  cvShowImage("D2",d2);
  cvShowImage("D3",d3);
  cvWaitKey(0);

  cvReleaseGLCM(glcm,CV_GLCM_ALL);
  return 0;
}
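cvCreateGLCM and cvCreateGLCMDescriptors come from the old cvaux texture module, whose source is not reproduced here; the descriptor indices in the commented #define list correspond to the standard Haralick statistics of the normalized co-occurrence matrix p(i,j). As a reference (the usual textbook definitions, not a quote of the cvaux code; homogeneity is sometimes written with 1+(i-j)^2 in the denominator instead), the first four are:

\text{entropy} = -\sum_{i,j} p(i,j)\log p(i,j), \qquad \text{energy} = \sum_{i,j} p(i,j)^2,

\text{homogeneity} = \sum_{i,j} \frac{p(i,j)}{1+|i-j|}, \qquad \text{contrast} = \sum_{i,j} (i-j)^2\, p(i,j).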
Example #18
int main( int argc, char** argv )
{
	IplImage *tpl = 0;
	IplImage *ref = 0;
	IplImage *poc = 0;
	char String[255];
	
	if( argc < 3 ) {
		fprintf( stderr, "Usage: phase_correlation <url1> <url2>\n" );
		return 1;	
	}
	sprintf(String, "wget %s -O image1.jpg", argv[1]);
	system(String);
	sprintf(String, "wget %s -O image2.jpg", argv[2]);
	system(String);

	/* load reference image */
	ref = cvLoadImage( "image1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	
	/* always check */
	if( ref == 0 ) {
		fprintf( stderr, "Cannot load %s!\n", argv[1] );
		return 1;	
	}
	
	/* load template image */
	tpl = cvLoadImage( "image2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	
	/* always check */
	if( tpl == 0 ) {
		fprintf( stderr, "Cannot load %s!\n", argv[2] );
		return 1;	
	}
	
	/* both images' size should be equal */
	if( ( tpl->width != ref->width ) || ( tpl->height != ref->height ) ) {
		fprintf( stderr, "Both images must have equal width and height!\n" );
		return 1;
	}
	
	/* create a new image, to store phase correlation result */
	poc = cvCreateImage( cvSize( tpl->width, tpl->height ), IPL_DEPTH_64F, 1 );
	
	/* get phase correlation of input images */
	phase_correlation( ref, tpl, poc );
	
	/* find the maximum value and its location */
    CvPoint minloc, maxloc;
	double  minval, maxval;
	cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
	
	/* print it */
	fprintf( stdout, "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
	fprintf( stdout, "percentage comparison = %2.4f\n", maxval*100 );
	
	/* display images and free memory */
	cvNamedWindow( "tpl", CV_WINDOW_AUTOSIZE );
	cvNamedWindow( "ref", CV_WINDOW_AUTOSIZE );	
	
	cvShowImage( "tpl", tpl );
	cvShowImage( "ref", ref );
	
	cvWaitKey( 0 );
	
	cvDestroyWindow( "tpl" );
	cvDestroyWindow( "ref" );	
	
	cvReleaseImage( &tpl );
	cvReleaseImage( &ref );
	cvReleaseImage( &poc );
	
	return 0;
}
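The phase_correlation() helper is not shown here, but the standard algorithm of that name works on the normalized cross-power spectrum: with F_1 and F_2 the DFTs of the two images,

R(u,v) = \frac{F_1(u,v)\,\overline{F_2(u,v)}}{\left| F_1(u,v)\,\overline{F_2(u,v)} \right|}, \qquad r = \mathcal{F}^{-1}\{R\}, \qquad (\Delta x,\Delta y) = \arg\max_{(x,y)} r(x,y),

which is why a single cvMinMaxLoc over poc is enough: the peak location is the translation between the two images, and its height is the similarity score printed above as a percentage.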
Example #19
void mainWrapper() {
    (void) ftime(&tbi);
    (void) ftime(&tbf);

    // THIS IS THE MIRACLE: splice the milliseconds into the fixed-width ctime() string
    timestamp = ctime(&tbi.time);

    printf("%.19s.%hu %s", timestamp, tbi.millitm, &timestamp[20]);

    multiCamera.setup(javaStartInterface.GetIndexCAM());
    cvNamedWindow(winName, CV_WINDOW_AUTOSIZE);
    cvSetMouseCallback(winName, mouseHandler, NULL);
    ofp = fopen("log.txt", "w");
    fprintf(ofp, "timestamp\tfps\tLeftGazeX\tLeftGazeY\tRightGazeX\tRightGazeY\tLeftBlink\tRightBlink\t\n");
    int freio = 0;
    for (;;) {

        nFrames++;
        double t = (double) cvGetTickCount();

        if (multiCamera.queryFrame() == 0)
            continue;


        CvPoint startPoint = cvPoint(cvRound(multiCamera.GetFrameWidth() / 2), cvRound(multiCamera.GetFrameHeight() / 2));

        if (javaStartInterface.IsStart()) {
            //double t = (double)cvGetTickCount();
            trackerDetector.execute(multiCamera.GetFrame());
            //t = (double)cvGetTickCount() - t;
            //printf( "%g\n",  t/((double)cvGetTickFrequency()*1000.) );
            //printf( "%d\n", FaceHeuristics::GazeLeftX );
            cvCircle(multiCamera.GetFrame(), startPoint, 10, CV_RGB(255, 255, 255), 0);
            fprintf(ofp, "%g\t%g\t%d\t%d\t%d\t%d\t%d\t%d\t\n", t / ((double) cvGetTickFrequency()*1000.), getFPS(), getLeftGazeX(), getLeftGazeY(), getRightGazeX(), getRightGazeY(), getLeftBlink(), getRightBlink());
        } else {
            cvCircle(multiCamera.GetFrame(), startPoint, 10, CV_RGB(255, 255, 255), -1);

        }
        if ((cvWaitKey(10) & 255) == 27)
            freio = 1; //break;  //Test for Escape-key press 
        if (javaStartInterface.IsStop())
            freio = 1; //break; 

        if (javaStartInterface.GetScreenWidth() > 0)
            cvMoveWindow(winName, cvRound(javaStartInterface.GetScreenWidth() / 2 - multiCamera.GetFrameWidth() / 2), cvRound(javaStartInterface.GetScreenHeight() / 2 - multiCamera.GetFrameHeight() / 2));

        //cvSmooth( multiCamera.GetFrame()  , BiFrame , CV_BILATERAL, 5, 5, sigma, sigma);
        cvShowImage(winName, multiCamera.GetFrame());

        t = (double) cvGetTickCount() - t;
        loopTime = t / ((double) cvGetTickFrequency()*1000.);
        somaLoopTime += loopTime;
        if (somaLoopTime > 1000) {
            fps = nFrames;
            //printf( "%g\n", getFPS() );
            nFrames = 0;
            somaLoopTime = 0;
        }

        if (freio) {
            //break;
        }
    }

    fclose(ofp);
    cvDestroyWindow(winName);
    exit(0);
}
Example #20
int main(int argc, char *argv[])
{
	IplImage *img = 0, *img2=0 ;
	int height,width,step,channels;
	int i,j,k;

	if(argc<4){
		printf("Usage: ./a.out <image-file-name> <watermarker image> <audio file>\n");
		exit(0);
	}

	// load an image  
	img=cvLoadImage(argv[1]);
	if(!img){
		printf("Could not load image file: %s\n",argv[1]);
		exit(0);
	}

	/// Load Watermark Image
	img2=cvLoadImage(argv[2]);
	if(!img2){
		printf("Could not load image file: %s\n",argv[2]);
		exit(0);
	}
	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	int nchannels = img->nChannels;
	data      = (uchar *)img->imageData;
	int height2=img2->height;
	int width2=img2->width;
	int step2=img2->widthStep;
	int channels2=img2->nChannels;
	data2  = (uchar *)img2->imageData;
		
	///// Inserting Watermark
	insert_watermark(img,img2);

	//Read an audio file and write into the image
	FILE *fp=fopen(argv[3],"r");
	if(fp==NULL){
		printf("Could not load audio file: %s\n",argv[3]);
		exit(0);
	}
	
	insert_audio(fp,img,img2);


	//printf("%d row=%d col=%d \n",count1,row_count,col_count);

	// Extract the four bytes of count1, the total number of bytes in the audio file
	int a1,a2,a3,a4;
	a1=count1%256;
	a2=count1%65536;
	a3=count1%16777216;
	a4=count1%4294967296;
	//printf("%d %d %d %d\n",a1,(a2-a1)>>8,(a3-a2)>>16,(a4-a3)>>24);
	int size[4]={0};
	size[0]=a1;
	size[1]=(a2-a1)>>8;
	size[2]=(a3-a2)>>16;
	size[3]=(a4-a3)>>24;

	printf("bytes=%d\n", size[0] | (size[1]<<8) | (size[2]<<16) | (size[3]<<24));
	int val1,val2;

	// insert the audio-size header into the first four pixels
	for(i=0;i<4;i++)
	{
		a1=size[i]%4;
		a2=size[i]%32;
		a3=size[i]%256;
		val1=(a2-a1)>>2;
		val2=(a3-a2)>>5;
		data[0+i*channels+0]= (data[0+i*channels+0] &252);
		data[0+i*channels+1]= (data[0+i*channels+1] &248);
		data[0+i*channels+2]= (data[0+i*channels+2] &248);
		data[0+i*channels+0]= (data[0+i*channels+0] |a1);
		data[0+i*channels+1]= (data[0+i*channels+1] |val1);
		data[0+i*channels+2]= (data[0+i*channels+2] |val2);

	}
	cvSaveImage("new_image.png", img );

	 // create a window
	cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE); 
	cvMoveWindow("mainWin", 100, 100);

	// show the image
	cvShowImage("mainWin", img );

	// wait for a key
	cvWaitKey(0);

	// release the image
	cvReleaseImage(&img );
	return 0;
}
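The header loop above packs one byte of the audio length into each of the first four pixels: the low 2 bits go into channel 0 and the next two 3-bit groups into channels 1 and 2, after clearing the target bits with the 0xFC/0xF8 masks. A minimal self-contained sketch of that bit layout and its inverse (the helper names and sample values are illustrative, not from the original program):

#include <stdio.h>

/* Embed one byte into the low bits of a 3-channel pixel: 2+3+3 bits. */
static void embed_byte(unsigned char px[3], unsigned char byte)
{
	px[0] = (unsigned char)((px[0] & 0xFC) | (byte & 0x03));        /* low 2 bits    */
	px[1] = (unsigned char)((px[1] & 0xF8) | ((byte >> 2) & 0x07)); /* middle 3 bits */
	px[2] = (unsigned char)((px[2] & 0xF8) | ((byte >> 5) & 0x07)); /* top 3 bits    */
}

/* Recover the byte from the same pixel. */
static unsigned char extract_byte(const unsigned char px[3])
{
	return (unsigned char)((px[0] & 0x03) | ((px[1] & 0x07) << 2) | ((px[2] & 0x07) << 5));
}

int main(void)
{
	unsigned char px[3] = {200, 100, 50};
	embed_byte(px, 0xA7);
	printf("round-trip: 0x%02X\n", extract_byte(px)); /* prints 0xA7 */
	return 0;
}

The decoder side reverses the loop: read the four embedded bytes back from the first four pixels and reassemble them as size[0] | size[1]<<8 | size[2]<<16 | size[3]<<24, exactly the value printed above.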
Example #21
/**
 * @brief Lets the user interactively select an object
 * @param regions Stores the rectangles that define each object
 * @param MAX_OBJECTS Maximum number of objects allowed to track
 * @param argv Uses the video name to read the corresponding default regions file
 * @return The number of objects selected by the user (<= MAX_OBJECTS)
 */
int get_regions(CvRect **regions, int MAX_OBJECTS, char *argv ) {
	
	FILE *fich;
	char name[50], *p1, *p2;
	params p;
	CvRect* r;
	int x1, y1, x2, y2, w, h;
	
	// If the regions must be read from a file...
	if(MAX_OBJECTS > 0) {
		p.n = MAX_OBJECTS;

		strcpy( name, REGION_IN);
		p1 = strrchr( &argv[1], '/' );
		p2 = strrchr( &argv[1], '.' );
		strncat( name, (++p1), p2-p1 );
		strcat( name, "txt" );
		fich = fopen( name, "r" );
		if( ! fich ) {
			strcpy( name, REGION_IN);
			p1 = strrchr( &argv[1], '/' );
			p2 = strrchr( &argv[1], '.' );
			strncat( name, (++p1), (++p2)-p1 );
			strcat( name, "txt" );
			fich = fopen( name, "r" );
			if( ! fich ) {
				printf("Error reading the initial regions\n");
				exit (-1);
			}
		}

		p.loc1 = std::vector<CvPoint>(MAX_OBJECTS);
		p.loc2 = std::vector<CvPoint>(MAX_OBJECTS);
		for( int i = 0; i < MAX_OBJECTS; ++i ) {
			int leidos = fscanf(fich, "%d", &p.loc1[i].x);
			leidos = fscanf(fich, "%d", &p.loc1[i].y);
			leidos = fscanf(fich, "%d", &p.loc2[i].x);
			leidos = fscanf(fich, "%d", &p.loc2[i].y);
		}
		fclose( fich );
	}

	// If they must be selected with the mouse...
	else {
		fprintf( stderr, "Select the region to track\n" );
		p.n = 0;
		cvNamedWindow( win_name, CV_WINDOW_AUTOSIZE );
		cvShowImage( win_name, first_frame );
		cvSetMouseCallback( win_name, &mouse, &p );
		cvWaitKey( 0 );
		cvDestroyWindow( win_name );
		if( p.n == 0 )
			return 0;
	}
	
	// Allocate space for the region list
	r = (CvRect*) malloc( p.n * sizeof( CvRect ) );

	for( int i = 0; i < p.n; ++i ) {
		x1 = MIN( p.loc1[i].x, p.loc2[i].x );
		x2 = MAX( p.loc1[i].x, p.loc2[i].x );
		y1 = MIN( p.loc1[i].y, p.loc2[i].y );
		y2 = MAX( p.loc1[i].y, p.loc2[i].y );
		w = x2 - x1;
		h = y2 - y1;
		
		//printf("%d %d %d %d ", x1, y1, x2, y2);
		// Make sure the width and height are odd
		w = ( w % 2 )? w : w+1;
		h = ( h % 2 )? h : h+1;
		r[i] = cvRect( x1, y1, w, h );
	}
	*regions = r;
	return p.n;
}
Example #22
int mainMatch(void)
{
    // Initialise capture device
    CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
    if(!capture) error("No Capture");

    // Declare Ipoints and other stuff
    IpPairVec matches;
    IpVec ipts, ref_ipts;

    // This is the reference object we wish to find in the video frame.
    // Replace "Images/object.jpg" in the line below with the planar object to be located in the video.
    IplImage *img = cvLoadImage("Images/object.jpg");
    if (img == NULL) error("Need to load reference image in order to run matching procedure");
    CvPoint src_corners[4] = {{0,0}, {img->width,0}, {img->width, img->height}, {0, img->height}};
    CvPoint dst_corners[4];

    // Extract reference object Ipoints
    surfDetDes(img, ref_ipts, false, 3, 4, 3, 0.004f);
    drawIpoints(img, ref_ipts);
    showImage(img);

    // Create a window
    cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );

    // Main capture loop
    while( true )
    {
        // Grab frame from the capture source
        img = cvQueryFrame(capture);

        // Detect and describe interest points in the frame
        surfDetDes(img, ipts, false, 3, 4, 3, 0.004f);

        // Fill match vector
        getMatches(ipts,ref_ipts,matches);

        // This call finds where the object corners should be in the frame
        if (translateCorners(matches, src_corners, dst_corners))
        {
            // Draw box around object
            for(int i = 0; i < 4; i++ )
            {
                CvPoint r1 = dst_corners[i%4];
                CvPoint r2 = dst_corners[(i+1)%4];
                cvLine( img, cvPoint(r1.x, r1.y),
                        cvPoint(r2.x, r2.y), cvScalar(255,255,255), 3 );
            }

            for (unsigned int i = 0; i < matches.size(); ++i)
                drawIpoint(img, matches[i].first);
        }

        // Draw the FPS figure
        drawFPS(img);

        // Display the result
        cvShowImage("OpenSURF", img);

        // If ESC key pressed exit loop
        if( (cvWaitKey(10) & 255) == 27 ) break;
    }

    // Release the capture device
    cvReleaseCapture( &capture );
    cvDestroyWindow( "OpenSURF" );
    return 0;
}
int main()
{

	
	bool salir=FALSE;

	
	

do
{
	IplImage *im;
	char eleccion;
	bool j=TRUE;

	// Menu

	printf("Choose the image you want to load\n");
	printf("Program images:\n\n"
		   "A=2_bolas\n"
		   "B=3_bolas\n"
		   "C=4_bolas\n"
		   "D=6_bolas\n"
		   "E=bola_azul\n"
		   "F=bola_roja\n"
		   "G=bolas_cortadas\n"
		   "H=bola_amarilla_blanca\n"
		   "I=bola_amarilla_blanca_+intensidad\n"
		   "J=bola_amarilla1\n"
		   "K=bolas_cortadas_+intensidad\n"
		   "L=bolas_juntas\n"
		   "M=cambio_angulo_iluminacion\n"
		   "N=bolas_pegadas_1\n"
		   "O=bolas_pegadas_2\n"
		   "P=bolas_pegadas_3\n"
		   "Q=bolas_pegadas_4\n"
		   "R=bolas_pegadas_4_+intensidad\n"
		   "S=bolas_pegadas_rotas\n"
		   "T=bolas_pegadas_rotas_2\n"
		   

		   );

	printf("X=QUIT\n\n");

while(j==TRUE)
{
	

	
	scanf("%c",&eleccion);
	
	

	switch(eleccion)
	{
	case 'A':{   char NombreImagen[]="2_bolas.jpg"; im=cvLoadImage(NombreImagen, -1); j=FALSE;}
		    break;

	case 'B':  {char NombreImagen[]="3_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'C': { char NombreImagen[]="4_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'D':  { char NombreImagen[]="6_bolas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'E':  { char NombreImagen[]="bola_azul.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;
		   
	case 'F':  {char NombreImagen[]="bola_roja.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'G':  {char NombreImagen[]="bolas_cortadas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'H':  {char NombreImagen[]="bola_amarilla_blanca.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'I': { char NombreImagen[]="bola_amarilla_blanca_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'J': { char NombreImagen[]="bola_amarilla1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'K':  { char NombreImagen[]="bolas_cortadas_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'L': { char NombreImagen[]="bolas_juntas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'M':  {char NombreImagen[]="cambio_angulo_iluminacion.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'N':  {char NombreImagen[]="bolas_pegadas_1.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'O':  {char NombreImagen[]="bolas_pegadas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'P':  {char NombreImagen[]="bolas_pegadas_3.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'Q':  {char NombreImagen[]="bolas_pegadas_4.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'R':  {char NombreImagen[]="bolas_pegadas_4_+intensidad.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'S':  {char NombreImagen[]="bolas_pegadas_rotas.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'T':  {char NombreImagen[]="bolas_pegadas_rotas_2.jpg"; im=cvLoadImage(NombreImagen, -1);j=FALSE;}
		    break;

	case 'X':  {salir=TRUE; return 0;}
		    break;


	default:{ printf("Invalid choice, please choose an option again\n"); j=TRUE; }
	}

}

//--------------------------------------------------------------------------------------------------------------------------------------------------------------------------

// OBTAIN ONE BINARY IMAGE WITH ONLY THE BLUE BALLS AND ANOTHER WITH ONLY THE RED BALLS

	
	
	IplImage *Imagen_RGB;
	
    
	
	IplImage *Imagen_umbr;
	IplImage *Imagen_umbr_2;
	


	CvSize Dimensiones;

	// Thresholds for the S and H images. The MinMax function is not used here because fixed thresholds give better results.
	int umbral1=150;
	int umbral2=100;
	
	

	

	// convert from BGR to RGB

	Dimensiones= cvGetSize(im);

	Imagen_RGB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3);
	cvCvtColor(im,Imagen_RGB,CV_BGR2RGB);



	
IplImage *ImagenHSV;
IplImage *ImagenH,*ImagenS,*ImagenV;




// convert from RGB to HSV

ImagenHSV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,3);

cvCvtColor(Imagen_RGB,ImagenHSV,CV_RGB2HSV);



// Split the HSV image into its three components: H, S and V


ImagenH=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
ImagenS=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
ImagenV=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
cvSplit(ImagenHSV,ImagenH,ImagenS,ImagenV,0);

// binary images for thresholding S and H

Imagen_umbr=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
Imagen_umbr_2=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);

// thresholding
cvThreshold(ImagenS,Imagen_umbr,umbral1,255,CV_THRESH_BINARY);

cvThreshold(ImagenH,Imagen_umbr_2,umbral2,255,CV_THRESH_BINARY_INV);



// Split the image into its R, G and B planes

IplImage *ImagenR=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
IplImage *ImagenG=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);
IplImage *ImagenB=cvCreateImage(Dimensiones,IPL_DEPTH_8U,1);

cvSplit(Imagen_RGB,ImagenR,ImagenG,ImagenB,0);




// From here on, a series of morphological transformations separates the blue balls from the red ones into binary images.


    // create structuring element

	 IplConvKernel* element = 0;
     const int element_shape =CV_SHAPE_ELLIPSE;
	 int pos=1;
  
     element= cvCreateStructuringElementEx(pos*2+1,pos*2+1,pos,pos, element_shape,0);
	
	


    IplImage * temp= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
    IplImage *temp2=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
	IplImage *resta=cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
	
	
	// this yields all the balls binarized
	
	cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_TOPHAT,2);     // top-hat: detects only the ball shadows (the lighting comes from above)
	//cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2); // could be used if the shadows fell on the opposite side
	cvAbsDiff (Imagen_umbr, temp ,temp); // subtract: original minus top-hat
	cvMorphologyEx(temp,temp,temp, NULL,CV_MOP_CLOSE,6); // apply closing




	// this yields the blue balls binarized

	cvMorphologyEx(Imagen_umbr_2,temp2,temp2, NULL,CV_MOP_TOPHAT,1);     // top-hat
	//cvMorphologyEx(Imagen_umbr,temp,temp, NULL,CV_MOP_BLACKHAT,2);
	cvAbsDiff (Imagen_umbr_2, temp2 ,temp2); // subtract: original minus top-hat
	cvMorphologyEx(temp2,temp2,temp2, NULL,CV_MOP_CLOSE,6); // apply closing

	// Dilate and erode the same number of times so the balls keep roughly their original size. Doing it many times (15)
	// removes most of the defects caused by shadows and by contrast changes due to the lighting.
	cvDilate(temp2,temp2,element,15);

	cvErode(temp2,temp2,element,15);


	cvAbsDiff (temp2, temp ,resta); // Subtract the all-balls image minus the blue-balls image; after dilating and eroding many times
	// only the red balls remain
	cvDilate(resta,resta,element,15); // dilate

	cvErode(resta,resta,element,15);  // erode

    // Some unwanted contours may still remain in the binary images. Since the same morphological transformations are applied
	// to both binary images, both share the same defects, so an image containing only the defects is built and then subtracted from both.


IplImage * temp3= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * temp4= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * Im_defectos_comunes= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * Im_bolas_azules= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);
IplImage * Im_bolas_rojas= cvCreateImage(cvGetSize(Imagen_umbr),IPL_DEPTH_8U,1);


cvThreshold(temp2,temp3,umbral2,255,CV_THRESH_BINARY_INV); // invert the blue balls

cvThreshold(resta,temp4,umbral2,255,CV_THRESH_BINARY_INV); // invert the red balls

cvAnd(temp3,temp4,Im_defectos_comunes,NULL); // AND the two images; the result contains only the common defects

cvAbsDiff (temp2,Im_defectos_comunes,Im_bolas_azules); // subtract the defects from the blue balls

cvAbsDiff (resta, Im_defectos_comunes ,Im_bolas_rojas); // subtract the defects from the red balls

// Now there is one binary image with only the blue balls and another with only the red ones.
//-------------------------------------------------------------------------------------------------------------------------------------------------------------------------



// COMPUTE THE HISTOGRAM OF THE G IMAGE






// New image for drawing the histogram
IplImage *histImage;
// Histogram variables
int hist_size=256;
int NivelGris;
float NumPixels;
// Histogram structure to store the information
CvHistogram *hist;




// New image for drawing the histogram
histImage = cvCreateImage(cvSize(256,256), 8, 1);
// Histogram structure to store the information
hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY,NULL, 1);
// Compute the histogram. The G image is used because it has more contrast than the grayscale image, although grayscale would also work.
cvCalcHist(&ImagenG,hist,0,NULL);
cvSetZero(histImage);

long Histograma[256];

// draw the histogram
for(NivelGris=0;NivelGris<hist_size;++NivelGris)
{
NumPixels=cvQueryHistValue_1D(hist,NivelGris)/15;
cvLine(histImage,cvPoint(NivelGris,256),cvPoint(NivelGris,256-NumPixels),CV_RGB(255,255,255),1,8,0);

Histograma[NivelGris]=NumPixels; // store the pixel count for each grey level in an array
}

cvReleaseHist(&hist);
cvSaveImage("Histograma.jpg",histImage,0);
      
//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


// THRESHOLDING THE G IMAGE


IplImage *imagen_bin;
CvMemStorage *Memoria;
CvSeq *Contorno, *Primer_Contorno;
int Nc;
//imagen=cvLoadImage("herramientas.tif",CV_LOAD_IMAGE_GRAYSCALE);
imagen_bin=cvCreateImage(cvGetSize(ImagenG),8,1);
//imagen_color=cvCreateImage(cvGetSize(ImagenG),8,3);

// threshold ImagenG
int umbral;


umbral=MinMax(Histograma);

// For some images, due to stronger illumination or extra objects such as a hand entering the scene, the Gaussians in the
// histogram merge or only one appears. In that case MinMax() computes too high a threshold and the contours of some balls
// are missed, so a maximum threshold is enforced.
if(umbral>100)
{
	umbral=100;
}


cvLine(histImage,cvPoint(umbral,256),cvPoint(umbral,0),CV_RGB(255,255,255),1,8,0); // draw the threshold on the histogram


cvThreshold(ImagenG,imagen_bin,umbral,255,CV_THRESH_BINARY_INV); // binarize the G image

cvMorphologyEx(imagen_bin,imagen_bin,imagen_bin, NULL,CV_MOP_CLOSE,6); // apply closing to remove the contrast changes
// inside the balls caused by light reflections

	
 



//---------------------------------------------------------------------------------------------------------------------------------------------------------------------


// COMPUTE CONTOURS, AREAS, PERIMETERS, BOXES AND BOX CENTRES IN THE G IMAGE.

 IplConvKernel* element_2 = 0;
     const int element_shape_2 =CV_SHAPE_ELLIPSE;
	 int pos_2=1;
  
     element_2= cvCreateStructuringElementEx(pos_2*2+1,pos_2*2+1,pos_2,pos_2, element_shape_2,0);


Memoria=cvCreateMemStorage();
bool k=FALSE;
int n=0;
bool pelotas_juntas=FALSE;
int i;
double *perimetro;
double *area;
CvBox2D *BoundBox;

 CvPoint *centro;

int bolas_rotas_azules=0;
int bolas_rotas_rojas=0;

CvScalar s3;






     Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL);
   


     perimetro=(double*)malloc(Nc*sizeof(double));
     area=(double*)malloc(Nc*sizeof(double));
     BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D));
	 centro=(CvPoint*)malloc(Nc*sizeof(CvPoint));
	
	
     for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i)
     {
	      area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ);
	      perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1);
	      BoundBox[i]=cvMinAreaRect2(Contorno,NULL);
		  

     }

	  for(i=0;i<Nc;++i)
      {

		  centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);
      }

//----------------------------------------------------------------------------------------------------------------------------------------------------------------------------	  

// DETECT BROKEN BALLS
	  
	  
	  
	  
IplImage * inv_bolas_azules, *inv_bolas_rojas;
CvMemStorage *storage_2;
CvMemStorage *storage_3;
CvSeq *Contorno_2, *Primer_Contorno_2;
CvSeq *Contorno_3, *Primer_Contorno_3;

int Nc_2;
int Nc_3;
double *area_2;
double *area_3;
CvBox2D *BoundBox_2;
CvBox2D *BoundBox_3;
CvPoint *centro_2;
CvPoint *centro_3;



inv_bolas_azules=cvCreateImage(cvGetSize(Im_bolas_azules),8,1);

inv_bolas_rojas=cvCreateImage(cvGetSize(Im_bolas_rojas),8,1);

cvThreshold(Im_bolas_azules,inv_bolas_azules,128,255,CV_THRESH_BINARY_INV);
cvThreshold(Im_bolas_rojas,inv_bolas_rojas,128,255,CV_THRESH_BINARY_INV);

storage_2=cvCreateMemStorage();
storage_3=cvCreateMemStorage();


// detect the broken blue balls
Nc_2=cvFindContours(inv_bolas_azules,storage_2,&Primer_Contorno_2,sizeof(CvContour),CV_RETR_EXTERNAL); // find contours in the binary image
                                                                                                       // containing only the blue balls



     
     area_2=(double*)malloc(Nc_2*sizeof(double));       // size of the area array
     
     BoundBox_2=(CvBox2D*)malloc(Nc_2*sizeof(CvBox2D)); // size of the BoundBox_2 array
	 
	 centro_2=(CvPoint*)malloc(Nc_2*sizeof(CvPoint));   // size of the centro_2 array
	 
	

     for(i=0,Contorno_2=Primer_Contorno_2;Contorno_2!=NULL;Contorno_2=Contorno_2->h_next,++i)
     {
	      area_2[i]=cvContourArea(Contorno_2,CV_WHOLE_SEQ); // compute each contour's area
	      
	      BoundBox_2[i]=cvMinAreaRect2(Contorno_2,NULL);    // compute each contour's bounding box
	 }

	  for(i=0;i<Nc_2;++i)
      {
		  centro_2[i] = cvPoint( BoundBox_2[i].center.x,BoundBox_2[i].center.y); // compute each contour's centre
      }

	 
	  
	  
	 
	  
	 // For each contour, an area below 2500 means a broken ball


	  for(i=0;i<Nc_2;++i)
	  {
		  if(area_2[i]<2500)
		  {
			  bolas_rotas_azules++;
			  DibujarBox2D(im,BoundBox_2[i]);
			  printf("Broken blue ball at:\n x=%d\n y=%d\n",centro_2[i].y,centro_2[i].x);
		  }
	  }


	// Detect the broken red balls

	// Same procedure as for the broken blue balls, but finding contours in the binary image containing only the red balls
	  Nc_3=cvFindContours(inv_bolas_rojas,storage_3,&Primer_Contorno_3,sizeof(CvContour),CV_RETR_EXTERNAL);



     
     area_3=(double*)malloc(Nc_3*sizeof(double));
     
     BoundBox_3=(CvBox2D*)malloc(Nc_3*sizeof(CvBox2D));
	 
	  centro_3=(CvPoint*)malloc(Nc_3*sizeof(CvPoint));
	 
	 
	

     for(i=0,Contorno_3=Primer_Contorno_3;Contorno_3!=NULL;Contorno_3=Contorno_3->h_next,++i)
     {
	      area_3[i]=cvContourArea(Contorno_3,CV_WHOLE_SEQ);
	      
	      
	      BoundBox_3[i]=cvMinAreaRect2(Contorno_3,NULL);
		  
		  
		  
	 }

	  for(i=0;i<Nc_3;++i)
      {
		  centro_3[i] = cvPoint( BoundBox_3[i].center.x,BoundBox_3[i].center.y);
      }
	  
	  
	  
	 
	  for(i=0;i<Nc_3;++i)
	  {
		  if(area_3[i]<2000)
		  {
			  bolas_rotas_rojas++;
			  DibujarBox2D(im,BoundBox_3[i]);
			  printf("Broken red ball at:\n x=%d\n y=%d\n",centro_3[i].y,centro_3[i].x);
		  }
	  }

	  
	  
	  
//---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
// CASE OF TOUCHING BALLS

// When two or more balls touch, the program finds a single contour whose area covers all of them. To solve this the contour
// perimeter is used: a perimeter threshold is chosen that guarantees all balls are separated, and while any perimeter
// exceeds it the image is eroded until every perimeter falls below the threshold.
	 
// To detect touching balls, check whether any contour has an area larger than a single ball's.
	 for(i=0;i<Nc;++i)
	 {
		 if(area[i]>4000) // a contour with area larger than one ball's exists
		 {
			 k=TRUE;
			 pelotas_juntas=TRUE;
		 }

		 
	 }

while(k==TRUE) // Enter this loop if an area larger than one ball's was found
{

		 k=FALSE;
		 Nc=cvFindContours(imagen_bin,Memoria,&Primer_Contorno,sizeof(CvContour),CV_RETR_EXTERNAL);
  


     perimetro=(double*)malloc(Nc*sizeof(double));
     area=(double*)malloc(Nc*sizeof(double));
     
     BoundBox=(CvBox2D*)malloc(Nc*sizeof(CvBox2D));
	
	 centro=(CvPoint*)malloc(Nc*sizeof(CvPoint));
	
	 

     for(i=0,Contorno=Primer_Contorno;Contorno!=NULL;Contorno=Contorno->h_next,++i)
     {
	      area[i]=cvContourArea(Contorno,CV_WHOLE_SEQ);
	      perimetro[i]=cvArcLength(Contorno,CV_WHOLE_SEQ,1);
	     
	      BoundBox[i]=cvMinAreaRect2(Contorno,NULL);
		  
		 
		 
     }

	 for(i=0;i<Nc;++i)
      {
	       centro[i] = cvPoint( BoundBox[i].center.x,BoundBox[i].center.y);
      }


	

	 for(i=0;i<Nc;++i)
	 {
		 

		 if(perimetro[i]>100)
		 {
			 k=TRUE;
			 cvErode(imagen_bin,imagen_bin,element_2,1);
		 }

		
	 }



}



    

//------------------------------------------------------------------------------------------------------------------------------------------------------------


// COUNT THE BALLS OF EACH COLOUR AND THEIR POSITIONS




int bolas_azules=0;
int bolas_rojas=0;

int mano=0;
double radio=0.0;
CvScalar s;
CvScalar s2;


// Distinguish the balls when none are touching
if( pelotas_juntas==FALSE)
{

    // Blue balls
    for(i=0;i<Nc;++i) // loop over all contours
       {

           s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x); // take each centre and check the pixel colour in the blue-balls image
		   if(s.val[0]==0) // if 0, there may be a blue ball or a broken blue ball
		     {
		          if(area[i]>2000 && area[i]<4000) // blue ball
		            {
			             bolas_azules++;

			             radio=sqrt(area[i]/3.14);

			             cvCircle(
                                  im,
                                  centro[i],
                                  cvRound( radio ),
                                  CV_RGB(0x00,0xff,0xff));

			                      printf("Blue ball at:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);
		             }
		     }
}


			   


	// Red balls
     for(i=0;i<Nc;++i) // loop over all contours
       {
   
	       s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x); // take each centre and check the pixel colour in the red-balls image

	       if(s2.val[0]==0) // if 0, there may be a red ball or a broken red ball
		     {
		         if(area[i]>2000 && area[i]<4000) // red ball
		           {
			            bolas_rojas++;

			            radio=sqrt(area[i]/3.14);

			            cvCircle(
                                  im,
                                  centro[i],
                                  cvRound( radio ),
                                  CV_RGB(0xff,0x00,0x00));

			                      printf("Red ball at:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);
		           }
		     }
        }


		



}

if( pelotas_juntas==TRUE)
{
    float radio=30; // Since touching balls required eroding the image until they separated, the true ball areas are unknown,
	                // so an approximate radius is assumed.


	// Blue balls
    for(i=0;i<Nc;++i)
       {

          s=cvGet2D(Im_bolas_azules,centro[i].y,centro[i].x); // take each centre and check the pixel colour in the blue-balls image
		  if(s.val[0]==0) // if 0, there is a blue ball. Broken balls cannot occur here: after the erosions only contours with a
			              // perimeter larger than a single ball's survive, and a broken ball's perimeter is always smaller
		   {

			   cvCircle(
                        im,
                        centro[i],
                        cvRound( radio ),
                        CV_RGB(0x00,0xff,0xff));

			   bolas_azules++;

			   printf("Blue ball at:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);

		   }

       }






	// Red balls
    for(i=0;i<Nc;++i) // loop over all contours
       {
   
	       s2=cvGet2D(Im_bolas_rojas,centro[i].y,centro[i].x); // take each centre and check the pixel colour in the red-balls image

	       if(s2.val[0]==0) // if 0, there is a red ball
		     {
		   
			     cvCircle(
                          im,
                          centro[i],
                          cvRound( radio ),
                          CV_RGB(0xff,0x00,0x00));

			     bolas_rojas++;

			     printf("Red ball at:\n x=%d\n y=%d\n",centro[i].y,centro[i].x);
		     }

	   }

}



printf("blue balls:%d\n",bolas_azules);
printf("broken blue balls:%d\n", bolas_rotas_azules);
printf("red balls:%d\n",bolas_rojas);
printf("broken red balls:%d\n\n",bolas_rotas_rojas);


printf("COMMAND THE ROBOT\n\n\n");
if(bolas_rotas_azules>0)
{
	printf("PUT DEFECTIVE BLUE BALLS INTO THE DEFECTIVE BLUE BALLS BOX\n\n");
}

if(bolas_rotas_rojas>0)
{
	printf("PUT DEFECTIVE RED BALLS INTO THE DEFECTIVE RED BALLS BOX\n\n");
}

if(bolas_azules>0 || bolas_rojas>0)
{
	printf("PACK THE BALLS\n\n");
}



//----------------------------------------------------------------------------------------------------------------------------------------------------------------------





cvWaitKey(0);

//--------------------------------------------------------------------------------------------------------------------------------------------------------------------
// DISPLAY

cvNamedWindow("Original", CV_WINDOW_AUTOSIZE);
cvShowImage("Original", im );

//cvNamedWindow("imagen_bin", CV_WINDOW_AUTOSIZE);
//cvShowImage("imagen_bin", imagen_bin );

// Show the red, green and blue colour planes

//cvNamedWindow("R", CV_WINDOW_AUTOSIZE);
//cvShowImage("R",ImagenR);

//cvNamedWindow("G", CV_WINDOW_AUTOSIZE);
//cvShowImage("G",inv_bolas_azules);

//cvNamedWindow("B", CV_WINDOW_AUTOSIZE);
//cvShowImage("B",inv_bolas_rojas);

cvNamedWindow("bolas_azules", CV_WINDOW_AUTOSIZE);
cvShowImage("bolas_azules",Im_bolas_azules);

cvNamedWindow("bolas_rojas", CV_WINDOW_AUTOSIZE);
cvShowImage("bolas_rojas",Im_bolas_rojas);

// Show the image

cvNamedWindow("Histograma de G", CV_WINDOW_AUTOSIZE);
cvShowImage("Histograma de G", histImage );

cvWaitKey(0);

//---------------------------------------------------------------------------------------------------------------------------------------------------------------

// FREE MEMORY
cvDestroyAllWindows();

cvReleaseImage(&ImagenR);
cvReleaseImage(&ImagenG);
cvReleaseImage(&ImagenB);
cvReleaseImage(&imagen_bin);
cvReleaseImage(&histImage);
cvReleaseImage(&im);
cvReleaseImage(&Imagen_RGB);
cvReleaseImage(&Imagen_umbr);
cvReleaseImage(&Imagen_umbr_2);
cvReleaseImage(&ImagenHSV);
cvReleaseImage(&ImagenH);
cvReleaseImage(&ImagenS);
cvReleaseImage(&ImagenV);
cvReleaseImage(&temp);
cvReleaseImage(&temp2);
cvReleaseImage(&temp3);
cvReleaseImage(&temp4);
cvReleaseImage(&Im_defectos_comunes);
cvReleaseImage(&Im_bolas_azules);
cvReleaseImage(&Im_bolas_rojas);
cvReleaseImage(&inv_bolas_rojas);
cvReleaseImage(&inv_bolas_azules);

}while(salir==FALSE);

return 0;


}
int main()
{
    int c;//to store ascii value of key pressed
    int i,j,h,s,v;
    CvCapture *capture=cvCreateCameraCapture(0); // initialize camera
    // Because the frame is re-queried on every loop iteration we don't need to wait for the camera:
    // the first frames may be black, and once the camera starts the frame updates and shows the image.
    IplImage *frame;
    
     IplImage* outputred;
   IplImage* outputone;
   IplImage* outputtwo;
  // IplImage* outputblue;
   IplImage* outputwhite;
  // IplImage* outputorange;


    uchar *ptemp;
   // uchar *poutputorange;
    uchar *poutputred;
    uchar *poutputwhite;
   // uchar *poutputblue;
    uchar *poutputone;
    uchar *poutputtwo;
   
   
    if(capture!=NULL)
    {
                     frame=cvQueryFrame(capture); // grab the current camera image into the frame pointer
                     cvNamedWindow("img");
                     while(1)
                     {
                             
                             cvShowImage("img",frame);
                            frame=cvQueryFrame(capture);
                            temp=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
                            cvCvtColor(frame,temp,CV_BGR2HSV);
                            // frame-rate delay (without it the display cannot keep up, since processing is much faster than the camera)
                           // cvNamedWindow("output",1);
                            //cvShowImage("output",temp);
                            cvSetMouseCallback("img", my_mouse_callback, NULL);
                            c=cvWaitKey(1);
                            outputred=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
     outputone=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
      outputtwo=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
     //  outputblue=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
        outputwhite=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
     //    outputorange=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
    
    cvCvtColor(frame,temp,CV_BGR2HSV);
    ptemp  =  (uchar*)temp->imageData;
    poutputone  =  (uchar*)outputone->imageData;
    poutputtwo  =  (uchar*)outputtwo->imageData;
   // poutputblue  =  (uchar*)outputblue->imageData;
    poutputwhite  =  (uchar*)outputwhite->imageData;
    poutputred  =  (uchar*)outputred->imageData;
  //  poutputorange  =  (uchar*)outputorange->imageData;
    
     for(i=0;i<frame->height;i++)
            for(j=0;j<frame->width;j++)
            {
                                       h=ptemp[i*temp->widthStep + j*temp->nChannels+0];
                                       s=ptemp[i*temp->widthStep + j*temp->nChannels+1];
                                       v=ptemp[i*temp->widthStep + j*temp->nChannels+2];
                                       
                                         if((h>=157&&h<=178)&&s>=110 && s<=255 &&v>=90)//red
                                                                         poutputred[i*outputred->widthStep + j]=255;
                                       else
                                                                          poutputred[i*outputred->widthStep + j]=0;
                                       if((h==0 && s==0 &&v<150 && v>9)||(h>=25 &&h<=110 && s>=20&&s<=200&& v>=13 && v<=120))//one
                                                                         poutputone[i*outputone->widthStep + j]=255;
                                       else
                                                                          poutputone[i*outputone->widthStep + j]=0;
                                  
                               /*       if((h>=145 &&h<=160)&&s>=175 && s<=255 && v>=80 && v<=150)//one
                                                                         poutputone[i*outputone->widthStep + j]=255;
                                       else
                                                                          poutputone[i*outputone->widthStep + j]=0;
                                 */                                         
                                       if(h>=110 &&h<=153&&s>=90&&v>=7 && v<=150)//two
                                                                         poutputtwo[i*outputtwo->widthStep + j]=255;
                                       else
                                                                          poutputtwo[i*outputtwo->widthStep + j]=0;
                                          if( (h==0 && s==0 && v>=250) || (((h>=0 && h<=30)) && s>=50&&s<=200&&v>=110) )//white 
                                                                         poutputwhite[i*outputwhite->widthStep + j]=255;
                                       else
                                                                          poutputwhite[i*outputwhite->widthStep + j]=0;
                                   
                                                                          
            }
     
    //cvNamedWindow("output",1);
    cvNamedWindow("outputred",1);
    cvNamedWindow("outputone",1);
    cvNamedWindow("outputtwo",1);
  //  cvNamedWindow("outputblue",1);
    cvNamedWindow("outputwhite",1);
    //cvNamedWindow("outputorange",1);

    //cvShowImage("output",temp);
    cvShowImage("outputred",outputred);
    cvShowImage("outputone",outputone);
    cvShowImage("outputtwo",outputtwo);
   // cvShowImage("outputblue",outputblue);
    cvShowImage("outputwhite",outputwhite);
   // cvShowImage("outputorange",outputorange);
    cvWaitKey(1);
 /*   imgOutred=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgred=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsred;
    cvLabel(outputred, labelImgred, blobsred);
    cvRenderBlobs(labelImgred, blobsred, input, imgOutred);
    cvFilterByArea(blobsred,PIXEL_MIN,PIXEL_MAX);

    imgOutone=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgone=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsone;
    cvLabel(outputone, labelImgone, blobsone);
    cvRenderBlobs(labelImgone, blobsone, input, imgOutone);
    cvFilterByArea(blobsone,PIXEL_MIN,PIXEL_MAX);

    imgOuttwo=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgtwo=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobstwo;
    cvLabel(outputtwo, labelImgtwo, blobstwo);
    cvRenderBlobs(labelImgtwo, blobstwo, input, imgOuttwo);
    cvFilterByArea(blobstwo,PIXEL_MIN,PIXEL_MAX);

    imgOutblue=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgblue=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsblue;
    cvLabel(outputblue, labelImgblue, blobsblue);
    cvRenderBlobs(labelImgblue, blobsblue, input, imgOutblue);
    cvFilterByArea(blobsblue,PIXEL_MIN,PIXEL_MAX);
    
    imgOutwhite=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgwhite=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobswhite;
    cvLabel(outputwhite, labelImgwhite, blobswhite);
    cvRenderBlobs(labelImgwhite, blobswhite, input, imgOutwhite);
    cvFilterByArea(blobswhite,PIXEL_MINWHITE,PIXEL_MAX);
    
   */ 
    
    
    
    
    cvReleaseImage( &outputred ); 
    cvReleaseImage( &outputone ); 
    cvReleaseImage( &outputtwo ); 
    //cvReleaseImage( &outputblue ); 
    cvReleaseImage( &outputwhite ); 
    //cvReleaseImage( &outputorange );
                            cvReleaseImage(&temp ); 
                            //cvDestroyWindow( "output" );
                            if(c>0&&c<255)
                                     {
                                            cvDestroyWindow( "img" );
                                            cvReleaseImage( &frame ); 
                                            cvReleaseCapture(&capture);
     //                                       cvDestroyWindow( "outputred" );
                                            //cvDestroyWindow( "output" );
  //  cvDestroyWindow( "outputone" );
   // cvDestroyWindow( "outputtwo" );
    //cvDestroyWindow( "outputblue" );
   // cvDestroyWindow( "outputwhite" );
    //cvDestroyWindow( "outputorange" );
    
    cvReleaseImage( &outputred ); 
    cvReleaseImage( &outputone ); 
    cvReleaseImage( &outputtwo ); 
    //cvReleaseImage( &outputblue ); 
    cvReleaseImage( &outputwhite ); 
    //cvReleaseImage( &outputorange ); 
     
    
                                            return 0;
                                     }
                     }
    }
}
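The per-pixel loop above is the manual form of a range threshold; the old C API already provides this as cvInRangeS, which tests every channel against a lower and an upper CvScalar and writes a 0/255 mask. A minimal sketch for the red range used above (the file names are placeholders, not from the original program; note that the C API documents the upper bound as exclusive, hence the +1 on each limit):

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    /* frame.jpg / red_mask.png are hypothetical file names for this sketch */
    IplImage *frame = cvLoadImage("frame.jpg", CV_LOAD_IMAGE_COLOR);
    if (!frame) return 1;

    IplImage *hsv  = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
    IplImage *mask = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);

    cvCvtColor(frame, hsv, CV_BGR2HSV);

    /* same red test as the loop above: 157<=H<=178, 110<=S<=255, 90<=V */
    cvInRangeS(hsv, cvScalar(157, 110, 90, 0),
                    cvScalar(179, 256, 256, 0), mask);

    cvSaveImage("red_mask.png", mask, 0);

    cvReleaseImage(&mask);
    cvReleaseImage(&hsv);
    cvReleaseImage(&frame);
    return 0;
}

This replaces the h/s/v unpacking and widthStep arithmetic with a single call per colour class; the multi-condition classes ("one", "white") would still need either two cvInRangeS calls combined with cvOr, or the original per-pixel loop.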
Example #25
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;

    CvMemStorage *mallet_storage;
	CvSeq *mallet_circles = 0;
	float *mallet_p;
	int mi;

    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;

	int npts[2] = { 4, 12 };
	CvPoint **pts;

	pts = (CvPoint **) cvAlloc (sizeof (CvPoint *) * 2);
	pts[0] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 4);
	pts[1] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 12);
	pts[0][0] = cvPoint(0,0);
	pts[0][1] = cvPoint(160,0);
	pts[0][2] = cvPoint(320,240);
	pts[0][3] = cvPoint(0,240);
	pts[1][0] = cvPoint(39,17);
	pts[1][1] = cvPoint(126,15);
	pts[1][2] = cvPoint(147,26);
	pts[1][3] = cvPoint(160,77);
	pts[1][4] = cvPoint(160,164);
	pts[1][5] = cvPoint(145,224);
	pts[1][6] = cvPoint(125,233);
	pts[1][7] = cvPoint(39,233);
	pts[1][8] = cvPoint(15,217);
	pts[1][9] = cvPoint(0,133);
	pts[1][10] = cvPoint(0,115);
	pts[1][11] = cvPoint(17,28);

	// polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;

    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);

	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// Set frame size
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	// Set FPS
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// Create windows for displaying the images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 107;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 115;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 255;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);

	// Declare the image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);

	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	//Flip vertically and horizontally. In the production setup, mat_frame1 is the one to flip.
	cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //apply contrast/brightness to the concatenated image
	//Image dilation and erosion (disabled)
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //apply contrast/brightness to the concatenated image

	//Convert the brightness-adjusted result (Mat -> IplImage*) and hand it over, then release the Mats.
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 11, 180, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	//画像の膨張と縮小
	//cvMorphologyEx(tracking_img, tracking_img,)

	// Contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// Polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);
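	// CV_POLY_APPROX_DP is the Douglas-Peucker method: 8 is the accuracy (max deviation
	// in pixels), and the nonzero last argument also approximates all contours reachable
	// from `contours` via h_next/v_next links.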

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	//Determine the frame coordinates
	//upper left,  wall side: upper_left_f
	//upper left,  goal side: upper_left_g
	//upper right, wall side: upper_right_f
	//upper right, goal side: upper_right_g
	//lower left,  wall side: lower_left_f
	//lower left,  goal side: lower_left_g
	//lower right, wall side: lower_right_f
	//lower right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1;//to be replaced by the signal from the other Raspberry Pi

	while(1){
		//Start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			//Flip vertically and horizontally. In the production setup, mat_frame1 is the one to flip.
			cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //apply contrast/brightness to the concatenated image
			//Convert the brightness-adjusted result (Mat -> IplImage*) and hand it over, then release the Mats.
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();

			cvFillPoly(img_all_round, pts, npts, 2, CV_RGB(0, 0, 0));

			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			//Coordinate calculation
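			//Centroid from raw spatial moments: x = m10/m00, y = m01/m00 (m00 is the blob "mass", i.e. its area).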
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; //target rotation direction: clockwise = 1, counterclockwise = 0
			//draw the circles at 1/10 of the frame height
			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; //fixed Y coordinate: the defense line
			//The puck travels in a straight line, so a linear function is used to predict its subsequent trajectory.
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX;
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			double y_line = (upper_left_f.y + lower_right_f.y)/3;
			double waiting_position = (robot_goal_left.x + lower_left_g.x) / 2;
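
			//NOTE: a_inclination, b_intercept and target_coordinateX are declared above
			//but never assigned in this excerpt. A plausible linear extrapolation of the
			//puck from its two centroids down to the defense line (an assumption, and one
			//that must guard against gY_after == gY_before) would be:
			//  a_inclination      = (gX_after - gX_before) / (gY_after - gY_before);
			//  b_intercept        = gX_after - a_inclination * gY_after;
			//  target_coordinateX = (int)(a_inclination * target_destanceY + b_intercept);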

			if(gY_after - gY_before < -1){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 600);
				target_coordinateX = waiting_position;
				if(waiting_position + 5 < gX_now_mallet){
					target_direction = 0;//counterclockwise
				}
				else if(gX_now_mallet < waiting_position - 5){
					target_direction = 1;//clockwise
				}
			}
			/*else if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 0;//counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1000);
				target_direction = 1;//clockwise
			}*/
			else if(y_line < gY_after && y_line > gY_before){
				clock_t start = clock();
				clock_t end;
				end = start + 0.5 * (target_coordinateX - robot_goal_left.x) / 10;
				target_direction = 1;
				gpioPWM(25, 128);
				gpioWrite(18, target_direction);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				while(clock() < end);//busy-wait until the deadline (the original compared end - start, which never changes in the loop)
			}
			else{
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
			}



			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			//Draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			//Draw the mallet's planned movement
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);

			/*

			int amount_movement = target_coordinateX - gX_now_mallet;

			//react to the limit switches and set the target_direction rotation
//			if(gpioRead(6) == 1){//X-axis right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0;//counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){//X-axis left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1;//clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){//Y-axis upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){//Y-axis lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){//Y-axis lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}

			else if(gpioRead(21) == 1){//Y-axis upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				//the Y axis is held fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1;//clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0;//counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// Display the images in their windows
			//cvShowImage("Previous Image", img_all_round2);
			cvShowImage("Now Image", show_img);
			cvShowImage("pack", dst_img_pack);
			cvShowImage("mallet", dst_img_mallet);
			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ //when a reset signal arrives
			is_pushed_decision_button = 0;
		}
    }

    gpioTerminate();

    cvDestroyAllWindows();

	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);
    cvFree(&pts[0]);
	cvFree(&pts[1]);
	cvFree(pts);

    return 0;
}
Ejemplo n.º 26
0
IplImage* Panoramic::StitchFace(const char *leftName, const char *centerName,const char *rightName)
{
	IplImage   *leftHsvImg,*centerHsvImg,*rightHsvImg;
	vector<Coordinate> leftCoord;
	vector<Coordinate> rightCoord;
	vector<Coordinate> centerCoord;
	vector<Coordinate> profileCoord(3);
	vector<Coordinate> centerAffineCoord(3);
	IplImage   *leftAffineImg     = cvCreateImage(cvSize(m_width,m_height),8,1);
	IplImage   *rightAffineImg    = cvCreateImage(cvSize(m_width,m_height),8,1);
	IplImage   *leftFeatureImg    = cvLoadImage(leftName,1);
	IplImage   *centerFeatureImg  = cvLoadImage(centerName,1);
	IplImage   *rightFeatureImg   = cvLoadImage(rightName,1); 
	cvZero(rightAffineImg);
	cvZero(leftAffineImg);

	//Use a red color threshold to find the features in the input images
	leftHsvImg  = GetHsvFeature(leftFeatureImg   ,0,255,255,51,51,51);
	centerHsvImg= GetHsvFeature(centerFeatureImg ,0,255,255,51,51,51);
	rightHsvImg = GetHsvFeature(rightFeatureImg  ,0,255,255,51,51,51);
	//FindFeatureCoord will decide whether it continues or not.
    leftCoord   =  FindFeatureCoord(leftHsvImg);
	rightCoord  =  FindFeatureCoord(rightHsvImg);
	centerCoord =  FindFeatureCoord(centerHsvImg);
	
	if(m_do_sttich)//true when every feature-coordinate set has 12 points; decided in FindFeatureCoord()
	{
		RearrangeCoord(leftCoord);
		RearrangeCoord(rightCoord);
		RearrangeCoord(centerCoord);

		for(int i = 0; i < m_numFeature; i++) 
		{
			m_centerCood[i] = centerCoord[i];
		}
		if(m_debug)
		{
			ShowFeature(leftCoord);
			ShowFeature(centerCoord);
			ShowFeature(rightCoord);
		}
	
		Graphic FindLine;

		for(int numStitch = 0; numStitch < 2;numStitch++)
		{
			for(int num = 0;num < 3;num++)
			{
				if(numStitch == 1)
				{
					if(num==0)
					{
						profileCoord[0]         = leftCoord[1];
						centerAffineCoord[0]    = centerCoord[1];
					}
					else
					{
						profileCoord[num]       = leftCoord[num+2];
						centerAffineCoord[num]  = centerCoord[num+2];
					}
				}

				else
				{
					if(num==0)
					{
						profileCoord[0]			= rightCoord [7];
						centerAffineCoord[0]	= centerCoord[7];
					}
					else
					{
						profileCoord[num]       = rightCoord [num+8];
						centerAffineCoord[num]  = centerCoord[num+8];
					}
				}
			}
			//Stitch the left profile face
			if(numStitch == 1)
			{
				FindLine.Line(centerAffineCoord,0,centerAffineCoord,2,m_slope,m_intercept);
				DoAffineTrsform(m_leftImg,leftAffineImg,profileCoord,centerAffineCoord);
				if(m_debug)
				{
					cvNamedWindow("leftAffineImg",0);
					cvShowImage("leftAffineImg",leftAffineImg);
				}
				ShowStitch(leftAffineImg,m_centerImg); //profile-face stitching with linear blending
			}
			//Stitch the right profile face
			else
			{
				FindLine.Line(centerAffineCoord,0,centerAffineCoord,2,m_slope,m_intercept);
				DoAffineTrsform(m_rightImg,rightAffineImg,profileCoord,centerAffineCoord);
				if(m_debug)
				{
					cvNamedWindow("rightAffineImg",0);
					cvShowImage("rightAffineImg",rightAffineImg);
				}
				ShowStitch(rightAffineImg,m_centerImg);
			}
				m_saveSlope[numStitch]		=	m_slope;
				m_saveIntercept[numStitch]	=	m_intercept;
		
		}

		//Stitch the frontal face
		for(int j = 0;j < m_height;j++)
		{
			for(int i = 0;i < m_width;i++)
			{
				double linePostionright = m_saveSlope[0]*i + m_saveIntercept[0]-j;
				double linePostionleft  = m_saveSlope[1]*i + m_saveIntercept[1]-j;

				if(linePostionright > m_lineT && linePostionleft > m_lineT)
				{
					double pixel = cvGetReal2D(m_centerImg,j,i);
					cvSetReal2D(m_PanoramicFace,j,i,pixel) ;
				}
			}
		}
		if(m_debug)
		{
			cvNamedWindow("PanoramicFace",0);
			cvShowImage("PanoramicFace",m_PanoramicFace);
			cvWaitKey(0);
		}
		cvReleaseImage(&leftHsvImg);	cvReleaseImage(&centerHsvImg);		cvReleaseImage(&rightHsvImg);
		cvReleaseImage(&leftAffineImg);	cvReleaseImage(&rightAffineImg);
		cvReleaseImage(&leftFeatureImg);cvReleaseImage(&centerFeatureImg);	cvReleaseImage(&rightFeatureImg);
		return m_PanoramicFace;
	}
	else
	{
		printf("Error when stich image....");
		return NULL;
	}
	
}
Ejemplo n.º 27
0
	void Gui::init()
	{
		cvNamedWindow(m_window_name.c_str(), CV_WINDOW_AUTOSIZE);
		cvMoveWindow(m_window_name.c_str(), 100, 100);
	}
Ejemplo n.º 28
0
// Runs the dot detector and sends detected dots to the server on the given port. TODO Implement headless mode. Needs more config options and/or possibly a config file first though
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter and network socket descriptor
    int dp = 0, minDist = 29, param1 = 0, param2 = 5; // Configuration variables for circle detection 
    int minDotRadius = 1;
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundary
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour-based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;  
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;  
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;  
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from the camera, detects dots, sends them, paints them on the images, and shows the result to the user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror. 
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise. 
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noise reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and then repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl = absCenter;
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to to send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue ); 
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / diff.tv_usec ) * 0.3; //We naïvely assume we have more than 1 FPS
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit lower limit with more than one dot detected
                            fprintf( stderr, "Too bright. More than one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images 
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7( linux version ),
        //remove higher bits using AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g': 
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e': 
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's': 
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze

            case 'm': 
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles the noise reduction algorithm

            case 'q': //falling through here to quit

            case  27: 
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping
    cvReleaseImage( &grabbedImage );
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If warp is off, the window is already destroyed (or was never created)
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
Ejemplo n.º 29
0
/*
Allows user to view an array of images as a video.  Keyboard controls
are as follows:

<ul>
<li>Space - start and pause playback</li>
<li>Page Down - skip forward 10 frames</li>
<li>Page Up - jump back 10 frames</li>
<li>Right Arrow - skip forward 1 frame</li>
<li>Left Arrow - jump back 1 frame</li>
<li>Backspace - jump back to beginning</li>
<li>Esc - exit playback</li>
<li>Closing the window also exits playback</li>
</ul>

@param imgs an array of images
@param n number of images in \a imgs
@param win_name name of window in which images are displayed
*/
void vid_view( IplImage** imgs, int n, char* win_name )
{
	int k, i = 0, playing = 0;

	cvNamedWindow( win_name, 1 );
	cvShowImage( win_name, imgs[i] );
	while( ! win_closed( win_name ) )
	{
		/* if already playing, advance frame and check for pause */
		if( playing )
		{
			i = MIN( i + 1, n - 1 );
			cvNamedWindow( win_name, 1 );
			cvShowImage( win_name, imgs[i] );
			k = cvWaitKey( 33 );
			if( k == ' '  ||  i == n - 1 )
				playing = 0;
		}

		else
		{
			k = cvWaitKey( 0 );
			switch( k )
			{
				/* space */
			case ' ':
				playing = 1;
				break;

				/* esc */
			case 27:
			case 1048603:
				cvDestroyWindow( win_name );
				break;

				/* backspace */
			case '\b':
				i = 0;
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* left arrow */
			case 65288:
			case 1113937:
				i = MAX( i - 1, 0 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* right arrow */
			case 65363:
			case 1113939:
				i = MIN( i + 1, n - 1 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* page up */
			case 65365:
			case 1113941:
				i = MAX( i - 10, 0 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;

				/* page down */
			case 65366:
			case 1113942:
				i = MIN( i + 10, n - 1 );
				cvNamedWindow( win_name, 1 );
				cvShowImage( win_name, imgs[i] );
				break;
			}
		}
	}
}
Ejemplo n.º 30
0
int main( int argc, char** argv ) { 
	
	int res;
	int i;
	
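	// Precompute a lookup table mapping 11-bit Kinect depth (0-2047) through a cubic
	// gamma curve; presumably consumed by depth_cb for false-color depth rendering.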
	for (i=0; i<2048; i++) {
		float v = i/2048.0;
		v = powf(v, 3)* 6;
		t_gamma[i] = v*6*256;
	}
	
	printf("Kinect camera test\n");
	
	if (freenect_init(&f_ctx, NULL) < 0) {
		printf("freenect_init() failed\n");
		return 1;
	}
	
	if (freenect_open_device(f_ctx, &f_dev, 0) < 0) {
		printf("Could not open device\n");
		return 1;
	}
	
	cvNamedWindow( "RGB", CV_WINDOW_AUTOSIZE );
	cvMoveWindow( "RGB", 0, 0);
	rgbBack = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	rgbFront = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	
	cvNamedWindow( "Depth", CV_WINDOW_AUTOSIZE );
	cvMoveWindow("Depth", FREENECT_FRAME_W, 0);
	depthBack = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	depthFront = cvCreateImage(cvSize(FREENECT_FRAME_W, FREENECT_FRAME_H), IPL_DEPTH_8U, 3);
	
	freenect_set_depth_callback(f_dev, depth_cb);
	freenect_set_rgb_callback(f_dev, rgb_cb);
	freenect_set_rgb_format(f_dev, FREENECT_FORMAT_RGB);
	freenect_set_depth_format(f_dev, FREENECT_FORMAT_11_BIT);
	
	res = pthread_create(&kinect_thread, NULL, kinect_threadFunc, NULL);
	if (res) {
		printf("pthread_create failed\n");
		return 1;
	}
	
	freenect_start_depth(f_dev);
	freenect_start_rgb(f_dev);
	
	while(1) {  
		pthread_mutex_lock(&backbuf_mutex);
		{	
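			// Wait until the Kinect thread has delivered both a fresh RGB and a fresh depth frame into the back buffers.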
			while (got_frames < 2) {
				pthread_cond_wait(&framesReady_cond, &backbuf_mutex);
			}
			
			cvConvertImage(rgbBack, rgbFront, CV_BGR2GRAY);
			cvConvertImage(depthBack, depthFront, CV_BGR2GRAY);
			
			got_frames = 0;
		}
		pthread_mutex_unlock(&backbuf_mutex);
		
		
		cvShowImage("RGB", rgbFront);
		cvShowImage("Depth", depthFront);
		
		char c = cvWaitKey(10);
		if( c == 27 ) break;
	}
	
	
	cvDestroyWindow( "RGB" );
	cvDestroyWindow( "Depth" );
	return 0;
}