Example #1
void MainWindow::OpticalFlow()
{
    cvReleaseCapture(&pCapture);
 //   pCapture=cvCaptureFromCAM(0);

    //use webcam
    CvCapture* cam = cvCaptureFromCAM( 0 ) ;
    while(1)
    {
        //get a color image
        IplImage* frame = cvQueryFrame(cam) ;

        CvSize img_sz = cvGetSize(frame);
        const int win_size = 10 ;

        //convert the previous frame to grayscale
        IplImage* frame_prev = cvQueryFrame(cam) ;
        IplImage* img_prev = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
        cvCvtColor( frame_prev,img_prev ,CV_BGR2GRAY);

        //convert the current frame to grayscale
        IplImage* frame_cur = cvQueryFrame(cam) ;
        IplImage* img_curr = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
        cvCvtColor( frame_cur,img_curr ,CV_BGR2GRAY);

        //create an image to display the result
        IplImage* img_res = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
        for ( int y = 0 ; y < img_sz.height ; ++y )
        {
            uchar* ptr = (uchar*)( img_res->imageData + y * img_res->widthStep ) ;
            for ( int x = 0 ; x <img_res->width; ++x )
            {
                ptr[x] = 255 ;
            }
        }

        //get good features
        IplImage* img_eig = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ;
        IplImage* img_temp = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ;
        int corner_count = MAX_CORNERS ;
        CvPoint2D32f*  features_prev = new CvPoint2D32f[MAX_CORNERS] ;

        cvGoodFeaturesToTrack(
                    img_prev,
                    img_eig,
                    img_temp,
                    features_prev,
                    &corner_count,
                    0.01,
                    5.0,
                    0,
                    3,
                    0,
                    0.4
                    );

        cvFindCornerSubPix(
                    img_prev,
                    features_prev,
                    corner_count,
                    cvSize(win_size,win_size),
                    cvSize(-1,-1),
                    cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.03)
                    );

        // L-K
        char feature_found[ MAX_CORNERS ] ;
        float feature_errors[ MAX_CORNERS ] ;

        CvSize pyr_sz = cvSize( frame->width + 8 ,frame->height / 3 ) ;

        IplImage* pyr_prev = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ;
        IplImage* pyr_cur = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ;
        CvPoint2D32f*  features_cur = new CvPoint2D32f[ MAX_CORNERS ] ;

        cvCalcOpticalFlowPyrLK(
                    img_prev,
                    img_curr,
                    pyr_prev,
                    pyr_cur,
                    features_prev,
                    features_cur,
                    corner_count,
                    cvSize(win_size,win_size),
                    5,
                    feature_found,
                    feature_errors,
                    cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.3),
                    0
                    );

        for ( int i = 0 ; i < corner_count ; i++)
        {
            if ( 0 == feature_found[i] || feature_errors[i] > 550 )
            {
         //       printf("error is %f \n" , feature_errors[i] ) ;
                continue ;
            }

         //   printf("find it !\n") ;

            CvPoint pt_prev = cvPoint( features_prev[i].x , features_prev[i].y ) ;
            CvPoint pt_cur = cvPoint( features_cur[i].x , features_cur[i].y ) ;

            cvLine( img_res,pt_prev,pt_cur,CV_RGB( 255,0,0),2 );
        }

        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(frame_cur,img_curr,img_res);

        cvReleaseImage(&img_curr);
        cvReleaseImage(&img_eig);
        cvReleaseImage(&img_prev);
        cvReleaseImage(&img_res);
        cvReleaseImage(&img_temp);
        cvReleaseImage(&pyr_prev);
        cvReleaseImage(&pyr_cur);
        delete[] features_prev;
        delete[] features_cur;

    }
    cvReleaseCapture(&cam);
}
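Note: MAX_CORNERS and MainWindow::Display are used above but never defined in the snippet; plausible declarations (assumptions for illustration, not taken from the source) would be:

#define MAX_CORNERS 500   // upper bound on the number of features to track

// MainWindow::Display(IplImage* frame, IplImage* gray, IplImage* result)
// is assumed to render the three images in the GUI; the signature is
// inferred from the call sites.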
Example #2
void KfieldGui::on_mouse(int event, int x, int y, int flags, void* param) {
	if (state == FINISH || state == READY_TO_SEND_TWO || state == READY_TO_SEND_SINGLE)
		return;
	pthread_mutex_lock(&lock);
	cvCopy(field, screenfield);

	// select which mouse event occurred
	if (event == CV_EVENT_LBUTTONDOWN) {

		if (state == TWOPOINTSELECTION_2) {
			cvLine(cleanfield, cvPoint(point1.x, point1.y), cvPoint(x, y), color["gray"], 1, 0, 0);
			cvLine(field, cvPoint(point1.x, point1.y), cvPoint(x, y), color["gray"], 1, 0, 0);
			state = TWOPOINTSELECTION_3;
		}
		last.x = x;
		last.y = y;
		drawing = 1;

	} else if (event == CV_EVENT_LBUTTONUP) {

		cvLine(cleanfield, cvPoint(last.x, last.y), cvPoint(x, y), color["gray"], 1, CV_AA, 0);
		cvCircle(cleanfield, cvPoint(last.x, last.y), 100 / scale/*sqrt((last.x - x) * (last.x - x) + (last.y - y) * (last.y - y))*/, CV_RGB(100, 100, 50), 1, CV_AA, 0);

		std::cout << "X " << last.x << " Y " << last.y << " angle " << atan2f(-(y - last.y), x - last.x) * 180.0 / 3.141592653589793238 << std::endl;
		if (state == ONEPOINTSELECTION || state == TWOPOINTSELECTION_1) {
			point1 = last;
			//			point1.y = last.y;

			KfieldGui::pose1.set_x(gui2world_x(last.x));//last.x * scale - (2 * margintoline + field_width) / 2.0);
			KfieldGui::pose1.set_y(gui2world_y(last.y)); //-(last.y * scale - (2 * margintoline + field_height) / 2.0));
			KfieldGui::pose1.set_phi(atan2f(-(y - last.y), x - last.x));
			KfieldGui::pose1.set_confidence(sqrt((last.x - x) * (last.x - x) + (last.y - y) * (last.y - y)));

			std::cout << "X " << pose1.x() << " Y " << pose1.y() << " angle " << atan2f(-(y - last.y), x - last.x) * 180.0 / 3.141592653589793238 << std::endl;
			tempparticl.x = pose1.x();
			tempparticl.y = pose1.y();
			tempparticl.phi = pose1.phi();
			tempparticl.Weight = pose1.confidence();

			if (state == ONEPOINTSELECTION) {
				state = READY_TO_SEND_SINGLE;
				drawing = 0;
			} else {
				state = TWOPOINTSELECTION_2;
				cout << "Select Second Point " << endl;
			}

		} else if (state == TWOPOINTSELECTION_3) {
			KfieldGui::pose2.set_x(gui2world_x(last.x));
			KfieldGui::pose2.set_y(gui2world_y(last.y));
			KfieldGui::pose2.set_phi(atan2f(-(y - last.y), x - last.x));
			KfieldGui::pose2.set_confidence(sqrt((last.x - x) * (last.x - x) + (last.y - y) * (last.y - y)));

			state = FINISH;//READY_TO_SEND_TWO;
			//			cout << " Ready to send two" << endl;
			record_odometry_data();
			drawing = 0;
		}
	} else if (event == CV_EVENT_MOUSEMOVE) {
		if (drawing)
			drawCursor(x, y);
	}
	pthread_mutex_unlock(&lock);
}
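The gui2world_x/gui2world_y helpers are not shown, but the formulas left in the comments above suggest the following sketch (scale, margintoline, field_width and field_height are assumed to be static class data):

static float gui2world_x(int x) {
	return x * scale - (2 * margintoline + field_width) / 2.0;
}

static float gui2world_y(int y) {
	return -(y * scale - (2 * margintoline + field_height) / 2.0);
}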
Example #3
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds

    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if (!mhi || mhi->width != size.width || mhi->height != size.height) 
    {
        if (buf == 0) 
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );

        cvZero( mhi ); // clear MHI at the beginning

        // create images sized like img
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];

    cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames, stored in silh

    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY); // and threshold it

    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI from the silhouette

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is the marked motion components map. It is not used further
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

    // iterate through the motion components;
    // one more iteration (i == -1) corresponds to the whole image (global motion)
    for (i = -1; i < seq->total; i++) 
    {

        if (i < 0) 
        { 
            // case of the whole image: use the full frame
            comp_rect = cvRect( 0, 0, size.width, size.height );

            color = CV_RGB(255,255,255); // white for global motion

            magnitude = 100;
        }
        else 
        { 
            // i-th motion component: take its bounding rect
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;

            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;

            color = CV_RGB(255,0,0); // red for individual components

            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation (computed within the component's extracted mask)
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);

        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if (count < comp_rect.width*comp_rect.height * 0.05)
        {
            continue;
        }

        // draw a clock with an arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
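update_mhi relies on several globals that are not part of the snippet. The code closely follows OpenCV's motempl.c sample, so a plausible set of definitions (an assumption based on that sample) is:

// motion-template parameters and shared state assumed by update_mhi
const double MHI_DURATION = 1;        // seconds of motion history to keep
const double MAX_TIME_DELTA = 0.5;    // max gradient time delta
const double MIN_TIME_DELTA = 0.05;   // min gradient time delta
const int N = 4;                      // length of the cyclic frame buffer
IplImage **buf = 0;                   // cyclic frame buffer
int last = 0;                         // index of the most recent frame in buf
IplImage *mhi = 0, *orient = 0, *mask = 0, *segmask = 0;
CvMemStorage *storage = 0;            // storage for cvSegmentMotion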
int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));  // note: stricmp is Windows-specific; use strcasecmp on POSIX
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks;
	double tracker_fps, total_fps; 

	start_ticks         = total_ticks  = 0;
	tracker_total_ticks = 0;
		
	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			break;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		///////////////////////////////////////////////////////////////////
		// do face tracking and face recognition
		start_ticks = ticks = cvGetTickCount();	

        if( is_piclist )
            faceAnalyzer.detect(gray_image, prob_estimate, sState);
        else
		    faceAnalyzer.track(gray_image, prob_estimate, sState, image);   // track face in each frame but recognize by pthread
		//faceAnalyzer.detect(gray_image, prob_estimate, sState);// track and recognizer face in each frame 

		int face_num = faceAnalyzer.getFaceNum();

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

		
		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

		// blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		{
			// get face rect and id from face tracker
			CvRectItem rectItem = faceAnalyzer.getFaceRect(i);
			CvRect rect = rectItem.rc;
			int    face_trackid = rectItem.fid;
			float  probSmile = faceAnalyzer.getFaceSmileProb(i);
			int    bBlink  = faceAnalyzer.getFaceBlink(i);
			int    bSmile  = faceAnalyzer.getFaceSmile(i);
			int    bGender = faceAnalyzer.getFaceGender(i);
			int    nAgeID  = faceAnalyzer.getFaceAge(i);
			int    nFaceID = faceAnalyzer.getFaceID(i);
			float  fFaceProb= faceAnalyzer.getFaceProb(i);
			
			char *sFaceCaption = NULL;
			char sFaceNameBuff[256];
			char *sFaceName = faceAnalyzer.getFaceName(i);
			if(sFaceName[0] != '\0')
			{
				sprintf(sFaceNameBuff, "%s %.2f", sFaceName, fFaceProb);
				sFaceCaption = sFaceNameBuff;
			}

			if( ! quiet )
			{
				CvPoint2D32f *landmark6 = NULL;
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

				int trackid = -1; //face_trackid , don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
		}
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		///////////////////////////////////////////////////////////////////
		total_ticks += (cvGetTickCount() - start_ticks);
		
		// count processed frames
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);

			CvRectItem *rects = faceAnalyzer.getFaceRects();
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
		
		//show Image
		if (image.width() <= 800)
			cvShowImage( str_title, image );
		else
		{   // display a scaled-down image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey(1);
		//int key = cvWaitKey(0);
		if( key == ' ' )     // press space bar to pause the video play
			cvWaitKey( 0 );                           
		else if( key == 27 ) // press 'esc' to exit
			break;	                                   
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = faceAnalyzer.getFaceRect(0).rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   //enable flag to collect face exemplars for the selected face name
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting face exemplars
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{
				// save faceset xml model
				faceAnalyzer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );
	
	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: %.1f ", tracker_fps);

	//save updated faceset model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceAnalyzer.saveFaceModelXML("faceset_model.xml");
	}

	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight =10; 
	faceAnalyzer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceAnalyzer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);

	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );

    return 0;
}
Example #5
int mainMatch(void)
{
  // Initialise capture device
  CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
  if(!capture) error("No Capture");

  // Declare Ipoints and other stuff
  IpPairVec matches;
  IpVec ipts, ref_ipts;

  // This is the reference object we wish to find in the video frame.
  // Replace "imgs/object.jpg" with an image of the planar object
  // to be located in the video.
  IplImage *img = cvLoadImage("imgs/object.jpg");
  if (img == NULL) error("Need to load reference image in order to run matching procedure");
  CvPoint src_corners[4] = {{0,0}, {img->width,0}, {img->width, img->height}, {0, img->height}};
  CvPoint dst_corners[4];

  // Extract reference object Ipoints
  surfDetDes(img, ref_ipts, false, 3, 4, 3, 0.004f);
  drawIpoints(img, ref_ipts);
  showImage(img);

  // Create a window
  cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );

  // Main capture loop
  while( true )
  {
    // Grab frame from the capture source
    img = cvQueryFrame(capture);

    // Detect and describe interest points in the frame
    surfDetDes(img, ipts, false, 3, 4, 3, 0.004f);

    // Fill match vector
    getMatches(ipts,ref_ipts,matches);

    // This call finds where the object corners should be in the frame
    if (translateCorners(matches, src_corners, dst_corners))
    {
      // Draw box around object
      for(int i = 0; i < 4; i++ )
      {
        CvPoint r1 = dst_corners[i%4];
        CvPoint r2 = dst_corners[(i+1)%4];
        cvLine( img, cvPoint(r1.x, r1.y),
          cvPoint(r2.x, r2.y), cvScalar(255,255,255), 3 );
      }

      for (unsigned int i = 0; i < matches.size(); ++i)
        drawIpoint(img, matches[i].first);
    }

    // Draw the FPS figure
    drawFPS(img);

    // Display the result
    cvShowImage("OpenSURF", img);

    // If ESC key pressed exit loop
    if( (cvWaitKey(10) & 255) == 27 ) break;
  }

  // Release the capture device
  cvReleaseCapture( &capture );
  cvDestroyWindow( "OpenSURF" );
  return 0;
}
int main()
{
	// Initialize capturing live feed from the camera
	CvCapture* capture = 0;
	capture = cvCaptureFromCAM(0);	

	// Couldn't get a device? Throw an error and quit
	if(!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }

	// The two windows we'll be using
    cvNamedWindow("video");
	cvNamedWindow("thresh");

	// This image holds the "scribble" data...
	// the tracked positions of the ball
	IplImage* imgScribble = NULL;

	// An infinite loop
	while(true)
    {
		// Will hold a frame captured from the camera
		IplImage* frame = 0;
		frame = cvQueryFrame(capture);

		// If we couldn't grab a frame... quit
        if(!frame)
            break;
		
		// If this is the first frame, we need to initialize it
		if(imgScribble == NULL)
		{
			imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(frame);

		// Calculate the moments to estimate the position of the ball
		CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
		cvMoments(imgYellowThresh, moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(moments, 1, 0);
		double moment01 = cvGetSpatialMoment(moments, 0, 1);
		double area = cvGetCentralMoment(moments, 0, 0);

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		if(area > 0)
		{
			posX = moment10/area;
			posY = moment01/area;
		}

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if it's a valid position
		if(lastX>0 && lastY>0 && posX>0 && posY>0)
		{
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(frame, imgScribble, frame);
		cvShowImage("thresh", imgYellowThresh);
		cvShowImage("video", frame);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if(c!=-1)
		{
			// If pressed, break out of the loop
            break;
		}

		// Release the thresholded image... we need no memory leaks.. please
		cvReleaseImage(&imgYellowThresh);

		free(moments);  // allocated with malloc, so free (not delete)
    }

	// We're done using the camera. Other applications can now use it
	cvReleaseCapture(&capture);
    return 0;
}
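GetThresholdedImage is the one helper this loop depends on that is not shown. A minimal sketch, assuming the usual HSV threshold for a yellow ball (the exact hue range is illustrative, not from the source):

IplImage* GetThresholdedImage(IplImage* img)
{
	// convert to HSV so the color range is easier to express
	IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
	cvCvtColor(img, imgHSV, CV_BGR2HSV);

	// keep pixels whose hue falls in the yellow band
	IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
	cvInRangeS(imgHSV, cvScalar(20, 100, 100, 0), cvScalar(30, 255, 255, 0), imgThreshed);

	cvReleaseImage(&imgHSV);
	return imgThreshed;
}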
Example #7
CvRect SimpleHandDetector::get_feature(IplImage *img, FeatureData *feature)
{
    IplImage* pFrame = img;
    char* f = feature->ch;
#if DEBUG
    cvZero(feature_img);
#endif
    cvCvtColor(pFrame, YCrCb, CV_BGR2YCrCb);
    cvInRangeS(YCrCb, lower, upper, skin);
    //cvErode(skin,skin, 0, 1);   // morphological filtering to remove noise
    //cvDilate(skin,skin, 0, 3);
    cvSmooth(skin,skin);

    //cvCopy(skin,hand);

    cvClearMemStorage(storage);
    CvSeq * contour = 0;

    cvFindContours(skin, storage, &contour, sizeof (CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    if(contour == NULL)
        return cvRect(-1,-1,-1,-1);

    float max = fabs(cvContourArea(contour, CV_WHOLE_SEQ));
    for (CvSeq* p = contour; p != NULL; p = p->h_next)
    {
        float now = fabs(cvContourArea(p, CV_WHOLE_SEQ));
        if (now > max)
        {
            max = now;
            contour = p;
        }
    }

    CvRect rect = cvBoundingRect(contour, 0);
    if (rect.width > STEP && rect.height > STEP)
    {
        //cvRectangle(hand, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height), cvScalar(255, 255, 255), 3);

        cvZero(hand);
        cvDrawContours(hand, contour, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 0, CV_FILLED,8, cvPoint(0, 0));
        int w_s = rect.width / STEP;
        int h_s = rect.height / STEP;
        int w_off = rect.x;
        int h_off = rect.y;
#if DEBUG
        for(int s = 0;s <= STEP;s++)
        {
            cvLine(hand,cvPoint(rect.x,h_off),cvPoint(rect.x + rect.width,h_off),cvScalar(255),1);
            h_off += h_s;
            cvLine(hand,cvPoint(w_off,rect.y),cvPoint(w_off,rect.y + rect.height),cvScalar(255),1);
            w_off += w_s;
        }
#endif
        w_s = rect.width / STEP;
        h_s = rect.height / STEP;
        int half = w_s * h_s;
        for(int p = 0;p < STEP;p++)
        {
            for(int q = 0;q < STEP;q++)
            {
                int count = 0;
                w_off = rect.x + q * w_s;
                h_off = rect.y + p * h_s;
                for(int y = 0;y < h_s;y++)
                {
                    for(int x = 0;x < w_s;x++)
                    {
                        if(IMG8U(hand,w_off + x,h_off + y) == 255)
                            count++;
                    }
                }
                if((double)count / half > 0.5)
                {
                    f[p * STEP + q] = '1';
#if DEBUG
                        cvSetImageROI(feature_img,cvRect(q * 100,p * 100,100,100));
                        cvSet(feature_img,cvScalar(255));
#endif
                }else
                    f[p * STEP + q] = '0';
            }
        }
#if DEBUG
        cvShowImage("hand",hand);
        cvResetImageROI(feature_img);
        cvShowImage("feature",feature_img);
        cvWaitKey(10);
#endif
    }else
        rect = cvRect(-1,-1,-1,-1);

    return rect;
    /*char ch;
    if((ch = cvWaitKey(10)) > 0)
    {
        if(is_train)
        {
            if(ch >= '0' && ch <= '9')
            {
                printf("%c:",ch);
                for(int p = 0;p < step;p++)
                {
                    for(int q = 0;q < step;q++)
                        printf("%c",f[p * step + q]);
                    printf("#\n");
                }
            }
            else if(ch == 13 || ch == 27)
                break;
        }
        else
        {
            if(ch == 13 || ch == 27)
                break;
            else
                findHand(f);
        }
    }*/
}
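The IMG8U pixel accessor used in the inner loop is not defined in the snippet. Given the (image, x, y) argument order at the call site, a plausible definition (an assumption) is:

/* read the 8-bit pixel at column x, row y */
#define IMG8U(img, x, y) \
	(((uchar*)((img)->imageData + (y) * (img)->widthStep))[(x)])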
void CvFunctionPlot::PlotLine(const CvPoint& Point1,const CvPoint& Point2,const CvScalar& PlotColour)
{
    cvLine(Canvas,Point1,Point2, PlotColour,1,8,0);
}
void bw_track_blobs(StereoCluster *cluster)
{

    int *free_blobs = (int *)calloc(cluster->data->NUM_OF_MARKERS,sizeof(int));
    double *d_epi = (double *)calloc(cluster->data->NUM_OF_MARKERS,sizeof(double));
    double *d_pred = (double *)calloc(cluster->data->NUM_OF_MARKERS,sizeof(double));


    for(int k=0; k<cluster->data->NUM_OF_MARKERS; ++k)
    {
        free_blobs[k] = 1;
    }


    double nx,ny;
    double lx,ly,lz;
   
    

    double d_min;
    double d_thresh = 5.0;
    int id;
    int d_cum;
    double d;

    CvPoint2D32f old_marker_pos;


    /* compute distances between markers, if markers too close, no stable tracking possible */
    for(int m=0; m<cluster->data->NUM_OF_MARKERS-1; ++m)
    {
        for(int n=m+1; n<cluster->data->NUM_OF_MARKERS; ++n)
        {
            d = sqrt( (cluster->tracker[1]->marker[m]->blob_pos.x-cluster->tracker[1]->marker[n]->blob_pos.x)*(cluster->tracker[1]->marker[m]->blob_pos.x-cluster->tracker[1]->marker[n]->blob_pos.x) + 
                (cluster->tracker[1]->marker[m]->blob_pos.y-cluster->tracker[1]->marker[n]->blob_pos.y)*(cluster->tracker[1]->marker[m]->blob_pos.y-cluster->tracker[1]->marker[n]->blob_pos.y) );
            if(d<4.0)
            {
                cluster->tracker[1]->state = OFF_TRACK;
                cluster->state = OFF_TRACK;
                free(free_blobs); free(d_epi); free(d_pred);
                return;
            } 
        }
    }


    for(int mc=0; mc<cluster->data->NUM_OF_MARKERS; ++mc)
    {
        /* read out pixel coordinates from color camera */
        nx = cluster->tracker[0]->marker[mc]->pos_measured.x;
        ny = cluster->tracker[0]->marker[mc]->pos_measured.y;
        
        /* compute epipolar line for right image*/
        lx = cluster->FundamentalMatrix[0][0]*nx + cluster->FundamentalMatrix[0][1]*ny + cluster->FundamentalMatrix[0][2];
        ly = cluster->FundamentalMatrix[1][0]*nx + cluster->FundamentalMatrix[1][1]*ny + cluster->FundamentalMatrix[1][2];
        lz = cluster->FundamentalMatrix[2][0]*nx + cluster->FundamentalMatrix[2][1]*ny + cluster->FundamentalMatrix[2][2];
        
        lx /= ly;
        lz /= ly;
        ly = 1.0;

        if(mc==0)
        {
            double x0,x1,y0,y1;
            x0 = 0.0;
            x1 = 779.0;
            y0 = -(lx*x0+lz);
            y1 = -(lx*x1+lz);
//          printf("%f %f %f %f\n",x0,y0,x1,y1);
            cvLine(cluster->tracker[1]->frame,cvPoint((int)x0,(int)y0),cvPoint((int)x1,(int)y1),cvScalarAll(255),1,CV_AA);
        }
#if 0
        if(cluster->tracker[1]->marker[mc]->pos_is_set)
        {

            old_marker_pos.x = cluster->tracker[1]->marker[mc]->pos_measured.x;
            old_marker_pos.y = cluster->tracker[1]->marker[mc]->pos_measured.y;


    //      printf("m = (%f %f)\n",tracker->marker[nm]->pos_measured.x,tracker->marker[nm]->pos_measured.y);

            if(cluster->tracker[1]->marker[mc]->vel_is_set)
            {
                /* if marker velocity is known predict new position */
                cluster->tracker[1]->marker[mc]->pos_predicted.x = cluster->tracker[1]->marker[mc]->pos_measured.x + cluster->tracker[1]->marker[mc]->vel.x;
                cluster->tracker[1]->marker[mc]->pos_predicted.y = cluster->tracker[1]->marker[mc]->pos_measured.y + cluster->tracker[1]->marker[mc]->vel.y;
            }
            else
            {
                /* otherwise take last known position for the center of the ROI as best guess */
                cluster->tracker[1]->marker[mc]->pos_predicted.x = cluster->tracker[1]->marker[mc]->pos_measured.x;
                cluster->tracker[1]->marker[mc]->pos_predicted.y = cluster->tracker[1]->marker[mc]->pos_measured.y;
            }

            

            for(int blob=0; blob<cluster->data->NUM_OF_MARKERS; ++blob)
            {
                 /* distance of blob to epipolar line */
                d_epi[blob] = fabs( (lx*cluster->tracker[1]->marker[blob]->blob_pos.x + cluster->tracker[1]->marker[blob]->blob_pos.y + lz) / sqrt(1.0 + lx*lx) );
                /*
                d_pred[blob] = sqrt( (cluster->tracker[1]->marker[mc]->pos_predicted.x-cluster->tracker[1]->marker[blob]->blob_pos.x)*(cluster->tracker[1]->marker[mc]->pos_predicted.x-cluster->tracker[1]->marker[blob]->blob_pos.x) + 
                    (cluster->tracker[1]->marker[mc]->blob_pos.y-cluster->tracker[1]->marker[blob]->pos_predicted.y)*(cluster->tracker[1]->marker[mc]->blob_pos.y-cluster->tracker[1]->marker[blob]->pos_predicted.y) );
                */
            }

        }
#endif
        
        d_cum = 0;
        d_min = 1e6;
        id = 77;    // sentinel: no blob assigned yet
        for(int blob=0; blob<cluster->data->NUM_OF_MARKERS; ++blob)
        {
             /* distance of blob to epipolar line */
            d_epi[blob] = fabs( (lx*cluster->tracker[1]->marker[blob]->blob_pos.x + cluster->tracker[1]->marker[blob]->blob_pos.y + lz) / sqrt(1.0 + lx*lx) );
//          printf("%f\n",d_epi[blob]);
            if(d_epi[blob] < d_thresh)
            {
                d_cum++;
            }
            if(d_cum > 1)
            {
                cluster->state = OFF_TRACK;
                cluster->tracker[1]->state = OFF_TRACK;
                free(free_blobs); free(d_epi); free(d_pred);
                return;
            }
            if(d_epi[blob]<d_min)
            {
                d_min = d_epi[blob];
                id = blob;
            }

        }
        
        if(id == 77)
        {
            cluster->state = OFF_TRACK;
            cluster->tracker[1]->state = OFF_TRACK;
            free(free_blobs); free(d_epi); free(d_pred);
            return;
        }
        else
        {
            cluster->tracker[1]->marker[mc]->pos_measured.x = cluster->tracker[1]->marker[id]->blob_pos.x;
            cluster->tracker[1]->marker[mc]->pos_measured.y = cluster->tracker[1]->marker[id]->blob_pos.y;
            cluster->tracker[1]->marker[mc]->pos_is_set = 1;
        }

    }

    cluster->tracker[1]->state = ON_TRACK;
    cluster->state = ON_TRACK;

    free(free_blobs);
    free(d_epi);
    free(d_pred);
}
Example #10
int main(int argc, char** argv)
{
	cvNamedWindow("src",0 );
	cvNamedWindow("warp image",0 );
	cvNamedWindow("warp image (grey)",0 );
	cvNamedWindow("Smoothed warped gray",0 );
	cvNamedWindow("threshold image",0 );
	cvNamedWindow("canny",0 );
	cvNamedWindow("final",1 );
		
	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	float Z=1;

	dstQuad[0].x = 216; //dst top left
	dstQuad[0].y = 15;
	dstQuad[1].x = 392; //dst top right
	dstQuad[1].y = 6;
	dstQuad[2].x = 12; //dst bottom left
	dstQuad[2].y = 187;
	dstQuad[3].x = 620; //dst bottom right
	dstQuad[3].y = 159;

	srcQuad[0].x = 100; //src top left
	srcQuad[0].y = 120;
	srcQuad[1].x = 540; //src top right
	srcQuad[1].y = 120;
	srcQuad[2].x = 100; //src bottom left
	srcQuad[2].y = 360;
	srcQuad[3].x = 540; //src bottom right
	srcQuad[3].y = 360;

	cvGetPerspectiveTransform(srcQuad, dstQuad,	warp_matrix);
	
	//CvCapture *capture = cvCaptureFromCAM(0);
	/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	IplImage* image = cvRetrieveFrame(capture);
	CvSize imgSize;
    imgSize.width = image->width;
    imgSize.height = image->height;
	CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
	int ik=0;
	while(1)
	{
		//IplImage* img = cvQueryFrame(capture);
		IplImage* img = cvLoadImage( "../../Data/6 Dec/009.jpg", CV_LOAD_IMAGE_COLOR);
		cvShowImage( "src", img );
		//cvWriteFrame(writer, img);
		//cvSaveImage(nameGen(ik++), img, 0);
		
		IplImage* warp_img = cvCloneImage(img);
		CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
		cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		cvShowImage( "warp image", warp_img );

		IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
		cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
		cvShowImage( "warp image (grey)", grayimg );
		
		cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
		cvShowImage( "Smoothed warped gray", grayimg );
		
		IplImage* thresholded_img=simplethreshold(grayimg, 220);
		cvShowImage("threshold image",thresholded_img);

		//grayimg = doCanny( thresholded_img, 50, 100, 3 );
		cvReleaseImage(&grayimg);   // drop the smoothed gray before reassigning
		grayimg = cvCloneImage(thresholded_img);
		cvShowImage("canny",grayimg);

		IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
		CvMemStorage* line_storage=cvCreateMemStorage(0);

		CvSeq* results =  cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
		double angle = 0.0, temp;
		double lengthSqd, wSum=0;
		double xc = 0, yc = 0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			cvLine( finalimg, line[0], line[1], CV_RGB(0,0,255), 1, CV_AA, 0 );
			//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
			wSum += 1;//lengthSqd;
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			if(temp < 0)
				angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
			else
				angle += (180/3.14*temp - 90)/* * lengthSqd*/;
			xc += line[0].x + line[1].x;
			yc += line[0].y + line[1].y;
		}
		angle=angle/wSum;
		//angle+=10;
		printf("total: %d, angle: % f\n", results->total, angle);

		xc /= 2*results->total;
		yc /= 2*results->total;
		double m = (angle != 0) ? 1/tan(angle*3.14/180) : 100;	// 100 represents a very large slope (near vertical)
		m=-m;

		double x1, y1, x2, y2;	// The Center Line
		y1 = 0;
		y2 = finalimg->height;
		x1 = xc + (y1-yc)/m;
		x2 = xc + (y2-yc)/m; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,0), 1, CV_AA, 0);
		printf("point: %f\t%f\n", xc, yc);

		double lx=0, ly=0, lm=0, lc=0, rx=0, ry=0, rm=0, rc=0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			double xm = (line[0].x + line[1].x)/2.0, ym = (line[0].y + line[1].y)/2.0;
			if(ym - yc - m*(xm - xc) > 0)
			{
				lx += xm;
				ly += ym;
				lm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				lc++;
			}
			else
			{
				rx += xm;
				ry += ym;
				rm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				rc++;
			}
		}

		// The Left Line
		lx /= lc;	ly /= lc;	lm /= lc;
		rx /= rc;	ry /= rc;	rm /= rc;
		printf("lins: %f\t%f\t%f\n", lx, ly, lm);
		printf("lins: %f\t%f\t%f\n", rx, ry, rm);
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = lx + (y1-ly)/lm;
		x2 = lx + (y2-ly)/lm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(255,255,0), 1, CV_AA, 0);

		// The Right Line
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = rx + (y1-ry)/rm;
		x2 = rx + (y2-ry)/rm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,255), 1, CV_AA, 0);

		// The Center Point
		CvPoint vpt = cvPoint(finalimg->width/2, 416);
		printf("center point: %d\t%d\n", vpt.x, vpt.y);
		
		// The Dl and Dr
		int dl = vpt.x - lx + (ly-vpt.y+0.0)/lm;
		int dr = (vpt.y-ry+0.0)/rm + rx - vpt.x;
		printf("dl-dr: %d\n", dl-dr);

		cvShowImage("final",finalimg);

		if(dl-dr < SAFEZONE_LL)	// Assume that the bot lies just on the boundary of the safe zone
		{
			if(angle < -10)
			{
				navCommand(7, angle);
			}
			else
			{
				navCommand(7, angle);
			}
		}	
		else if(dl-dr > SAFEZONE_RL)
		{
			if(angle > 10)
			{
				navCommand(-7, angle);
			}
			else
			{
				navCommand(-7, angle);
			}
		}
		else
		{
			if((angle < 10) && (angle > -10))
			{
				navCommand(angle, angle);
			}
			else
			{
				navCommand(0, angle);
			}
		}

		cvWaitKey(0);

		// release per-frame buffers (img is reloaded every pass)
		cvReleaseImage(&img);
		cvReleaseImage(&warp_img);
		cvReleaseImage(&grayimg);
		cvReleaseImage(&thresholded_img);
		cvReleaseImage(&finalimg);
		cvReleaseMemStorage(&line_storage);
	}
}
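simplethreshold, doCanny and navCommand are external helpers the snippet does not include. A minimal sketch of simplethreshold, assuming it simply binarizes the grayscale image at the given cutoff:

IplImage* simplethreshold(IplImage* gray, int cutoff)
{
	// white where the pixel exceeds the cutoff, black elsewhere
	IplImage* out = cvCreateImage(cvGetSize(gray), IPL_DEPTH_8U, 1);
	cvThreshold(gray, out, cutoff, 255, CV_THRESH_BINARY);
	return out;
}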
Example #11
void Figure::DrawAxis(IplImage *output)
{
	int bs = border_size;
	int h = figure_size.height;
	int w = figure_size.width;

	// size of graph
	int gh = h - bs * 2;
	int gw = w - bs * 2;

	// draw the horizontal and vertical axes,
	// letting the x and y axes cross at zero if possible
	float y_ref = y_min;
	if ((y_max > 0) && (y_min <= 0))
		y_ref = 0;

	int x_axis_pos = h - bs - cvRound((y_ref - y_min) * y_scale);

	cvLine(output, cvPoint(bs,     x_axis_pos),
		           cvPoint(w - bs, x_axis_pos),
				   axis_color);
	cvLine(output, cvPoint(bs, h - bs),
		           cvPoint(bs, h - bs - gh),
				   axis_color);

	// Write the scale of the y axis
	CvFont font;
	cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.55,0.7, 0,1,CV_AA);

	int chw = 6, chh = 10;
	char text[16];

	// y max
	if ((y_max - y_ref) > 0.05 * (y_max - y_min))
	{
		snprintf(text, sizeof(text)-1, "%.1f", y_max);
		cvPutText(output, text, cvPoint(bs / 5, bs - chh / 2), &font, text_color);
	}
	// y min
	if ((y_ref - y_min) > 0.05 * (y_max - y_min))
	{
		snprintf(text, sizeof(text)-1, "%.1f", y_min);
		cvPutText(output, text, cvPoint(bs / 5, h - bs + chh), &font, text_color);
	}

	// x axis
	snprintf(text, sizeof(text)-1, "%.1f", y_ref);
	cvPutText(output, text, cvPoint(bs / 5, x_axis_pos + chh / 2), &font, text_color);

	// Write the scale of the x axis
	snprintf(text, sizeof(text)-1, "%.0f", x_max );
	cvPutText(output, text, cvPoint(w - bs - strlen(text) * chw, x_axis_pos + chh),
		      &font, text_color);

	// x min
	snprintf(text, sizeof(text)-1, "%.0f", x_min );
	cvPutText(output, text, cvPoint(bs, x_axis_pos + chh),
		      &font, text_color);
}
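DrawAxis reads several Figure members that the snippet does not show. Declarations consistent with the usage above (an assumption) would be:

// assumed Figure state used by DrawAxis (the class itself is not shown):
//   CvSize   figure_size;             // total plot size in pixels
//   int      border_size;             // margin around the plotting area
//   float    x_min, x_max;            // horizontal data range
//   float    y_min, y_max, y_scale;   // vertical range and pixels-per-unit
//   CvScalar axis_color, text_color;  // drawing colors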
Example #12
void MainWindow::optical_flow()
{
    cvReleaseCapture(&pCapture);
    pCapture = cvCaptureFromFile( "/home/kevin/optical_flow_input.avi" );  // reuse the member; a local here would shadow it
    if (pCapture == NULL)
    {
        fprintf(stderr, "Error: Can't open video.\n");
        return;
    }

    /* Read the video's frame size out of the AVI. */
    CvSize frame_size;
    frame_size.height = (int) cvGetCaptureProperty( pCapture, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width =  (int) cvGetCaptureProperty( pCapture, CV_CAP_PROP_FRAME_WIDTH );

    /* Determine the number of frames in the AVI. */
    long number_of_frames;
    /* Go to the end of the AVI (ie: the fraction is "1") */
    cvSetCaptureProperty( pCapture, CV_CAP_PROP_POS_AVI_RATIO, 1. );
    /* Now that we're at the end, read the AVI position in frames */
    number_of_frames = (int) cvGetCaptureProperty( pCapture, CV_CAP_PROP_POS_FRAMES );
    /* Return to the beginning */
    cvSetCaptureProperty( pCapture, CV_CAP_PROP_POS_FRAMES, 0. );

    while(true)
    {
        static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL;
        static IplImage *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL;
        static IplImage *pyramid1 = NULL, *pyramid2 = NULL;

        cvSetCaptureProperty( pCapture, CV_CAP_PROP_POS_FRAMES, current_frame );

        frame = cvQueryFrame( pCapture );
        if (frame == NULL)
        {
            /* Why did we get a NULL frame?  We shouldn't be at the end. */
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return;
        }

        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );

        cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP);

        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, CV_CVTIMG_FLIP);

        /* Get the second frame of video.  Same principles as the first. */
        frame = cvQueryFrame( pCapture );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return;
        }
        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP);

        /* Preparation: Allocate the necessary storage. */
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

        /* Preparation: This array will contain the features found in frame 1. */
        CvPoint2D32f frame1_features[400];

        int number_of_features;
        number_of_features = 400;

        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features,
                              &number_of_features, .01, .01, NULL);

        /* Pyramidal Lucas Kanade Optical Flow! */

        /* This array will contain the locations of the points from frame 1 in frame 2. */
        CvPoint2D32f frame2_features[400];

        char optical_flow_found_feature[400];
        float optical_flow_feature_error[400];

        /* This is the window size to use to avoid the aperture problem (see slide "Optical Flow: Overview"). */
        CvSize optical_flow_window = cvSize(3,3);
        CvTermCriteria optical_flow_termination_criteria
            = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        /* This is some workspace for the algorithm.
         * (The algorithm actually carves the image into pyramids of different resolutions.)
         */
        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2,
                               frame1_features, frame2_features, number_of_features,
                               optical_flow_window, 5, optical_flow_found_feature,
                               optical_flow_feature_error, optical_flow_termination_criteria, 0 );

        /* For fun (and debugging :)), let's draw the flow field. */
        for(int i = 0; i < number_of_features; i++)
        {
            /* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
            if ( optical_flow_found_feature[i] == 0 )	continue;

            int line_thickness;
            line_thickness = 1;
            /* CV_RGB(red, green, blue) is the red, green, and blue components
             * of the color you want, each out of 255.
             */
            CvScalar line_color;
            line_color = CV_RGB(255,0,0);

            CvPoint p,q;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;

            double angle;
            angle = atan2( (double) p.y - q.y, (double) p.x - q.x );

            double hypotenuse;
            hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

            /* Here we lengthen the arrow by a factor of three. */
            q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
            q.y = (int) (p.y - 3 * hypotenuse * sin(angle));

            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            /* Now draw the tips of the arrow.  I do some scaling so that the
             * tips look proportional to the main line of the arrow.
             */
            p.x = (int) (q.x + 9 * cos(angle + pi / 4));
            p.y = (int) (q.y + 9 * sin(angle + pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 9 * cos(angle - pi / 4));
            p.y = (int) (q.y + 9 * sin(angle - pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        }

        cv::Mat frame1_mat=MainWindow::image_rotate(frame1);
        cv::flip(frame1_mat,frame1_mat,1);
        IplImage dst = frame1_mat;

        cv::Mat frame2_mat=MainWindow::image_rotate(frame2_1C);
        cv::flip(frame2_mat,frame2_mat,1);
        IplImage dst1 = frame2_mat;

        MainWindow::Display(frame,&dst,&dst1);

        int key_pressed;
        key_pressed = cvWaitKey(0);

        if (key_pressed == 'b' || key_pressed == 'B')
            current_frame--;
        else
            current_frame++;
        /* Don't run past the front/end of the AVI. */
        if (current_frame < 0)
            current_frame = 0;

        if (current_frame >= number_of_frames - 1)
            current_frame = number_of_frames - 2;
    }

}
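allocateOnDemand, square and pi are used above without being defined (current_frame is presumably a MainWindow member holding the playback position). The code appears to be adapted from David Stavens' well-known optical-flow demo, so the following sketches (assumptions modeled on that demo) would complete it:

static const double pi = 3.14159265358979323846;

static inline double square(int a)
{
	return a * a;
}

/* Create the image on first use; calls with an already-allocated pointer are no-ops. */
static void allocateOnDemand(IplImage** img, CvSize size, int depth, int channels)
{
	if (*img != NULL) return;
	*img = cvCreateImage(size, depth, channels);
	if (*img == NULL)
	{
		fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
		exit(-1);
	}
}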
Example #13
void MainWindow::OpencvOpticalFlow()
{
    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);

    IplImage* pre;
    IplImage* next;

    int corner_count;

    while(1)
    {
        // grab two consecutive frames
        pre=cvQueryFrame(pCapture);
        next=cvQueryFrame(pCapture);

        // reset each iteration: cvGoodFeaturesToTrack overwrites it with the found count
        corner_count=MAX_CORNERS;

        CvSize img_sz=cvGetSize(pre);
        IplImage* imgC=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
        cvZero(imgC);   // blank canvas for the flow vectors

        // convert both frames to grayscale
        IplImage* imgA=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
        IplImage* imgB=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
        cvCvtColor(pre,imgA,CV_BGR2GRAY);
        cvCvtColor(next,imgB,CV_BGR2GRAY);

        // buffers for the corner detector
        IplImage* eig_image=cvCreateImage(img_sz,IPL_DEPTH_32F,1);
        IplImage* tmp_image=cvCreateImage(img_sz,IPL_DEPTH_32F,1);

        CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];

        // find good features to track in the first frame
        cvGoodFeaturesToTrack(
             imgA,
             eig_image,
             tmp_image,
             cornersA,
             &corner_count,
             0.01,
             5.0,
             0,
             3,
             0,
             0.04
         );

        cvFindCornerSubPix(
            imgA,
            cornersA,
            corner_count,
            cvSize(win_size,win_size),
            cvSize(-1,-1),
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)
        );


        char features_found[ MAX_CORNERS ];
        float feature_errors[ MAX_CORNERS ];

        // pyramid buffers for the pyramidal LK step
        CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );

        IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
        CvPoint2D32f* cornersB  = new CvPoint2D32f[ MAX_CORNERS ];

        cvCalcOpticalFlowPyrLK(
             imgA,
             imgB,
             pyrA,
             pyrB,
             cornersA,
             cornersB,
             corner_count,
             cvSize( win_size,win_size ),
             5,
             features_found,
             feature_errors,
             cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
             0
          );

        for( int i=0; i<corner_count; i++ ) {
           if( features_found[i]==0|| feature_errors[i]>550 ) {
       //       printf("Error is %f/n",feature_errors[i]);
              continue;
           }
       //    printf("Got it/n");
           CvPoint p0 = cvPoint(
              cvRound( cornersA[i].x ),
              cvRound( cornersA[i].y )
           );

           CvPoint p1 = cvPoint(
              cvRound( cornersB[i].x ),
              cvRound( cornersB[i].y )
           );

           cvLine( imgC, p0, p1, CV_RGB(255,0,0),2 );
        }

        if(27==cvWaitKey(33))
            break;
        MainWindow::Display(imgA,imgB,imgC);

        // release the per-frame buffers to avoid leaking every iteration
        cvReleaseImage(&imgA);
        cvReleaseImage(&imgB);
        cvReleaseImage(&imgC);
        cvReleaseImage(&eig_image);
        cvReleaseImage(&tmp_image);
        cvReleaseImage(&pyrA);
        cvReleaseImage(&pyrB);
        delete[] cornersA;
        delete[] cornersB;
    }

}
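This method uses MAX_CORNERS and win_size without defining them; values matching Example #1 (an assumption) would be:

const int MAX_CORNERS = 500;  // cap on features returned by cvGoodFeaturesToTrack
const int win_size    = 10;   // window size for cvFindCornerSubPix and the LK search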
Example #14
void MainWindow::OpticalFlowDetect()
{
    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);

    int corner_count = 1000;

     CvTermCriteria criteria;
     criteria = cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 64, 0.01);

     IplImage *src_img1;
     IplImage *src_img2;

     IplImage *dst_img;
     IplImage *pre_img;
     IplImage *result;


     IplImage *eig_img;
     IplImage *temp_img;

     IplImage *prev_pyramid;
     IplImage *curr_pyramid;

     CvPoint2D32f *corners1;
     CvPoint2D32f *corners2;
     corners1 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f));
     corners2 = (CvPoint2D32f *) cvAlloc (corner_count * sizeof (CvPoint2D32f));

     char *status;
     status = (char *) cvAlloc (corner_count);

     while (1)
     {
      corner_count = 1000;  // reset: cvGoodFeaturesToTrack writes back the number found

      pre_img = cvQueryFrame(pCapture);

      CvSize img_sz = cvGetSize(pre_img);
      src_img1 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
      cvCvtColor(pre_img, src_img1, CV_RGB2GRAY);

      dst_img = cvQueryFrame(pCapture);
      src_img2 = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
      cvCvtColor(dst_img, src_img2, CV_RGB2GRAY);

      result=cvCreateImage(img_sz,IPL_DEPTH_8U,1);
      cvZero(result);

      eig_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1);
      temp_img = cvCreateImage (img_sz, IPL_DEPTH_32F, 1);


      prev_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1);
      curr_pyramid = cvCreateImage (cvSize (src_img1->width + 8, src_img1->height / 3), IPL_DEPTH_8U, 1);



      cvGoodFeaturesToTrack (src_img1, eig_img, temp_img, corners1, &corner_count, 0.001, 5, NULL);

      cvCalcOpticalFlowPyrLK (src_img1, src_img2, prev_pyramid, curr_pyramid,
       corners1, corners2, corner_count, cvSize (10, 10), 4, status, NULL, criteria, 0);

      for (int i = 0; i < corner_count; i++)
      {

          if (status[i])
              cvLine (dst_img, cvPointFrom32f (corners1[i]), cvPointFrom32f (corners2[i]), CV_RGB (255, 0, 0), 1, CV_AA, 0);
      }

      if(27==cvWaitKey(33))
          break;

    //  cvCvtScale(dst_img,result,1.0/255,0);
      MainWindow::Display(pre_img,src_img2,dst_img);

      // release per-frame buffers to avoid leaking every iteration
      cvReleaseImage(&src_img1);
      cvReleaseImage(&src_img2);
      cvReleaseImage(&result);
      cvReleaseImage(&eig_img);
      cvReleaseImage(&temp_img);
      cvReleaseImage(&prev_pyramid);
      cvReleaseImage(&curr_pyramid);

     }
}
Example #15
int main(int argc, char** argv)
{

	profile_name = (argc > 1 ? argv[1] : (char*)"blue_goal.yml");

	int cam = (argc > 2 ? atoi(argv[2]) : 0);

	// value loading
	fs = cvOpenFileStorage(profile_name, 0, CV_STORAGE_READ, NULL);
	Hmax = cvReadIntByName(fs, NULL, "Hmax", Hmax);
	Smax = cvReadIntByName(fs, NULL, "Smax", Smax);
	Vmax = cvReadIntByName(fs, NULL, "Vmax", Vmax);
	Hmin = cvReadIntByName(fs, NULL, "Hmin", Hmin);
	Smin = cvReadIntByName(fs, NULL, "Smin", Smin);
	Vmin = cvReadIntByName(fs, NULL, "Vmin", Vmin);
	minH = cvReadIntByName(fs, NULL, "minH", minH);


	cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("treshed", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("graph", CV_WINDOW_AUTOSIZE);


	cvCreateTrackbar("Hmin", "treshed", &Hmin, 360, onTrack);
	cvCreateTrackbar("Smin", "treshed", &Smin, 255, onTrack);
	cvCreateTrackbar("Vmin", "treshed", &Vmin, 255, onTrack);

	cvCreateTrackbar("Hmax", "treshed", &Hmax, 360, onTrack);
	cvCreateTrackbar("Smax", "treshed", &Smax, 255, onTrack);
	cvCreateTrackbar("Vmax", "treshed", &Vmax, 255, onTrack);
	cvCreateTrackbar("minH", "treshed", &minH, 255, onTrack);


	onTrack(0);

	CvCapture* camera = cvCaptureFromCAM(cam);


	while(1){
		img = cvQueryFrame(camera);

		allocateCvImage(&imgHSV, cvGetSize(img), 8, 3);
		cvCvtColor(img, imgHSV, CV_BGR2HSV);

		allocateCvImage(&imgThreshed, cvGetSize(img), 8, 1);
		cvInRangeS(imgHSV, cvScalar(Hmin, Smin, Vmin, 0), cvScalar(Hmax,
			Smax, Vmax, 0), imgThreshed);

		cvErode(imgThreshed, imgThreshed, 0, 2);


		int width = imgThreshed->width;
		int height = imgThreshed->height;
		int nchannels = imgThreshed->nChannels;
		int step = imgThreshed->widthStep;

		uchar* data = (uchar *)imgThreshed->imageData;
		unsigned int graph[width];

		int x,y;
		for(x = 0; x < width ; x++)
			graph[x] = 0;

		int sum = 0, notnull = 0;
		for(x = 0; x < width; x++){
			for( y = 0 ; y < height ; y++ ) {
				if(data[y*step + x*nchannels] == 255){
					graph[x]++;
				}
			}
			sum += graph[x];
			if(graph[x] != 0)
				notnull += 1;

	//		printf("%d\t%d\n", x, graph[x]);
		}
		if(notnull == 0)
			notnull = 1;
		int average = sum/notnull;
		if(average == 0)
			average = 1;
		float pix = 12.0/average;

		printf("\n sum: %d average: %d\n",sum,average);


		int first = 0, last = 0;
		// looking for goal
		for(x = 1; x < width - 1; x++){	// start at 1 and stop at width-1 so graph[x-1] and graph[x+1] stay in bounds
			if(graph[x] >= average && graph[x-1] < average){
				cvLine(img, cvPoint(x, 0), cvPoint(x, height),
					cvScalar(255, 255, 0, 0), 1, 0, 0);
				if(first == 0)
					first = x;
			}
			if(graph[x] >= average && graph[x+1] < average){
				cvLine(img, cvPoint(x, 0), cvPoint(x, height),
					cvScalar(255, 255, 0, 0), 1, 0, 0);
				last = x;
			}
		}

		float goal = pix*(last-first);
		float error = (goal-60.0)/60.0*100.0;

		printf("Pix: %f; Goal in cm: %f; Error: %f%\n", pix, goal, error);

		// image center
		cvLine(img, cvPoint(width/2, 0), cvPoint(width/2, height),
			cvScalar(0, 255, 255, 0), 1, 0, 0);

		int gCenter = (last+first) / 2;

		// X
		float X = ((width/2) - gCenter)*pix;

		printf("X: %f +- %f\n",X, abs(error)/100.0*X);

		// goal center
		cvLine(img,cvPoint(gCenter, 0),cvPoint(gCenter, height),
			cvScalar(0, 0, 255, 0), 1, 0, 0);



		cvShowImage("img", img);
		cvShowImage("treshed", imgThreshed);
		//cvShowImage("graph", imgGraph);

		cvWaitKey(10);
	}



	return 0;
}
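One caveat about the HSV thresholds above: the trackbars allow hue values up to 360, but OpenCV's 8-bit HSV images store hue halved, in the range 0-179 (S and V span 0-255). A colour-wheel angle therefore needs dividing by two before it matches cvInRangeS; a one-line sketch with a hypothetical angle:

int degrees = 240;          // hypothetical: a blue on the 0-360 colour wheel
int cvHue   = degrees / 2;  // 120 on OpenCV's 8-bit hue scale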
Esempio n. 16
0
/* remove the upper and lower borders of the plate */
void remove_border_ul(IplImage * img_plate)
{
	int i = 0, j = 0;
	/* row indices of the upper and lower borders */
	int up_bound = -1, low_bound = -1;
	int white_to_black = 0;
	int black_to_white = 0;
	/* scan i from 0 to half the image height, projecting each row to find the upper border */
	//cvNamedWindow("img_plate", 1);
	//cvShowImage("img_plate", img_plate);
	//cvWaitKey(0);
	for (i = 0; i < (img_plate->height) / 2; i = i + 3) {

		unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
		white_to_black = 0;
		black_to_white = 0;

		/* count the black-to-white and white-to-black transitions in this row */
		for (j = 0; j + 3 < img_plate->width; j = j + 3) {	/* j+3 bound keeps prow[j + 3] in range */
			if (prow[j] == 0 && prow[j + 3] == 255) {
				black_to_white++;
			} else if (prow[j] == 255 && prow[j + 3] == 0) {
				white_to_black++;
			}
		}

		/* a threshold of 6 puts fairly high demands on image sharpness */
		if (black_to_white >= 6 && white_to_black >= 6 && up_bound < 0) {
			up_bound = i;
		} else if (black_to_white < 6 && white_to_black < 6 && up_bound > 0) {
//			printf("black_to_white : %d white_to_black: %d , up_bound %d\n",black_to_white, white_to_black, up_bound);
			up_bound = -1;
		}
	}

	/* scan i from the bottom edge up to half the image height */
	for (i = img_plate->height - 1; i > (img_plate->height) / 2; i = i - 3) {
		unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
		white_to_black = 0;
		black_to_white = 0;

		/* count the black-to-white and white-to-black transitions in this row */
		for (j = 0; j + 3 < img_plate->width; j = j + 3) {	/* j+3 bound keeps prow[j + 3] in range */
			if (prow[j] == 0 && prow[j + 3] == 255) {
				black_to_white++;
			} else if (prow[j] == 255 && prow[j + 3] == 0) {
				white_to_black++;
			}
		}

		if (black_to_white >= 6 && white_to_black >= 6 && low_bound < 0) {
			low_bound = i;
		} else if (black_to_white < 6 && white_to_black < 6 && low_bound > 0) {
			low_bound = -1;
		}
	
		//printf("%d\n", low_bound);
	}
	
#if 0
	cvNamedWindow("img", 1);
	printf("up_bound is %d, low_bound is %d\n", up_bound, low_bound);

	/* draw the detected boundary lines */
	/*	void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 );*/
	cvLine(img_plate, cvPoint(0, up_bound), cvPoint(img_plate->width - 3, up_bound), CV_RGB(0xbF, 0xfd, 0xba), 3, 8, 0);
	cvLine(img_plate, cvPoint(0, low_bound), cvPoint(img_plate->width - 3, low_bound), CV_RGB(0xbF, 0xfd, 0xba), 3, 8, 0);
	cvShowImage("img", img_plate);
	cvWaitKey(0);
#endif
	/* this step is error-prone! */

//printf("%d %d %d %d", 0, up_bound, img_plate->width - 2, low_bound - up_bound - 2);
//	printf("low_bound:%d up_bound:%d\n", low_bound, up_bound);

	assert(low_bound >= 0 && up_bound >= 0);
	cvSetImageROI(img_plate, cvRect(0, up_bound, img_plate->width - 2, low_bound - up_bound - 2));		/* the -2 keeps the ROI inside the image */
	IplImage * tmp_img = cvCreateImage(cvSize(img_plate->width - 2, low_bound - up_bound - 2), img_plate->depth, img_plate->nChannels);
	cvCopy(img_plate, tmp_img);
	cvSaveImage("image/img_after_border_removed.bmp", tmp_img);
	cvResetImageROI(img_plate);
//	printf("setROI in remove bound success\n");
}
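A minimal usage sketch for the routine above, assuming the plate crop has already been produced (the file name is hypothetical) and that it is binarized before the projection scan:

// Hypothetical driver: binarize a cropped plate image, then trim its borders.
IplImage *plate = cvLoadImage("image/plate.bmp", CV_LOAD_IMAGE_GRAYSCALE);
IplImage *bin   = cvCreateImage(cvGetSize(plate), IPL_DEPTH_8U, 1);
cvThreshold(plate, bin, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
remove_border_ul(bin);      /* writes image/img_after_border_removed.bmp */
cvReleaseImage(&bin);
cvReleaseImage(&plate);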
Esempio n. 17
0
//============================================================================
void AAM_TDM::SaveSeriesTemplate(const CvMat* AllTextures, const AAM_PAW& m_warp)
{
	LOGD("Saving the face template image...\n");
	AAM_Common::MkDir("registration");
	AAM_Common::MkDir("Modes");
	AAM_Common::MkDir("Tri");
	char filename[100];
	
	int i;
	for(i = 0; i < AllTextures->rows; i++)
	{
		CvMat oneTexture;
		cvGetRow(AllTextures, &oneTexture, i);
		sprintf(filename, "registration/%d.jpg", i);
		m_warp.SaveWarpTextureToImage(filename, &oneTexture);
	}
	
	for(int nmodes = 0; nmodes < nModes(); nmodes++)
	{
		CvMat oneVar;
		cvGetRow(__TextureEigenVectors, &oneVar, nmodes);
	
		sprintf(filename, "Modes/A%03d.jpg", nmodes+1);
		m_warp.SaveWarpTextureToImage(filename, &oneVar);
	}
	
	IplImage* templateimg = cvCreateImage
		(cvSize(m_warp.Width(), m_warp.Height()), IPL_DEPTH_8U, 3);
	IplImage* convexImage = cvCreateImage
		(cvSize(m_warp.Width(), m_warp.Height()), IPL_DEPTH_8U, 3);
	IplImage* TriImage = cvCreateImage
		(cvSize(m_warp.Width(), m_warp.Height()), IPL_DEPTH_8U, 3);

	m_warp.SaveWarpTextureToImage("Modes/Template.jpg", __MeanTexture);
	m_warp.TextureToImage(templateimg, __MeanTexture);

	cvSetZero(convexImage);
	for(i = 0; i < m_warp.nTri(); i++)
	{
		CvPoint p, q;
		int ind1, ind2;

		cvCopy(templateimg, TriImage);

		ind1 = m_warp.Tri(i, 0); ind2 = m_warp.Tri(i, 1);
		p = cvPointFrom32f(m_warp.Vertex(ind1));
		q = cvPointFrom32f(m_warp.Vertex(ind2));
		cvLine(TriImage, p, q, CV_RGB(255, 255, 255));
		cvLine(convexImage, p, q, CV_RGB(255, 255, 255));

		ind1 = m_warp.Tri(i, 1); ind2 = m_warp.Tri(i, 2);
		p = cvPointFrom32f(m_warp.Vertex(ind1));
		q = cvPointFrom32f(m_warp.Vertex(ind2));
		cvLine(TriImage, p, q, CV_RGB(255, 255, 255));
		cvLine(convexImage, p, q, CV_RGB(255, 255, 255));

		ind1 = m_warp.Tri(i, 2); ind2 = m_warp.Tri(i, 0);
		p = cvPointFrom32f(m_warp.Vertex(ind1));
		q = cvPointFrom32f(m_warp.Vertex(ind2));
		cvLine(TriImage, p, q, CV_RGB(255, 255, 255));
		cvLine(convexImage, p, q, CV_RGB(255, 255, 255));

		sprintf(filename, "Tri/%03i.jpg", i+1);
		cvSaveImage(filename, TriImage);
	}
	cvSaveImage("Tri/convex.jpg", convexImage);
	
	cvReleaseImage(&templateimg);
	cvReleaseImage(&convexImage);
	cvReleaseImage(&TriImage);
}
Esempio n. 18
0
void process_image()
{
  int *inliers_index;
  CvSize ellipse_axis;
  CvPoint gaze_point = cvPoint(0, 0);   /* initialized because it is logged even when mapping is off */
  static int lost_frame_num = 0;
  Grab_Camera_Frames();
  cvZero(ellipse_image);

  cvSmooth(eye_image, eye_image, CV_GAUSSIAN, 5, 5);
  Reduce_Line_Noise(eye_image);  
  
  if (save_image == 1) {
    printf("save image %d\n", image_no);
    sprintf(eye_file, "./Eye/Eye_%05d.jpg", image_no);
    image_no++;
    cvSaveImage(eye_file, eye_image);
  }
  
  //corneal reflection
  remove_corneal_reflection(eye_image, threshold_image, (int)start_point.x, (int)start_point.y, cr_window_size, 
                   (int)eye_image->height/10, corneal_reflection.x, corneal_reflection.y, corneal_reflection_r);  
  printf("corneal reflection: (%d, %d)\n", corneal_reflection.x, corneal_reflection.y);
  Draw_Cross(ellipse_image, corneal_reflection.x, corneal_reflection.y, 15, 15, Yellow);  

  //starburst pupil contour detection
  starburst_pupil_contour_detection((UINT8*)eye_image->imageData, eye_image->width, eye_image->height,
                                edge_threshold, rays, min_feature_candidates);
  
  inliers_num = 0;
  inliers_index = pupil_fitting_inliers((UINT8*)eye_image->imageData, eye_image->width, eye_image->height, inliers_num);
  ellipse_axis.width = (int)pupil_param[0];
  ellipse_axis.height = (int)pupil_param[1];
  pupil.x = (int)pupil_param[2];
  pupil.y = (int)pupil_param[3];
  Draw_Cross(ellipse_image, pupil.x, pupil.y, 15, 15, Red);
  cvLine(eye_image, pupil, corneal_reflection, Red, 4, 8);
  cvLine(ellipse_image, pupil, corneal_reflection, Red, 4, 8);
  
  printf("ellipse a:%lf; b:%lf, cx:%lf, cy:%lf, theta:%lf; inliers_num:%d\n\n", 
         pupil_param[0], pupil_param[1], pupil_param[2], pupil_param[3], pupil_param[4], inliers_num);

  bool is_inliers = false;
  for (int i = 0; i < (int)edge_point.size(); i++) {
    is_inliers = false;
    for (int j = 0; j < inliers_num; j++) {
      if (i == inliers_index[j])
        is_inliers = true;
    }
    stuDPoint *edge = edge_point.at(i);
    if (is_inliers)
      Draw_Cross(ellipse_image, (int)edge->x,(int)edge->y, 5, 5, Green);
    else
      Draw_Cross(ellipse_image, (int)edge->x,(int)edge->y, 3, 3, Yellow);
  }
  free(inliers_index);

  if (ellipse_axis.width > 0 && ellipse_axis.height > 0) {
    start_point.x = pupil.x;
    start_point.y = pupil.y;
    //printf("start_point: %d,%d\n", start_point.x, start_point.y);
    Draw_Cross(eye_image, pupil.x, pupil.y, 10, 10, Green);
    cvEllipse(eye_image, pupil, ellipse_axis, -pupil_param[4]*180/PI, 0, 360, Red, 2);
    cvEllipse(ellipse_image, pupil, ellipse_axis, -pupil_param[4]*180/PI, 0, 360, Green, 2);

    diff_vector.x = pupil.x - corneal_reflection.x;
    diff_vector.y = pupil.y - corneal_reflection.y;
    if (do_map2scene) {
      gaze_point = homography_map_point(diff_vector);
      printf("gaze_point: (%d,%d)\n", gaze_point.x, gaze_point.y);  
      Draw_Cross(scene_image, gaze_point.x, gaze_point.y, 60, 60, Red);
    }
    lost_frame_num = 0;    
  } else {
    lost_frame_num++;
  }
  if (lost_frame_num > 5) {
    start_point.x = FRAMEW/2;
    start_point.y = FRAMEH/2;
  }
  Draw_Cross(ellipse_image, (int)start_point.x, (int)start_point.y, 7, 7, Blue);
  Draw_Cross(eye_image, (int)start_point.x, (int)start_point.y, 7, 7, Blue);

  if (save_ellipse == 1) {
    printf("save ellipse %d\n", ellipse_no);
    sprintf(ellipse_file, "./Ellipse/Ellipse_%05d.jpg", ellipse_no);
    ellipse_no++;
    cvSaveImage(ellipse_file, ellipse_image);
    fprintf(ellipse_log, "%.3f\t %8.2lf %8.2lf %8.2lf %8.2lf %8.2lf\n",
            Time_Elapsed(), pupil_param[0], pupil_param[1], pupil_param[2], pupil_param[3], pupil_param[4]);
  }
  
  printf("Time elapsed: %.3f\n", Time_Elapsed()); 
  fprintf(logfile,"%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
					Time_Elapsed(),
					pupil.x,    
					pupil.y,    
					corneal_reflection.x,    
					corneal_reflection.y,
					diff_vector.x,    
					diff_vector.y,    
					gaze_point.x,    
					gaze_point.y);    

  if (view_cal_points) Show_Calibration_Points();
}
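The homography_map_point call above maps the pupil-glint difference vector into scene coordinates. A minimal sketch of such a mapping with the old C API, assuming H is a calibrated 3x3 CV_32FC1 matrix (this helper is illustrative, not the project's actual implementation):

// Illustrative: map one 2-D point through a 3x3 homography H.
CvPoint map_through_homography(CvPoint p, const CvMat *H)
{
    float in[2]  = { (float)p.x, (float)p.y };
    float out[2];
    CvMat src = cvMat(1, 1, CV_32FC2, in);     // one 2-channel point
    CvMat dst = cvMat(1, 1, CV_32FC2, out);
    cvPerspectiveTransform(&src, &dst, H);     // homogeneous divide included
    return cvPoint(cvRound(out[0]), cvRound(out[1]));
}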
Esempio n. 19
0
// Draw the graph of an array of floats into imageDst or a new image, between minV & maxV if given.
// Remember to free the newly created image if imageDst is not given.
IplImage* drawFloatGraph(const float *arraySrc, int nArrayLength, IplImage *imageDst, float minV, float maxV, int width, int height, char *graphLabel, bool showScale)
{
	int w = width;
	int h = height;
	int b = 10;		// border around graph within the image
	if (w <= 20)
		w = nArrayLength + b*2;	// width of the image
	if (h <= 20)
		h = 220;

	int s = h - b*2;// size of graph height
	float xscale = 1.0;
	if (nArrayLength > 1)
		xscale = (w - b*2) / (float)(nArrayLength-1);	// horizontal scale
	IplImage *imageGraph;	// output image

	// Get the desired image to draw into.
	if (!imageDst) {
		// Create an RGB image for graphing the data
		imageGraph = cvCreateImage(cvSize(w,h), 8, 3);

		// Clear the image
		cvSet(imageGraph, WHITE);
	}
	else {
		// Draw onto the given image.
		imageGraph = imageDst;
	}
	if (!imageGraph) {
		std::cerr << "ERROR in drawFloatGraph(): Couldn't create image of " << w << " x " << h << std::endl;
		exit(1);
	}
	CvScalar colorGraph = getGraphColor();	// use a different color each time.

	// If the user didn't supply min & max values, find them from the data, so we can draw it at full scale.
	if (fabs(minV) < 0.0000001f && fabs(maxV) < 0.0000001f) {
		for (int i=0; i<nArrayLength; i++) {
			float v = (float)arraySrc[i];
			if (v < minV)
				minV = v;
			if (v > maxV)
				maxV = v;
		}
	}
	float diffV = maxV - minV;
	if (diffV == 0)
		diffV = 0.00000001f;	// Stop a divide-by-zero error
	float fscale = (float)s / diffV;

	// Draw the horizontal & vertical axis
	int y0 = cvRound(minV*fscale);
	cvLine(imageGraph, cvPoint(b,h-(b-y0)), cvPoint(w-b, h-(b-y0)), BLACK);
	cvLine(imageGraph, cvPoint(b,h-(b)), cvPoint(b, h-(b+s)), BLACK);

	// Write the scale of the y axis
	CvFont font;
	cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.55,0.7, 0,1,CV_AA);	// For OpenCV 1.1
	if (showScale) {
		//cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.5,0.6, 0,1, CV_AA);	// For OpenCV 2.0
		CvScalar clr = GREY;
		char text[16];
		sprintf_s(text, sizeof(text)-1, "%.1f", maxV);
		cvPutText(imageGraph, text, cvPoint(1, b+4), &font, clr);
		// Write the scale of the x axis
		sprintf_s(text, sizeof(text)-1, "%d", (nArrayLength-1) );
		cvPutText(imageGraph, text, cvPoint(w-b+4-5*strlen(text), (h/2)+10), &font, clr);
	}

	// Draw the values
	CvPoint ptPrev = cvPoint(b,h-(b-y0));	// Start the lines at the 1st point.
	for (int i=0; i<nArrayLength; i++) {
		int y = cvRound((arraySrc[i] - minV) * fscale);	// Get the values at a bigger scale
		int x = cvRound(i * xscale);
		CvPoint ptNew = cvPoint(b+x, h-(b+y));
		cvLine(imageGraph, ptPrev, ptNew, colorGraph, 1, CV_AA);	// Draw a line from the previous point to the new point
		ptPrev = ptNew;
	}

	// Write the graph label, if desired
	if (graphLabel != NULL && strlen(graphLabel) > 0) {
		//cvInitFont(&font,CV_FONT_HERSHEY_PLAIN, 0.5,0.7, 0,1,CV_AA);
		cvPutText(imageGraph, graphLabel, cvPoint(30, 10), &font, CV_RGB(0,0,0));	// black text
	}

	return imageGraph;
}
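A minimal usage sketch, assuming this graphing utility (with its WHITE/GREY colours and getGraphColor()) is compiled in; passing 0 for min/max triggers the auto-scaling branch above:

// Hypothetical driver: plot one period of a sine wave and display it.
float data[100];
for (int i = 0; i < 100; i++)
    data[i] = sinf(i * 2.0f * (float)CV_PI / 99.0f);

IplImage *graph = drawFloatGraph(data, 100, NULL, 0, 0, 0, 0,
                                 (char*)"sine", true);
cvNamedWindow("graph", CV_WINDOW_AUTOSIZE);
cvShowImage("graph", graph);
cvWaitKey(0);
cvReleaseImage(&graph);   // caller owns it because imageDst was NULL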
Esempio n. 20
0
void MaizeDetector::processLaserScan(
	const sensor_msgs::LaserScan::ConstPtr& laser_scan) {
	float rthetamean = 0, rrhomean = 0, lthetamean = 0, lrhomean = 0, theta = 0, rho = 0;
	double x0 = 0, y0 = 0, a, b;
	int lmc = 0, rmc = 0;

	static int right_count = 0;
	static int left_count = 0;

	clearRawImage();

	tf::TransformListener listener_;
	sensor_msgs::PointCloud cloud;


    try
    {
    	projector_.projectLaser(*laser_scan, cloud);
       // projector_.transformLaserScanToPointCloud("base_link",*laser_scan,cloud,listener_);
    }
    catch (tf::TransformException& e)
    {
        std::cout << e.what();
        return;
    }


	//ROS_INFO("Got it right");
	int size = cloud.points.size();

	for (int i = 0; i < size; i++) {
		//if (((abs(cloud.points[i].y))-((cloud.points[i].x-300)*0.5))<0.65) {
		if (fabs(cloud.points[i].y) < 0.65 && cloud.points[i].x > 0) {	// fabs: integer abs() would truncate the float


			point1.x = ((int)(cloud.points[i].x * 50) + 300);
			point1.y = ((int)(cloud.points[i].y * 50) + 300);
			point2.x = point1.x-4;
			point2.y = point1.y;
			cvLine(rawData_, point1, point2, CV_RGB(255,255,255), 3, CV_AA, 0);

		}
	}

	cvCvtColor(rawData_, workingImg_, CV_BGR2GRAY);
	cvDilate(rawData_,rawData_,NULL,6);
	cvErode(rawData_,rawData_,NULL,4);


	lines_ = cvHoughLines2(workingImg_, storage_, CV_HOUGH_STANDARD, 1, CV_PI/ 180, 60, 0, 0);
	//lines_ = cvHoughLines2(workingImg_, storage_, CV_HOUGH_PROBABILISTIC, 1, CV_PI/360, 30, 10, 30);


	for(int i = 0; i < MIN(lines_->total,20); i++){
	//for (int i = 0; i < MIN(lines_->total,15); i++) {

		float* line = (float*) cvGetSeqElem(lines_, i);
		rho = line[0];
		theta = line[1];

		a = cos(theta);
		b = sin(theta);
		x0 = a * rho;
		y0 = b * rho;
		point1.x = cvRound(x0 + 600 * (-b));
		point1.y = cvRound(y0 + 600 * (a));
		point2.x = cvRound(x0 - 600 * (-b));
		point2.y = cvRound(y0 - 600 * (a));
		point3.x = 300, point3.y = 300;
		point4.x = 300, point4.y = 600;
		point5.x = 300, point5.y = 0;

		//cvLine(rawData_, point1, point2, CV_RGB(255,0,0), 1, CV_AA,0);

		cvLine(rawData_, point3, point4, CV_RGB(0,0,255), 1, CV_AA, 0);
		cvLine(rawData_, point3, point5, CV_RGB(0,0,255), 1, CV_AA, 0);

		if (intersect(point1, point2, point3, point4)) {
			{
				rrhomean += rho;
				rthetamean += theta;
				rmc++;
				//cvLine(workingImg_, point1, point2, CV_RGB(0,0,255), 1, CV_AA,0);
			}
		} else if (intersect(point1, point2, point3, point5)) {
			{
				lrhomean += rho;
				lthetamean += theta;
				lmc++;
				//cvLine(workingImg_, point1, point2, CV_RGB(255,255,255), 1,CV_AA, 0);
			}
		}
	}
	theta = lmc ? lthetamean / lmc : 0;	// guard against division by zero when no lines matched
	rho = lmc ? lrhomean / lmc : 0;

	a = cos(theta);
	b = sin(theta);
	x0 = a * rho;
	y0 = b * rho;
	point1.x = cvRound(x0 + 600 * (-b)), point1.y = cvRound(y0 + 600 * (a));
	point2.x = cvRound(x0 - 600 * (-b)), point2.y = cvRound(y0 - 600 * (a));

	//cvLine(rawData_, point1, point2, CV_RGB(255,0,0), 3, CV_AA, 0);

	point4.x = 300;
	point4.y = 300;

	point5.x = point4.x + 800 * sin(CV_PI - (theta - CV_PI / 2));
	point5.y = point4.y + 800 * cos(CV_PI - (theta - CV_PI / 2));
	cvLine(rawData_, point5, point4, CV_RGB(255,255,255), 1, CV_AA, 0);

	rows_.header.stamp = ros::Time::now();
	rows_.leftvalid = false;
	rows_.rightvalid = false;


	//detect valid lines
	if (intersect(point1, point2, point4, point5)) {

		right_distance_ = sqrt(((intersection_.y - 300) * (intersection_.y- 300)) + ((intersection_.x - 300) * (intersection_.x - 300)))* 2;
		right_angle_ = (theta) - CV_PI / 2;
		right_count++;

		right_angle_rolling_mean_[right_count%10] = right_angle_;
		right_dist_rolling_mean_[right_count%10] = right_distance_;
		right_angle_ = 0;
		right_distance_ = 0;

		for(int i=0;i < 10;i++){
			right_angle_ += right_angle_rolling_mean_[i];
			right_distance_ += right_dist_rolling_mean_[i];
		}

		right_angle_ = right_angle_/10;
		right_distance_ = right_distance_ /10;

		ROS_WARN("right_distance_: %f",right_distance_);
		cvLine(rawData_, point1, point2, CV_RGB(0,255,0), 1, CV_AA, 0);


		marker_r.header.frame_id = "/laser_front_link";
		marker_r.header.stamp = ros::Time::now();

		marker_r.ns = "kf_shapes";


		// Set the marker type.  Initially this is CUBE, and cycles between that and SPHERE, ARROW, and CYLINDER
		marker_r.type = visualization_msgs::Marker::CUBE;
		// Set the marker action.  Options are ADD and DELETE
		marker_r.action = visualization_msgs::Marker::ADD;

		// Set the pose of the marker.  This is a full 6DOF pose relative to the frame/time specified in the header
		marker_r.id = 2;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(right_angle_);
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = -((float) right_distance_) / 100;
		marker_r.pose.position.z = 0;
		marker_r.pose.orientation = pose_quat;

		// Set the scale of the marker -- 1x1x1 here means 1m on a side
		marker_r.scale.x = 10.0;
		marker_r.scale.y = 0.1;
		marker_r.scale.z = 0.5;
		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 0.0f;
		marker_r.color.a = 0.5;
		marker_r.lifetime = ros::Duration(.1);
		marker_r_pub.publish(marker_r);

		// Set the color -- be sure to set alpha to something non-zero!

		// Publish the marker

		rows_.rightvalid = true;
		rows_.rightdistance = ((float) right_distance_) / 100;
		rows_.rightangle = right_angle_;

	}else{
		rows_.rightvalid = false;
		rows_.rightdistance = 0;
		rows_.rightangle = 0;

	}


	theta = rmc ? rthetamean / rmc : 0;	// guard against division by zero when no lines matched
	rho = rmc ? rrhomean / rmc : 0;

	a = cos(theta);
	b = sin(theta);
	x0 = a * rho;
	y0 = b * rho;
	point1.x = cvRound(x0 + 600 * (-b)), point1.y = cvRound(y0 + 600 * (a));
	point2.x = cvRound(x0 - 600 * (-b)), point2.y = cvRound(y0 - 600 * (a));

	point4.x = 300;
	point4.y = 300;
	point5.x = point4.x - 800 * sin(CV_PI - (theta - CV_PI / 2));
	point5.y = point4.y - 800 * cos(CV_PI - (theta - CV_PI / 2));

	cvLine(rawData_, point5, point4, CV_RGB(255,255,255), 1, CV_AA, 0);

	//detect valid lines
	if (intersect(point1, point2, point4, point5)) {

		left_distance_ = sqrt(((intersection_.y - 300) * (intersection_.y- 300)) + ((intersection_.x - 300) * (intersection_.x - 300)))* 2;
		left_angle_ = (theta) - CV_PI / 2;
		left_count++;

		left_angle_rolling_mean_[left_count%10] = left_angle_;
		left_dist_rolling_mean_[left_count%10] = left_distance_;
		left_angle_ = 0;
		left_distance_ = 0;

		for(int i=0;i < 10;i++){
			left_angle_ += left_angle_rolling_mean_[i];
			left_distance_ += left_dist_rolling_mean_[i];
		}

		left_angle_ = left_angle_/10;
		left_distance_ = left_distance_ /10;


		ROS_WARN("left_distance_: %f",left_distance_);

		marker_r.id = 1;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(left_angle_);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 0.0f;
		marker_r.color.b = 1.0f;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = ((float) left_distance_) / 100;
		marker_r.pose.position.z = 0;
		marker_r.pose.orientation = pose_quat;
		marker_r_pub.publish(marker_r);

		//rolling_mean_[count%10] = right_angle_;

		//left_angle_ = 0;

		//for(int i=0;i < 10;i++){
		//	left_angle_ += rolling_mean_[i];
		//}

		//right_angle_ = right_angle_/10;

		cvLine(rawData_, point1, point2, CV_RGB(0,255,255), 1, CV_AA, 0);

		rows_.leftvalid = true;
		rows_.leftdistance = ((float) left_distance_) / 100;
		rows_.leftangle = left_angle_;

	}else{
		rows_.leftvalid = false;
		rows_.leftdistance = 0;
		rows_.leftangle = 0;
	}

	if (rows_.leftvalid && rows_.rightvalid){
		e_angle = (rows_.leftangle + rows_.rightangle) / 2;
		e_distance = (rows_.leftdistance - rows_.rightdistance);

		row_dist_est =( (rows_.leftdistance + rows_.rightdistance) + row_dist_est)/2;

		ROS_INFO("2ROWS row_dist_est %f, e_dist %f",row_dist_est,e_distance);

		rows_.error_angle = e_angle;
		rows_.error_distance = e_distance;
		rows_.var = 25;	// 5 squared; '^' is XOR in C++, so 5^2 evaluated to 7

		marker_r.id = 3;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}else if (rows_.leftvalid && !rows_.rightvalid){
		e_angle = (rows_.leftangle);
		e_distance = 0;//row_dist_est/2-(rows_.leftdistance);

		//ROS_INFO("e_angle %f, e_dist %f",e_angle,e_distance);

		rows_.error_angle = e_angle;
		rows_.error_distance = e_distance;//e_distance-(0.75/2);
		rows_.var = 100;	// 10 squared; '^' is XOR in C++

		marker_r.id = 3;

		ROS_INFO("LEFTROW row_dist_est %f, e_dist %f",row_dist_est,e_distance);


		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}else if (!rows_.leftvalid && rows_.rightvalid){


		e_angle = (rows_.rightangle);
		e_distance = 0;//row_dist_est/2-(rows_.rightdistance);

		//ROS_INFO("e_angle %f, e_dist %f",e_angle,e_distance);
		ROS_INFO("LRIGHTROW row_dist_est %f, e_dist %f",row_dist_est,e_distance);


		rows_.error_angle = (0.75/2)-e_angle;
		rows_.error_distance = e_distance;//e_distance-(0.75/2);
		rows_.var = 100;	// 10 squared; '^' is XOR in C++

		marker_r.id = 3;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}else{
		e_angle = 0;
		e_distance = 0;

		ROS_INFO("e_angle %f, e_dist %f",e_angle,e_distance);
		ROS_INFO("NOROW row_dist_est %f, e_dist %f",row_dist_est,e_distance);

		rows_.error_angle = e_angle;
		rows_.error_distance = e_distance;
		rows_.var = 16000000;	// 4000 squared; '^' is XOR in C++

		marker_r.id = 3;
		geometry_msgs::Quaternion pose_quat = tf::createQuaternionMsgFromYaw(e_angle);

		marker_r.color.r = 0.0f;
		marker_r.color.g = 1.0f;
		marker_r.color.b = 1.0f;

		marker_r.pose.orientation = pose_quat;
		marker_r.pose.position.x = 0;
		marker_r.pose.position.y = (e_distance);
		marker_r.pose.position.z = 0;
		marker_r_pub.publish(marker_r);
	}




	//cvLine(rawData_, cvPoint(0,300+150), cvPoint(600,300+150), CV_RGB(255,255,255), 1, CV_AA, 0);
	//cvLine(rawData_, cvPoint(0,300-150), cvPoint(600,300-150), CV_RGB(255,255,255), 1, CV_AA, 0);
	row_pub.publish(rows_);

	//cvShowImage("TEST", rawData_);
	//cvWaitKey(10);
	//pc_pub.publish(cloud);

}
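Both branches above smooth the row angle and distance with a ten-slot circular buffer divided by a fixed 10. Extracted as a generic pattern it looks like this (an illustrative sketch, not part of the node; note the mean is biased toward zero until ten samples have arrived):

// Illustrative ten-slot rolling mean, mirroring the arrays used above.
struct RollingMean {
    double buf[10];
    int    n;
    RollingMean() : n(0) { for (int i = 0; i < 10; i++) buf[i] = 0; }
    double add(double v) {
        buf[n++ % 10] = v;                 // overwrite the oldest slot
        double s = 0;
        for (int i = 0; i < 10; i++) s += buf[i];
        return s / 10.0;
    }
};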
Esempio n. 21
0
int testfaceLib_sThread ( const char* str_video, int  trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster)
{
	int  faceimgID = 0;
	char driver[8];
	char dir[1024];
	char fname[1024];
	char ext[8];
	char sImgPath[1024];

	if(sfolder)
	{
		char sysCommand[128];
		sprintf (sysCommand, "mkdir %s", sfolder);
		system (sysCommand);

		sprintf(sImgPath, "%s//%s", sfolder,  "imaginfo.txt");
		sprintf(fname,   "%s//%s", sfolder,  "faceinfo.txt");
	}
	else
	{
		sprintf(sImgPath, "%s", "imaginfo.txt");
		sprintf(fname,   "%s", "faceinfo.txt");
	}

	FILE* fp_imaginfo = fopen( sImgPath, "wt" );
    FILE* fp_faceinfo = fopen( fname, "wt" );

    bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256]="";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);
	
	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}
	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );

	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType == TRA_HAAR ? "haar" : (trackerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// config face tracker
	const int  face_max = 16;
	CvRectItem rects[face_max];
	
	tagDetectConfig configParam;
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;

	CxlibFaceDetector detector;
	detector.init(viewAngle, (EnumFeaType)trackerType);
	detector.config( configParam );

	CxlibFaceTracker tracker;
	tracker.init(viewAngle, (EnumTrackerType)trackerType);
	tracker.config( configParam, TR_NLEVEL_3 );

	if( cvGetErrStatus() < 0 )
	{
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// config landmark detector
	CvPoint2D32f   landmark6[6+1]; // consider both 6-pt and 7-pt
	float          parameters[16];
	bool      bLandmark = false;
	CxlibLandmarkDetector landmarkDetector(LDM_6PT);

	int size_smallface = 64;
	int size_bigface   = 128;
	CxlibAlignFace cutFace(size_smallface, size_bigface);
	
	// config blink/smile/gender detector
	int    bBlink = 0, bSmile = 0, bGender = 0, bAge = 0;  //+1, -1, otherwise 0: no process 
	float  probBlink = 0, probSmile = 0, probGender = 0, probAge[4];
	int    nAgeID = 0;

	CxlibBlinkDetector  blinkDetector(size_smallface);
	CxlibSmileDetector  smileDetector(size_smallface);
	CxlibGenderDetector genderDetector(size_smallface);
	CxlibAgeDetector    ageDetector(size_bigface);

	// config face recognizer
	float probFaceID = 0;
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	CxlibFaceRecognizer faceRecognizer( size_bigface, recognizerType );
	if(recog) faceRecognizer.loadFaceModelXML(str_facesetxml);
	
	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play = true;
	mouse_faceparam.ret_online_collecting = 0;
		
	if(! quiet)
	{
		mouse_faceparam.face_num  = face_max;
		mouse_faceparam.rects     = rects;
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= cutFace.getBigCutFace();
		mouse_faceparam.typeRecognizer = 1;
		mouse_faceparam.faceRecognizer = &faceRecognizer;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks, landmark_total_ticks, align_total_ticks,
		   blink_total_ticks, smile_total_ticks, gender_total_ticks, age_total_ticks, recg_total_ticks;
	double frame_fps, tracker_fps, landmark_fps, align_fps, blink_fps, smile_fps, gender_fps, age_fps, recg_fps, total_fps; 

	start_ticks         = total_ticks          = 0;
	tracker_total_ticks = landmark_total_ticks = align_total_ticks  = 0;
	blink_total_ticks   = smile_total_ticks    = gender_total_ticks = age_total_ticks = recg_total_ticks = 0;

	tracker_fps = landmark_fps = align_fps = blink_fps = smile_fps = gender_fps = age_fps = recg_fps = total_fps = 0.0;        

	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;
	int   print_faceid=-1;
	float print_score = 0;
	std::string  print_facename;

	bool bRunLandmark = blink || smile|| gender|| age|| recog || saveface;
	IplImage *thumbnailImg   = cvCreateImage(cvSize(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT), IPL_DEPTH_8U, 3);   
	
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster = true;
	if( is_piclist ) bEnableAutoCluster = false;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			continue;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		// do face tracking
		start_ticks = ticks = cvGetTickCount();	
       
		int face_num = 0;
        if( is_piclist )
            face_num = detector.detect( gray_image, rects, face_max );
        else
            face_num = tracker.track( gray_image, rects, face_max, image ); // track in a video for faster speed
		  //face_num = tracker.detect( gray_image, rects, face_max ); // detect in an image

		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

        // blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		//for( int i=0; i< MIN(1,face_num); i++ )
		{
			// get face rect and id from face tracker
			CvRect rect = rects[i].rc;

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d %f", rect.x, rect.y, rect.width, rect.height, rects[i].prob );

			int    face_trackid = rects[i].fid;
			float  like = rects[i].prob;
			int    angle= rects[i].angle;

			// filter out outer faces
			if( rect.x+rect.width  > gray_image.width()   || rect.x < 0 ) continue;
			if( rect.y+rect.height > gray_image.height() || rect.y < 0 ) continue;

			//tracker.getThumbnail(image, rect, thumbnailImg);

			// detect landmark points 
			ticks = cvGetTickCount();	

			if(bRunLandmark)
			{
                if( is_piclist )
				    bLandmark = landmarkDetector.detect( gray_image, &rect, landmark6, parameters, angle ); //detect in an image
                else
				    bLandmark = landmarkDetector.track( gray_image, &rect, landmark6, parameters, angle ); // track in a video for faster speed

				ticks = cvGetTickCount() - ticks;
				landmark_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
				landmark_total_ticks += ticks;
			}
			else
				bLandmark = false;

	
			if(quiet == false && bLandmark == false) 
			{
				//DrawFaceRect
				cxlibDrawFaceRect(image, rect);
				continue;
			}

			// warped align face and hist eq to delighting
			ticks = cvGetTickCount();	

			cutFace.init(gray_image, rect, landmark6);

			ticks = cvGetTickCount() - ticks;
			if(ticks > 1)
				align_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
			else
			{	align_fps = 0;
				ticks = 0;
			}
			align_total_ticks += ticks;

			if(saveface)   //save face icon for training later
			{
				//save cutfaces
				if(sfolder)
				{
#ifdef WIN32
					_splitpath(vidcap->filename(),driver,dir,fname,ext);
					sprintf(sImgPath, "%s//%s%s", sfolder, fname,ext);
#else
					sprintf(sImgPath, "%s//%06d.jpg", sfolder, faceimgID++);
#endif
				}
				else
					sprintf(sImgPath, "%s#.jpg", vidcap->filename());
				
				cvSaveImage(sImgPath, cutFace.getBigCutFace());
			}

			// detect blink
			bBlink = 0;	
			probBlink = 0;
			if(blink && bLandmark)
			{
				ticks = cvGetTickCount();	
				float blink_threshold = blinkDetector.getDefThreshold();//0.5;
				int ret = blinkDetector.predict( &cutFace, &probBlink);
			
				if(probBlink > blink_threshold )
					bBlink = 1;  //eye close
				else 
					bBlink = -1; //eye open

				ticks = cvGetTickCount() - ticks;
				blink_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				blink_total_ticks += ticks;

				print_score = probBlink;
			}
			else blink_fps = 0;

			// detect smile
			bSmile    = 0;	
			probSmile = 0;
			if ( smile && bLandmark )
			{	
				ticks = cvGetTickCount();
				float smile_threshold = smileDetector.getDefThreshold(); //0.48;  
				int ret = smileDetector.predict(&cutFace, &probSmile);

				if(probSmile > smile_threshold)
					bSmile = 1;  //smile
				else 
					bSmile = -1; //not smile

				ticks	  = cvGetTickCount() - ticks;
				smile_fps = 1000.0 /( 1e-3 * ticks / cvGetTickFrequency() );
				smile_total_ticks += ticks;

				print_score = probSmile;
			}
			else smile_fps = 0;

			//detect gender
			bGender    = 0;	
			probGender = 0;
			if(gender && bLandmark)
			{
				ticks = cvGetTickCount();	
				float gender_threshold = genderDetector.getDefThreshold(); // 0.42; 
				int ret = genderDetector.predict(&cutFace, &probGender);

				if(probGender > gender_threshold)
					bGender =  1; //female
				else
					bGender = -1; //male

				//bGender = -1:male, 1:female, 0: null
				// smooth prediction result
                if( ! is_piclist )
				    bGender = genderDetector.voteLabel(face_trackid, bGender);
				
				ticks = cvGetTickCount() - ticks;
				gender_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				gender_total_ticks += ticks;

				print_score = probGender; 
			}
			else gender_fps = 0;

			//detect age
			nAgeID  = -1;
			if(age && bLandmark && rect.width*rect.height > 40*40)
			{
				ticks = cvGetTickCount();	

				//nAgeID = 0:"Baby", 1:"Kid", 2:"Adult", 3:"Senior"
				nAgeID = ageDetector.predict(&cutFace, probAge);

				// smooth prediction result
                if( ! is_piclist )
				    nAgeID = ageDetector.voteLabel(face_trackid, nAgeID); 

				ticks = cvGetTickCount() - ticks;
				age_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				age_total_ticks += ticks;

				print_score = probAge[nAgeID]; 
				//if( ! quiet )	cxDrawAignFace2Image(image, pCutFace2);
			}
			else 
			{
				age_fps = 0;
			}

			// recognize the face id
			// we only do recognition every 5 frames,interval
			char  *sFaceCaption = NULL;
			char  sFaceCaptionBuff[256];
            int face_id = 0;
			probFaceID = 0;
			if ( recog && bLandmark )
			{
				ticks = cvGetTickCount();
				float face_threshold = faceRecognizer.getDefThreshold(); 
				/////////////////////////////////////////////////////////////////////////////////////////
				face_id  = -1;   // no 'int' here: re-declaring would shadow the outer face_id logged below
				if(bEnableAutoCluster && !is_piclist)   // logical &&, not bitwise &
				{
					bool bAutocluster = true;
					if(mouse_faceparam.ret_online_collecting) bAutocluster = false;
					//face clustering
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID, bAutocluster, face_trackid, frames);
				}
				else//face recognition
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID);
				/////////////////////////////////////////////////////////////////////////////////////////

				ticks    = cvGetTickCount() - ticks;
				recg_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );
				recg_total_ticks += ticks;
				
				// smooth prediction result
                if( ! is_piclist && !bEnableAutoCluster)
                {
				    if(probFaceID > face_threshold*1.0)
					    face_id = faceRecognizer.voteLabel(face_trackid, face_id); 
				    else
					    face_id = faceRecognizer.voteLabel(face_trackid, -1);
                }
				else if(probFaceID <= face_threshold)
				{
					face_id =-1;
				}

				//set face name caption
				if(face_id >= 0)
				{
					// recognized face name
					const char* sFaceName = faceRecognizer.getFaceName(face_id);
					sprintf(sFaceCaptionBuff, "%s %.2f", sFaceName, probFaceID);
					//sprintf(sFaceCaptionBuff, "%s", sFaceName); //dispaly score
					sFaceCaption = sFaceCaptionBuff;
					
					print_score  = probFaceID;
					print_faceid = face_id;
				}
				else
				{   // failed to recognize 
					//sprintf(sFaceCaptionBuff, "N\A %.2f", probFaceID);
					//sFaceCaption = sFaceCaptionBuff;
				}

				// collect and save unknown face exemplars
				if(probFaceID < face_threshold*0.9 || face_id != mouse_faceparam.ret_faceset_id )
				{
					if(mouse_faceparam.ret_online_collecting && (face_num ==1 || face_trackid == mouse_faceparam.ret_facetrack_id))
					{
						if( rect.x > gray_image.width()/4 && rect.x+rect.width < gray_image.width()*3/4 ) 
						{
							mouse_faceparam.updated = true;
							int nFaceSetIdx = faceRecognizer.getFaceSetIdx(mouse_faceparam.ret_faceset_id);
							bool bflag = faceRecognizer.tryInsertFace(cutFace.getBigCutFace(), nFaceSetIdx);
							//printf("insert flag %d\n", bflag);
						}
					}
				}
			}
			else recg_fps = 0;

			if( ! quiet )
			{
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

				//sprintf(sFaceCaptionBuff, "%.2f", print_score);
				//sFaceCaption = sFaceCaptionBuff;

				int trackid = -1; //face_trackid. don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            // log file
            if( fp_faceinfo != NULL )
            {
                // index,  rect,  landmark6,  bBlink, probBlink, bSmile, probSmile, bGender, probGender, nAgeID, probAge[nAgeID], face_id, probFaceID
				//fprintf( fp_faceinfo, "#%s# @%s@ ",    vidcap->filename(), sImgPath);
				fprintf( fp_faceinfo, "#%s# ",    vidcap->filename());
                fprintf( fp_faceinfo, "faceidx=( %06d %02d )", vidcap->index(), i+1 );
				fprintf( fp_faceinfo, "   rect=( %3d %3d %3d %3d )", rect.x, rect.y, rect.width, rect.height );
                fprintf( fp_faceinfo, "   landmark6=(" );
                int l;
                for( l = 0; l < 6; l++ )
                    fprintf( fp_faceinfo, " %3.0f %3.0f", landmark6[l].x, landmark6[l].y );
                fprintf( fp_faceinfo, " )");
                fprintf( fp_faceinfo, "   blink=( %+d %f )", bBlink, probBlink );
                fprintf( fp_faceinfo, "   smile=( %+d %f )", bSmile, probSmile );
                fprintf( fp_faceinfo, "   gender=( %+d %f )", bGender, probGender );
                fprintf( fp_faceinfo, "   agegroup=( %+d %f )", nAgeID, (nAgeID >= 0 && nAgeID < 4) ? probAge[nAgeID] : 1.0f );
                fprintf( fp_faceinfo, "   identity=( %+d %f )", face_id, probFaceID );
                fprintf( fp_faceinfo, "\n" );
            }
        }
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		ticks    = cvGetTickCount() - start_ticks;
		total_ticks += (ticks);
		frame_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );

		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
	
		//show Image
		if (image.width() <= 800)
		{
			//show image
			cvShowImage( str_title, image );
		}
		else
		{   // show scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey( 30 );
		//int key = cvWaitKey( );
		if( key == ' ' ) // press the spacebar to pause the video play 
			cvWaitKey( 0 );                           
		else if( key == 27 )
			break;	    // press 'esc' to exit
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = rects[0].rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   // collect face exemplars for current selected facename
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			// turn on/off the autofocus flag
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting faces
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{   // save face models
				faceRecognizer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print speed info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );

	if (landmark_total_ticks != 0.0)
		landmark_fps = 1.0f  / ( landmark_total_ticks * temp / frames );

	if (align_total_ticks != 0.0)
		align_fps    = 1.0f  / ( align_total_ticks * temp / frames );

	if (blink_total_ticks != 0.0)
		blink_fps  = 1.0f  / (blink_total_ticks * temp / frames);

	if (smile_total_ticks != 0.0)
		smile_fps  = 1.0f  / (smile_total_ticks * temp / frames);

	if (gender_total_ticks != 0.0)
		gender_fps = 1.0f  / (gender_total_ticks * temp / frames);

	if (age_total_ticks != 0.0)
		age_fps = 1.0f  / (age_total_ticks * temp / frames);

	if (recg_total_ticks != 0.0)
		recg_fps   = 1.0f  / (recg_total_ticks  * temp / frames);

	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: Fd:%.1f Ld:%.1f Fa:%.1f Bl:%.1f Sm:%.1f Ge:%.1f Ag:%.1f Rc:%.1f",
		tracker_fps, landmark_fps, align_fps, 
		blink_fps,   smile_fps,    gender_fps, age_fps, recg_fps);

	//save updated face model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceRecognizer.saveFaceModelXML("faceset_model.xml");
	}

	
	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceRecognizer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceRecognizer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);
	//faceRecognizer.saveFaceModelXML("faceset_modelMerged#.xml");

	//release buff 
	
	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&thumbnailImg);
	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );
	
    if( fp_faceinfo != NULL )
        fclose( fp_faceinfo );

    return 0;
}
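Every per-module FPS figure above follows the same tick-count pattern. A small helper capturing it (illustrative, not part of the original library):

// Illustrative scoped timer built on the OpenCV tick API used throughout.
struct TickTimer {
    int64 t0;
    TickTimer() : t0(cvGetTickCount()) {}
    double elapsed_ms() const {
        return 1e-3 * (cvGetTickCount() - t0) / cvGetTickFrequency();
    }
    double fps() const { return 1000.0 / elapsed_ms(); }
};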
Esempio n. 22
0
int main()
{
    IplImage* img = cvLoadImage("goal_arena.bmp");
    CvSize imgSize = cvGetSize(img);
    IplImage* detected = cvCreateImage(imgSize, 8, 1);
 
    IplImage* imgBlue = cvCreateImage(imgSize, 8, 1);
    IplImage* imgGreen = cvCreateImage(imgSize, 8, 1);
    IplImage* imgRed = cvCreateImage(imgSize, 8, 1);

    cvSplit(img, imgBlue, imgGreen, imgRed, NULL);
    cvAnd(imgGreen, imgBlue, detected);
    cvAnd(detected, imgRed, detected);
    cvErode(detected, detected);
    cvDilate(detected, detected);    // Opening
 
    // cvThreshold(detected, detected, 100, 250, CV_THRESH_BINARY);
    CvMat* lines = cvCreateMat(100, 1, CV_32FC2);
    cvHoughLines2(detected, lines, CV_HOUGH_STANDARD, 1, 0.001, 100);

    CvPoint left1 = cvPoint(0, 0);
    CvPoint left2 = cvPoint(0, 0);
    CvPoint right1 = cvPoint(0, 0);
    CvPoint right2 = cvPoint(0, 0);
    CvPoint top1 = cvPoint(0, 0);
    CvPoint top2 = cvPoint(0, 0);
    CvPoint bottom1 = cvPoint(0, 0);
    CvPoint bottom2 = cvPoint(0, 0);
 
    int numLines = lines->rows;
    int numTop = 0;
    int numBottom = 0;
    int numLeft = 0;
    int numRight = 0;

    for(int i=0;i<numLines;i++)
    {
    	CvScalar dat = cvGet1D(lines, i);
        double rho = dat.val[0];
        double theta = dat.val[1];
        if(theta==0.0)
            continue;
        double degrees = theta*180.0/CV_PI;   // CV_PI: the original 3.1412 was a mistyped pi
 
        CvPoint pt1 = cvPoint(0, rho/sin(theta));
        CvPoint pt2 = cvPoint(img->width, (-img->width/tan(theta)) + rho/sin(theta));
        if(fabs(rho)<50.0)   // fabs: integer abs() would truncate the double
        {
        	if(degrees>45.0 && degrees<135.0)
            {
            	numTop++;
 
                // The line is horizontal and near the top
                top1.x+=pt1.x;
                top1.y+=pt1.y;
 
                top2.x+=pt2.x;
                top2.y+=pt2.y;
            }

            else
            {
                numLeft++;
 
                // The line is vertical and near the left
                left1.x+=pt1.x;
                left1.y+=pt1.y;
 
                left2.x+=pt2.x;
                left2.y+=pt2.y;
            }
        }

        else
        {
            // The line is far from the origin: bottom or right
            if(degrees>45.0 && degrees<135.0)
            {
                numBottom++;
 
                //The line is horizontal and near the bottom
                bottom1.x+=pt1.x;
                bottom1.y+=pt1.y;
 
                bottom2.x+=pt2.x;
                bottom2.y+=pt2.y;
            }
            else
            {
                numRight++;
 
                // The line is vertical and near the right
                right1.x+=pt1.x;
                right1.y+=pt1.y;
 
                right2.x+=pt2.x;
                right2.y+=pt2.y;
            }
        }
    }

    // average the accumulated endpoints (assumes at least one line per side was found)
    left1.x/=numLeft;
    left1.y/=numLeft;
    left2.x/=numLeft;
    left2.y/=numLeft;
 
    right1.x/=numRight;
    right1.y/=numRight;
    right2.x/=numRight;
    right2.y/=numRight;
 
    top1.x/=numTop;
    top1.y/=numTop;
    top2.x/=numTop;
    top2.y/=numTop;
 
    bottom1.x/=numBottom;
    bottom1.y/=numBottom;
    bottom2.x/=numBottom;
    bottom2.y/=numBottom;

    cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
    cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
    cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
    cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

    // Next, we need to figure out the four intersection points
    double leftA = left2.y-left1.y;
    double leftB = left1.x-left2.x;
    double leftC = leftA*left1.x + leftB*left1.y;
 
    double rightA = right2.y-right1.y;
    double rightB = right1.x-right2.x;
    double rightC = rightA*right1.x + rightB*right1.y;
 
    double topA = top2.y-top1.y;
    double topB = top1.x-top2.x;
    double topC = topA*top1.x + topB*top1.y;
 
    double bottomA = bottom2.y-bottom1.y;
    double bottomB = bottom1.x-bottom2.x;
    double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;
 
    // Intersection of left and top
    double detTopLeft = leftA*topB - leftB*topA;
    CvPoint ptTopLeft = cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft);
 
    // Intersection of top and right
    double detTopRight = rightA*topB - rightB*topA;
    CvPoint ptTopRight = cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight);
 
    // Intersection of right and bottom
    double detBottomRight = rightA*bottomB - rightB*bottomA;
    CvPoint ptBottomRight = cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight);
 
    // Intersection of bottom and left
    double detBottomLeft = leftA*bottomB-leftB*bottomA;
    CvPoint ptBottomLeft = cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft);

    cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
    cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
    cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
    cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

    IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
    cvZero(imgMask);
    CvPoint* pts = new CvPoint[4];
    pts[0] = ptTopLeft;
    pts[1] = ptTopRight;
    pts[2] = ptBottomRight;
    pts[3] = ptBottomLeft;
    cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));
    cvAnd(img, imgMask, img);
 
    cvNamedWindow("Original");
    cvNamedWindow("Detected");
 
    cvShowImage("Original", img);
    cvShowImage("Detected", detected);
    cvWaitKey(0);

    delete[] pts;
    cvReleaseImage(&imgMask);
    cvReleaseImage(&imgBlue);
    cvReleaseImage(&imgGreen);
    cvReleaseImage(&imgRed);
    cvReleaseImage(&detected);
    cvReleaseImage(&img);
    cvReleaseMat(&lines);

    return 0;
}
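The intersection arithmetic above is Cramer's rule applied to pairs of lines written as A*x + B*y = C. Pulled out as a helper it reads (illustrative; the original inlines it four times):

// Illustrative: intersect two lines given as A*x + B*y = C.
// Returns false when the lines are (near-)parallel.
bool intersectLines(double A1, double B1, double C1,
                    double A2, double B2, double C2, CvPoint &out)
{
    double det = A1 * B2 - B1 * A2;        // zero => parallel
    if (fabs(det) < 1e-9) return false;
    out = cvPoint(cvRound((B2 * C1 - B1 * C2) / det),
                  cvRound((A1 * C2 - A2 * C1) / det));
    return true;
}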
Esempio n. 23
0
	void PixelFlow::computeFlow( const Image& image, Math::Vector< int, 2 >& result, int& difference, Image* pDebugImg)
	{
		// declaring variables
		int xCut = 0;
		int yCut = 0;
		int temp = 0;
		int xIndex = 0;
		int yIndex = 0;
		int xBufSize = 0;
		int yBufSize = 0;
		int width= image.widthStep;
		int hight = image.height; 
		int xSize = int(x.size());
		int ySize = int(y.size());
		Math::Vector< int, 4 > xRec;
		Math::Vector< int, 4 > yRec;

		calculateIncrisedCoordiants(width, hight, 0.5, topL, bottomR, xRec,  yRec);

		
		std::vector<int> newX( xRec(2) - xRec(0) + 1, 0 );
		std::vector<int> newY( yRec(3) - yRec(1) + 1, 0 );
		
		if(xRec(2) != width)
		{	
			if(((int)x.size())%2 != 0)
				xBufSize = int(x.size());
			else
				xBufSize = int(x.size())+1;
		}
		else
		{	
			xBufSize = int(newX.size()) - int(x.size()/2);
		}
		
		if(yRec(3) != hight)
		{	
			if(((int)y.size())%2 != 0)
				yBufSize = int(y.size());
			else
				yBufSize = int(y.size())+1;
		}
		else
		{	
			yBufSize = int(newY.size()) - int(y.size()/2) ;
		}
		
		std::vector<int> xErrorBuf(xBufSize, 255);
		std::vector<int> yErrorBuf(yBufSize, 255);
						
		//calculating the updated buffer
		for (int i = 0; i < xRec(2) - xRec(0); i++)
			for(int j = 0; j < xRec(3) - xRec(1); j++)
			{	
				int temp = (xRec(1)  + j)*image.width +  xRec(0) + i;
				newX[i] += ((unsigned char*)image.imageData)[temp];
			}
		
		for(int i = 0; i < int(newX.size()); i++)
			newX[i] /= xRec(3) - xRec(1);

		for (int i = 0; i < yRec(2) - yRec(0); i++)
			for(int j = 0; j < yRec(3) - yRec(1); j++)
			{	
				int temp = (yRec(1)  + j)*image.width +  yRec(0) + i;
				newY[j] += ((unsigned char*)image.imageData)[temp];
			}

		for(int i = 0; i < int(newY.size()); i++)
			newY[i] /= yRec(2) - yRec(0);

		//check whether there was a cut
		if(xRec(2) == width)
			xCut = 1;
		if(xRec(0)== 0)
			if(xCut != 1)
				xCut = -1;
			else
				xCut = 2;
			
		//calculating the error buffer
		calculateErrorBuffer(x,newX, xErrorBuf,xCut);

		//check whether there was a cut
		if(yRec(3) == hight)
			yCut = 1;
		if(yRec(1)== 0)
			if(yCut != 1)
				yCut = -1;
			else
				yCut = 2;

		//calculating the error buffer
		calculateErrorBuffer(y,newY, yErrorBuf,yCut);
		
		//finding the best shift in x direction
		temp = xErrorBuf[0];

		for (int i = 1; i <int(xErrorBuf.size()); i++)
			if(temp > xErrorBuf[i])
			{
				temp = xErrorBuf[i];
				xIndex = i;	
			}
		
		//finding the best shift in y direction
		temp = yErrorBuf[0];
		
		for (int i = 1; i < int(yErrorBuf.size()); i++)
			if(temp > yErrorBuf[i])
			{
				temp = yErrorBuf[i];
				yIndex = i;
			}
		
		//if the new buffer is cut from the left, the error buffer is shifted by xdif or ydif
		int xdif, ydif;

		xdif = int(newX.size()) - 2*int(x.size()/2) - int(x.size());
		ydif = int(newY.size()) - 2*int(y.size()/2) - int(y.size());
		
		//returning the result
		if(xCut == 0 || xCut == 1)	
		{	
			result(0) = xIndex- int(x.size()/2);
		}
		else
		if(xCut == -1)   // '==': the original '=' assigned instead of comparing
		{
			result(0) = xIndex - 2*int(x.size()/2)-xdif;
		}

		if(yCut == 0 || yCut == 1)	
		{	
			result(1) = yIndex - int(y.size()/2) ;
		}
		else
		if(yCut == -1)   // '==': the original '=' assigned instead of comparing
		{
			result(1) = yIndex - 2*int(y.size()/2) -ydif;
		}
		
		//TO BE MODIFIED::
		difference = 0;

		//visualizing the error buffer
		if(pDebugImg)
		{
			CvPoint p1,p2;
				
			if(int(xErrorBuf.size()) < width -50)
			{	
				p1.y = 10;
				p2.y = 20;
				
				for (int i = 0; i < int(xErrorBuf.size()); i++)
				{	
					p1.x = width - int(xErrorBuf.size()) - 50 + i ;
					p2.x = width - int(xErrorBuf.size()) - 50 + i ;
					int color = std::min( int(xErrorBuf[i]) + 50, 255 );
					cvLine(pDebugImg,p1,p2,cvScalar(0,0,color),10,CV_AA,0);
				}

				p1.x = width -10;
				p2.x = width -20;
				
				for (int i = 0; i < int(yErrorBuf.size()); i++)
				{	
					p1.y = i + 50;
					p2.y = i + 50;
					int color = std::min( int(yErrorBuf[i]) + 50, 255 );
					cvLine(pDebugImg,p1,p2,cvScalar(0,0,color),10,CV_AA,0);
				}
			}
		}
	}
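The flow estimate above boils down to finding the shift that minimizes a 1-D error buffer in each axis. Stripped of the cut handling, that argmin search is (illustrative):

// Illustrative: best integer shift = index of the smallest error entry,
// re-centred so that 0 means "no motion".
#include <vector>
int bestShift(const std::vector<int>& errorBuf, int halfWindow)
{
    int best = 0;
    for (int i = 1; i < (int)errorBuf.size(); i++)
        if (errorBuf[i] < errorBuf[best])
            best = i;
    return best - halfWindow;
}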
Esempio n. 24
0
IplImage * Transformation::getMatchImg(){
	IplImage* rgb_img_src 	= src->input->rgb_img;//cvLoadImage(src->input->rgb_path.c_str(),CV_LOAD_IMAGE_UNCHANGED);
	char * data_src = (char *)rgb_img_src->imageData;
	unsigned short * depth_data_src		= (unsigned short *)(src->input->depth_img->imageData);

	IplImage* rgb_img_dst 	= dst->input->rgb_img;//cvLoadImage(dst->input->rgb_path.c_str(),CV_LOAD_IMAGE_UNCHANGED);
	char * data_dst = (char *)rgb_img_dst->imageData;
	unsigned short * depth_data_dst		= (unsigned short *)(dst->input->depth_img->imageData);

	int	width = rgb_img_src->width;
	int	height = rgb_img_src->height;

	IplImage* img_combine = cvCreateImage(cvSize(2*width,height), IPL_DEPTH_8U, 3);
	char * data = (char *)img_combine->imageData;

	for (int j = 0; j < height; j++){
		for (int i = 0; i < width; i++){
			int ind = 3*(width*j+i);	// use the actual width rather than a hard-coded 640
			int dst_ind = 3 * (j * (2*width) + (width+i));
			int src_ind = 3 * (j * (2*width) + (i));
			int d_dst = depth_data_dst[(width*j+i)];
			int d_src = depth_data_src[(width*j+i)];
			if(d_dst == 0 && (i % 2 == 0) && (j % 2 == 0)){
				data[dst_ind + 0] = 255;
				data[dst_ind + 1] = 0;
				data[dst_ind + 2] = 255;
			}else{
				data[dst_ind + 0] = data_dst[ind +0];
				data[dst_ind + 1] = data_dst[ind +1];
				data[dst_ind + 2] = data_dst[ind +2];
			}
			if(d_src == 0 && (i % 2 == 0) && (j % 2 == 0)){
				data[src_ind + 0] = 255;
				data[src_ind + 1] = 0;
				data[src_ind + 2] = 255;
			}else{
				data[src_ind + 0] = data_src[ind +0];
				data[src_ind + 1] = data_src[ind +1];
				data[src_ind + 2] = data_src[ind +2];
			}
		}
	}
	//cvReleaseImage( &rgb_img_src );
	//cvReleaseImage( &rgb_img_dst );

	for(unsigned int i = 0; i < src->keypoints->valid_key_points.size(); i++){
		KeyPoint * src_kp = src->keypoints->valid_key_points.at(i);
		cvCircle(img_combine,cvPoint(src_kp->point->w, src_kp->point->h), 3,cvScalar(0, 255, 0, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < src->keypoints->invalid_key_points.size(); i++){
		KeyPoint * kp = src->keypoints->invalid_key_points.at(i);
		cvCircle(img_combine,cvPoint(kp->point->w, kp->point->h), 3,cvScalar(0, 255, 255, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < dst->keypoints->valid_key_points.size(); i++){
		KeyPoint * dst_kp = dst->keypoints->valid_key_points.at(i);
		cvCircle(img_combine,cvPoint(dst_kp->point->w + width, dst_kp->point->h), 3,cvScalar(0, 255, 0, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < dst->keypoints->invalid_key_points.size(); i++){
		KeyPoint * kp = dst->keypoints->invalid_key_points.at(i);
		cvCircle(img_combine,cvPoint(kp->point->w + width, kp->point->h), 3,cvScalar(0, 255, 255, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < matches.size(); i++){
		KeyPoint * src_kp = matches.at(i).first;
		KeyPoint * dst_kp = matches.at(i).second;
		cvLine(img_combine,cvPoint(dst_kp->point->w  + width ,dst_kp->point->h),cvPoint(src_kp->point->w,src_kp->point->h),cvScalar(0, 0, 255, 0),1, 8, 0);
	}

	return img_combine;
}
Esempio n. 25
0
void jl_cv_draw_line(jl_cv_t* jl_cv, jl_cv_line_t line) {
	jl_cv_getoutput(jl_cv);
	cvLine(jl_cv->image_rgb, line.p1, line.p2, CV_RGB(0xFF,0x00,0xFF),
		1, 8, 0);
	jl_cv_disp_gray_(jl_cv);
}
Esempio n. 26
0
void Transformation::saveProblem(string path){
	printf("saveProblem(%s)\n",path.c_str());
	IplImage* rgb_img_src 	= cvLoadImage(src->input->rgb_path.c_str(),CV_LOAD_IMAGE_UNCHANGED);
	char * data_src = (char *)rgb_img_src->imageData;
	unsigned short * depth_data_src		= (unsigned short *)(src->input->depth_img->imageData);

	IplImage* rgb_img_dst 	= cvLoadImage(dst->input->rgb_path.c_str(),CV_LOAD_IMAGE_UNCHANGED);
	char * data_dst = (char *)rgb_img_dst->imageData;
	unsigned short * depth_data_dst		= (unsigned short *)(dst->input->depth_img->imageData);

	int	width = rgb_img_src->width;
	int	height = rgb_img_src->height;

	IplImage* img_combine = cvCreateImage(cvSize(2*width,height), IPL_DEPTH_8U, 3);
	char * data = (char *)img_combine->imageData;

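	// build a side-by-side composite: src frame on the left, dst frame on the
	// right; pixels with no depth reading are marked magenta on a sparse grid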
	for (int j = 0; j < height; j++){
		for (int i = 0; i < width; i++){
			int ind = 3*(width*j+i);
			int dst_ind = 3 * (j * (2*width) + (width+i));
			int src_ind = 3 * (j * (2*width) + (i));
			int d_dst = depth_data_dst[width*j+i];
			int d_src = depth_data_src[width*j+i];
			if(d_dst == 0 && (i % 2 == 0) && (j % 2 == 0)){
				data[dst_ind + 0] = 255;
				data[dst_ind + 1] = 0;
				data[dst_ind + 2] = 255;
			}else{
				data[dst_ind + 0] = data_dst[ind +0];
				data[dst_ind + 1] = data_dst[ind +1];
				data[dst_ind + 2] = data_dst[ind +2];
			}
			if(d_src == 0 && (i % 2 == 0) && (j % 2 == 0)){
				data[src_ind + 0] = 255;
				data[src_ind + 1] = 0;
				data[src_ind + 2] = 255;
			}else{
				data[src_ind + 0] = data_src[ind +0];
				data[src_ind + 1] = data_src[ind +1];
				data[src_ind + 2] = data_src[ind +2];
			}
		}
	}
	cvReleaseImage( &rgb_img_src );
	cvReleaseImage( &rgb_img_dst );

	for(unsigned int i = 0; i < src->keypoints->valid_key_points.size(); i++){
		KeyPoint * src_kp = src->keypoints->valid_key_points.at(i);
		cvCircle(img_combine,cvPoint(src_kp->point->w, src_kp->point->h), 3,cvScalar(0, 255, 0, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < src->keypoints->invalid_key_points.size(); i++){
		KeyPoint * kp = src->keypoints->invalid_key_points.at(i);
		cvCircle(img_combine,cvPoint(kp->point->w, kp->point->h), 3,cvScalar(0, 255, 255, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < dst->keypoints->valid_key_points.size(); i++){
		KeyPoint * dst_kp = dst->keypoints->valid_key_points.at(i);
		cvCircle(img_combine,cvPoint(dst_kp->point->w + width, dst_kp->point->h), 3,cvScalar(0, 255, 0, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < dst->keypoints->invalid_key_points.size(); i++){
		KeyPoint * kp = dst->keypoints->invalid_key_points.at(i);
		cvCircle(img_combine,cvPoint(kp->point->w + width, kp->point->h), 3,cvScalar(0, 255, 255, 0),1, 8, 0);
	}

	for(unsigned int i = 0; i < matches.size() ; i++){
		KeyPoint * src_kp = matches.at(i).first;
		KeyPoint * dst_kp = matches.at(i).second;
		cvLine(img_combine,cvPoint(dst_kp->point->w  + width ,dst_kp->point->h),cvPoint(src_kp->point->w,src_kp->point->h),cvScalar(0, 0, 255, 0),1, 8, 0);
	}

	cvShowImage("combined image", img_combine);
	char buf[1024];
	snprintf(buf, sizeof(buf), "%i_%s.png", (int)matches.size(), path.c_str());
	if(!cvSaveImage(buf,img_combine)){printf("Could not save: %s\n",buf);}
	cvWaitKey(30);
	cvReleaseImage( &img_combine);
}
Esempio n. 27
0
void rotate(IplImage *img)
{
	CvRect rect;
	IplImage *imgLine;
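	//region just to the right of the detected portrait, padded vertically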
	rect.x=GlobalGandhiji.x+GlobalGandhiji.width;
	rect.y=GlobalGandhiji.y-5;
	rect.width=(int)((GlobalGandhiji.width)-5);
	rect.height=GlobalGandhiji.height+15;
	if(GlobalGandhiji.matchval!=-1 && rect.x>0 && rect.y>0 &&  rect.y+rect.height<= img->height && rect.x+rect.width<= img->width)
	{	
	
		imgLine = cropRectangle(img, rect);
		cvNamedWindow("imgLine", 1);
		cvShowImage("imgLine", imgLine);

		IplImage* src1 = cvCreateImage( cvGetSize(imgLine), 8, 1 );
		cvCvtColor( imgLine, src1, CV_RGB2GRAY );

		IplImage* dst = cvCreateImage( cvGetSize(src1), 8, 1 );
		IplImage* color_dst = cvCreateImage( cvGetSize(src1), 8, 3 );
		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq* lines = 0;
		int i;
		cvCanny( src1, dst, 50, 150, 3 );
		//cvDilate( dst, dst, 0, 1 );
		cvNamedWindow("edgedest",1);
		cvShowImage("edgedest",dst);
		cvCvtColor( dst, color_dst, CV_GRAY2BGR );
#if 1
		//standard Hough transform: each line comes back as (rho, theta);
		//convert it to two far-apart endpoints so cvLine can draw it
		lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 30, 0, 0 );
		for( i = 0; i < MIN(lines->total,100); i++ )
		{
			float* line = (float*)cvGetSeqElem(lines,i);
			float rho = line[0];
			float theta = line[1];

			printf("theta = %f\n", (theta*180/3.142));
			CvPoint pt1, pt2;
			double a = cos(theta), b = sin(theta);
			double x0 = a*rho, y0 = b*rho;
			printf("a= %f  b=%f  x0=%f  y0=%f rho=%f\n", a, b, x0, y0, rho);
			pt1.x = cvRound(x0 + 1000*(-b));
			pt1.y = cvRound(y0 + 1000*(a));
			pt2.x = cvRound(x0 - 1000*(-b));
			pt2.y = cvRound(y0 - 1000*(a));
			printf("    x1 = %d, y1 = %d", pt1.x, pt1.y);
			printf("    x2 = %d, y2 = %d\n\n", pt2.x, pt2.y);

			//if((theta*180/3.142) < 100 && (theta*180/3.142) > 79 )
			cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
		}
#else
		//probabilistic Hough transform: returns line segments directly
		lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 30, 0, 0 );
		for( i = 0; i < lines->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
			cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 3, 8 );
		}
#endif
		
		cvNamedWindow( "Hough", 1 );
		cvShowImage( "Hough", color_dst );



	}
	


	/*
	




*/




}
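// cropRectangle is defined elsewhere; a plausible minimal implementation
// (an assumption, not the original helper) using the C API's ROI mechanism:
IplImage* cropRectangle(IplImage* img, CvRect rect)
{
	//copy the region of interest into a freshly allocated image
	IplImage* crop = cvCreateImage( cvSize(rect.width, rect.height), img->depth, img->nChannels );
	cvSetImageROI( img, rect );
	cvCopy( img, crop, NULL );
	cvResetImageROI( img );
	return crop;
}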
Esempio n. 28
0
int main(int argc, const char * argv[]) {
    
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
    CvRNG rng = cvRNG(-1);
    
    cvNamedWindow( "fitline", 1 );
    for(;;) {
        char key;
        int i, count = cvRandInt(&rng) % 100 + 1, outliers = count/5;
        float a = cvRandReal(&rng) * 200;
        float b = cvRandReal(&rng) * 40;
        float angle = cvRandReal(&rng) * CV_PI;
        float cos_a = cos(angle), sin_a = sin(angle);
        CvPoint pt1, pt2;
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );
        float line[4];
        float d, t;
        
        b = MIN(a*0.3, b);
        
        // generate some points that are close to the line
        for( i = 0; i < count - outliers; i++ ) {
            float x = (cvRandReal(&rng)*2-1)*a;
            float y = (cvRandReal(&rng)*2-1)*b;
            points[i].x = cvRound(x*cos_a - y*sin_a + img->width/2);
            points[i].y = cvRound(x*sin_a + y*cos_a + img->height/2);
        }
        
        // generate "completely off" points
        for( ; i < count; i++ ) {
            points[i].x = cvRandInt(&rng) % img->width;
            points[i].y = cvRandInt(&rng) % img->height;
        }
        
        // find the optimal line
        cvFitLine( &pointMat, CV_DIST_L1, 1, 0.001, 0.001, line );
        cvZero( img );
        
        // draw the points
        for( i = 0; i < count; i++ ) {
            cvCircle( img, points[i], 2, i < count - outliers ? CV_RGB(255, 0, 0) : CV_RGB(255,255,0), CV_FILLED, CV_AA, 0 );
        }
        
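        // cvFitLine fills line[] with (vx, vy, x0, y0): a direction vector and
        // a point on the fitted line; normalize the direction and extend it far
        // in both directions so the drawn segment spans the whole image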
        d = sqrt((double)line[0]*line[0] + (double)line[1]*line[1]);
        line[0] /= d;
        line[1] /= d;
        t = (float)(img->width + img->height);
        pt1.x = cvRound(line[2] - line[0]*t);
        pt1.y = cvRound(line[3] - line[1]*t);
        pt2.x = cvRound(line[2] + line[0]*t);
        pt2.y = cvRound(line[3] + line[1]*t);
        cvLine( img, pt1, pt2, CV_RGB(0,255,0), 3, CV_AA, 0 );
        
        cvShowImage( "fitline", img );
        
        key = (char) cvWaitKey(0);
        if( key == 27 )  break;
        free( points );
    }
    
    cvDestroyWindow( "fitline" );
    return 0;
}
Esempio n. 29
0
int main(int argc, char **argv) {
  // Check parameters
  if (argc < 2) {
    fprintf(stderr, "%s: %s\n", APP_NAME, "No video name given");
    fprintf(stderr, "Usage: %s <video file name> [output file name]\n", APP_NAME);

    exit(EXIT_FAILURE);
  }

  char *output_file_name;
  if (argc == 3) {
    output_file_name = argv[2];
  }
  else {
    output_file_name = OUTPUT_FILE_NAME;
  }

  // Load video
  char *file_name = argv[1];
  CvCapture *video = cvCaptureFromFile(file_name);

  if (!video) {
    fprintf(stderr, "%s: Could not open video file %s\n", APP_NAME, file_name);
    exit(EXIT_FAILURE);
  }

  // Extract video parameters
  CvSize video_frame_size;
  video_frame_size.width = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_WIDTH);
  video_frame_size.height = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_HEIGHT);
  double video_fps = cvGetCaptureProperty(video, CV_CAP_PROP_FPS);
  long video_frame_count = cvGetCaptureProperty(video, CV_CAP_PROP_FRAME_COUNT);

  // Initialize video writer
  CvVideoWriter *video_writer = cvCreateVideoWriter(output_file_name,
    FOURCC, video_fps, video_frame_size, true);

  // Initialize variables for optical flow calculation
  IplImage *current_frame = cvCreateImage(video_frame_size, IPL_DEPTH_8U, 3);
  IplImage *eigen_image = cvCreateImage(video_frame_size, IPL_DEPTH_32F, 1);
  IplImage *temp_image = cvCreateImage(video_frame_size, IPL_DEPTH_32F, 1);

  int corner_count = MAX_CORNERS;
  CvPoint2D32f corners[2][MAX_CORNERS];
  char features_found[MAX_CORNERS];
  float feature_errors[MAX_CORNERS];

  IplImage *frame_buffer[2];
  IplImage *pyramid_images[2];
  CvSize pyramid_size = cvSize(video_frame_size.width + 8, video_frame_size.height / 3);
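  // scratch-buffer size expected by cvCalcOpticalFlowPyrLK in the old C API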

  int i;
  for (i = 0; i < 2; i++) {
    frame_buffer[i] = cvCreateImage(video_frame_size, IPL_DEPTH_8U, 1);
    pyramid_images[i] = cvCreateImage(pyramid_size, IPL_DEPTH_32F, 1);
  }

  // Process video
  while (query_frame(video, frame_buffer, current_frame)) {
    // cvGoodFeaturesToTrack treats corner_count as in/out, so reset it each frame
    corner_count = MAX_CORNERS;

    // Corner finding with Shi and Tomasi
    cvGoodFeaturesToTrack(
      frame_buffer[0],
      eigen_image,
      temp_image,
      corners[0],
      &corner_count,
      0.01,
      5.0,
      0,
      3,
      0,
      0.4);

    cvFindCornerSubPix(
      frame_buffer[0],
      corners[0],
      corner_count,
      cvSize(WINDOW_SIZE, WINDOW_SIZE),
      cvSize(-1, -1),
      cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3));

    // Pyramid Lucas-Kanade
    cvCalcOpticalFlowPyrLK(
      frame_buffer[0],
      frame_buffer[1],
      pyramid_images[0],
      pyramid_images[1],
      corners[0],
      corners[1],
      corner_count,
      cvSize(WINDOW_SIZE, WINDOW_SIZE),
      5,
      features_found,
      feature_errors,
      cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3),
      0);

    // Find the largest endpoint magnitude so the arrow heads can be scaled
    int i;
	double l_max = 0, l;
	for (i = 0; i < corner_count; i++) {
      if (features_found[i] == 0 || feature_errors[i] > 550) {
        continue;
      }
	  l = sqrt(corners[1][i].x*corners[1][i].x + corners[1][i].y*corners[1][i].y);
	  if (l > l_max) l_max = l;
	}
	if (l_max <= 0) l_max = 1;  // avoid division by zero below
	
    // Draw the optical flow vectors with scaled arrow heads
    for (i = 0; i < corner_count; i++) {
      if (features_found[i] == 0 || feature_errors[i] > 550) {
        continue;
      }

	  // recompute this corner's magnitude (the first loop only kept the maximum)
	  l = sqrt(corners[1][i].x*corners[1][i].x + corners[1][i].y*corners[1][i].y);
	  double spinSize = 5.0 * l / l_max;

      CvPoint points[2];
      points[0] = cvPoint(cvRound(corners[0][i].x), cvRound(corners[0][i].y));
      points[1] = cvPoint(cvRound(corners[1][i].x), cvRound(corners[1][i].y));

      cvLine(current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);
	  
	  double angle;                                                                          
	  angle = atan2( (double) points[0].y - points[1].y, (double) points[0].x - points[1].x );
	
	  points[0].x = (int) (points[1].x + spinSize * cos(angle + 3.1416 / 4));
	  points[0].y = (int) (points[1].y + spinSize * sin(angle + 3.1416 / 4));
	  cvLine(current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);

	  points[0].x = (int) (points[1].x + spinSize * cos(angle - 3.1416 / 4));
	  points[0].y = (int) (points[1].y + spinSize * sin(angle - 3.1416 / 4));
	  cvLine( current_frame, points[0], points[1], CV_RGB(0, 255, 0), 1, 8, 0);
    }

    cvWriteFrame(video_writer, current_frame);
  }

  // Clean up
  cvReleaseImage(&current_frame);
  cvReleaseImage(&eigen_image);
  cvReleaseImage(&temp_image);

  for (i = 0; i < 2; i++) {
    cvReleaseImage(&frame_buffer[i]);
    cvReleaseImage(&pyramid_images[i]);
  }
  cvReleaseCapture(&video);
  cvReleaseVideoWriter(&video_writer);

  return 0;
}
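// query_frame is not part of this listing; a minimal hypothetical sketch,
// assuming it grabs the next frame, keeps a color copy for drawing, and
// rotates the two-slot grayscale buffer (slot 0 = previous, slot 1 = current):
static int query_frame(CvCapture *video, IplImage *frame_buffer[2], IplImage *current_frame) {
  IplImage *frame = cvQueryFrame(video);  // owned by the capture; do not release
  if (!frame) {
    return 0;                             // no more frames
  }

  cvCopy(frame, current_frame, NULL);     // color copy that the vectors are drawn on

  IplImage *tmp = frame_buffer[0];        // the old "current" becomes "previous"
  frame_buffer[0] = frame_buffer[1];
  frame_buffer[1] = tmp;
  cvCvtColor(frame, frame_buffer[1], CV_BGR2GRAY);  // new grayscale "current"
  return 1;
}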
Esempio n. 30
0
void MCRenderer::showCurrentResult(const vector<vec3f>& pixelColors , unsigned* time , unsigned* iter)
{
	IplImage* image = cvCreateImage(cvSize(renderer->camera.width, renderer->camera.height), IPL_DEPTH_32F, 3);
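	// copy the linear RGB float buffer into the IplImage, swapping to OpenCV's BGR order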
	for(int x=0; x<renderer->camera.width; x++)
	{
		for(unsigned y=0; y<renderer->camera.height; y++)
		{
			vec3f rgb = pixelColors[y*renderer->camera.width + x];
			vec3f &bgr = ((vec3f*)image->imageData)[y*renderer->camera.width + x];
			bgr = vec3f(rgb.z, rgb.y, rgb.x);
		}
	}

	cvSetMouseCallback("Renderer", mouseEvent, this);
    //printf("savePath = %s\n" , savePath.c_str());
	if(savePath != "")
	{
		saveImagePFM(savePath, image);
	}

	if (iter && time)
	{
		char timeStr[100] , iterStr[100];
		memset(timeStr , 0 , sizeof(timeStr));
		memset(iterStr , 0 , sizeof(iterStr));
        sprintf(timeStr , "%d" , *time);
        sprintf(iterStr , "%d" , *iter);
		//itoa(*time , timeStr , 10);
		//itoa(*iter , iterStr , 10);
		string fileName(savePath);
		for (int i = savePath.length() - 1; i >= 0; i--)
		{
			fileName.pop_back();
			if (savePath[i] == '.')
				break;
		}
		fileName += "_time_";
		for (int i = 0; i < strlen(timeStr); i++)
			fileName.push_back(timeStr[i]);
		fileName += "_iter_";
		for (int i = 0; i < strlen(iterStr); i++)
			fileName.push_back(iterStr[i]);
		fileName += ".pfm";
		saveImagePFM(fileName , image);
		printf("savePath = %s\n" , fileName.c_str());
	}

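	// gamma-correct (1/2.2) the linear radiance values for display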
	for(int p=0; p<3*image->width*image->height; p++)
		((float*)image->imageData)[p] = powf(((float*)image->imageData)[p], 1/2.2);

	//cvShowImage("Renderer", image);

	//response(image);
	// NOTE: the image must stay alive here; the switch below still draws into
	// it, so the release is moved to the end of the function

	switch(showEvent)
	{
	case SHOW_PATH:
		if(showPath.size() > 1)
		{
			for(unsigned i=0; i<showPath.size()-1; i++)
			{
				vec3f point1 = showPath[i].origin;
				vec3f point2 = showPath[i+1].origin;
				vec2<float> pixel1 = renderer->getCamera().transToPixel(point1);
				vec2<float> pixel2 = renderer->getCamera().transToPixel(point2);
				CvPoint p1, p2;
				p1.x = pixel1.x;
				p1.y = pixel1.y;
				p2.x = pixel2.x;
				p2.y = pixel2.y;
				if(i==0)
					p1 = p2;
				cvLine(image, p1, p2, cvScalar(0, 1, 0));
				cvCircle(image, p2, 5, cvScalar(0, 0, 1), 2);
			}
		}
		break;

	default:
		break;
	/*case SHOW_PIXEL_VALUE:
		printf("%lf, %lf, %lf\n", pixelColors[pathPixelID].x, pixelColors[pathPixelID].y, pixelColors[pathPixelID].z);
		int x = pathPixelID % renderer->camera.width;
		int y = pathPixelID / renderer->camera.width;
		cvRectangle(image, cvPoint(x-1, y-1), cvPoint(x+1, y+1), cvScalar(0, 0, 1));
		break;*/
	}

	cvReleaseImage(&image);
}