Example #1
File: split.c Project: amnosuperman/LPRS
char* char_ext(IplImage* imagen,basicOCR ocr  )
{
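	/*
	 * Isolate character blobs on the plate image and classify each one with the
	 * basicOCR model. Returns a malloc'd string of the recognized characters
	 * (reversed at the end, since the contour scan appears to visit them
	 * right-to-left); the caller owns and must free() the buffer.
	 */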
	
	//cvNamedWindow("temp");
	//cvShowImage("temp",imagen);
	//cvWaitKey(0);
	//char* plate=NULL;
	char* no=(char*)malloc(20*sizeof(char));
//------------------------------------------------------------------------------------------------
    //NUMBER ISOLATION

	//Create needed images
	smooth= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	threshold= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	open_morf= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
	
	//Init variables for contours
	contour = 0;
	contourLow = 0;
	//Create storage needed for contour detection
	CvMemStorage* storage = cvCreateMemStorage(0);
	//Smooth image
	cvSmooth(imagen, smooth, CV_GAUSSIAN, 3, 0, 0, 0);
	
	CvScalar avg;
	CvScalar avgStd;
	cvAvgSdv(smooth, &avg, &avgStd, NULL);
	//printf("Avg: %f\nStd: %f\n", avg.val[0], avgStd.val[0]);
	//Threshold at mean + stddev/2 (inverted binary: dark strokes become white foreground)
	cvThreshold(smooth, threshold, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY_INV);
	//Morphological filters (opening: erode then dilate)
	cvErode(threshold, open_morf, NULL,1); 
	cvDilate(open_morf, open_morf, NULL,1); 
	//Duplicate image for contour detection (cvFindContours modifies its input)
	img_contornos=cvCloneImage(open_morf);
	
	//Search contours in the preprocessed image
	cvFindContours( img_contornos, storage, &contour, sizeof(CvContour),
			CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0) );
	//Optimize contours, reduce points
	contourLow=cvApproxPoly(contour, sizeof(CvContour), storage,CV_POLY_APPROX_DP,1,1);
//-----------------------------------------------------------------------------------------------------------
//-----------------------------------------------------------------------------------------------------------
//NUMBER RECOGNITION
	CvRect rect;
	int carea=0,area=0;
	int count=0;
    int match;	
    int w,h;
    w=imagen->width;
    h=imagen->height;
	area=(w)*(h);
//	printf("area : %d, %d %d\n",area,w,h);
	//printf("\n%d\n",area/26);
	
	char name[16];	/* "char%d" can exceed 6 bytes once count reaches 10 */
	//static int width;
	for( ; contourLow != 0; contourLow = contourLow->h_next )
	{		

		rect=cvBoundingRect(contourLow,0);
		cvSetImageROI(smooth,rect);
		IplImage *temp22=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
		IplImage *temp23=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
		cvCopy(smooth,temp22,NULL);
		carea=rect.width*rect.height;

		/*if((rect.width>rect.height)||(carea>(area/6))||(carea<(area/25)))
		{	
		    cvReleaseImage(&temp22);
		    continue;
		}*/
		
		if((carea<(area/4))&&(carea>(area/25)))
		{
			int width = temp22->width;	/* a 'static' with a non-constant initializer would not compile in C */
			sprintf(name,"char%d",count);
			cvNamedWindow(name);
			cvMoveWindow(name,840 - count*3*width,10);
			cvThreshold(temp22, temp23, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY);

			cvShowImage(name,temp23);
			cvWaitKey(500);
			match=ocr.classify(temp23,0);
			if(match<=25)
			    no[count]='a'+match;		/* classes 0-25 -> 'a'-'z' */
			else
			    no[count]='0'+match-26;		/* classes 26-35 -> '0'-'9' */

			count++;

		}

		cvReleaseImage(&temp22);
		cvReleaseImage(&temp23);		
		cvResetImageROI(smooth);
	}
	cvWaitKey(0);

	no[count]='\0';
	rev(no,count);
	//strcpy(plate,no);
	//printf("\n%d\n",count);
//-------------------------------------------------------------------------------------------------------------------------------------
	//cvReleaseImage(&imagen_color);
	cvReleaseImage(&imagen);
	cvReleaseImage(&open_morf);
	cvReleaseImage(&img_contornos);	
	cvReleaseImage(&smooth);
	cvReleaseImage(&threshold);
	return no;	/* caller must free() the returned buffer */
}
Example #2
File: test6.1.cpp Project: Mrzhy/test6.1
int main(int argc, char* argv[]) {
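	// Load a still image, run the frontal and profile Haar cascades over it,
	// outline each detection, and blur the face regions via image ROIs.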

	// Declarations
	CvHaarClassifierCascade *pCascadeFrontal = 0, *pCascadeProfile = 0;	// the face detectors
	CvMemStorage *pStorage = 0;		// expandable memory buffer
	CvSeq *pFaceRectSeq;			// list of detected faces
	int i;

	/* Webcam capture (disabled):
	CvCapture *capture;
	capture = cvCreateCameraCapture(CV_CAP_ANY);
	pInpImg = cvQueryFrame(capture);*/

	// Initialization
	//IplImage* pInpImg = (argc > 1) ? cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR) : 0;
	IplImage *pInpImg = cvLoadImage("D:/测试/test6/3.jpg", CV_LOAD_IMAGE_COLOR);
	pStorage = cvCreateMemStorage(0);

	pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_frontalface_default.xml",0,0,0);
	//pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_frontalface_alt_tree.xml",0,0,0);
	pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_profileface.xml",0,0,0);
	//pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_lowerbody.xml",0,0,0);

	// Check that everything was initialized correctly
	if (!pInpImg || !pStorage || !pCascadeFrontal || !pCascadeProfile) {
		printf("Initialization failed");
		exit(-1);
	}

	// Open a window to display the faces
	cvNamedWindow("Fenetre de Haar", CV_WINDOW_NORMAL);
	cvShowImage("Fenetre de Haar", pInpImg);
	cvWaitKey(50);

	// Detect FRONTAL faces in the image
	pFaceRectSeq = cvHaarDetectObjects
		(pInpImg, pCascadeFrontal, pStorage,
		1.1,	// grow the search scale by 10% each pass [1.0-1.4]: the larger, the faster
		3,	// discard groups with fewer than 3 neighboring detections [0-4]: the smaller, the more hits
		/*0,*/ CV_HAAR_DO_CANNY_PRUNING,	// 0: search everywhere; CV_HAAR_DO_CANNY_PRUNING: skip regions unlikely to contain a face
		cvSize(0, 0));	// use the cascade's default minimum window size (24, 24) from the XML

	// Draw a rectangle around each detected face
	for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) {
		CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i);
		CvPoint pt1 = { r->x, r->y };
		CvPoint pt2 = { r->x + r->width, r->y + r->height };
		cvRectangle(pInpImg, pt1, pt2, CV_RGB(0,255,0), 3, 4, 0);
		
		// Blur the face region
		cvSetImageROI(pInpImg, *r);
		cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3);
		cvResetImageROI(pInpImg);
	}
	cvShowImage("Fenetre de Haar", pInpImg);
	cvWaitKey(1);

	// Detect PROFILE faces in the image
	pFaceRectSeq = cvHaarDetectObjects
		(pInpImg, pCascadeProfile, pStorage,
		1.4,	// grow the search scale by 40% each pass: the larger, the faster
		3,	// discard groups with fewer than 3 neighboring detections [0-4]: the smaller, the more hits
		/*0,*/ CV_HAAR_DO_CANNY_PRUNING,	// skip regions unlikely to contain a face
		cvSize(0, 0));	// use the cascade's default minimum window size (24, 24) from the XML

	// Draw a rectangle around each detected face
	for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) {
		CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i);
		CvPoint pt1 = { r->x, r->y };
		CvPoint pt2 = { r->x + r->width, r->y + r->height };
		cvRectangle(pInpImg, pt1, pt2, CV_RGB(255,165,0), 3, 4, 0);
		
		// Blur the face region
		cvSetImageROI(pInpImg, *r);
		cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3);
		cvResetImageROI(pInpImg);
	}

	// Show the face detections
	cvShowImage("Fenetre de Haar", pInpImg);
	cvWaitKey(0);
	cvDestroyWindow("Fenetre de Haar");

	// Release resources
	//cvReleaseCapture(&capture); // webcam capture
	cvReleaseImage(&pInpImg);
	if (pCascadeFrontal) cvReleaseHaarClassifierCascade(&pCascadeFrontal);
	if (pCascadeProfile) cvReleaseHaarClassifierCascade(&pCascadeProfile);
	if (pStorage) cvReleaseMemStorage(&pStorage);
	return 0;
}
Example #3
void FTS_ANPR_Seg::extractCharByCCAnalysis( const cv::Mat& oBin,
                                            FTS_ANPR_SegResult& oSegResult )
{
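    // Scan outer contours (connected components) in the binarized plate image,
    // filter them with size/aspect tests, and record each surviving bounding box
    // together with its filled binary mask as a character candidate in oSegResult.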
    // Pad the input image first
    // ------------------------------------------------------------------------
	m_oPadded.create( oBin.rows + 2,
					  oBin.cols  + 2,
					  CV_8UC1 );
	cv::copyMakeBorder( oBin, m_oPadded, 1, 1, 1, 1, cv::BORDER_CONSTANT );

    IplImage iiBin    = oBin;
    IplImage iiPadded = m_oPadded;

    // NOTE: this repeats the zero padding already performed by cv::copyMakeBorder
    // above; one of the two calls is redundant.
    cvCopyMakeBorder( &iiBin,
                      &iiPadded,
                      cvPoint( 1, 1 ),
                      IPL_BORDER_CONSTANT,
                      cvScalarAll( 0 )  ); // pad with black border


    // Initializes contour scanning process
    // ------------------------------------------------------------------------
    CvSeq* poContour = 0;
    CvContourScanner oContourScanner;

    oContourScanner = cvStartFindContours( &iiPadded,
                                           m_poStorage,
                                           sizeof( CvContour ),
                                           CV_RETR_EXTERNAL, //CV_RETR_LIST,
                                           CV_CHAIN_APPROX_SIMPLE,
                                           cvPoint( 0, 0 )  );

    // Contour scanning process
    // ------------------------------------------------------------------------
    while(  ( poContour = cvFindNextContour( oContourScanner ) )  )
    {
        // Finding bounding boxes that meet the ratio tests
        // --------------------------------------------------------------------
        CvRect oBox = cvBoundingRect( poContour, 0 );

        if(    !testArea( oBox )
            || !testHeightOverWidth( oBox )
            || !testHeight( oBox.height, iiBin.height )  )
        {
            continue;
        }

        std::list< FTS_ANPR_SegChar*>& oChars = oSegResult.m_oChars;

        // Make sure not too many candidates
        // --------------------------------------------------------------------
        if( oChars.size() >= m_nMaxNumCharCandidates )
        {
            break; // exit the while loop
        }

        // Store the character candidate to the segmentation structure
        // --------------------------------------------------------------------
        oChars.push_back( new FTS_ANPR_SegChar );

        FTS_ANPR_SegChar& oSegChar = *( oChars.back() ); // fill in the empty object

        oSegChar.m_oCharRect = oBox;

        // Offset the bounding box from coordinates in padded image, into coordinates of input image.
        --oSegChar.m_oCharRect.x;
        --oSegChar.m_oCharRect.y;

//        oSegChar.m_oCharBin.resize(oBox.width, oBox.height, SN_PIX_FMT_GREY );
        oSegChar.m_oCharBin = cv::Mat::zeros( cv::Size( oSegChar.m_oCharRect.width, oSegChar.m_oCharRect.height ), CV_8UC1 );

        IplImage iiSegCharBin = oSegChar.m_oCharBin;
//        cvZero( &iiSegCharBin );
//        printf("width = %d, height = %d\n", oSegChar.m_oCharRect.width, oSegChar.m_oCharRect.height );

        // Draw the outer contour and fill all holes. No internal holes
        // after this.
        cvDrawContours( &iiSegCharBin,
                        poContour,
                        CV_RGB( 255, 255, 255 ),
                        CV_RGB( 255, 255, 255 ),
                        1,
                        CV_FILLED,
                        8,
                        cvPoint( -oBox.x, -oBox.y ) // offset contour to smaller image
                        );

        // Recover all the holes in the original image
        cvSetImageROI( &iiBin, oSegChar.m_oCharRect );
        cvAnd( &iiBin, &iiSegCharBin, &iiSegCharBin, 0 );

//        cv::namedWindow( "CCCCCCCCCCCCCCCCCCCCCCC" );
//        cv::imshow( "CCCCCCCCCCCCCCCCCCCCCCC", oSegChar.m_oCharBin );
//        cv::waitKey();
    }

    cvResetImageROI( &iiBin );
    cvEndFindContours( &oContourScanner );


    // Sort the segments using x-coordinate
    // --------------------------------------------------------------------
    oSegResult.m_oChars.sort( &FTS_ANPR_SegChar::LessInX );
}
Example #4
int main(int argc, char** argv)
{
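	// Learn a codebook background model over the first nframesToLearnBG frames,
	// then segment the foreground (a hand) inside a 250x250 corner ROI and pass
	// the resulting mask to detect() for recognition.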
	CvMemStorage* mstrg = cvCreateMemStorage();
	CvSeq* contours = 0; 
	CvSeq* contours2 = 0; 

	const char* filename = 0;
	IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method
	IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
	CvCapture* capture = 0;		

	int c, n, nframes = 0;
	int nframesToLearnBG = 300;

	model = cvCreateBGCodeBookModel();

	//Set color thresholds to default values
	model->modMin[0] = 3;
	model->modMin[1] = model->modMin[2] = 3;
	model->modMax[0] = 10;
	model->modMax[1] = model->modMax[2] = 10;
	model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

	bool pause = false;
	bool singlestep = false;

	printf("Capturando de la camara...\n");
	capture = cvCaptureFromCAM( 0 );

	if( !capture )
	{
		printf( "No se pudo inicializar la captura de video\n\n" );
		return -1;
	}

	while (true)
	{

		rawImage = cvQueryFrame( capture );
		++nframes;
		if(!rawImage) 
			break;


		//First time:
		if( nframes == 1 && rawImage )
		{
			borde = cvLoadImage("Borde.png",0);

			// CODEBOOK METHOD ALLOCATION
			yuvImage = cvCloneImage(rawImage);

			int w = yuvImage->width;
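			// crop to the 250x250 top-right corner: the model only watches this region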
			cvSetImageROI(yuvImage, cvRect(w-250,0,250,250));
			IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels);
			cvCopy(yuvImage, tmp, NULL);
			cvResetImageROI(yuvImage);
			yuvImage = cvCloneImage(tmp);

			ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
			ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );

			cvSet(ImaskCodeBook,cvScalar(255));

			cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);

			printf (">>Aprendiendo fondo\n");
		}

		// If we've got an rawImage and are good to go:                
		if( rawImage )
		{
			cvFlip(rawImage, NULL, 1);
			int w = rawImage->width;

			IplImage* bordeCopy = cvCloneImage(borde);	// cvFindContours modifies its input image
			cvFindContours(bordeCopy,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);
			cvReleaseImage(&bordeCopy);

			//Draw the capture-zone outline
			cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			//
			if(nframes - 1 < nframesToLearnBG)
			{
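				// overlay a countdown with the number of background-learning frames remaining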
				char buffer [33];
				_itoa (nframesToLearnBG - nframes,buffer,10);
				CvFont font2;
				cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
				cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
			}

			cvSetImageROI(rawImage, cvRect(w-250,0,250,250));

			cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
			//YUV for the codebook method

			//Build the background model
			if(nframes-1 < nframesToLearnBG  )
				cvBGCodeBookUpdate( model, yuvImage );


			if( nframes-1 == nframesToLearnBG  )
			{
				cvBGCodeBookClearStale( model, model->t/2 );
				printf (">>Fondo aprendido\n");
			}

			//Find foreground objects with the codebook method
			if( nframes-1 >= nframesToLearnBG  )
			{
				cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );

				cvCopy(ImaskCodeBook,ImaskCodeBookCC);	
				cvSegmentFGMask( ImaskCodeBookCC );

				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

				//hand detection
				detect(ImaskCodeBookCC,rawImage);

				//outline used for drawing the hand
				if(contours)
					cvDrawContours(rawImage,contours, cvScalar(255, 0, 0, 0), cvScalarAll(128), 1 );


			}
			//Display
			cvResetImageROI(rawImage);
			cvShowImage( "CapturaCam", rawImage );
			cvShowImage( "ForegroundCodeBook",ImaskCodeBook);

		}

		// User input:
		c = cvWaitKey(10)&0xFF;
		c = tolower(c);
		// End processing on ESC, q or Q
		if(c == 27 || c == 'q')
			break;
		//Else check for user input
		switch( c )
		{
		case 'c':
			saveLength = true;
			break;        
		case ' ':
			cvBGCodeBookClearStale( model, 0 );
			nframes = 0;
			break;            
		}

		if (c != 'c')
			saveLength=false;
	}		

	cvReleaseCapture( &capture );
	cvReleaseMemStorage(&mstrg);
	cvDestroyWindow( "CapturaCam" );
	cvDestroyWindow( "ForegroundCodeBook");
	cvDestroyWindow( "CodeBook_ConnectComp");
	return 0;
}
Example #5

/* mouse event handler */
void mouseHandler( int event, int x, int y, int flags, void *param)
{
	//Single object tracking code source:  http://nashruddin.com/eyetracking-track-user-eye.htm 

    /*save left eye template */
    if( event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 0) {
        faceTrack.object_x0_left_eye = x - ( TPL_WIDTH  / 2 );
        faceTrack.object_y0_left_eye = y - ( TPL_HEIGHT / 2 );
       
        cvSetImageROI( faceTrack.frame,
                       cvRect( faceTrack.object_x0_left_eye,
                               faceTrack.object_y0_left_eye,
                               TPL_WIDTH,
                               TPL_HEIGHT ) );
        cvCopy( faceTrack.frame, faceTrack.tmplLeftEye, NULL );
        cvResetImageROI( faceTrack.frame );
 
        cout<<"Starting tracking of left eye..."<<endl;
        faceTrack.left_eye_tracking = 1;
		faceTrack.count++;
		}
	   
	/*track right eye*/
	else if( event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 1) {
        faceTrack.object_x0_right_eye = x - ( TPL_WIDTH  / 2 );
        faceTrack.object_y0_right_eye = y - ( TPL_HEIGHT / 2 );
       
        cvSetImageROI( faceTrack.frame,
                       cvRect( faceTrack.object_x0_right_eye,
                               faceTrack.object_y0_right_eye,
                               TPL_WIDTH,
                               TPL_HEIGHT ) );
        cvCopy(faceTrack.frame,faceTrack.tmplRightEye, NULL );
        cvResetImageROI( faceTrack.frame );
 
        cout<<"Starting tracking of right eye..."<<endl;
        faceTrack.right_eye_tracking = 1;
		faceTrack.count++;
    }

	/*track left mouth*/
	else if (event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 2)
	{
		
        faceTrack.object_x0_left_mouth = x - ( TPL_WIDTH  / 2 );
        faceTrack.object_y0_left_mouth = y - ( TPL_HEIGHT / 2 );
       
        cvSetImageROI( faceTrack.frame,
                       cvRect( faceTrack.object_x0_left_mouth,
                               faceTrack.object_y0_left_mouth,
                               TPL_WIDTH,
                               TPL_HEIGHT ) );
        cvCopy( faceTrack.frame, faceTrack.tmplLeftMouth, NULL );
        cvResetImageROI( faceTrack.frame );
 
     
        cout<<"Starting tracking of left mouth..."<<endl;
        faceTrack.left_mouth_tracking = 1;
		faceTrack.count++;

	}
	/*track right mouth*/
	else if (event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 3)
	{
		faceTrack.object_x0_right_mouth = x - ( TPL_WIDTH  / 2 );
        faceTrack.object_y0_right_mouth = y - ( TPL_HEIGHT / 2 );
       
        cvSetImageROI( faceTrack.frame,
                       cvRect( faceTrack.object_x0_right_mouth,
                               faceTrack.object_y0_right_mouth,
                               TPL_WIDTH,
                               TPL_HEIGHT ) );
        cvCopy( faceTrack.frame, faceTrack.tmplRightMouth, NULL );
        cvResetImageROI( faceTrack.frame );
 
      
        cout<<"Starting tracking of right mouth..."<<endl;
        faceTrack.right_mouth_tracking = 1;
		faceTrack.count++;

	}

	/*track nose*/
	else if (event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 4)
	{
		faceTrack.object_x0_nose = x - ( TPL_WIDTH  / 2 );
        faceTrack.object_y0_nose = y - ( TPL_HEIGHT / 2 );
       
        cvSetImageROI( faceTrack.frame,
                       cvRect( faceTrack.object_x0_nose,
                               faceTrack.object_y0_nose,
                               TPL_WIDTH,
                               TPL_HEIGHT ) );
        cvCopy( faceTrack.frame, faceTrack.tmplNose, NULL );
        cvResetImageROI( faceTrack.frame );
 
        
        cout<<"Starting tracking of nose..."<<endl;
		faceTrack.nose_tracking = 1;
		faceTrack.count++;

	}


}
Example #6

// main()
int main( int argc, char** argv )
{

	int starting = 3;
	int flag=0;
	CvPoint pt;
	int x = 0,diffx=0,prevx=0,initx=0;
	int y = 0,diffy=0,prevy=0,inity=0;
 
  	// Open X display
	Display *display = XOpenDisplay (NULL);
	if (display == NULL)
        {
      		fprintf (stderr, "Can't open display!\n");
      		return -1;
    	}
  
  	// Wait 3 seconds to start
  	printf ("Starting in ");
  	fflush (stdout);
  	while (starting > 0)
  	{
  		printf ("\b\b\b %d...", starting);
  		fflush (stdout);
  		sleep (1);
  		starting--;
  	}
  	printf ("\n");
    IplImage* temp=cvCreateImage(cvSize(80,120),8,3);
	IplImage* pframe1;
	
	CvRect *pHandRect=0,*vrect=NULL;
	capture=cvCaptureFromCAM(0);	
	if( !initAll() ) exitProgram(-1);
	
	piframe=cvQueryFrame(capture);
	pframe=invert(piframe);
	pframe1=cvCloneImage(piframe);
	// Capture and display video frames until a hand
	// is detected
	int i=0;
	char c;	
	initPCA();
    char ch;

x:	/* re-detection entry point (reached via goto when tracking is lost) */
	printf("came to x\n");
	while( 1 )
	{		
		// Look for a hand in the next video frame
		pframe=cvQueryFrame(capture);
		pframe1=cvCloneImage(pframe);
    	detect_and_draw(pframe);
		pHandRect = detectHand(pframe);
		
		if((pHandRect)&&(pHandRect->x>4)&&(pHandRect->y>4)&&(pHandRect->x*pHandRect->y<(240*300))&&(pHandRect->x<630)&&(pHandRect->y<470))
		{	
			cvRectangle(pframe1,cvPoint((pHandRect->x-4),pHandRect->y-4),cvPoint((pHandRect->x+pHandRect->width+4),pHandRect->y+pHandRect->height+4),CV_RGB(255,0,0),1,8,0);		
			i++;
		}
		else 
			i=0;
		// Show the display image
		cvShowImage( DISPLAY_WINDOW, pframe1 );
		cvMoveWindow(DISPLAY_WINDOW,0,0);
		c=cvWaitKey(10);
		if(c==27)
		{
			exitProgram(0);
		}
		if(i>=3)
		{	// exit loop when a hand is detected
			if(pHandRect) {
				i=0;
				prevx=pHandRect->x;
				initx=pHandRect->x;
				prevy=pHandRect->y+pHandRect->height;
				flag=3;
				break;
			}
		}
	}

	// initialize tracking
	KalmanFilter kfilter;
	startTracking(pframe, *pHandRect,kfilter);
	// Track the detected hand using CamShift
	while( 1 )
	{
		CvRect handBox;

		// get the next video frame
		pframe=cvQueryFrame(capture);
		pframe1=cvCloneImage(pframe);
		handBox = combi_track(pframe,kfilter);
        int old_ht;
        int a;
		IplImage* temp;
		if(!((handBox.x<0)||(handBox.y<0)||((handBox.x+handBox.width)>pframe->width)||((handBox.y+handBox.height)>pframe->height))) 
        {
            if(handBox.height>(1.3*handBox.width))
            {
                old_ht=handBox.height;
                handBox.height=2.4*handBox.width;
                handBox.y-=handBox.height-old_ht;
            }
            cvSetImageROI(pframe,handBox);
            temp=cvCreateImage(cvGetSize(pframe),8,3);

            cvCopy(pframe,temp,NULL);

	        a=recognize(temp);
	        cvReleaseImage(&temp);
	        if(handBox.height>(2.3*handBox.width))
            {	
            	if(a==3)
            		a=5;
            }
			diffx=handBox.x+(handBox.width/2)-initx;
			diffy=handBox.y+handBox.height-(handBox.width/2)-prevy;
			prevx=handBox.x+(handBox.width/2);
			prevy=handBox.y+handBox.height-(handBox.width/2);

	        cvResetImageROI(pframe);
    		cvRectangle(pframe1,cvPoint(handBox.x,handBox.y),cvPoint(handBox.x+handBox.width,handBox.y+handBox.height),CV_RGB(0,0,255),3,8,0);		
            
	        if(diffx<(-60))
	        {
	        	click(display,1,0);
	        	printf("right click\n");
	        	goto x;
	        }
	        else if(diffx>(60))
	        {
	        	fake(display, 0);
	        	printf("left click\n");
	        	goto x;
	        }

        }
        else
        	goto x;

		cvShowImage( DISPLAY_WINDOW, pframe1 );

		ch=cvWaitKey(10);
		if( ch==27 ) {
			exitProgram(0);			
			break;
		}
		if(ch=='s'){
		    cvSetImageROI(pframe,handBox);
		    cvResize(pframe,temp);
		    cvSaveImage("image6.jpg",temp);
		    cvResetImageROI(pframe);
		}
	}
	return 0;
}
Example #7
File: main.cpp Project: Barbakas/windage
int main()
{
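	// Track a user-selected template region with a homography that is refined
	// each frame by Inverse Compositional or ESM iterations (selected at compile
	// time via USE_IC / USE_ESM); space captures a new template, 'r' resets, 'q' quits.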
	char message[100];
	cvNamedWindow("template");
	cvNamedWindow("sampling");
	cvNamedWindow("result");

	// initialize
	int width = WIDTH;
	int height = HEIGHT;
	int startX = (width-TEMPLATE_WIDTH)/2;
	int startY = (height-TEMPLATE_HEIGHT)/2;
	
	IplImage* inputImage = NULL;
	IplImage* grayImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
	IplImage* templateImage = cvCreateImage(cvSize(TEMPLATE_WIDTH, TEMPLATE_HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* samplingImage = NULL;

	// initial template & homography
	CvRect rect = cvRect(startX, startY, TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	windage::Matrix3 homography(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
	homography._13 = startX;
	homography._23 = startY;
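	// i.e. the identity homography translated to the template's top-left corner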
	windage::Matrix3 e = homography;

	// Template based Tracking using Inverse Compositional
#if USE_IC
	windage::InverseCompositional* tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
	windage::HomographyESM* tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
	tracker->SetInitialHomography(e);

	// homography update stack
	std::vector<windage::Matrix3> homographyList;

	// camera
	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);

	bool isTrained = false;
	bool processing = true;
	while(processing)
	{
		inputImage = cvQueryFrame(capture);	// grab + retrieve; cvRetrieveFrame alone can return NULL before any grab
		cvResize(inputImage, resultImage);
		cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
		
		if(GAUSSIAN_BLUR > 0)
			cvSmooth(grayImage, grayImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

		// processing
		int64 startTime = cvGetTickCount();
		
		float error = 0.0;
		float delta = 1.0;
		int iter = 0;
		homographyList.clear();
		for(iter=0; iter<MAX_ITERATION; iter++)
		{
			error = tracker->UpdateHomography(grayImage, &delta);
			homography = tracker->GetHomography();
			homographyList.push_back(homography);

//			if(delta < HOMOGRAPHY_DELTA)
//				break;
		}
		int64 endTime = cvGetTickCount();
		samplingImage = tracker->GetSamplingImage();

		// draw result
		int count = homographyList.size();
		for(int i=0; i<count; i++)
			DrawResult(resultImage, homographyList[i], CV_RGB(((count-i)/(double)count) * 255.0, (i/(double)count) * 255.0, 0), 1);

		double processingTime = (endTime - startTime)/(cvGetTickFrequency() * 1000.0);
		sprintf_s(message, "processing time : %.2lf ms (%02d iter), error : %.2lf", processingTime, iter, error);
		std::cout << message << std::endl;

#if USE_IC
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Inverse Compositional", 0.6);
#endif
#if USE_ESM
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Efficient Second-order Minimization", 0.6);
#endif
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 35), message, 0.6);

		// draw image
		cvShowImage("sampling", samplingImage);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case ' ':
			cvSetImageROI(grayImage, rect);
			cvCopyImage(grayImage, templateImage);
			cvShowImage("template", templateImage);
			cvResetImageROI(grayImage);

			tracker->AttatchTemplateImage(templateImage);
			tracker->SetInitialHomography(e);
			tracker->Initialize();
			break;
		case 'r':
		case 'R':
			delete tracker;
			tracker = NULL;
#if USE_IC
			tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
			tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
			tracker->SetInitialHomography(e);
			break;
		case 'q':
		case 'Q':
			processing = false;
			break;
		}

	}

	cvReleaseCapture(&capture);

	cvReleaseImage(&grayImage);
	cvReleaseImage(&resultImage);
	cvDestroyAllWindows();
	return 0;
}
Example #8

static int aGestureRecognition(void)
{       
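    /* Regression test: run the gesture-recognition pipeline (3D line fit, hand
       region search, homographic unwarp, HSV histogram back-projection) on
       stored test data and accumulate the error against reference files. */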
    IplImage *image, *imagew, *image_rez, *mask_rez, *image_hsv, *img_p[2],*img_v,
             *init_mask_ver = 0, *final_mask_ver = 0;
    CvPoint3D32f *pp, p;

    CvPoint pt;
    CvSize2D32f fsize;
    CvPoint3D32f center, cf;
    IplImage *image_mask, *image_maskw;
    
    CvSize size;
    CvHistogram *hist, *hist_mask;

    int width, height;
    int k_points, k_indexs;
    int warpFlag, interpolate;

    int hdim[2] = {20, 20};
    
    double coeffs[3][3], rect[2][2], rez = 0, eps_rez = 2.5, rez_h;
    float *thresh[2];
    float hv[3];
    
    float reps, aeps, ww;
    float line[6], in[3][3], h[3][3];
    float cx, cy, fx, fy;

    static char num[4]; 
    
    char *name_image;  
    char *name_range_image;
    char *name_verify_data;
    char *name_init_mask_very;
    char *name_final_mask_very;

    CvSeq *numbers;
    CvSeq *points;
    CvSeq *indexs;
        
    CvMemStorage *storage;
    CvRect hand_roi, hand_roi_trans;
    
    int i,j, lsize, block_size = 1000, flag;
    int code;

    FILE *filin, *fil_ver;

/* read test params */

    code = TRS_OK;

/*  define input information    */
    strcpy (num, "001"); 

    lsize = strlen(data_path)+12;
    name_verify_data = (char*)trsmAlloc(lsize);
    name_range_image = (char*)trsmAlloc(lsize);
    name_image = (char*)trsmAlloc(lsize);

    name_init_mask_very = (char*)trsmAlloc(lsize);
    name_final_mask_very = (char*)trsmAlloc(lsize);

/*  define input range_image file path        */
    strcpy(name_range_image, data_path);
    strcat(name_range_image, "rpts");
    strcat(name_range_image, num);
    strcat(name_range_image, ".txt");

/*  define input image file path        */
    strcpy(name_image, data_path);
    strcat(name_image, "real");
    strcat(name_image, num);
    strcat(name_image, ".bmp");

/*  define verify data file path        */
    strcpy(name_verify_data, data_path);
    strcat(name_verify_data, "very");
    strcat(name_verify_data, num);
    strcat(name_verify_data, ".txt");

/*  define verify init mask file path    */
    strcpy(name_init_mask_very, data_path);
    strcat(name_init_mask_very, "imas");
    strcat(name_init_mask_very, num);
    strcat(name_init_mask_very, ".bmp");

/*  define verify final mask file path    */
    strcpy(name_final_mask_very, data_path);
    strcat(name_final_mask_very, "fmas");
    strcat(name_final_mask_very, num);
    strcat(name_final_mask_very, ".bmp");

    filin = fopen(name_range_image,"r");
    fil_ver = fopen(name_verify_data,"r");

    fscanf( filin, "\n%d %d\n", &width, &height);
    printf("width=%d height=%d  reading testing data...", width,height);

    OPENCV_CALL( storage = cvCreateMemStorage ( block_size ) );
    OPENCV_CALL( points = cvCreateSeq( CV_SEQ_POINT3D_SET, sizeof(CvSeq),
                            sizeof(CvPoint3D32f), storage ) );
    OPENCV_CALL (indexs = cvCreateSeq( CV_SEQ_POINT_SET, sizeof(CvSeq),
                            sizeof(CvPoint), storage ) );

    pp = 0;
    
/*  read input image from file   */   
    image = atsCreateImageFromFile( name_image );
    if(image == NULL)   {code = TRS_FAIL; goto m_exit;}

/*  read input 3D points from input file        */
    for (i = 0; i < height; i++)
    {
        for (j = 0; j < width; j++)    
        {
            fscanf( filin, "%f %f %f\n", &p.x, &p.y, &p.z);
            if(/*p.x != 0 || p.y != 0 ||*/ p.z != 0)
            {
                OPENCV_CALL(cvSeqPush(points, &p));
                pt.x = j; pt.y = i;
                OPENCV_CALL(cvSeqPush(indexs, &pt));
                               
            }
        }
    }

    k_points = points->total;
    k_indexs = indexs->total;

/*   convert sequence to array          */
    pp = (CvPoint3D32f*)trsmAlloc(k_points * sizeof(CvPoint3D32f));
    OPENCV_CALL(cvCvtSeqToArray(points, pp ));

/*  find 3D-line      */

    reps = (float)0.1;
    aeps = (float)0.1;
    ww = (float)0.08;

    OPENCV_CALL( cvFitLine3D(pp, k_points, CV_DIST_WELSCH, &ww, reps, aeps, line ));

/*  find hand location      */
    flag = -1;
    fsize.width = fsize.height = (float)0.22;  //   (hand size in m)

    numbers = NULL;
    OPENCV_CALL( cvFindHandRegion (pp, k_points, indexs,line, fsize,
                      flag,&center,storage, &numbers));

/*   read verify data    */
    fscanf( fil_ver, "%f %f %f\n", &cf.x, &cf.y, &cf.z);
    rez+= cvSqrt((center.x - cf.x)*(center.x - cf.x)+(center.y - cf.y)*(center.y - cf.y)+
         (center.z - cf.z)*(center.z - cf.z))/3.;
    
/*    create hand mask            */
    size.height = height;
    size.width = width;
    OPENCV_CALL( image_mask = cvCreateImage(size, IPL_DEPTH_8U, 1) ); 

    OPENCV_CALL( cvCreateHandMask(numbers, image_mask, &hand_roi) );

/*  read verify initial image mask                  */
    init_mask_ver = atsCreateImageFromFile( name_init_mask_very );
    if(init_mask_ver == NULL)   {code = TRS_FAIL; goto m_exit;}
    
    rez+= iplNorm(init_mask_ver, image_mask, IPL_L2) / (width*height+0.);

/*  calculate homographic transformation matrix            */
    cx = (float)(width / 2.);
    cy = (float)(height / 2.);
    fx = fy = (float)571.2048;

/* define intrinsic camera parameters                      */
    in[0][1] = in[1][0] = in[2][0] = in[2][1] = 0;
    in[0][0] = fx; in[0][2] = cx;
    in[1][1] = fy; in[1][2] = cy;
    in[2][2] = 1;

    OPENCV_CALL( cvCalcImageHomography(line, &center, in, h) );
    
    rez_h = 0;
    for(i=0;i<3;i++)
    {
        fscanf( fil_ver, "%f %f %f\n", &hv[0], &hv[1], &hv[2]);
        for(j=0;j<3;j++)
        {
            rez_h+=(hv[j] - h[i][j])*(hv[j] - h[i][j]);
        }
    }
    rez+=sqrt(rez_h)/9.;

/*   image unwarping         */
    size.width = image->width; 
    size.height = image->height; 
    OPENCV_CALL( imagew = cvCreateImage(size, IPL_DEPTH_8U,3) );
    OPENCV_CALL( image_maskw = cvCreateImage(size, IPL_DEPTH_8U,1) );

    iplSet(image_maskw, 0);

    cvSetImageROI(image, hand_roi);
    cvSetImageROI(image_mask, hand_roi);

/* convert homographic transformation matrix from float to double      */
    for(i=0;i<3;i++)
        for(j=0;j<3;j++)
            coeffs[i][j] = (double)h[i][j];

/*  get bounding rectangle for image ROI         */
    iplGetPerspectiveBound(image, coeffs, rect);

    width = (int)(rect[1][0] - rect[0][0]);
    height = (int)(rect[1][1] - rect[0][1]);
    hand_roi_trans.x = (int)rect[0][0];hand_roi_trans.y = (int)rect[0][1];
    hand_roi_trans.width = width; hand_roi_trans.height = height;

    cvMaxRect(&hand_roi, &hand_roi_trans, &hand_roi);
    iplSetROI((IplROI*)image->roi, 0, hand_roi.x, hand_roi.y,
               hand_roi.width,hand_roi.height);
    iplSetROI((IplROI*)image_mask->roi, 0, hand_roi.x, hand_roi.y,
                hand_roi.width,hand_roi.height);

    warpFlag = IPL_WARP_R_TO_Q;
/*    interpolate = IPL_INTER_CUBIC;   */
/*    interpolate = IPL_INTER_NN;      */
    interpolate = IPL_INTER_LINEAR;
    iplWarpPerspective(image, imagew, coeffs, warpFlag, interpolate);
    iplWarpPerspective(image_mask, image_maskw, coeffs, warpFlag, IPL_INTER_NN);  
    
/*  set new image and mask ROI after transformation        */
    iplSetROI((IplROI*)imagew->roi,0, (int)rect[0][0], (int)rect[0][1],(int)width,(int)height);
    iplSetROI((IplROI*)image_maskw->roi,0, (int)rect[0][0], (int)rect[0][1],(int)width,(int)height);

/*  copy image ROI to new image and resize        */
    size.width = width; size.height = height;
    image_rez = cvCreateImage(size, IPL_DEPTH_8U,3);
    mask_rez = cvCreateImage(size, IPL_DEPTH_8U,1);
 
    iplCopy(imagew,image_rez);
    iplCopy(image_maskw,mask_rez);
    
/* convert rezult image from RGB to HSV               */
    image_hsv = iplCreateImageHeader(3, 0, IPL_DEPTH_8U, "HSV", "HSV",
                                   IPL_DATA_ORDER_PIXEL, IPL_ORIGIN_TL,IPL_ALIGN_DWORD,
                                   image_rez->width, image_rez->height, NULL, NULL, NULL, NULL);
    iplAllocateImage(image_hsv, 0, 0 ); 
    strcpy(image_rez->colorModel, "RGB");
    strcpy(image_rez->channelSeq, "RGB");
    image_rez->roi = NULL;

    iplRGB2HSV(image_rez, image_hsv);

/* convert to three images planes                      */
    img_p[0] = cvCreateImage(size, IPL_DEPTH_8U,1);
    img_p[1] = cvCreateImage(size, IPL_DEPTH_8U,1);
    img_v = cvCreateImage(size, IPL_DEPTH_8U,1);

    cvCvtPixToPlane(image_hsv, img_p[0], img_p[1], img_v, NULL);
   
/*  calculate histograms                */
    hist = cvCreateHist ( 2, hdim, CV_HIST_ARRAY);
    hist_mask = cvCreateHist ( 2, hdim, CV_HIST_ARRAY);

/*  install histogram threshold         */
    thresh[0] = (float*) trsmAlloc(2*sizeof(float));
    thresh[1] = (float*) trsmAlloc(2*sizeof(float));

    thresh[0][0] = thresh[1][0] = -0.5;
    thresh[0][1] = thresh[1][1] = 255.5;
    cvSetHistThresh( hist, thresh, 1);
    cvSetHistThresh( hist_mask, thresh, 1);

    cvCalcHist(img_p, hist, 0);
        
    cvCalcHistMask(img_p, mask_rez, hist_mask, 0);
            
    cvCalcProbDensity(hist, hist_mask, hist_mask);

    cvCalcBackProject( img_p, mask_rez, hist_mask ); 

/*  read verify final image mask                  */
    final_mask_ver = atsCreateImageFromFile( name_final_mask_very );
    if(final_mask_ver == NULL)   {code = TRS_FAIL; goto m_exit;}

    rez+= iplNorm(final_mask_ver, mask_rez, IPL_L2) / (width*height+0.);

    trsWrite( ATS_CON | ATS_SUM, "\n gesture recognition \n");
    trsWrite( ATS_CON | ATS_SUM, "result testing error = %f \n",rez);

    if(rez > eps_rez) code = TRS_FAIL;
    else code = TRS_OK;
    
m_exit:    

    cvReleaseImage(&image_mask);
    cvReleaseImage(&mask_rez);
    cvReleaseImage(&image_rez);
    atsReleaseImage(final_mask_ver);
    atsReleaseImage(init_mask_ver);

    cvReleaseImage(&imagew);
    cvReleaseImage(&image_maskw); 

    cvReleaseImage(&img_p[0]);
    cvReleaseImage(&img_p[1]);
    cvReleaseImage(&img_v);
 
    cvReleaseHist( &hist);
    cvReleaseHist( &hist_mask);
    
    cvReleaseMemStorage ( &storage );

    trsFree(pp);
    trsFree(name_final_mask_very);
    trsFree(name_init_mask_very);
    trsFree(name_image);
    trsFree(name_range_image);
    trsFree(name_verify_data);

    fclose(filin);
    fclose(fil_ver);

    
/*    _getch();       */
    return code;
}
Example #9
/*
 * Performs the face detection
 */
static GstFlowReturn
gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
                              IplImage * img)
{
    GstFaceDetect *filter = GST_FACE_DETECT (base);

    if (filter->cvFaceDetect) {
        GstMessage *msg = NULL;
        GstStructure *s;
        GValue facelist = { 0 };
        GValue facedata = { 0 };
        CvSeq *faces;
        CvSeq *mouth = NULL, *nose = NULL, *eyes = NULL;
        gint i;
        gboolean do_display = FALSE;
        gboolean post_msg = FALSE;

        if (filter->display) {
            if (gst_buffer_is_writable (buf)) {
                do_display = TRUE;
            } else {
                GST_LOG_OBJECT (filter, "Buffer is not writable, not drawing faces.");
            }
        }

        cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
        cvClearMemStorage (filter->cvStorage);

        faces = gst_face_detect_run_detector (filter, filter->cvFaceDetect,
                                              filter->min_size_width, filter->min_size_height);

        switch (filter->updates) {
        case GST_FACEDETECT_UPDATES_EVERY_FRAME:
            post_msg = TRUE;
            break;
        case GST_FACEDETECT_UPDATES_ON_CHANGE:
            if (faces && faces->total > 0) {
                if (!filter->face_detected)
                    post_msg = TRUE;
            } else {
                if (filter->face_detected) {
                    post_msg = TRUE;
                }
            }
            break;
        case GST_FACEDETECT_UPDATES_ON_FACE:
            if (faces && faces->total > 0) {
                post_msg = TRUE;
            } else {
                post_msg = FALSE;
            }
            break;
        case GST_FACEDETECT_UPDATES_NONE:
            post_msg = FALSE;
            break;
        default:
            post_msg = TRUE;
            break;
        }

        filter->face_detected = faces ? faces->total > 0 : FALSE;

        if (post_msg) {
            msg = gst_face_detect_message_new (filter, buf);
            g_value_init (&facelist, GST_TYPE_LIST);
        }

        for (i = 0; i < (faces ? faces->total : 0); i++) {
            CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
            guint mw = filter->min_size_width / 8;
            guint mh = filter->min_size_height / 8;
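            /* facial features are searched at 1/8 of the configured minimum face size */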
            guint rnx = 0, rny = 0, rnw, rnh;
            guint rmx = 0, rmy = 0, rmw, rmh;
            guint rex = 0, rey = 0, rew, reh;
            gboolean have_nose, have_mouth, have_eyes;

            /* detect face features */

            if (filter->cvNoseDetect) {
                rnx = r->x + r->width / 4;
                rny = r->y + r->height / 4;
                rnw = r->width / 2;
                rnh = r->height / 2;
                cvSetImageROI (filter->cvGray, cvRect (rnx, rny, rnw, rnh));
                nose =
                    gst_face_detect_run_detector (filter, filter->cvNoseDetect, mw, mh);
                have_nose = (nose && nose->total);
                cvResetImageROI (filter->cvGray);
            } else {
                have_nose = FALSE;
            }

            if (filter->cvMouthDetect) {
                rmx = r->x;
                rmy = r->y + r->height / 2;
                rmw = r->width;
                rmh = r->height / 2;
                cvSetImageROI (filter->cvGray, cvRect (rmx, rmy, rmw, rmh));
                mouth =
                    gst_face_detect_run_detector (filter, filter->cvMouthDetect, mw,
                                                  mh);
                have_mouth = (mouth && mouth->total);
                cvResetImageROI (filter->cvGray);
            } else {
                have_mouth = FALSE;
            }

            if (filter->cvEyesDetect) {
                rex = r->x;
                rey = r->y;
                rew = r->width;
                reh = r->height / 2;
                cvSetImageROI (filter->cvGray, cvRect (rex, rey, rew, reh));
                eyes =
                    gst_face_detect_run_detector (filter, filter->cvEyesDetect, mw, mh);
                have_eyes = (eyes && eyes->total);
                cvResetImageROI (filter->cvGray);
            } else {
                have_eyes = FALSE;
            }

            GST_LOG_OBJECT (filter,
                            "%2d/%2d: x,y = %4u,%4u: w.h = %4u,%4u : features(e,n,m) = %d,%d,%d",
                            i, faces->total, r->x, r->y, r->width, r->height,
                            have_eyes, have_nose, have_mouth);
            if (post_msg) {
                s = gst_structure_new ("face",
                                       "x", G_TYPE_UINT, r->x,
                                       "y", G_TYPE_UINT, r->y,
                                       "width", G_TYPE_UINT, r->width,
                                       "height", G_TYPE_UINT, r->height, NULL);
                if (have_nose) {
                    CvRect *sr = (CvRect *) cvGetSeqElem (nose, 0);
                    GST_LOG_OBJECT (filter, "nose/%d: x,y = %4u,%4u: w.h = %4u,%4u",
                                    nose->total, rnx + sr->x, rny + sr->y, sr->width, sr->height);
                    gst_structure_set (s,
                                       "nose->x", G_TYPE_UINT, rnx + sr->x,
                                       "nose->y", G_TYPE_UINT, rny + sr->y,
                                       "nose->width", G_TYPE_UINT, sr->width,
                                       "nose->height", G_TYPE_UINT, sr->height, NULL);
                }
                if (have_mouth) {
                    CvRect *sr = (CvRect *) cvGetSeqElem (mouth, 0);
                    GST_LOG_OBJECT (filter, "mouth/%d: x,y = %4u,%4u: w.h = %4u,%4u",
                                    mouth->total, rmx + sr->x, rmy + sr->y, sr->width, sr->height);
                    gst_structure_set (s,
                                       "mouth->x", G_TYPE_UINT, rmx + sr->x,
                                       "mouth->y", G_TYPE_UINT, rmy + sr->y,
                                       "mouth->width", G_TYPE_UINT, sr->width,
                                       "mouth->height", G_TYPE_UINT, sr->height, NULL);
                }
                if (have_eyes) {
                    CvRect *sr = (CvRect *) cvGetSeqElem (eyes, 0);
                    GST_LOG_OBJECT (filter, "eyes/%d: x,y = %4u,%4u: w.h = %4u,%4u",
                                    eyes->total, rex + sr->x, rey + sr->y, sr->width, sr->height);
                    gst_structure_set (s,
                                       "eyes->x", G_TYPE_UINT, rex + sr->x,
                                       "eyes->y", G_TYPE_UINT, rey + sr->y,
                                       "eyes->width", G_TYPE_UINT, sr->width,
                                       "eyes->height", G_TYPE_UINT, sr->height, NULL);
                }

                g_value_init (&facedata, GST_TYPE_STRUCTURE);
                g_value_take_boxed (&facedata, s);
                gst_value_list_append_value (&facelist, &facedata);
                g_value_unset (&facedata);
                s = NULL;
            }

            if (do_display) {
                CvPoint center;
                CvSize axes;
                gdouble w, h;
                gint cb = 255 - ((i & 3) << 7);
                gint cg = 255 - ((i & 12) << 5);
                gint cr = 255 - ((i & 48) << 3);

                w = r->width / 2;
                h = r->height / 2;
                center.x = cvRound ((r->x + w));
                center.y = cvRound ((r->y + h));
                axes.width = w;
                axes.height = h * 1.25; /* tweak for face form */
                cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
                           3, 8, 0);

                if (have_nose) {
                    CvRect *sr = (CvRect *) cvGetSeqElem (nose, 0);

                    w = sr->width / 2;
                    h = sr->height / 2;
                    center.x = cvRound ((rnx + sr->x + w));
                    center.y = cvRound ((rny + sr->y + h));
                    axes.width = w;
                    axes.height = h * 1.25;       /* tweak for nose form */
                    cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
                               1, 8, 0);
                }
                if (have_mouth) {
                    CvRect *sr = (CvRect *) cvGetSeqElem (mouth, 0);

                    w = sr->width / 2;
                    h = sr->height / 2;
                    center.x = cvRound ((rmx + sr->x + w));
                    center.y = cvRound ((rmy + sr->y + h));
                    axes.width = w * 1.5; /* tweak for mouth form */
                    axes.height = h;
                    cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
                               1, 8, 0);
                }
                if (have_eyes) {
                    CvRect *sr = (CvRect *) cvGetSeqElem (eyes, 0);

                    w = sr->width / 2;
                    h = sr->height / 2;
                    center.x = cvRound ((rex + sr->x + w));
                    center.y = cvRound ((rey + sr->y + h));
                    axes.width = w * 1.5; /* tweak for eyes form */
                    axes.height = h;
                    cvEllipse (img, center, axes, 0.0, 0.0, 360.0, CV_RGB (cr, cg, cb),
                               1, 8, 0);
                }
            }
            gst_buffer_add_video_region_of_interest_meta (buf, "face",
                    (guint) r->x, (guint) r->y, (guint) r->width, (guint) r->height);
        }

        if (post_msg) {
            gst_structure_set_value ((GstStructure *) gst_message_get_structure (msg),
                                     "faces", &facelist);
            g_value_unset (&facelist);
            gst_element_post_message (GST_ELEMENT (filter), msg);
        }
    }

    return GST_FLOW_OK;
}
Example #10

/* computes the color difference map for the region of interest */
void compute_color_diff_map(Tracker *tracker, struct StaticData *data)
{

    CvSize sz;

    /* for all markers (LEDs) */
    for(int nm=0; nm<data->NUM_OF_MARKERS; ++nm)
    {
        /* if marker was found in the prior frame */
        if(tracker->marker[nm]->pos_is_set)
        {
            if(tracker->marker[nm]->vel_is_set)
            {
                /* if marker velocity is known predict new position */
                tracker->marker[nm]->pos_predicted.x = tracker->marker[nm]->pos_measured.x + tracker->marker[nm]->vel.x;
                tracker->marker[nm]->pos_predicted.y = tracker->marker[nm]->pos_measured.y + tracker->marker[nm]->vel.y;
            }
            else
            {
                /* otherwise take last known position for the center of the ROI as best guess */
                tracker->marker[nm]->pos_predicted.x = tracker->marker[nm]->pos_measured.x;
                tracker->marker[nm]->pos_predicted.y = tracker->marker[nm]->pos_measured.y;
            }

            /**
             * dynamically adapt roi size according to marker acceleration
             * linear upper boundary with slope 1.5 and intersect 4.
             * roi size is actually 2 times the computed value, because LED can be located at
             * x_predicted + x_prediction_error or at
             * x_predicted - x_prediction_error.
             * The same holds true for the y direction.
            **/
            uint8_t adapt_roi_width;
            uint8_t adapt_roi_height;
            if(tracker->marker[nm]->acc_is_set)
            {
                /**
                 * if marker acc is known adapt roi size dynamically according to linear
                 * upper bound of prediction error made (measured)
                **/
                adapt_roi_width = 2 * round(fabs(1.5 *tracker->marker[nm]->acc.x) + 4.0);
                adapt_roi_height = 2 * round(fabs(1.5 * tracker->marker[nm]->acc.y) + 4.0);
            }
            else
            {
                /* otherwise take default roi size */
                adapt_roi_width = ROI_WIDTH;
                adapt_roi_height = ROI_HEIGHT;
            }

            /* check whether the roi lies within the image boundaries and update the roi position */
            if(		(int)tracker->marker[nm]->pos_predicted.x-adapt_roi_width/2>=0 &&
                    (int)tracker->marker[nm]->pos_predicted.x+adapt_roi_width/2<FRAME_WIDTH &&
                    (int)tracker->marker[nm]->pos_predicted.y-adapt_roi_height/2>=0 &&
                    (int)tracker->marker[nm]->pos_predicted.y+adapt_roi_height/2<FRAME_HEIGHT
              )
            {
                tracker->marker[nm]->roi = cvRect(	(int)tracker->marker[nm]->pos_predicted.x-adapt_roi_width/2,
                                                    (int)tracker->marker[nm]->pos_predicted.y-adapt_roi_height/2,
                                                    adapt_roi_width,
                                                    adapt_roi_height
                                                 );
                /* set the region of interest to the computed size and origin */
                cvSetImageROI(tracker->frame,tracker->marker[nm]->roi);
                tracker->marker[nm]->roi_set = 1;
            }
            /* otherwise extend search on whole image */
            else
            {
                tracker->marker[nm]->roi = cvRect(0,0,FRAME_WIDTH,FRAME_HEIGHT);
                tracker->marker[nm]->roi_set = 0;
            }

        }
        /* otherwise search on whole image */
        else
        {
            tracker->marker[nm]->roi = cvRect(0,0,FRAME_WIDTH,FRAME_HEIGHT);
            tracker->marker[nm]->roi_set = 0;
        }


        sz = cvSize(tracker->marker[nm]->roi.width,tracker->marker[nm]->roi.height);

        /**
         * Define intermediate images.
         * ROI is converted to floating point HSV color space.
        **/
        IplImage *hsv = cvCreateImage(sz,IPL_DEPTH_8U,3);
        IplImage *hsv_f = cvCreateImage(sz,IPL_DEPTH_32F,3);

        /* create image header to hold distance map according to the computed ROI size */
        tracker->marker[nm]->result = cvCreateImage(sz,IPL_DEPTH_32F,1);

        /* reset distance map to zero */
        cvZero(tracker->marker[nm]->result);

        float h_val,s_val,v_val;
        float res;

        /* Convert ROI to floating point HSV image */
        cvCvtColor(tracker->frame,hsv,CV_BGR2HSV);
        cvConvertScale(hsv,hsv_f,1.0);


        /**
         * compute the mean squared error (color distance) for all markers between the pixel value and the specified marker color (red,green,blue)
         * for all pixels in the region of interest.
         * store result in  the floating point image tracker->result.
        **/
        for(int i=0; i<tracker->marker[nm]->roi.width; ++i)
        {
            for(int j=0; j<tracker->marker[nm]->roi.height; ++j)
            {
                /* normalize pixel values between 0 and 1; h in [0,180], s,v in [0,255] */
                h_val = ((float*)(hsv_f->imageData + hsv_f->widthStep*j))[i*3] / 181.0;
                s_val = ((float*)(hsv_f->imageData + hsv_f->widthStep*j))[i*3+1] / 256.0;
                v_val = ((float*)(hsv_f->imageData + hsv_f->widthStep*j))[i*3+2] / 256.0;

                /**
                 * exclude values that are not in the same color range (H component) and values that are too dark (V component)
                **/
                if(fabs(h_val-tracker->marker[nm]->colorvalue.h)<0.1 && v_val>0.1)
                {
                    /**
                     * compute root mean squared errors and store result in tracker->result.
                     * res = sqrt(3) - e; minima are converted into maxima and vice versa
                    **/
                    res = 1.73 - sqrt(
                              (h_val-tracker->marker[nm]->colorvalue.h)*(h_val-tracker->marker[nm]->colorvalue.h)
                              + 	(s_val-tracker->marker[nm]->colorvalue.s)*(s_val-tracker->marker[nm]->colorvalue.s)
                              + 	(v_val-tracker->marker[nm]->colorvalue.v)*(v_val-tracker->marker[nm]->colorvalue.v)
                          );
                    ((float*)(tracker->marker[nm]->result->imageData + tracker->marker[nm]->result->widthStep*j))[i] = res;

                }
                else
                {
                    res = 0.0;	/* the result map was zeroed above, so nothing needs to be written */
                }

            }
        }
        /* reset region of interest of the camera frame */
        cvResetImageROI(tracker->frame);

        /* clean up memory of intermediate images */
        cvReleaseImage(&hsv);
        cvReleaseImage(&hsv_f);
    }

}
Example #11

/* remove the upper and lower borders of the plate */
void remove_border_ul(IplImage * img_plate)
{
	int i = 0, j = 0;
	/* heights of the upper and lower borders, respectively */
	int up_bound = -1, low_bound = -1;
	int white_to_black = 0;
	int black_to_white = 0;
	/* scan i from row 0 down to half the height, projecting each row to find the upper border */
	//cvNamedWindow("img_plate", 1);
	//cvShowImage("img_plate", img_plate);
	//cvWaitKey(0);
	for (i = 0; i < (img_plate->height) / 2; i = i + 3) {

		unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
		white_to_black = 0;
		black_to_white = 0;

		/* count the black-to-white and white-to-black transitions in this row */
		for (j = 0; j + 3 < img_plate->width; j = j + 3) {
			if (prow[j] == 0 && prow[j + 3] == 255) {
				black_to_white++;
			} else if (prow[j] == 255 && prow[j + 3] == 0) {
				white_to_black++;
			}
		}

		/* a threshold of 6 requires a fairly sharp image */
		if (black_to_white >= 6 && white_to_black >= 6 && up_bound < 0) {
			up_bound = i;
		} else if (black_to_white < 6 && white_to_black < 6 && up_bound > 0) {
//			printf("black_to_white : %d whilte_to_black: %d , up_bound %d\n",black_to_white, white_to_black, up_bound);
			up_bound = -1;
		}
	}

	/* scan i from the bottom row up to half the height to find the lower border */
	for (i = img_plate->height - 1; i > (img_plate->height) / 2; i = i - 3) {
		unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
		white_to_black = 0;
		black_to_white = 0;

		/* count the black-to-white and white-to-black transitions in this row */
		for (j = 0; j + 3 < img_plate->width; j = j + 3) {
			if (prow[j] == 0 && prow[j + 3] == 255) {
				black_to_white++;
			} else if (prow[j] == 255 && prow[j + 3] == 0) {
				white_to_black++;
			}
		}

		if (black_to_white >= 6 && white_to_black >= 6 && low_bound < 0) {
			low_bound = i;
		} else if (black_to_white < 6 && white_to_black < 6 && low_bound > 0) {
			low_bound = -1;
		}
	
		//printf("%d\n", low_bound);
	}
	
#if 0
	cvNamedWindow("img", 1);
	printf("up_bound is %d, low_bound is %d\n", up_bound, low_bound);

	/* draw the detected bounds */
	/*	void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 );*/
	cvLine(img_plate, cvPoint(0, up_bound), cvPoint(img_plate->width - 3, up_bound), CV_RGB(0xbF, 0xfd, 0xba), 3, 8, 0);
	cvLine(img_plate, cvPoint(0, low_bound), cvPoint(img_plate->width - 3, low_bound), CV_RGB(0xbF, 0xfd, 0xba), 3, 8, 0);
	cvShowImage("img", img_plate);
	cvWaitKey(0);
#endif
	/* this is where things tend to go wrong! */

//printf("%d %d %d %d", 0, up_bound, img_plate->width - 2, low_bound - up_bound - 2);
//	printf("low_bound:%d up_bound:%d\n", low_bound, up_bound);

	assert(low_bound >= 0 && up_bound >= 0);
	cvSetImageROI(img_plate, cvRect(0, up_bound, img_plate->width - 2, low_bound - up_bound - 2));		/* the -2 keeps the ROI inside the image */
	IplImage * tmp_img = cvCreateImage(cvSize(img_plate->width - 2, low_bound - up_bound - 2), img_plate->depth, img_plate->nChannels);
	cvCopy(img_plate, tmp_img);
	cvSaveImage("image/img_after_border_removed.bmp", tmp_img);
	cvResetImageROI(img_plate);
//	printf("setROI in remove bound success\n");
}
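remove_border_ul() works row-wise; trimming left/right borders would apply the same transition count column-wise. A hedged sketch of that helper (column_transitions is illustrative and not part of the original project; it assumes the same binarized single-channel input and the same stride of 3 as above):

/* Hedged sketch: count black<->white transitions in column j of a binarized
 * single-channel image, the column-wise analogue of remove_border_ul(). */
static int column_transitions(IplImage *img, int j)
{
	int i, transitions = 0;
	for (i = 0; i + 3 < img->height; i += 3) {
		unsigned char a = ((unsigned char *)(img->imageData + i * img->widthStep))[j];
		unsigned char b = ((unsigned char *)(img->imageData + (i + 3) * img->widthStep))[j];
		if ((a == 0 && b == 255) || (a == 255 && b == 0))
			transitions++;
	}
	return transitions;
}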
int main(int argc, char** argv)
{
	printf("\nprogram started\n");
	pthread_t thread_s;
	int width, height, key = 0;	/* key must be initialized before the while (key != 'q') test */
	int resWidth, resHeight;
  	
    if (argc != 6) {
        quit("Usage: stream_server <server_port> <width> <height> <screen width> <screen height>", 0);
    }

    /* get the parameters */
 //   server_ip   = argv[1];
    server_port = atoi(argv[1]);
    width       = atoi(argv[2]);
    height      = atoi(argv[3]);
    resWidth    = atoi(argv[4]);
    resHeight   = atoi(argv[5]);

    img[0] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    img[1] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    img[2] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    img[3] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    DispImage = cvCreateImage(cvSize(resWidth,resHeight), IPL_DEPTH_8U, 3 );
	if (pthread_create(&thread_s, NULL, waitServer, NULL))
	{
		quit("pthread_create failed.", 1);
	}
	printf("\nThread started\n");

	while (key != 'q')
	{
		pthread_mutex_lock(&mutex);
		if (is_data_ready)
		{
			if (temp == 1)
			{
				cvSetImageROI(DispImage, cvRect(0, 0, resWidth, resHeight));
				//resize the input image and copy it into the single big image
				cvResize(img[0], DispImage, CV_INTER_LINEAR);
				//reset the ROI in order to display the next image
				cvResetImageROI(DispImage);
				cvShowImage("stream_server_cam", DispImage);
				//cvShowImage("stream_server_cam", img[0]);
			}
			else if (temp == 2)
			{
				cvSetImageROI(DispImage, cvRect(0, 0, resWidth/2, resHeight));
				cvResize(img[0], DispImage, CV_INTER_LINEAR);
				cvSetImageROI(DispImage, cvRect(resWidth/2, 0, resWidth/2, resHeight));
				cvResize(img[1], DispImage, CV_INTER_LINEAR);
				cvResetImageROI(DispImage);
				cvShowImage("stream_server_cam", DispImage);
			}
			else if (temp == 3)
			{
				cvSetImageROI(DispImage, cvRect(0, 0, resWidth/2, resHeight/2));
				cvResize(img[0], DispImage, CV_INTER_LINEAR);
				cvSetImageROI(DispImage, cvRect(resWidth/2, 0, resWidth/2, resHeight/2));
				cvResize(img[1], DispImage, CV_INTER_LINEAR);
				//bottom-left cell: the ROI height must be resHeight/2 so the
				//rectangle stays inside DispImage
				cvSetImageROI(DispImage, cvRect(0, resHeight/2, resWidth/2, resHeight/2));
				cvResize(img[2], DispImage, CV_INTER_LINEAR);
				cvResetImageROI(DispImage);
				cvShowImage("stream_server_cam", DispImage);
			}
			else if (temp == 4)
			{
				cvSetImageROI(DispImage, cvRect(0, 0, resWidth/2, resHeight/2));
				cvResize(img[0], DispImage, CV_INTER_LINEAR);
				cvSetImageROI(DispImage, cvRect(resWidth/2, 0, resWidth/2, resHeight/2));
				cvResize(img[1], DispImage, CV_INTER_LINEAR);
				cvSetImageROI(DispImage, cvRect(0, resHeight/2, resWidth/2, resHeight/2));
				cvResize(img[2], DispImage, CV_INTER_LINEAR);
				//bottom-right cell: width and height are each half the display size
				cvSetImageROI(DispImage, cvRect(resWidth/2, resHeight/2, resWidth/2, resHeight/2));
				cvResize(img[3], DispImage, CV_INTER_LINEAR);
				cvResetImageROI(DispImage);
				cvShowImage("stream_server_cam", DispImage);
			}
			//usleep(100);
			is_data_ready = 0;
		}
		pthread_mutex_unlock(&mutex);

		key = cvWaitKey(100);
	}

	if (pthread_cancel(thread_s)) {
		quit("pthread_cancel failed.", 1);
	}

    /* free memory */
    quit(NULL, 0);
}
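The temp==1..4 branches above hard-code every grid cell. A hedged sketch of a helper that computes the ROI of cell k in a rows x cols layout, which would collapse those branches (grid_cell is illustrative, not part of the original code):

/* Hedged sketch: ROI of cell k in a rows x cols grid laid over a
 * resWidth x resHeight display image. */
static CvRect grid_cell(int k, int rows, int cols, int resWidth, int resHeight)
{
	int cw = resWidth / cols, ch = resHeight / rows;
	return cvRect((k % cols) * cw, (k / cols) * ch, cw, ch);
}
/* possible usage for the 2x2 case:
 *     for (i = 0; i < temp; i++) {
 *         cvSetImageROI(DispImage, grid_cell(i, 2, 2, resWidth, resHeight));
 *         cvResize(img[i], DispImage, CV_INTER_LINEAR);
 *     }
 *     cvResetImageROI(DispImage);
 */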
Example #13
    CvSeq* detect()
    {
        if (!cascade) return 0;
        double scale = this->scale == 0? 1.0 : this->scale;
        IplImage* gray = cvCreateImage(cvSize(width, height ), 8, 1);
        IplImage* small = cvCreateImage(cvSize(cvRound(width * scale), cvRound(height * scale)), 8, 1);
        int min = cvRound(smallest * 1000);            
        CvSeq* faces = 0;
        
        // use a region of interest to improve performance
        // This idea comes from the More than Technical blog:
        // http://www.morethantechnical.com/2009/08/09/near-realtime-face-detection-on-the-iphone-w-opencv-port-wcodevideo/
        if ( roi.width > 0 && roi.height > 0)
        {
            cvSetImageROI(small, roi);
            CvRect scaled_roi = cvRect(roi.x / scale, roi.y / scale,
                                       roi.width / scale, roi.height / scale);
            cvSetImageROI(image, scaled_roi);
            cvSetImageROI(gray, scaled_roi);
        }
        
        // use an equalized grayscale to improve detection
        cvCvtColor(image, gray, CV_BGR2GRAY);
        // use a smaller image to improve performance
        cvResize(gray, small, CV_INTER_LINEAR);
        cvEqualizeHist(small, small);
        
        // detect with OpenCV
        cvClearMemStorage(storage);
        faces = cvHaarDetectObjects(small, cascade, storage,
                                    search_scale * 10.0,
                                    cvRound(neighbors * 100),
                                    CV_HAAR_DO_CANNY_PRUNING,
                                    cvSize(min, min));
        
#ifdef USE_ROI
        if (!faces || faces->total == 0)
        {
            // clear the region of interest
            roi.width = roi.height = 0;
        }
        else if (faces && faces->total > 0)
        {
            // determine the region of interest from the first detected object
            // XXX: based on the first object only?
            CvRect* r = (CvRect*) cvGetSeqElem(faces, 0);
            
            if (roi.width > 0 && roi.height > 0)
            {
                r->x += roi.x;
                r->y += roi.y;
            }
            int startX = MAX(r->x - PAD, 0);
            int startY = MAX(r->y - PAD, 0);
            int w = small->width - startX - r->width - PAD * 2;
            int h = small->height - startY - r->height - PAD * 2;
            int sw = r->x - PAD, sh = r->y - PAD;
            
            // store the region of interest
            roi.x = startX;
            roi.y = startY;
            roi.width = r->width + PAD * 2 + ((w < 0) ? w : 0) + ((sw < 0) ? sw : 0);
            roi.height = r->height + PAD * 2 + ((h < 0) ? h : 0) + ((sh < 0) ? sh : 0); 
        }
#endif
        cvReleaseImage(&gray);
        cvReleaseImage(&small);
        cvResetImageROI(image);
        return faces;
    }
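Rectangles returned by detect() are in the coordinates of the scaled-down image, so a caller has to map them back before drawing. A hedged usage sketch inside the same class (draw_detections is illustrative; the member names follow those used above):

    /* Hedged usage sketch: map detections from the scaled image back to the
       full-resolution image before drawing. */
    void draw_detections()
    {
        CvSeq* faces = detect();
        double s = (this->scale == 0) ? 1.0 : this->scale;
        for (int i = 0; faces && i < faces->total; i++)
        {
            CvRect* r = (CvRect*)cvGetSeqElem(faces, i);
            cvRectangle(image,
                        cvPoint((int)(r->x / s), (int)(r->y / s)),
                        cvPoint((int)((r->x + r->width) / s), (int)((r->y + r->height) / s)),
                        CV_RGB(0, 255, 0), 2, 8, 0);
        }
    }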
Example #14
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  //bridge that converts the ROS image message back into an OpenCV image
  sensor_msgs::CvBridge bridge;
  fprintf(stderr, "\n callback function \n");
  //publish data (obstacle waypoints) back to the boat
  //ros::NodeHandle n;
  //std_msgs::Float32 xWaypoint_msg;         // X coordinate obstacle message
  //std_msgs::Float32 yWaypoint_msg;         // Y coordinate obstacle message
  //publish the waypoint data             
  //ros::Publisher waypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //ros::Publisher Ywaypoint_info_pub = n.advertise<std_msgs::Float32>("waypoint_info", 1000);
  //std::stringstream ss;
  
  /***********************************************************************/
  //live image streamed straight from the boat's camera
  IplImage* boatFront = bridge.imgMsgToCv(msg, "bgr8");
  //clone the frame: a second imgMsgToCv() call hands back the same buffer, so
  //the backup would otherwise alias boatFront and get converted twice
  IplImage* backUpImage = cvCloneImage(boatFront);
  boatFront->origin = IPL_ORIGIN_TL;   //sets image origin to top left corner
  //Crop the image to the ROI
  //cvSetImageROI(boatFront, cvRect(0,0,boatFront->height/0.5,boatFront->width/1.83));
  int X = boatFront->height;
  int Y = boatFront->width;
  /***********************************************************************/
  //boat's edge distance from the camera. This is used for visual calibration
  //to know the distance from the boat to the nearest obstacles.
  //With respect to the mounted camera, distance is 21 inches (0.5334 m) side to side
  //and 15 inches (0.381 m).
  //float boatFrontDistance = 0.381;    //distance in meters
  //float boatSideDistance = 0.5334;    //distance in meters
  
  // These variables tell the distance from the center bottom of the image
  // (the camera) to the square surrounding the obstacle
  float xObstacleDistance = 0.0;
  float yObstacleDistance = 0.0;
  float obstacleDistance = 0.0;
  
  int pixelsNumber = 6;  //number of pixels for an n x n matrix and # of neighbors
  const int arraySize = pixelsNumber;
  const int threeArraySize = pixelsNumber;
  //if n gets changed, then the algorithm might have to be
  //recalibrated. Try to keep it constant
  //these variables are used for the k nearest neighbors
  //int accuracy;
  //responses for each of the classifications
  float responseWaterH, responseWaterS, responseWaterV; 
  float responseGroundH, responseGroundS, responseGroundV;
  //float responseSkyH, responseSkyS, responseSkyV;
  float averageHue = 0.0;
  float averageSat = 0.0;
  float averageVal = 0.0;
  CvMat* trainClasses = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
  CvMat* trainClasses2 = cvCreateMat( pixelsNumber, 1, CV_32FC1 );
  //for (int i = 0; i < pixelsNumber/2; i++)
  //{
    //  cvmSet(trainClasses, i,0,1);
     // cvmSet(trainClasses2, i,0,1);
  //}
  //for (int i = pixelsNumber/2; i < pixelsNumber; i++)
  //{
    //  cvmSet(trainClasses, i,0,2);
     // cvmSet(trainClasses2, i,0,2);
  //}
  //for (int i =0; i<pixelsNumber;i++)
  //{
    //   cout << cvmGet(trainClasses,i,0);
      // cout << cvmGet(trainClasses2,i,0);   
  //}
  //CvMat sample = cvMat( 1, 2, CV_32FC1, _sample );
  //used with the classifier 
  CvMat* nearestWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  CvMat* nearestGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* nearestSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //Distance
  //CvMat* distanceWaterH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceWaterS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceWaterV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceGroundV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyH = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyS = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //CvMat* distanceSkyV = cvCreateMat(1, pixelsNumber, CV_32FC1);
  //these variables are used to traverse the picture by blocks of n x n pixels at
  //a time. 
  //Index(0,0) does not exist, so make sure kj and ki start from 1 (in the
  //right way, of course)
  //x and y are the dimensions of the local patch of pixels
  int x = (boatFront->height)/4; //(boatFront->height)/2.5 + pixelsNumber + 99; 
  int y = pixelsNumber-1; 
  int ix = 0; 
  int iy = 0; 
  int skyX = 0; 
  int skyY = 0;
  //M controls the x axis (up and down); N controls the y axis (left and
  //right)
  int Mw = -550; 
  int Nw = 1300; 
  int Mg = -350; 
  int Ng = 700;
  int row1 = 0;
  int column1 = 0;
  int row2 = 0;
  int column2 = 0;
  //ground sample
  //CvMat* groundTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //CvMat* groundTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //CvMat* groundTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //water sample
  CvMat* waterTrainingHue = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* waterTrainingSat = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  CvMat* waterTrainingVal = cvCreateMat(threeArraySize,arraySize,CV_32FC1);
  //n x n sample patch taken from the picture
  CvMat* sampleHue = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* sampleSat = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* sampleVal = cvCreateMat(1,arraySize,CV_32FC1);
  CvMat* resampleHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* resampleSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* resampleVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
  //sky training sample
  CvMat* skyTrainingHue = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* skyTrainingSat = cvCreateMat(arraySize,arraySize,CV_32FC1);
  CvMat* skyTrainingVal = cvCreateMat(arraySize,arraySize,CV_32FC1);
  //initialize each matrix element to zero for ease of use
  //cvZero(groundTrainingHue);
  //cvZero(groundTrainingSat);
  //cvZero(groundTrainingVal);
  cvZero(waterTrainingHue);
  cvZero(waterTrainingSat);
  cvZero(waterTrainingVal);
  cvZero(sampleHue);
  cvZero(sampleSat);
  cvZero(sampleVal);
  cvZero(resampleHue);
  cvZero(resampleSat);
  cvZero(resampleVal);
  cvZero(skyTrainingHue);
  cvZero(skyTrainingSat);
  cvZero(skyTrainingVal);    
  //Stores the votes for each channel (whether it belongs to water or not
  //1 is part of water, 0 not part of water
  //if sum of votes is bigger than 1/2 the number of elements, then it belongs to water
  int votesSum = 0;
  int comparator[3];        //used when only three votes are needed
  //int comparatorTwo [3][3];    //used when six votes are needed
  //initial sum of votes is zero
  //Initializing both matrices inside a single for loop causes an error. Don't know why.
 // for(int i = 0; i < 3; i++)
  //{   
      //comparator[i] = 0;
    //  for(int j = 0; j < 3; j++)
     // {
       //   comparatorTwo[i][j] = 0;
     // }
  //}
  for(int i = 0; i < 3; i++)
  {   
      comparator[i] = 0;
  }
  
  /***********************************************************************/
  //Convert from RGB to HSV to control the brightness of the objects.
  //work with reflection
  /*Sky recognition. Might be useful for detecting reflections on the water. If
    the sky is detected, and the reflection has the same characteristics as
    something below the horizon, that "something" might be water. Assume the sky
    won't go below the horizon.*/
  
  //convert from RGB to HSV
  cvCvtColor(boatFront, boatFront, CV_BGR2HSV);
  cvCvtColor(backUpImage, backUpImage, CV_BGR2HSV);
  HsvImage I(boatFront);
  HsvImage IBackUp(backUpImage);
  fprintf(stderr,"\n About to do Sky detection\n");
  //Sky detection
  /*for (int i=0; i<boatFront->height/3;i++)
  {
      for (int j=0; j<boatFront->width;j++)
      {
      //if something is bright enough, consider it sky and store the
      //value. HSV values go from 0 to 180 ... RGB goes from 0 to 255
          if (((I[i][j].v >= 180) && (I[i][j].s <= 16)))
              // && ((I[i][j].h >=10)))) //&& (I[i][j].h <= 144))))
          {
              //The HSV values vary between 0 and 1
              cvmSet(skyTrainingHue,skyX,skyY,I[i][j].h);
              cvmSet(skyTrainingSat,skyX,skyY,I[i][j].s);
              cvmSet(skyTrainingVal,skyX,skyY,I[i][j].v);
              I[i][j].h = 0.3*180;       //H (color)
              I[i][j].s = 0.3*180;          //S (color intensity)
              I[i][j].v = 0.6*180;          //V (brightness)
              if (skyY == pixelsNumber-1)
              {
                 if (skyX == pixelsNumber-1)
                   skyX = 0;
                 else
                   skyX = skyX + 1;
                 skyY = 0;
              }
              else
                skyY = skyY + 1;
         }   
      }
  }*/
  
  /***********************************************************************/
  //offline input pictures. Samples of water properties are taken from these 
  //pictures to get a range of values for H, S, V that will be stored into a 
  //pre-defined classifier
  IplImage* imageSample1 = cvLoadImage("bigObstacle.jpg");
  cvSetImageROI(imageSample1, cvRect(0,0,imageSample1->height/0.5,imageSample1->width/1.83));
  cvCvtColor(imageSample1, imageSample1, CV_BGR2HSV);
  HsvImage I1(imageSample1);
  IplImage* imageSample2 = cvLoadImage("bigObstacle2.jpg");
  cvSetImageROI(imageSample2, cvRect(0,0,imageSample2->height/0.5,imageSample2->width/1.83));
  cvCvtColor(imageSample2, imageSample2, CV_BGR2HSV);
  HsvImage I2(imageSample2);
  IplImage* imageSample3 = cvLoadImage("bigObstacle3.jpg");
  cvSetImageROI(imageSample3, cvRect(0,0,imageSample3->height/0.5,imageSample3->width/1.83));
  cvCvtColor(imageSample3, imageSample3, CV_BGR2HSV);
  HsvImage I3(imageSample3);
  IplImage* imageSample4 = cvLoadImage("river.jpg");
  cvSetImageROI(imageSample4, cvRect(0,0,imageSample4->height/0.5,imageSample4->width/1.83));
  cvCvtColor(imageSample4, imageSample4, CV_BGR2HSV);
  HsvImage I4(imageSample4);
  IplImage* imageSample5 = cvLoadImage("river2.jpg");
  cvSetImageROI(imageSample5, cvRect(0,0,imageSample5->height/0.5,imageSample5->width/1.83));
  cvCvtColor(imageSample5, imageSample5, CV_BGR2HSV);
  HsvImage I5(imageSample5);
  IplImage* imageSample6 = cvLoadImage("roundObstacle4.jpg");
  cvSetImageROI(imageSample6, cvRect(0,0,imageSample6->height/0.5,imageSample6->width/1.83));
  cvCvtColor(imageSample6, imageSample6, CV_BGR2HSV);
  HsvImage I6(imageSample6);
  IplImage* imageSample7 = cvLoadImage("farm.jpg");
  cvSetImageROI(imageSample7, cvRect(0,0,imageSample7->height/0.5,imageSample7->width/1.83));
  cvCvtColor(imageSample7, imageSample7, CV_BGR2HSV);
  HsvImage I7(imageSample7);
  IplImage* imageSample8 = cvLoadImage("bigObstacle4.jpg");
  cvSetImageROI(imageSample8, cvRect(0,0,imageSample8->height/0.5,imageSample8->width/1.83));
  cvCvtColor(imageSample8, imageSample8, CV_BGR2HSV);
  HsvImage I8(imageSample8);
  IplImage* imageSample9 = cvLoadImage("roundObstacle6.jpg");
  cvSetImageROI(imageSample9, cvRect(0,0,imageSample9->height/0.5,imageSample9->width/1.83));
  cvCvtColor(imageSample9, imageSample9, CV_BGR2HSV);
  HsvImage I9(imageSample9);
  IplImage* imageSample10 = cvLoadImage("roundObstacle.jpg");
  cvSetImageROI(imageSample10, cvRect(0,0,imageSample10->height/0.5,imageSample10->width/1.83));
  cvCvtColor(imageSample10, imageSample10, CV_BGR2HSV);
  HsvImage I10(imageSample10);
  fprintf(stderr,"\n Grab water samples\n");
  //grab water samples from each picture
  for (int i=0; i < threeArraySize; i++)
  {
  	fprintf(stderr,"\n patch is pink (this is for me to know where the ground patch sample is\n");
      for (int j=0; j < arraySize; j++)
      {
          row1 = ceil(X/1.2866)+ceil(X/5.237)+i+ceil(-X/3.534545455);
          row1 = x + i;
        	//if (row1 > X-1)
	          //  row1 = X-1;
          column1 = ceil(Y/7.0755)+ceil(Y/21.01622)+j+ceil(X/1.495384615);
         // if (column1 > Y-1)
          	//	column1 = Y-1;
          averageHue = (I1[row1][column1].h + I2[row1][column1].h + I3[row1][column1].h + I4[row1][column1].h + I5[row1][column1].h + 	
          I6[row1][column1].h + I7[row1][column1].h + I8[row1][column1].h + I9[row1][column1].h + I10[row1][column1].h) / 10;
          averageSat = (I1[row1][column1].s + I2[row1][column1].s + I3[row1][column1].s + I4[row1][column1].s + I5[row1][column1].s + 
          I6[row1][column1].s + I7[row1][column1].s + I8[row1][column1].s + I9[row1][column1].s + I10[row1][column1].s) / 10;
          averageVal = (I1[row1][column1].v + I2[row1][column1].v + I3[row1][column1].v + I4[row1][column1].v + I5[row1][column1].v + 
          I6[row1][column1].v + I7[row1][column1].v + I8[row1][column1].v + I9[row1][column1].v + I10[row1][column1].v) / 10;   
          fprintf(stderr,"\n water patch sample (n X n matrix)\n");
          cvmSet(waterTrainingHue,i,j,averageHue);
          cvmSet(waterTrainingSat,i,j,averageSat);
          cvmSet(waterTrainingVal,i,j,averageVal);  
          fprintf(stderr,"\n patch is red (this is for me to know where the ground patch sample is\n");
          //I[row1][column1].h = 0;
          //I[row1][column1].s = 255;
          //I[row1][column1].v = 255;
      }
  }
  
  fprintf(stderr,"\n Order water samples in ascending\n");
  //order the water samples in ascending order on order to know a range
  cvSort(waterTrainingHue, waterTrainingHue, CV_SORT_ASCENDING);
  cvSort(waterTrainingSat, waterTrainingSat, CV_SORT_ASCENDING);
  cvSort(waterTrainingVal, waterTrainingVal, CV_SORT_ASCENDING);
  // find the maximum and minimum values in the array to create a range
  int maxH = cvmGet(waterTrainingHue,0,0);
  int maxS = cvmGet(waterTrainingSat,0,0);
  int maxV = cvmGet(waterTrainingVal,0,0);
  int minH = cvmGet(waterTrainingHue,0,0);
  int minS = cvmGet(waterTrainingSat,0,0);
  int minV = cvmGet(waterTrainingVal,0,0);
  for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          if (cvmGet(waterTrainingHue,i,j) > maxH)
              maxH = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingSat,i,j) > maxS)
              maxS = cvmGet(waterTrainingSat,i,j);
          if (cvmGet(waterTrainingVal,i,j) > maxV)
              maxV = cvmGet(waterTrainingVal,i,j);
          if (cvmGet(waterTrainingHue,i,j) < minH)
              minH = cvmGet(waterTrainingHue,i,j);
          if (cvmGet(waterTrainingSat,i,j) < minS)
              minS = cvmGet(waterTrainingSat,i,j);
          if (cvmGet(waterTrainingVal,i,j) < minV)
              minV = cvmGet(waterTrainingVal,i,j);
      }
  }
	
	/***********************************************************************/
  //Grab a random patch of water below the horizon and compare every other
  //pixel against it
  //The results of the water detection depend on where in the picture the
  //training samples are located. Maybe adding more training samples will
  //help improve this?
  fprintf(stderr,"\n Random patch\n");
  /*for (int i=0; i < threeArraySize; i++)
  {
      for (int j=0; j < arraySize; j++)
      {
          row2 = ceil(X/4.7291)+ceil(X/8.3176)+i+Mg;
          column2 = ceil(Y/7.78378)+ceil(Y/16.54468)+j+Ng;
      //ground patch sample (n X n matrix)
      //Detecting the horizon in the picture might be an excellent visual aid to
      //choose where (above the horizon) you can take a ground training(1:3*n,1:n)g sample
      //from. The ground pixel sample can be at a constant distance from the
      //horizon
          cvmSet(groundTrainingHue,i,j,I[row2][column2].h);
          cvmSet(groundTrainingSat,i,j,I[row2][column2].s);
          cvmSet(groundTrainingVal,i,j,I[row2][column2].v);   
      //patch is red (this is for me to know where the ground patch sample is)
          I[row2][column2].h = 60; 
          I[row2][column2].s = 180;
          I[row2][column2].v = 90;
      }
  }
  //order the water samples in ascending order on order to know a range
  cvSort(groundTrainingHue, groundTrainingHue, CV_SORT_ASCENDING);
  cvSort(groundTrainingSat, groundTrainingSat, CV_SORT_ASCENDING);
  cvSort(groundTrainingVal, groundTrainingVal, CV_SORT_ASCENDING);
  */ 
  // Main loop. It traverses through the picture
  skyX = 0; 
  skyY = 0;
  
  //The distance formula calculated by plotting points is given by:
  /*********** distance = 0.0006994144*(1.011716711^x)     *****************/
  //cout << "Distance: " << distancePixels << endl;
  fprintf(stderr,"\n Painting water red!!!!!\n");
  while (x < boatFront->height/1.2)
  {
      //get a random sample taken from the picture. Must be determined whether
      //it is water or ground
      for (int i = 0; i<pixelsNumber;i++)
      {
          cvmSet(sampleHue,0,i,I[x][y].h);
          cvmSet(sampleSat,0,i,I[x][y].s);
          cvmSet(sampleVal,0,i,I[x][y].v);
      }
      //Find the shortest distance between a pixel and the neighbors from each of
      //the training samples (sort of inefficient, but might do the job...sometimes)
      //if (ix == pixelsNumber-1)
      //{
          //HSV for water sample
          // learn classifier
          //CvKNearest knn(trainData, trainClasses, 0, false, itemsNumber);
          //CvKNearest knnWaterHue(waterTrainingHue, trainClasses, 0, false, pixelsNumber);
          //CvKNearest knnWaterSat(waterTrainingSat, trainClasses, 0, false, pixelsNumber);
          //CvKNearest knnWaterVal(waterTrainingVal, trainClasses, 0, false, pixelsNumber);
          //HSV for ground sample
          //CvKNearest knnGroundHue(groundTrainingHue, trainClasses2, 0, false, pixelsNumber);
          //CvKNearest knnGroundSat(groundTrainingSat, trainClasses2, 0, false, pixelsNumber);
          //CvKNearest knnGroundVal(groundTrainingVal, trainClasses2, 0, false, pixelsNumber);
          //HSV for sky sample
          //if (cvmGet(skyTrainingHue,0,0)!=0.0 && cvmGet(skyTrainingSat,0,0)!=0.0 && cvmGet(skyTrainingVal,0,0)!=0.0)
          //{
            //  CvKNearest knnSkyHue(skyTrainingHue, trainClasses, 0, false, pixelsNumber);
              //CvKNearest knnSkySat(skyTrainingSat, trainClasses, 0, false, pixelsNumber);
              //CvKNearest knnSkyVal(skyTrainingVal, trainClasses, 0, false, pixelsNumber);
          //}
          
          //scan nearest neighbors to each pixel
          //responseWaterH = knnWaterHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestWaterH,0);
          //responseWaterS = knnWaterSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestWaterS,0);
          //responseWaterV = knnWaterVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestWaterV,0);
          //responseGroundH = knnGroundHue.find_nearest(sampleHue,pixelsNumber,0,0,nearestGroundH,0);
          //responseGroundS = knnGroundSat.find_nearest(sampleSat,pixelsNumber,0,0,nearestGroundS,0);
          //responseGroundV = knnGroundVal.find_nearest(sampleVal,pixelsNumber,0,0,nearestGroundV,0);
              for (int i=0;i<pixelsNumber;i++)
              {
                  for (int j=0;j<pixelsNumber;j++)
                  {
                      //each channel votes 1 when the sample falls inside the
                      //trained water range; the original ||-tests let the votes
                      //pass almost unconditionally
                      if ((minH <= cvmGet(sampleHue,0,j)) && (cvmGet(sampleHue,0,j) <= maxH))
                          comparator[0] = 1;
                      else
                          comparator[0] = 0;
                      if ((minS <= cvmGet(sampleSat,0,j)) && (cvmGet(sampleSat,0,j) <= maxS))
                          comparator[1] = 1;
                      else
                          comparator[1] = 0;
                      if ((minV <= cvmGet(sampleVal,0,j)) && (cvmGet(sampleVal,0,j) <= maxV))
                          comparator[2] = 1;
                      else
                          comparator[2] = 0;
                      //count votes
                      for (int i3=0; i3 < 3; i3++)
                          votesSum = votesSum + comparator[i3];
                      //sky detection 
                      if (votesSum > 1) //&& ((sampleSat[i][j] - sampleVal[i][j]) <= 0.1*180)
                      {
                      // classify pixel as water 
                          I[x-pixelsNumber+i][y-pixelsNumber+j].h = 0;
                          I[x-pixelsNumber+i][y-pixelsNumber+j].s = 255;
                          I[x-pixelsNumber+i][y-pixelsNumber+j].v = 255;
                      }
                      votesSum = 0;
                  }
              }
          if (y < Y-1)
              y = y + pixelsNumber-1;
          if (y > Y-1)
              y = Y-1;
          else if (y == Y-1)
          {
              x = x + pixelsNumber-1;
              y = pixelsNumber-1;
          }
          //ix = 0;
  }
  //traverse through the image one more time, divide the image in grids of
    // 500x500 pixels, and see how many pixels of water are in each grid. If
    // most of the pixels are labeled water, then mark all the other pixels
    // as water as well    
    for(int i = 0; i < 3; i++)
    {   
        comparator[i] = 0;
    }
    //int counter = 0;
    int xDivisor = 100;
    int yDivisor = 100;
    votesSum = 0;
    column1 = 0;
    row1 = 0;
    x = ceil(boatFront->height/2.5);
    obstacleDistance = x;
    y = 0;
    int counter = 0;
    
    //The problem lies somewhere below this line!
    while (x < boatFront->height/1.2)
    {
        //get a random sample taken from the picture. Must be determined whether
        //it is water or ground
        for (int i = 0; i < ceil(boatFront->height/xDivisor); i++)
        {
            for(int j = 0; j < ceil(boatFront->width/yDivisor); j++)
            {
                cvmSet(resampleHue,i,j,I[x+i][y+j].h);
                cvmSet(resampleSat,i,j,I[x+i][y+j].s);
                cvmSet(resampleVal,i,j,I[x+i][y+j].v);
                if(cvmGet(resampleHue,i,j)==0 && cvmGet(resampleSat,i,j)==255 && cvmGet(resampleVal,i,j)==255)
                {
                    votesSum++;
                }
            }
        }
        if (votesSum > ((boatFront->height/xDivisor)*(boatFront->width/yDivisor)*(8.9/9)))
        {   
        // if more than 8.9/9 of the pixels in the grid cell voted water, consider the entire cell water
        // We might need to use other, smaller fractions (like 5/6 maybe?)
            for (int i = 0; i < ceil(boatFront->height/xDivisor);i++)
            {
                for (int j = 0; j < ceil(boatFront->width/yDivisor); j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    I[row1][column1].h = 0;
                    I[row1][column1].s = 0;
                    I[row1][column1].v = 0;
                }
            }
        }
        else
        {   
        // If not water, eliminate all red pixels and turn those pixels
        // back to the original color. These pixels shall, then, be marked
        // as obstacles
            for (int i = 0; i < ceil(boatFront->height/xDivisor);i++)
            {
                for (int j = 0; j < ceil(boatFront->width/yDivisor); j++)
                {
                    row1 = x + i;
                    if (row1 > X-1)
                        row1 = X-1;
                    column1 = y+j;
                    if (column1 > Y-1)
                        column1 = Y-1;
                    //the darker the color, the closer the object to the boat
                    //I[row1][column1].h = 128;    
                    //I[row1][column1].s = 255;   
                    //I[row1][column1].v = 255 - counter;
                    I[row1][column1].h = IBackUp[row1][column1].h;
                    I[row1][column1].s = IBackUp[row1][column1].s;
                    I[row1][column1].v = IBackUp[row1][column1].v;
                    //counter = counter + 20;
                }
            }
            //The distance formula calculated by plotting points is given by:
    /***********  distance = (1.76e-11)*pow(pixels,3.99)  *****************/
    /***********  pixels = (513.9332077469)*pow(distance,0.240675506)  *****************/
    
            // Convert from pixel distance to normal distance in meters
            if(obstacleDistance > sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2)))
            {
                // x,y coordinates of the obstacle
                xObstacleDistance = (1.76e-11)*pow(((boatFront->height/xDivisor)+x)/2, 3.99) ;
                yObstacleDistance = (1.76e-11)*pow(((boatFront->width/yDivisor)+y)/2, 3.99);
                //xWaypoint_msg = xObstacleDistance;
                //yWaypoint_msg = yObstacleDistance;
                //publish position data
                //waypoint_info_pub.publish(xWaypoint_msg);
                //waypoint_info_pub.publish(yWaypoint_msg);
                //ROS_INFO("Obstacle coordinates: X = %f meters, Y = %f meters", xObstacleDistance, yObstacleDistance);  
                obstacleDistance = sqrt(pow(xObstacleDistance,2) + pow(yObstacleDistance,2));
                //ROS_INFO("Obstacle distance from: %f", obstacleDistance);
            }
            //cout << "Distance to Obstacle is: " << obstacleDistance << endl << endl;
            
        }
        y = y + boatFront->width/yDivisor;   //xDivisor == yDivisor == 100; use the axis-matching divisor
        if (y > Y-1)
        {
            x = x + boatFront->height/xDivisor;
            y = 0;
            counter = counter + 30;
        }
        votesSum = 0;
    }
    
  fprintf(stderr,"\n About to color\n");
  cvCvtColor(boatFront, boatFront, CV_HSV2BGR);
  //cvCvtColor(backUpImage, backUpImage, CV_HSV2BGR);
  /**************************************************************************/
  try
  {
    fprintf(stderr,"\n boatFront\n");
    cvShowImage("Boat Front", boatFront);
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
  }
  cvReleaseImage(&backUpImage);   //release the clone taken at the top of the callback
}
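The three comparator votes in the callback boil down to an HSV box test against the trained water range. A hedged sketch of that test as a standalone helper (water_votes is illustrative; the names mirror the min/max variables above):

/* Hedged sketch: vote per HSV channel; a pixel is treated as water when at
 * least two channels fall inside the trained [min, max] ranges. */
static int water_votes(double h, double s, double v,
                       int minH, int maxH, int minS, int maxS, int minV, int maxV)
{
    int votes = 0;
    if (minH <= h && h <= maxH) votes++;
    if (minS <= s && s <= maxS) votes++;
    if (minV <= v && v <= maxV) votes++;
    return votes;   /* classify as water when votes > 1, as in the loop above */
}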
Example #15
int  main()
{
	int width;
	char* bayer[] = {"RG","BG","GR","GB"};
	char* controls[MAX_CONTROL] = {"Exposure", "Gain", "Gamma", "WB_R", "WB_B", "Brightness", "USB Traffic"};

	int height;
	int i;
	char c;
	bool bresult;

	int time1,time2;
	int count=0;

	char buf[128]={0};

	int CamNum=0;
	
	///long exposure, exp_min, exp_max, exp_step, exp_flag, exp_default;
	//long gain, gain_min, gain_max,gain_step, gain_flag, gain_default;

	IplImage *pRgb;


	int numDevices = getNumberOfConnectedCameras();
	if(numDevices <= 0)
	{
		printf("no camera connected, press any key to exit\n");
		getchar();
		return -1;
	}
	else
		printf("attached cameras:\n");

	for(i = 0; i < numDevices; i++)
		printf("%d %s\n",i, getCameraModel(i));

	printf("\nselect one to privew\n");
	scanf("%d", &CamNum);


	bresult = openCamera(CamNum);
	if(!bresult)
	{
		printf("OpenCamera error,are you root?,press any key to exit\n");
		getchar();
		return -1;
	}

	printf("%s information\n",getCameraModel(CamNum));
	int iMaxWidth, iMaxHeight;
	iMaxWidth = getMaxWidth();
	iMaxHeight =  getMaxHeight();
	printf("resolution:%dX%d\n", iMaxWidth, iMaxHeight);
	if(isColorCam())
		printf("Color Camera: bayer pattern:%s\n",bayer[getColorBayer()]);
	else
		printf("Mono camera\n");
	
	for( i = 0; i < MAX_CONTROL; i++)
	{
			if(isAvailable((Control_TYPE)i))
				printf("%s support:Yes\n", controls[i]);
			else
				printf("%s support:No\n", controls[i]);
	}

	printf("\nPlease input the <width height bin image_type> with one space, ie. 640 480 2 0. use max resolution if input is 0. Press ESC when video window is focused to quit capture\n");
	int bin = 1, Image_type;
	scanf("%d %d %d %d", &width, &height, &bin, &Image_type);
	if(width == 0 || height == 0)
	{
		width = iMaxWidth;
		height = iMaxHeight;
	}

	initCamera(); //this must be called before any camera operation, and it only needs to run once
	printf("sensor temperature:%02f\n", getSensorTemp());

//	IMG_TYPE image_type;
	
	while(!setImageFormat(width, height, bin, (IMG_TYPE)Image_type))//IMG_RAW8
	{
		printf("Set format error, please check the width and height\n ASI120's data size(width*height) must be integer multiple of 1024\n");
		printf("Please input the width and height again£¬ie. 640 480\n");
		scanf("%d %d %d %d", &width, &height, &bin, &Image_type);
	}
	printf("\nset image format %d %d %d %d success, start privew, press ESC to stop \n", width, height, bin, Image_type);

	
	if(Image_type == IMG_RAW16)
		pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_16U, 1);
	else if(Image_type == IMG_RGB24)
		pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 3);
	else
		pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 1);

	setValue(CONTROL_EXPOSURE, 100*1000, false); //exposure in microseconds (here 100 ms); false = manual mode
	setValue(CONTROL_GAIN,getMin(CONTROL_GAIN), false); 
	setValue(CONTROL_BANDWIDTHOVERLOAD, getMin(CONTROL_BANDWIDTHOVERLOAD), false); //low transfer speed

	setValue(CONTROL_WB_B, 90, false);
 	setValue(CONTROL_WB_R, 48, false);
  	setAutoPara(getMax(CONTROL_GAIN)/2,10,150); //max auto gain and exposure and target brightness
//	EnableDarkSubtract("dark.bmp"); //dark subtract will be disabled when exposure set auto and exposure below 500ms
	startCapture(); //start preview


	

	bDisplay = 1;
#ifdef _LIN
	pthread_t thread_display;
	pthread_create(&thread_display, NULL, Display, (void*)pRgb);
#elif defined _WINDOWS
	HANDLE thread_setgainexp;
	thread_setgainexp = (HANDLE)_beginthread(Display,  NULL, (void*)pRgb);
#endif

	time1 = GetTickCount();
	int iStrLen = 0, iTextX = 40, iTextY = 60;
	void* retval;
//	int time0, iWaitMs = -1;
//	bool bGetImg;
	while(bMain)
	{

//		time0 = GetTickCount();
		getImageData((unsigned char*)pRgb->imageData, pRgb->imageSize, 200);

//		bGetImg = getImageData((unsigned char*)pRgb->imageData, pRgb->imageSize, iWaitMs);
		time2 = GetTickCount();
//		printf("waitMs%d, deltaMs%d, %d\n", iWaitMs, time2 - time0, bGetImg);
		count++;
		
		if(time2-time1 > 1000 )
		{
			sprintf(buf, "fps:%d dropped frames:%lu ImageType:%d", count, getDroppedFrames(), (int)getImgType());
			count = 0;
			time1 = GetTickCount();
			printf("%s\n", buf); //print via "%s" so buf is never used as a format string
		}
		if(Image_type != IMG_RGB24 && Image_type != IMG_RAW16)
		{
			iStrLen = strlen(buf);
			CvRect rect = cvRect(iTextX, iTextY - 15, iStrLen* 11, 20);
			cvSetImageROI(pRgb , rect);
			cvSet(pRgb, CV_RGB(180, 180, 180)); 
			cvResetImageROI(pRgb);
		}
		cvText(pRgb, buf, iTextX,iTextY );

		if(bChangeFormat)
		{
			bChangeFormat = 0;
			bDisplay = false;
			pthread_join(thread_display, &retval);
			cvReleaseImage(&pRgb);
			stopCapture();
			
			switch(change)
			{
				 case change_imagetype:
					Image_type++;
					if(Image_type > 3)
						Image_type = 0;
					
					break;
				case change_bin:
					if(bin == 1)
					{
						bin = 2;
						width/=2;
						height/=2;
					}
					else 
					{
						bin = 1;
						width*=2;
						height*=2;
					}
					break;
				case change_size_smaller:
					if(width > 320 && height > 240)
					{
						width/= 2;
						height/= 2;
					}
					break;
				
				case change_size_bigger:
				
					if(width*2*bin <= iMaxWidth && height*2*bin <= iMaxHeight)
					{
						width*= 2;
						height*= 2;
					}
					break;
			}
			setImageFormat(width, height, bin, (IMG_TYPE)Image_type);
			if(Image_type == IMG_RAW16)
				pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_16U, 1);
			else if(Image_type == IMG_RGB24)
				pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 3);
			else
				pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 1);
			bDisplay = 1;
			pthread_create(&thread_display, NULL, Display, (void*)pRgb);
			startCapture(); //start preview
		}
	}
END:
	
	if(bDisplay)
	{
		bDisplay = 0;
#ifdef _LIN
   		pthread_join(thread_display, &retval);
#elif defined _WINDOWS
		Sleep(50);
#endif
	}
	
	stopCapture();
	closeCamera();
	cvReleaseImage(&pRgb);
	printf("main function over\n");
	return 1;
}
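The fps accounting in the capture loop could be factored into a small helper. A hedged sketch (fps_tick is illustrative, not part of the SDK demo; GetTickCount() returns milliseconds as used above):

/* Hedged sketch: returns the frame rate once per elapsed second, -1 otherwise. */
static int fps_tick(int *t_last, int *frames)
{
	int now = GetTickCount();
	(*frames)++;
	if (now - *t_last > 1000) {
		int fps = *frames;
		*frames = 0;
		*t_last = now;
		return fps;
	}
	return -1;
}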
Example #16
CvRect SimpleHandDetector::get_feature(IplImage *img, FeatureData *feature)
{
    IplImage* pFrame = img;
    char* f = feature->ch;
#if DEBUG
    cvZero(feature_img);
#endif
    cvCvtColor(pFrame, YCrCb, CV_BGR2YCrCb);
    cvInRangeS(YCrCb, lower, upper, skin);
    //cvErode(skin,skin, 0, 1);   //morphological filtering to remove noise
    //cvDilate(skin,skin, 0, 3);
    cvSmooth(skin,skin);

    //cvCopy(skin,hand);

    cvClearMemStorage(storage);
    CvSeq * contour = 0;

    cvFindContours(skin, storage, &contour, sizeof (CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    if(contour == NULL)
        return cvRect(-1,-1,-1,-1);

    float max = fabs(cvContourArea(contour, CV_WHOLE_SEQ));
    for (CvSeq* p = contour; p != NULL; p = p->h_next)
    {
        float now = fabs(cvContourArea(p, CV_WHOLE_SEQ));
        if (now > max)
        {
            max = now;
            contour = p;
        }
    }

    CvRect rect = cvBoundingRect(contour, 0);
    if (rect.width > STEP && rect.height > STEP)
    {
        //cvRectangle(hand, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height), cvScalar(255, 255, 255), 3);

        cvZero(hand);
        cvDrawContours(hand, contour, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 0, CV_FILLED,8, cvPoint(0, 0));
        int w_s = rect.width / STEP;
        int h_s = rect.height / STEP;
        int w_off = rect.x;
        int h_off = rect.y;
#if DEBUG
        for(int s = 0;s <= STEP;s++)
        {
            cvLine(hand,cvPoint(rect.x,h_off),cvPoint(rect.x + rect.width,h_off),cvScalar(255),1);
            h_off += h_s;
            cvLine(hand,cvPoint(w_off,rect.y),cvPoint(w_off,rect.y + rect.height),cvScalar(255),1);
            w_off += w_s;
        }
#endif
        w_s = rect.width / STEP;
        h_s = rect.height / STEP;
        int half = w_s * h_s;
        for(int p = 0;p < STEP;p++)
        {
            for(int q = 0;q < STEP;q++)
            {
                int count = 0;
                w_off = rect.x + q * w_s;
                h_off = rect.y + p * h_s;
                for(int y = 0;y < h_s;y++)
                {
                    for(int x = 0;x < w_s;x++)
                    {
                        if(IMG8U(hand,w_off + x,h_off + y) == 255)
                            count++;
                    }
                }
                if((double)count / half > 0.5)
                {
                    f[p * STEP + q] = '1';
#if DEBUG
                        //draw the occupied cell on the debug image; `feature`
                        //is the FeatureData*, the IplImage is feature_img
                        cvSetImageROI(feature_img,cvRect(q * 100,p * 100,100,100));
                        cvSet(feature_img,cvScalar(255));
#endif
                }else
                    f[p * STEP + q] = '0';
            }
        }
#if DEBUG
        cvShowImage("hand",hand);
        cvResetImageROI(feature_img);
        cvShowImage("feature",feature_img);
        cvWaitKey(10);
#endif
    }else
        rect = cvRect(-1,-1,-1,-1);

    return rect;
    /*char ch;
    if((ch = cvWaitKey(10)) > 0)
    {
        if(is_train)
        {
            if(ch >= '0' && ch <= '9')
            {
                printf("%c:",ch);
                for(int p = 0;p < step;p++)
                {
                    for(int q = 0;q < step;q++)
                        printf("%c",f[p * step + q]);
                    printf("#\n");
                }
            }
            else if(ch == 13 || ch == 27)
                break;
        }
        else
        {
            if(ch == 13 || ch == 27)
                break;
            else
                findHand(f);
        }
    }*/
}
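The feature produced above is a STEP x STEP grid of '0'/'1' cells. findHand() is not shown in this excerpt; one plausible matcher, sketched under that assumption, is a Hamming distance between a probe feature and stored templates (feature_distance is illustrative):

/* Hedged sketch: Hamming distance between two STEP*STEP feature strings. */
static int feature_distance(const char *a, const char *b)
{
    int d = 0;
    for (int k = 0; k < STEP * STEP; k++)
        if (a[k] != b[k])
            d++;
    return d;   /* smaller means more similar */
}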
Example #17
File: zoom.c Project: truongminh/ccache
void zoomImg(safeQueue *sq, struct bio_job *job)
{
    /* Search tmp folder */

    char *uri = job->name+strlen(SERVICE_ZOOM) + 1;
    sds dstpath = zoomePathInTmpDir(uri);
    //job->result = ufileMakeHttpReplyFromFile(dstpath);
    job->result = ufileMmapHttpReply(dstpath);
    printf("After Read File %.2lf \n", (double)(clock()));
    if(job->result) {
        sdsfree(dstpath);
        safeQueuePush(sq,job); /* the current job will be freed by master */
        return;
    }

    int width = 0, height = 0;
    sds fn = NULL;
    sds srcpath = NULL;
    IplImage* src = NULL;
    IplImage* dst = NULL;
    IplImage* toencode = NULL;
    CvMat* enImg = NULL;
    int notpushed = 1;
    int iscrop = 1;
    int p[3];
    p[0] = CV_IMWRITE_JPEG_QUALITY;
    p[1] = IMG_DEFAULT_QUALITY;
    p[2] = 0;
    uchar *buf = NULL;
    size_t len = 0;
    uri_parse_state state = img_parse_uri(uri,&fn,&width,&height, &iscrop, &p[1]);
    if(state == parse_error) goto clean;
    // initializations
    srcpath = bioPathInSrcDir(fn);    
    printf("Before Load Image %.2lf \n", (double)(clock()));
    src = cvLoadImage(srcpath, CV_LOAD_IMAGE_COLOR);
    printf("After Load Image %.2lf \n", (double)(clock()));
    /* validate that everything initialized properly */
    if(!src)
    {
        ulog(CCACHE_VERBOSE,"can't load image file: %s\n",srcpath);
        goto clean;
    }

    int src_width = src->width;
    int src_height = src->height;
    int roi_src_width = src_width;
    int roi_src_height = src_height;


    if(width&&height) {
        /* Preserve original ratio */
        /* NOTICE: dangerous type conversion */
        roi_src_width = src_height*width/height;
        roi_src_height = src_width*height/width;
        if(roi_src_width>src_width) roi_src_width = src_width;
        if(roi_src_height>src_height) roi_src_height = src_height;
    }
    else if(!width&&height) {
        width = src_width;
    }
    else if(width&&!height) {
        height = src_height;
    }
    else {
        toencode = src;
    }

    if(!toencode) {
        if(iscrop) {
            int x = (src_width - roi_src_width)/2;
            int y = (src_height - roi_src_height)/2;
            // Say what the source region is
            cvSetImageROI( src, cvRect(x,y,roi_src_width,roi_src_height));
        }

        dst = cvCreateImage(cvSize(width,height), src->depth, src->nChannels);
        if(!dst) goto clean;

        cvResize(src,dst,CV_INTER_CUBIC);
        printf("After Resize Image %.2lf \n", (double)(clock()));


        if(iscrop) {
            cvResetImageROI( src );
        }

        toencode = dst;
    }


    enImg = cvEncodeImage(IMG_ENCODE_DEFAULT, toencode, p );

    printf("After Encode Image %.2lf \n", (double)(clock()));

    buf = enImg->data.ptr;
    len = enImg->rows*enImg->cols;
    job->result = ufileMakeHttpReplyFromBuffer(buf,len);
    job->type |= BIO_WRITE_FILE; /* Remind master of new written file  */
    safeQueuePush(sq,job);    
    notpushed = 0;

  /* clean up and release resources */
clean:
    if(notpushed) {
        job->result = NULL;
        safeQueuePush(sq,job);
    }
    if(fn) sdsfree(fn);
    if(srcpath) sdsfree(srcpath);
    if(src) cvReleaseImage(&src);
    if(enImg){
        saveImage(dstpath, buf, len);
        cvReleaseMat(&enImg);
    }
    sdsfree(dstpath);
    if(dst) cvReleaseImage(&dst);
    return;
}
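The ROI arithmetic in zoomImg() preserves the original aspect ratio by shrinking one source dimension before a centered crop. The same math as a standalone helper, plus a worked example (center_crop is illustrative, not part of ccache):

/* Hedged sketch of the aspect-preserving centered crop used above.
 * Worked example: src 800x600 cropped for a 200x200 target gives
 *   roi_w = 600*200/200 = 600, roi_h = 800*200/200 = 800 -> clamped to 600,
 * i.e. a centered 600x600 crop at (100, 0), then cvResize to 200x200. */
static CvRect center_crop(int src_w, int src_h, int dst_w, int dst_h)
{
    int roi_w = src_h * dst_w / dst_h;  /* integer arithmetic, as in zoomImg */
    int roi_h = src_w * dst_h / dst_w;
    if (roi_w > src_w) roi_w = src_w;
    if (roi_h > src_h) roi_h = src_h;
    return cvRect((src_w - roi_w) / 2, (src_h - roi_h) / 2, roi_w, roi_h);
}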
Example #18
int BotGetInventaryInfo(int slot_y, int slot_x)
{

	IplImage *object = GetIplImage(membit);

	int start_x, start_y, end_x, end_y;

	/* pixel bounds of the 10x6 inventory grid; the slot widths and heights vary
	   by a pixel, so lookup tables replace the original if/else chains */
	static const int bounds_x[10][2] = {
		{663, 695}, {697, 730}, {732, 765}, {767, 799}, {801, 834},
		{836, 868}, {870, 903}, {905, 937}, {939, 972}, {974, 1007}
	};
	static const int bounds_y[6][2] = {
		{417, 449}, {451, 483}, {485, 517}, {519, 551}, {553, 585}, {587, 620}
	};

	if (slot_x < 0 || slot_x > 9 || slot_y < 0 || slot_y > 5)
	{
		/* the original left the bounds uninitialized for out-of-range slots */
		cvReleaseImage(&object);
		return 0;
	}

	start_x = bounds_x[slot_x][0];
	end_x   = bounds_x[slot_x][1];
	start_y = bounds_y[slot_y][0];
	end_y   = bounds_y[slot_y][1];

	cvSetImageROI(object, cvRect(start_x, start_y, end_x - start_x + 1, end_y - start_y + 1));
	IplImage *TemplateImage = cvCreateImage(cvGetSize(object), object->depth, object->nChannels);
	cvCopy(object, TemplateImage, NULL);

	int result = 0;
	if(BotTemplateMatch(slot_x, slot_y, IdentifyImageEmptySlots, TemplateImage, start_x, start_y, end_x, end_y) == 1)
		result = 1;
	else if(BotTemplateMatch(slot_x, slot_y, IdentifyImageItemsToStash, TemplateImage, start_x, start_y, end_x, end_y) == 1)
		result = 2;

	cvResetImageROI(object);
	cvReleaseImage(&TemplateImage);	/* this copy was leaked in the original */
	cvReleaseImage(&object);
	return result;

}
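BotTemplateMatch() is not included in this excerpt. A hedged sketch of one way such a check could be implemented with cvMatchTemplate (template_matches and its threshold are illustrative, not the project's actual helper):

/* Hedged sketch: returns 1 when the best normalized correlation of `tmpl`
 * inside `haystack` exceeds `threshold`. Both images must share depth and
 * channel count. */
static int template_matches(IplImage *haystack, IplImage *tmpl, double threshold)
{
	CvSize rsz = cvSize(haystack->width - tmpl->width + 1,
	                    haystack->height - tmpl->height + 1);
	IplImage *result = cvCreateImage(rsz, IPL_DEPTH_32F, 1);
	double min_val = 0.0, max_val = 0.0;
	cvMatchTemplate(haystack, tmpl, result, CV_TM_CCOEFF_NORMED);
	cvMinMaxLoc(result, &min_val, &max_val, NULL, NULL, NULL);
	cvReleaseImage(&result);
	return max_val >= threshold;
}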
Example #19
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void update_mhi(IplImage* img, IplImage* dst, int diff_threshold)
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds

    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;          
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if (!mhi || mhi->width != size.width || mhi->height != size.height) 
    {
        if (buf == 0) 
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }
        
        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );
        
        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );

        cvZero( mhi ); // clear MHI at the beginning

        // create the auxiliary images at the frame size
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];

    cvAbsDiff(buf[idx1], buf[idx2], silh); // get difference between frames

    cvThreshold(silh, silh, diff_threshold, 1, CV_THRESH_BINARY); // and threshold it

    cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // update MHI with the thresholded silhouette

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
    
    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for (i = -1; i < seq->total; i++) 
    {

        if (i < 0) 
        { 
            // case of the whole image (global motion): use the full frame rect
            comp_rect = cvRect( 0, 0, size.width, size.height );

            color = CV_RGB(255,255,255); // white for the global-motion indicator
            magnitude = 100;
        }
        else 
        { 
            // i-th motion component: get its bounding rectangle
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;

            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;

            color = CV_RGB(255,0,0); // red for individual components
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation within the selected ROI; each component's motion
        // direction is computed from the mask extracted for that component (via cvCmp)
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);

        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if (count < comp_rect.width*comp_rect.height * 0.05)
        {
            continue;
        }

        // draw a clock with an arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
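A hedged usage sketch for update_mhi(), modeled on the stock OpenCV motempl demo (the window name, camera index, and threshold value are illustrative):

/* Hedged sketch: drive update_mhi() from a live capture loop. */
int run_motion_demo(void)
{
    CvCapture *capture = cvCaptureFromCAM(0);
    IplImage *motion = 0;
    if (!capture)
        return -1;
    cvNamedWindow("Motion", 1);
    for (;;) {
        IplImage *frame = cvQueryFrame(capture);
        if (!frame)
            break;
        if (!motion) {
            motion = cvCreateImage(cvSize(frame->width, frame->height), 8, 3);
            cvZero(motion);
            motion->origin = frame->origin;
        }
        update_mhi(frame, motion, 30);   /* diff_threshold = 30 */
        cvShowImage("Motion", motion);
        if (cvWaitKey(10) >= 0)
            break;
    }
    cvReleaseCapture(&capture);
    cvDestroyWindow("Motion");
    return 0;
}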
Example #20
int main222( int argc,   char** argv )
{
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tc - stop the tracking\n"
        "\tb - switch to/from backprojection view\n"
        "\th - show/hide object histogram\n"
        "To initialize tracking, select the object with mouse\n" );

    cvNamedWindow( "Histogram", 1 );
    cvNamedWindow( "CamShiftDemo", 1 );
    cvSetMouseCallback( "CamShiftDemo", on_mouse, 0 );
    cvCreateTrackbar( "Vmin", "CamShiftDemo", &vmin, 256, 0 );
    cvCreateTrackbar( "Vmax", "CamShiftDemo", &vmax, 256, 0 );
    cvCreateTrackbar( "Smin", "CamShiftDemo", &smin, 256, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, bin_w, c;

        frame = cvQueryFrame( capture ); // grab the next frame; without this the loop exits immediately
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );

        if( track_object )
        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );

            if( track_object < 0 )
            {
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
            }

            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;

            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( !image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        cvShowImage( "CamShiftDemo", image );
        cvShowImage( "Histogram", histimg );

        c = cvWaitKey(10);
        if( (char) c == 27 )
            break;
        switch( (char) c )
        {
        case 'b':
            backproject_mode ^= 1;
            break;
        case 'c':
            track_object = 0;
            cvZero( histimg );
            break;
        case 'h':
            show_hist ^= 1;
            if( !show_hist )
                cvDestroyWindow( "Histogram" );
            else
                cvNamedWindow( "Histogram", 1 );
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("CamShiftDemo");

    return 0;
}
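
// hsv2rgb(), used above to color the histogram bars, is not included in this
// excerpt; it is the helper from the same stock OpenCV camshiftdemo.c that
// this snippet follows (hue is in [0,180), as produced by CV_BGR2HSV):
CvScalar hsv2rgb( float hue )
{
    int rgb[3], p, sector;
    static const int sector_data[][3] =
        {{0,2,1}, {1,2,0}, {1,0,2}, {2,0,1}, {2,1,0}, {0,1,2}};
    hue *= 0.033333333333333333333333333333333f;   // map [0,180) hue to sector in [0,6)
    sector = cvFloor(hue);
    p = cvRound(255*(hue - sector));
    p ^= sector & 1 ? 255 : 0;

    rgb[sector_data[sector][0]] = 255;
    rgb[sector_data[sector][1]] = 0;
    rgb[sector_data[sector][2]] = p;

    return cvScalar(rgb[2], rgb[1], rgb[0], 0);
}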
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage 
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage ){  
	CvSeq* contours;  
	int i, c, l, N = 11;  
	CvSize sz = cvSize( img->width & -2, img->height & -2 );
	// make a copy of the input image
	IplImage* timg = cvCloneImage( img );
	IplImage* gray = cvCreateImage( sz, 8, 1 );
	IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
	IplImage* tgray;
	CvSeq* result;
	double s, t;
	// create empty sequence that will contain points -  
	// 4 points per square (the square's vertices) 
	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage ); 
	// select the maximum ROI in the image 
	// with the width and height divisible by 2 
	cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height )); 
	// down-scale and upscale the image to filter out the noise
	cvPyrDown( timg, pyr, 7 );
	cvPyrUp( pyr, timg, 7 );
	tgray = cvCreateImage( sz, 8, 1 );
	// find squares in every color plane of the image  
	for( c = 0; c < 3; c++ )  {  
		// extract the c-th color plane  
		cvSetImageCOI( timg, c+1 ); 
		cvCopy( timg, tgray, 0 ); 
		// try several threshold levels 
		for( l = 0; l < N; l++ )  { 
			// hack: use Canny instead of zero threshold level. 
			// Canny helps to catch squares with gradient shading
			if( l == 0 )  {  
				// apply Canny. Take the upper threshold from slider 
				// and set the lower to 0 (which forces edges merging) 
				cvCanny( tgray, gray, 0, thresh, 5 );
				// dilate canny output to remove potential 
				// holes between edge segments  
				cvDilate( gray, gray, 0, 1 );
			} 
			else  
			{ 
				// apply threshold if l!=0:  
				// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0  
				cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY ); 
			}
			// find contours and store them all as a list
			cvFindContours( gray, storage, &contours, sizeof(CvContour),  CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) ); 
			// test each contour 
			while( contours )  { 
				// approximate contour with accuracy proportional 
				// to the contour perimeter
				result = cvApproxPoly( contours, sizeof(CvContour), storage,  CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
				// square contours should have 4 vertices after approximation  
				// relatively large area (to filter out noisy contours)
				// and be convex.  
				// Note: absolute value of an area is used because 
				// area may be positive or negative - in accordance with the 
				// contour orientation
				if( result->total == 4 &&
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 1000 &&
					cvCheckContourConvexity(result) )
				{
					s = 0;
					for( i = 0; i < 5; i++ )
					{
						// find the minimum angle between joint
						// edges (maximum of cosine)
						if( i >= 2 )  {
							t = fabs(angle(
								(CvPoint*)cvGetSeqElem( result, i ),
								(CvPoint*)cvGetSeqElem( result, i-2 ),
								(CvPoint*)cvGetSeqElem( result, i-1 )));
							s = s > t ? s : t;
						}
					}
					// if the cosines of all angles are small
					// (all angles are ~90 degrees) then write the quadrangle
					// vertices to the resultant sequence
					if( s < 0.3 )  
						for( i = 0; i < 4; i++ )  
							cvSeqPush( squares,  (CvPoint*)cvGetSeqElem( result, i ));
				}  
				// take the next contour  
				contours = contours->h_next; 
			}  
		}  
	} 
	// release all the temporary images 
	cvReleaseImage( &gray );
	cvReleaseImage( &pyr ); 
	cvReleaseImage( &tgray ); 
	cvReleaseImage( &timg );
	return squares;
} 
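
// findSquares4() relies on an angle() helper and a global Canny threshold
// `thresh` (default 50 in the demo) that are not shown in this excerpt; the
// helper below is the one from OpenCV's squares.c sample, which this
// function follows.
// finds the cosine of the angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}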
void CFRManagerCtrl::ShowImageOnClient(HDC pDC, IplImage* image, CRect rect,CRect ROI,int flags)
{
    char info[100];
    IplImage* image_show=cvCloneImage(image);
    if (ROI.Width()!=0&&ROI.Height()!=0)
    {
        CvRect rc;
        rc.x=ROI.left;
        rc.y=ROI.top;
        rc.height=ROI.Height();
        rc.width=ROI.Width();
        cvSetImageROI(image,rc);

        cvNamedWindow("处理前");
        cvShowImage("处理前",image);

        //convert the color space
        cvCvtColor(image,image,CV_RGB2HSV);
        //split the channels
        IplImage* imgChannel[3] = { 0, 0, 0 };

        for (int i=0; i<image->nChannels; i++)
        {
            imgChannel[i] = cvCreateImage( cvGetSize( image ), IPL_DEPTH_8U, 1 );  //histogram equalization requires single-channel images
        }

        cvSplit( image, imgChannel[0], imgChannel[1], imgChannel[2],0);//H, S, V (no alpha)

        CvFont font;
        cvInitFont( &font,CV_FONT_HERSHEY_PLAIN,1, 1, 0, 1, 8);
        for (int i=0; i<image->nChannels; i++)
        {
            CvScalar avg=cvAvg(imgChannel[i]);
            memset(info,'\0',100);
            sprintf(info,"%f",avg.val[0]);

            cvPutText(image_show, info , cvPoint(0,20*(i+1)), &font, CV_RGB(255,0,0));
        }

// 		CvScalar avg=cvAvg(image);
// 		memset(info,'\0',100);
// 		sprintf(info,"%f",avg.val[0]);
// 		CvFont font;
// 		cvInitFont( &font,CV_FONT_HERSHEY_PLAIN,1, 1, 0, 1, 8);
// 		cvPutText(image_show, info , cvPoint(0,20), &font, CV_RGB(255,0,0));

        /*cvCvtScale(image,image,1.0,100-avg.val[0]);*/
        CvScalar avg=cvAvg(imgChannel[2]);
        cvCvtScale(imgChannel[2],imgChannel[2],1.0,YUZHI-avg.val[0]);
        cvMerge( imgChannel[0], imgChannel[1], imgChannel[2], 0, image );
        for (int i=0; i<image->nChannels; i++)
        {
            CvScalar avg=cvAvg(imgChannel[i]);
            memset(info,'\0',100);
            sprintf(info,"%f",avg.val[0]);

            cvPutText(image_show, info , cvPoint(0,20*(i+4)), &font, CV_RGB(255,0,0));
        }

        cvCvtColor(image,image,CV_HSV2RGB);
        cvNamedWindow("处理后");
        cvShowImage("处理后",image);

        for (int i=0; i<image->nChannels; i++)
        {
            cvReleaseImage(&imgChannel[i] );
        }
// 		avg=cvAvg(image);
// 		memset(info,'\0',100);
// 		sprintf(info,"%f",avg.val[0]);
// 		cvPutText(image_show, info , cvPoint(0,40), &font, CV_RGB(255,0,0));
    }


    char* imagedata=image_show->imageData;
    LPBITMAPINFO lpbitm;
    lpbitm=CtreateMapInfo(image_show,flags);
    StretchDIBits(pDC,
                  rect.left,rect.top,rect.Width(),rect.Height(),
                  0,0,image_show->width,image_show->height,
                  imagedata,lpbitm,DIB_RGB_COLORS,SRCCOPY);
    cvReleaseImage(&image_show);
    //pDC.Rectangle(rect);
}
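
// CtreateMapInfo() (sic) is defined elsewhere in this project. A minimal
// sketch of what it plausibly does -- filling a BITMAPINFO header matching
// the IplImage layout so StretchDIBits can blit it -- is given below. This
// is an assumption, not the project's implementation; it assumes a 3-channel
// 8-bit image (a single-channel image would also need a grayscale palette).
LPBITMAPINFO CtreateMapInfoSketch(IplImage* image, int flags)
{
    static BITMAPINFO bmi;                         // returned by address; not thread-safe
    memset(&bmi, 0, sizeof(bmi));
    bmi.bmiHeader.biSize        = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth       = image->width;
    bmi.bmiHeader.biHeight      = -image->height;  // negative height: top-down DIB
    bmi.bmiHeader.biPlanes      = 1;
    bmi.bmiHeader.biBitCount    = (WORD)(image->nChannels * 8);
    bmi.bmiHeader.biCompression = BI_RGB;
    return &bmi;
}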
Example #23
0
int detect_and_draw1( IplImage* img,int num,void* pThis)
{
	//CvRect area; 
	//CvRect regions;
	Config_para* para=(Config_para*)pThis;
	//char* cascade_name="face.xml";/*    "haarcascade_profileface.xml";*/

	CvMemStorage* storage= cvCreateMemStorage(0);
	//CvHaarClassifierCascade* cascade  = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
	//------define the region of interest-------
	para->area2.x=cvRound(img->width*para->window_scale1);
	para->area2.y=cvRound(img->height*para->window_scale1);
	para->area2.width=cvRound(img->width*(1-para->window_scale1*2));
	para->area2.height=cvRound(img->height*(1-para->window_scale1*2));

	IplImage *img_ROI=cvCreateImage(cvSize(para->area2.width,para->area2.height),img->depth,img->nChannels);
	CvPoint p1,p2;
	p1.x=para->area2.x;
	p1.y=para->area2.y;
	p2.x=para->area2.x+para->area2.width;
	p2.y=para->area2.y+para->area2.height;

	cvSetImageROI(img,para->area2);
	cvCopy(img,img_ROI);
	cvResetImageROI(img);

	//显示"Interest para->area2"
	CvFont font;    
	double hScale=1;   
	double vScale=1;    
	int lineWidth=2;// stroke width of the text
	char showMsg[]="IA"; //"IA" = interest area
	// initialize the font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth);//initialize the font used to draw on the image
	// cvPoint gives the x,y coordinate where the text starts
	cvPutText(img,showMsg,cvPoint(para->area2.x,para->area2.y-10),&font,CV_RGB(255,100,255));//draw the label on the image

	cvRectangle(img,p1,p2,CV_RGB(0,0,255),1,8,0);

	double Scale = 1.3;
	IplImage* gray = cvCreateImage( cvSize(img_ROI->width,img_ROI->height),img_ROI->depth, 1 );
	IplImage* small_img = cvCreateImage( cvSize( cvRound (img_ROI->width/Scale),
		cvRound (img_ROI->height/Scale)),
		img_ROI->depth, 1 );
	IplImage *img_show=cvCreateImage(cvSize(para->show_width,para->show_height),img->depth,img->nChannels);

	int i,faces_num;
	int  x1, y1, x2, y2, w, h;

	cvCvtColor( img_ROI, gray, CV_BGR2GRAY );
	cvResize( gray, small_img, CV_INTER_LINEAR );
	cvEqualizeHist( small_img, small_img );
	cvClearMemStorage( storage );


	CvSeq* faces = cvHaarDetectObjects( small_img, cascade1, storage,
		1.2,para->FD_iterations, CV_HAAR_DO_CANNY_PRUNING,
		cvSize(50, 50) );

	faces_num=faces->total;
	if (faces->total==1)
	{
		for( i = 0; i < (faces ? faces->total : 0); i++ )
		{
			CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
			CvPoint center;
			int radius;
			center.x = cvRound((r->x + r->width*0.5)*Scale);
			center.y = cvRound((r->y + r->height*0.5)*Scale);
			radius = cvRound((r->width + r->height)*0.25*Scale);
			CvPoint p1,p2;
			p1.x=center.x-radius+para->area2.x;
			p1.y=center.y-radius+para->area2.y;
			p2.x=center.x+radius+para->area2.x;
			p2.y=center.y+radius+para->area2.y+cvRound(2*radius*0.1);//correction offset: cvRound(2*radius*0.1)

			if ((num>0)&&(num<6))
				cvRectangle(img,p1,p2,CV_RGB(0,255,255),1,8,0);//draw into the full image
			//cvCircle( img_ROI, center, radius, colors[i%8], 3, 8, 0 );
			if ((num>=6)&&(num<34))
				cvRectangle(img,p1,p2,CV_RGB(0,255,0),1,8,0);//draw into the full image
			if(num>34)
				cvRectangle(img,p1,p2,CV_RGB(255,0,0),1,8,0);//draw into the full image

			//record the face position (relative to the ROI)
			x1 = p1.x-para->area2.x;
			x2 = p2.x-para->area2.x;
			y1 = p1.y-para->area2.y;
			y2 = p2.y-para->area2.y;
			w = x2 - x1;
			h = y2 - y1;

			/* ensure odd width and height */
			w = ( w % 2 )? w : w+1;
			h = ( h % 2 )? h : h+1;

			para->regions2.x=x1;
			para->regions2.y=y1;
			para->regions2.width=w;
			para->regions2.height=h;
		}

		//显示"Prepare Begin or OK"
		if ((num>0)&&(num<6))//预备阶段
		{
			char  showMsg[]="Prepare..."; 
			cvPutText(img,showMsg,cvPoint(para->area2.x+para->regions2.x+para->regions2.width,para->area2.y+para->regions2.y),&font,CV_RGB(0,255,255));//在图片中输出字符 
		}

		if ((num>=6)&&(num<34))//采集的10张照片
		{
			char  showMsg[]="Begin..."; 
			cvPutText(img,showMsg,cvPoint(para->area2.x+para->regions2.x+para->regions2.width,para->area2.y+para->regions2.y),&font,CV_RGB(0,255,0));//在图片中输出字符 
		}
		if(num>34)//采集满35张照片,结束
		{
			char showMsg[]="OK...";
			cvPutText(img,showMsg,cvPoint(para->area2.x+para->regions2.x+para->regions2.width,para->area2.y+para->regions2.y),&font,CV_RGB(255,0,0));//在图片中输出字符 
		}

	}

	cvClearSeq(faces);

	cvResize(img,img_show);//enlarge for display
	cvShowImage( "人脸采集2", img_show);
	//cvWaitKey(0);
	cvReleaseImage(&img_show);
	cvReleaseImage( &gray );
	cvReleaseImage( &small_img );
	cvReleaseImage(&img_ROI);

	cvReleaseMemStorage(&storage);

	if (faces_num==1)//only handle a single face
		return faces_num;
	else
		return 0;
}	
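
// A hypothetical call site for detect_and_draw1() (not part of the original
// snippet): grab frames from a camera and count single-face detections until
// 35 samples have been collected. The global cascade1 and the Config_para
// contents are assumed to be initialized elsewhere in the project.
static void collect_faces_demo(Config_para* para)
{
	CvCapture* cap = cvCaptureFromCAM(0);
	int collected = 0;
	while (cap && collected <= 35)
	{
		IplImage* frame = cvQueryFrame(cap);    // do not release: owned by the capture
		if (!frame) break;
		if (detect_and_draw1(frame, collected, para) == 1)
			collected++;                        // exactly one face found in this frame
		if ((char)cvWaitKey(10) == 27) break;   // ESC aborts
	}
	cvReleaseCapture(&cap);
}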
/*
// Getting feature pyramid
//
// API
// int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k,
                      const int startX, const int startY,
                      const int W, const int H, CvLSVMFeaturePyramid **maps);
// INPUT
// image             - image
// lambda            - resize scale
// k                 - size of cells
// startX            - X coordinate of the image rectangle to search
// startY            - Y coordinate of the image rectangle to search
// W                 - width of the image rectangle to search
// H                 - height of the image rectangle to search
// OUTPUT
// maps              - feature maps for all levels
// RESULT
// Error status
*/
int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k, 
                      const int startX, const int startY, 
                      const int W, const int H, CvLSVMFeaturePyramid **maps)
{
    IplImage *img2, *imgTmp, *imgResize;
    float   step, tmp;
    int      cntStep;
    int      maxcall;
    int i;
    int err;
    CvLSVMFeatureMap *map;
    
    //getting the subimage
    cvSetImageROI(image, cvRect(startX, startY, W, H));
    img2 = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    cvCopy(image, img2, NULL);
    cvResetImageROI(image);

    if(img2->depth != IPL_DEPTH_32F)
    {
        imgResize = cvCreateImage(cvSize(img2->width , img2->height) , IPL_DEPTH_32F , 3);
        cvConvert(img2, imgResize);
    }
    else
    {
        imgResize = img2;
    }
    
    step = powf(2.0f, 1.0f/ ((float)lambda));
    maxcall = W/k;
    if( maxcall > H/k )
    {
        maxcall = H/k;
    }
    cntStep = (int)(logf((float)maxcall/(5.0f))/logf(step)) + 1;
    //printf("Count step: %f %d\n", step, cntStep);

    allocFeaturePyramidObject(maps, lambda, cntStep + lambda);

    for(i = 0; i < lambda; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(img2, tmp, 4);
        err = getFeatureMaps_dp(imgTmp, 4, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }

    /**********************************one**************/
    for(i = 0; i <  cntStep; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(imgResize, tmp, 8);
        err = getFeatureMaps_dp(imgTmp, 8, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i + lambda] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }/*for(i = 0; i < cntStep; i++)*/

    if(img2->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    cvReleaseImage(&img2);
    return LATENT_SVM_OK;
}
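
/* A hypothetical call site for getFeaturePyramid() (not from this file):
   build a pyramid over the whole frame. The values lambda = 10 (levels per
   octave) and k = 8 (cell side) match the usual latent-SVM defaults, and the
   field name numLevels plus the dual freeFeaturePyramidObject() are
   assumptions based on the latent-SVM headers. */
static int buildPyramidSketch(IplImage* image)
{
    CvLSVMFeaturePyramid* maps = NULL;
    int err = getFeaturePyramid(image, 10, 8,
                                0, 0, image->width, image->height, &maps);
    if (err != LATENT_SVM_OK)
        return err;
    /* ... consume maps->pyramid[0 .. maps->numLevels - 1] ... */
    freeFeaturePyramidObject(&maps);
    return LATENT_SVM_OK;
}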
Example #25
0
    void CvScan::charsImgSegement(IplImage *src, vector<IplImage*> &vector) {
        
        if (src == NULL) {
            return;
        }
        //*
        // binarize with Otsu's threshold computed from the source image.
        // (the original computed otsu() on a freshly allocated, uninitialized
        // buffer, then called cvZero() on the pointer after cvReleaseImage()
        // had already freed it)
        int m_otsu = otsu(src);
        printf("m_otsu:%d\n",m_otsu);
        IplImage *pimg = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
        cvThreshold(src, pimg, m_otsu, 255, CV_THRESH_BINARY);
        //debug check: push pimg and return here to inspect the binarized image
        //vector.push_back(pimg);
        //return;
        //*/
        
        std::vector<CvRect> contours;
        CvSeq* contour;
        CvMemStorage *storage = cvCreateMemStorage(0);
        CvContourScanner scanner= cvStartFindContours(pimg,storage,sizeof(CvContour),CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE,cvPoint(0,0));
        //walk the contour tree
        CvRect rect;
        double tmparea = 0.0;
        double indexArea = 0.0;
        double minarea = 5*5;
        double pixels = pimg->width*pimg->height;
        int i = 0;
        uchar *pp;
        IplImage *pdst;
        while ((contour = cvFindNextContour(scanner))) {
            tmparea = fabs(cvContourArea(contour));
            indexArea = fabs(cvContourArea(contour)/pixels);
            rect = cvBoundingRect(contour,0);
            
//            if (indexArea < 0.02 || indexArea >= 1 || tmparea < minarea) {
//                //does not meet the criteria: drop the region
//                cvSubstituteContour(scanner, NULL);
//            }else{
//                contours.push_back(rect);
//            }
            //*
            if (tmparea<minarea){
                //if the center pixel of a small connected region is white, fill the region with black
                pp=(uchar*)(pimg->imageData+pimg->widthStep*(rect.y+rect.height/2)+rect.x+rect.width/2);
                if (pp[0]==255){
                    for (int y=rect.y;y<rect.y+rect.height;y++){
                        for (int x=rect.x;x<rect.x+rect.width;x++){
                            pp=(uchar*)(pimg->imageData+pimg->widthStep*y+x);
                            if(pp[0]==255){
                                pp[0]=0;
                            }
                        }
                    }
                }
            }else{
                contours.push_back(rect);
            };
            //*/
        }
        cvEndFindContours(&scanner);
        int size = (int)contours.size();
        if (size <= 0) {
            cvReleaseImage(&pimg);
            cvReleaseMemStorage(&storage);
            return;
        }
        printf("number of detected rectangles: %d\n",size);
        
        std::vector<CvRect> sortedRect;
        //sort the size-qualified blocks from left to right
        sortRect(contours, sortedRect);
        for (i = 0; i < sortedRect.size(); i++) {
            
            //printf("found rect:%d-%d-%d-%d\n",sortedRect[i].x,sortedRect[i].y,sortedRect[i].width,sortedRect[i].height);
            pdst = cvCreateImage(cvSize(sortedRect[i].width,sortedRect[i].height), IPL_DEPTH_8U, 1);
            cvSetImageROI(pimg, sortedRect[i]);
            //cvAdd(pimg, pdst, pdst, NULL);
            cvCopy(pimg, pdst, NULL);
            //cvReleaseImage(&pdst);
            cvResetImageROI(pimg);
            if (verifyImgCharSizes(pdst)) {
                IplImage *dst = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), pdst->depth, pdst->nChannels);
                cvResize(pdst, dst, CV_INTER_LINEAR);
                vector.push_back(dst);
                cvReleaseImage(&pdst);
            }
        }
        cvReleaseImage(&pimg);
        cvReleaseMemStorage(&storage);
        //printf("found %d character blocks in total\n",i);
    }
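// otsu(), sortRect() and verifyImgCharSizes() used above are defined
// elsewhere in this project. For reference, a standard implementation of
// Otsu's method matching the otsu(IplImage*) call could look like this
// (a sketch, not the project's actual code); it expects an 8-bit
// single-channel image:
static int otsuSketch(const IplImage* img)
{
    int hist[256] = {0};
    int total = img->width * img->height;
    for (int y = 0; y < img->height; y++) {
        const uchar* row = (const uchar*)(img->imageData + y * img->widthStep);
        for (int x = 0; x < img->width; x++)
            hist[row[x]]++;                            // build the gray-level histogram
    }
    double sum = 0;
    for (int i = 0; i < 256; i++) sum += i * (double)hist[i];
    double sumB = 0, maxVar = 0;
    int wB = 0, best = 0;
    for (int t = 0; t < 256; t++) {
        wB += hist[t];                                 // background pixel count
        if (wB == 0) continue;
        int wF = total - wB;                           // foreground pixel count
        if (wF == 0) break;
        sumB += t * (double)hist[t];
        double mB = sumB / wB, mF = (sum - sumB) / wF; // class means
        double var = (double)wB * wF * (mB - mF) * (mB - mF);
        if (var > maxVar) { maxVar = var; best = t; }  // maximize between-class variance
    }
    return best;
}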
int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        printf("\n--Argument error!\n--Please specify input/output directory. ");
        printf("(e.g. AddNewFace.exe faces_to_find)\n");
        return -1;
    }
    //description
    printf("* This program is used to ADD NEW FACE images to be found.\n");
    printf("* Transfer face images to FEATURE DATA, and store in file 'feature.dat'.\n");
    printf("* Face image should be better named as PERSON's name.\n");
    printf("Press any key to continue...");
    getchar();

    //load face cascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
    storage = cvCreateMemStorage(0);

    //find all images
    string strFilePath(argv[1]);
    strFilePath.append("\\*");
    WIN32_FIND_DATAA FileData;
    HANDLE hFind;
    hFind = FindFirstFileA(strFilePath.c_str(), &FileData);
    if (hFind == INVALID_HANDLE_VALUE)
    {
        printf ("\n--Invalid File Handle. GetLastError reports %d/n", GetLastError ());
        return -1;
    }
    //get image names
    vector<string> fileNames;
    do
    {
        //eliminate directories
        //if(!strcmp(FileData.cFileName,"..") || !strcmp(FileData.cFileName,"."))
        if(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)   //attributes are bit flags, so test with &
            continue;
        fileNames.push_back(string(FileData.cFileName));
    } while (FindNextFileA(hFind, &FileData));
    FindClose(hFind);   //FindClose takes the HANDLE itself, not its address

    //file to store feature data
    ofstream featureDataFile;
    char dataFileName[256];
    sprintf(dataFileName, "%s\\%s", argv[1], feature_file_name);
    featureDataFile.open(dataFileName, ofstream::out);
    //class object of processing
    CFaceRecognize* pFaceRec = new CFaceRecognize;
    //process images
    for (size_t i = 0; i < fileNames.size(); i++)
    {
        string fileName = fileNames[i];
        int index = fileName.rfind(".");
        string name = fileName.substr(0, index);	//image name, i.e. person's name
        string extend = fileName.substr(index);		//extend name

        //skip feature data file
        if (extend == ".dat")
            continue;

        printf("\nprocessing image: %s.", fileName.c_str());

        //1--load image
        char filePath[256];
        sprintf(filePath, "%s\\%s", argv[1], fileName.c_str());
        IplImage* srcImg = cvLoadImage(filePath, 1);		//rgb
        IplImage* dstImg = 0;
        CvRect roi;
        if( srcImg )
        {
            //get key parts of face
            bool res = detect_and_draw( srcImg, roi);
            if (!res)	continue;

            //use roi to crop image
            dstImg = cvCreateImage(cvSize(roi.width,roi.height),srcImg->depth,srcImg->nChannels);
            cvSetImageROI(srcImg, roi);
            cvCopy(srcImg, dstImg);
            cvResetImageROI(srcImg);
            cvReleaseImage( &srcImg );
        }
        else
        {
            printf("--Error loading source image!\n");
            continue;
        }

        //2--standard image
        IplImage* standImage = pFaceRec->StandardImage(dstImg);
        cvShowImage("standard face",standImage);
        cvWaitKey(30);

        //3--generate LGBPHS data
        vector<int> LGBPHist;
        pFaceRec->GetLGBPHS(standImage, LGBPHist);

        //4--write into feature data file
        size_t imm = 0;
        featureDataFile<<name<<":";
        for (; imm<LGBPHist.size()-1; imm++)
            featureDataFile<<LGBPHist[imm]<<",";
        featureDataFile<<LGBPHist[imm]<<"\n";
    }

    featureDataFile.close();
    delete pFaceRec;
    cvReleaseMemStorage(&storage);

    printf("\n\nAll finished...\n");
    //system("pause");
    return 0;
}
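
// detect_and_draw() is defined elsewhere in this program; a sketch that is
// consistent with its use above (return true and fill roi with the first
// face found, using the global cascade/storage loaded in main) -- an
// illustrative assumption, not the program's actual implementation:
static bool detect_and_draw_sketch(IplImage* img, CvRect& roi)
{
    cvClearMemStorage(storage);
    CvSeq* faces = cvHaarDetectObjects(img, cascade, storage,
                                       1.1, 3, CV_HAAR_DO_CANNY_PRUNING,
                                       cvSize(30, 30));
    if (!faces || faces->total == 0)
        return false;
    roi = *(CvRect*)cvGetSeqElem(faces, 0);   // take the first detection
    return true;
}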
Example #27
0
File: mask.c Project: Thachnh/cs283final
int main(int argc, char ** argv) {
	FILE * fp;
	char * line = NULL;
	size_t len = 0;
	ssize_t read;
	
	int numberOfImages;
	IplImage *src, *dst,
			*smallRes;
	IplImage **timages;
	printf("Start processing images\n");

	//load source image, which is USA map
	dst = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR);

	// Read all hex codes, then create mask for each
	// Draw image to that mask
	fp = fopen("stateHex.txt", "r");
	if (fp == NULL) exit(EXIT_FAILURE);

	while ((read = getline(&line, &len, fp)) != -1) {
		if(line[read-1] == '\n') {
			line[read-1] = '\0';
			--read;
		}

		// TODO: split line into State code and Color code
		
		// Get mask based on color code (which is line currently)
		// TODO: use color code extracted from string above instead of line
		CvSize size = cvGetSize(dst);
		IplImage *mask = drawMask(dst, size.width, size.height, line);

		// Find the bounding box surround the state
		// CvRect bounding is what we are looking for
		CvSeq *contours = 0;
		CvMemStorage *mem;
		mem = cvCreateMemStorage(0);
		int n = cvFindContours( mask, mem, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
		CvSeq* ptr = 0;
		CvRect bounding;
		for (ptr = contours; ptr != NULL; ptr = ptr->h_next)
		{
			bounding = cvBoundingRect(ptr, 0);
		}

		// TODO: use State code to pull image from Instagram
		// TODO: use bounding.width and bounding.height to resize state image

		// TODO: load resized state image into src
		src = cvLoadImage("test.jpg", CV_LOAD_IMAGE_COLOR);
		
		// printf("%d %d %d %d\n", bounding.x, bounding.y, bounding.width, bounding.height);

		// Set ROI of output file and mask to be at bounding box
		cvSetImageROI(dst, bounding);
		cvSetImageROI(mask, bounding);
		// If size of state image is the same as of bounding box, comment out this line
		cvSetImageROI(src, cvRect(0, 0, bounding.width, bounding.height));

		// Copy state image into state area
		cvCopy(src, dst, mask);
		// Reset the ROI of output file
		cvResetImageROI(dst);

		// release per-iteration resources
		cvReleaseImage(&src);
		cvReleaseImage(&mask);
		cvReleaseMemStorage(&mem);
	}

	fclose(fp);
	if (line) free(line);

	// Generate output file
	cvSaveImage("output.png", dst, NULL);
	cvReleaseImage(&dst);

	printf("Done processing images\n");

	// This is how to display image, until user press ESC key
	// cvNamedWindow("img", 0);
	// cvMoveWindow("img", 200, 200);
	// char k;
	// k = cvWaitKey(0);
	// if(k == 27) {   // ESC ('ESC' is a multi-character constant and never matches)
	// 	cvShowImage("img", src);
	// 	cvReleaseImage(&src);
	// }
	return 0;
}
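
// drawMask() is implemented elsewhere in this project; a plausible sketch,
// assuming the hex string is a color code like "1f77b4" (or "#1f77b4") and
// the mask marks every pixel of the map painted in exactly that color. The
// name and parsing are assumptions, not the project's actual code:
IplImage* drawMaskSketch(IplImage* img, int width, int height, const char* hex)
{
	unsigned int r = 0, g = 0, b = 0;
	sscanf(hex[0] == '#' ? hex + 1 : hex, "%02x%02x%02x", &r, &g, &b);
	IplImage* mask = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	// cvLoadImage stores channels in BGR order; cvInRangeS tests [lower, upper)
	cvInRangeS(img, cvScalar(b, g, r, 0),
	                cvScalar(b + 1, g + 1, r + 1, 0), mask);
	return mask;
}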
Example #28
0
void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, int dMinSize) {
    CvSeq* seq;
    CvRect roi = m_rROI;
    Extend(roi, 1);
    cvSetImageROI(img, roi);
    cvSetImageROI(thresh, roi);
    // layers
    int colors[MAX_LAYERS] = {0};
    int iMinLevel = 0, iMaxLevel = 255;
    float step, power;
    ThresholdingParam(img, nLayers / 2, iMinLevel, iMaxLevel, step, power, 4);
    int iMinLevelPrev = iMinLevel;
    int iMaxLevelPrev = iMinLevel;
    if (m_trPrev.iColor != 0) {
        iMinLevelPrev = m_trPrev.iColor - nLayers / 2;
        iMaxLevelPrev = m_trPrev.iColor + nLayers / 2;
    }
    if (iMinLevelPrev < iMinLevel) {
        iMaxLevelPrev += iMinLevel - iMinLevelPrev;
        iMinLevelPrev = iMinLevel;
    }
    if (iMaxLevelPrev > iMaxLevel) {
        iMinLevelPrev -= iMaxLevelPrev - iMaxLevel;
        if (iMinLevelPrev < iMinLevel) {
            iMinLevelPrev = iMinLevel;
        }
        iMaxLevelPrev = iMaxLevel;
    }
    int n = nLayers;
    n -= (iMaxLevelPrev - iMinLevelPrev + 1) / 2;
    step = float(iMinLevelPrev - iMinLevel + iMaxLevel - iMaxLevelPrev) / float(n);
    int j = 0;
    float level;
    for (level = (float)iMinLevel; level < iMinLevelPrev && j < nLayers; level += step, j++) {
        colors[j] = int(level + 0.5);
    }
    for (level = (float)iMinLevelPrev; level < iMaxLevelPrev && j < nLayers; level += 2.0, j++) {
        colors[j] = int(level + 0.5);
    }
    for (level = (float)iMaxLevelPrev; level < iMaxLevel && j < nLayers; level += step, j++) {
        colors[j] = int(level + 0.5);
    }
    // threshold at each level and collect candidate rectangles
    for (int i = 0; i < nLayers; i++) {
        cvThreshold(img, thresh, colors[i], 255.0, CV_THRESH_BINARY);
        if (cvFindContours(thresh, m_mstgRects, &seq, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE)) {
            CvTrackingRect cr;
            for (CvSeq* external = seq; external; external = external->h_next) {
                cr.r = cvContourBoundingRect(external);
                Move(cr.r, roi.x, roi.y);
                if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize) {
                    cr.ptCenter = Center(cr.r);
                    cr.iColor = colors[i];
                    cvSeqPush(m_seqRects, &cr);
                }
                for (CvSeq* internal = external->v_next; internal; internal = internal->h_next) {
                    cr.r = cvContourBoundingRect(internal);
                    Move(cr.r, roi.x, roi.y);
                    if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize) {
                        cr.ptCenter = Center(cr.r);
                        cr.iColor = colors[i];
                        cvSeqPush(m_seqRects, &cr);
                    }
                }
            }
            cvClearSeq(seq);
        }
    }
    cvResetImageROI(img);
    cvResetImageROI(thresh);
}//void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers)
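
// Extend(), Move(), Center() and RectInRect() are small geometry helpers
// defined elsewhere (cf. OpenCV's face-detection sample code); the sketches
// below are consistent with how they are used above, but are reconstructions,
// not the original definitions:
inline void Extend(CvRect& r, int d)
{
    r.x -= d; r.y -= d;
    r.width += 2 * d; r.height += 2 * d;   // grow the rectangle by d on every side
}
inline void Move(CvRect& r, int dx, int dy)
{
    r.x += dx; r.y += dy;                  // shift from ROI-local to image coordinates
}
inline CvPoint Center(const CvRect& r)
{
    return cvPoint(r.x + r.width / 2, r.y + r.height / 2);
}
inline bool RectInRect(const CvRect& r1, const CvRect& r2)
{
    CvPoint p = Center(r1);                // the center of r1 must lie inside r2
    return r2.x <= p.x && p.x < r2.x + r2.width &&
           r2.y <= p.y && p.y < r2.y + r2.height;
}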
void CueTemplate::initialize() {
	TrackData* track = trackIn.getBuffer();
	if(!track) { return; }
	if(track->reliability < m_threshold) { 
		return; 
	}

	CVImage* cvgrayimg = cvGrayImageIn.getBuffer();
	if(!cvgrayimg) { std::cerr<< getName() << "::ERROR::initialize()::cvGrayImageIn is NULL!...\n"; return; }
	IplImage* grayimg = cvgrayimg->ipl;

	m_track.winnerPos = track->winnerPos;
	m_track.winnerSize = track->winnerSize;
	m_track.winnerRect = track->winnerRect;
	m_track.reliability = track->reliability;

	if(debug) { std::cout << getName() << "::initialize()::Initializing rect [" << m_track.winnerRect.x << " " << m_track.winnerRect.y << " " << m_track.winnerRect.width << " " << m_track.winnerRect.height << "]\n"; }

	int width, height;
	width = grayimg->width;
	height = grayimg->height;
	m_track.imageSize.width = width;
	m_track.imageSize.height = height;
	
	m_templatesizex = m_track.winnerRect.width;
	m_templatesizey = m_track.winnerRect.height;
	
	m_halftemplatesizex = cvRound(floor((double)m_templatesizex/2.0));
	m_halftemplatesizey = cvRound(floor((double)m_templatesizey/2.0));

	m_boundaryRect.x = m_halftemplatesizex;
	m_boundaryRect.y = m_halftemplatesizey;
	m_boundaryRect.width = grayimg->width - m_templatesizex + 1; // W-w+1
	m_boundaryRect.height = grayimg->height - m_templatesizey + 1; // H-h+1
	if(debug) std::cout << "m_boundaryRect = [" << m_boundaryRect.x << " " << m_boundaryRect.y << " " << m_boundaryRect.width << " " << m_boundaryRect.height << "]\n";

	if (mp_cvtemplateimg) delete mp_cvtemplateimg;
	if (mp_newtemplateimg) cvReleaseImage(&mp_newtemplateimg);
	if (mp_origtemplateimg) cvReleaseImage(&mp_origtemplateimg);
	if (mp_temptemplateimg) cvReleaseImage(&mp_temptemplateimg);

	mp_cvtemplateimg = new CVImage( cvSize( m_templatesizex, m_templatesizey ), CV_8UC1, 0);
	mp_templateimg = mp_cvtemplateimg->ipl;
	mp_newtemplateimg = cvCreateImage( cvSize( m_templatesizex, m_templatesizey ), IPL_DEPTH_8U, 1);
	mp_origtemplateimg = cvCreateImage( cvSize( m_templatesizex, m_templatesizey ), IPL_DEPTH_8U, 1);
	mp_temptemplateimg = cvCreateImage( cvSize( m_templatesizex, m_templatesizey ), IPL_DEPTH_8U, 1);

	cvSetZero(mp_templateimg);
	cvTemplateImageOut.setBuffer(mp_cvtemplateimg);
	
	int x, y;
	x = track->winnerPos.x;
	y = track->winnerPos.y;
	if(x < m_halftemplatesizex) x = m_halftemplatesizex;
	if(y < m_halftemplatesizey) y = m_halftemplatesizey;
	if(x >= width - m_halftemplatesizex) x = width - m_halftemplatesizex - 1;
	if(y >= height - m_halftemplatesizey) y = height - m_halftemplatesizey - 1;

	CvRect rect;
	rect.x = x - m_halftemplatesizex;
	rect.y = y - m_halftemplatesizey;
	rect.width = m_templatesizex;
	rect.height = m_templatesizey;

	cvSetImageROI(grayimg, rect );
	cvCopy( grayimg, mp_templateimg );
	cvResetImageROI( grayimg );
	cvCopy( mp_templateimg, mp_origtemplateimg );

	cvTemplateImageOut.out();

	m_init = true;
	if(debug) { std::cout << getName() << "::initialize()::Initializing complete\n"; }
}
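
// The per-frame matching step that pairs with this initialize() is not part
// of this excerpt; a minimal sketch of how the stored template could be
// located in the next gray frame. Function and variable names here are
// illustrative assumptions, not CueTemplate's actual API:
static CvPoint locateTemplateSketch(IplImage* grayimg, IplImage* templ)
{
	// the result plane is (W-w+1) x (H-h+1), matching m_boundaryRect above
	int rw = grayimg->width  - templ->width  + 1;
	int rh = grayimg->height - templ->height + 1;
	IplImage* result = cvCreateImage(cvSize(rw, rh), IPL_DEPTH_32F, 1);
	cvMatchTemplate(grayimg, templ, result, CV_TM_CCOEFF_NORMED);
	double minVal, maxVal;
	CvPoint minLoc, maxLoc;
	cvMinMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, NULL);
	cvReleaseImage(&result);
	// return the center of the best match in image coordinates
	return cvPoint(maxLoc.x + templ->width / 2, maxLoc.y + templ->height / 2);
}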
//--------------------------------------------------------------
void threesixtyUnwarp::setup(){
	
	//---------------------------
	// app properties
	ofSetVerticalSync(false);
	bMousePressed   = false;
	bCenterChanged  = false;
	bPlayerPaused   = false;
	bAngularOffsetChanged = false;
	bMousePressedInPlayer = false;
	bMousepressedInUnwarped = false;
	bSavingOutVideo = false;
	bSaveAudioToo   = false;
	nWrittenFrames  = 0;
	handyString = new char[128];
	outputFileName = "output.mov";

	//---------------------------
	// Load settings file
	if( XML.loadFile("UnwarperSettings.xml") ){
		printf("UnwarperSettings.xml loaded!\n");
	} else{
		printf("Unable to load UnwarperSettings.xml!\nPlease check 'data' folder.\n");
	}
	
	maxR_factor   = XML.getValue("MAXR_FACTOR", 0.96);
	minR_factor   = XML.getValue("MINR_FACTOR", 0.16);
	angularOffset = XML.getValue("ROTATION_DEGREES", 0.0);
	
	int loadedQuality  = XML.getValue("CODEC_QUALITY", 3);
	loadedQuality = MIN(5, MAX(0, loadedQuality));
	int codecQualities[] = {
		OF_QT_SAVER_CODEC_QUALITY_MIN,
		OF_QT_SAVER_CODEC_QUALITY_LOW,
		OF_QT_SAVER_CODEC_QUALITY_NORMAL,
		OF_QT_SAVER_CODEC_QUALITY_HIGH,
		OF_QT_SAVER_CODEC_QUALITY_MAX,             
		OF_QT_SAVER_CODEC_QUALITY_LOSSLESS
	};
	codecQuality = codecQualities[loadedQuality];
	
	player.loadMovie(XML.getValue("INPUT_FILENAME", "input.mov"));
	player.getDuration();
	
	unwarpedW = (int) XML.getValue("OUTPUT_W", 1280);
	unwarpedH = (int) XML.getValue("OUTPUT_H", 256);
	
	//if the XML element doesn't exist, create it.
	XML.setValue("OUTPUT_W", (int) unwarpedW);
	XML.setValue("OUTPUT_H", (int) unwarpedH);
	
	
	// Interpolation method: 
	// 0 = CV_INTER_NN, 1 = CV_INTER_LINEAR, 2 = CV_INTER_CUBIC.
	interpMethod = (int) XML.getValue("INTERP_METHOD", 1); 
	XML.setValue("INTERP_METHOD", (int) interpMethod);
	
	int bSaveAud = (int) XML.getValue("INCLUDE_AUDIO", 0); 
	bSaveAudioToo = (bSaveAud != 0);
	
	/*
	// straight rectilinearization
	yWarpA = -0.2047;
	yWarpB =  0.8632;
	yWarpC =  0.3578;
	yWarpA = XML.getValue("R_WARP_A", -0.2047);
	yWarpB = XML.getValue("R_WARP_B",  0.8632);
	yWarpC = XML.getValue("R_WARP_C",  0.3578);
	 */

	yWarpA =   0.1850;
	yWarpB =   0.8184;
	yWarpC =  -0.0028;
	yWarpA = XML.getValue("R_WARP_A",  0.1850);
	yWarpB = XML.getValue("R_WARP_B",  0.8184);
	yWarpC = XML.getValue("R_WARP_C", -0.0028);


	//======================================
	// create data structures for unwarping
	blackOpenCV = cvScalarAll(0);
	
	// The "warped" original source video produced by the Bloggie.
	warpedW = player.width;
	warpedH = player.height;
	int nWarpedBytes = warpedW * warpedH * 3;
	printf("warpedW = %d, warpedH = %d\n", warpedW, warpedH);
	
	warpedImageOpenCV.allocate(warpedW, warpedH);
	warpedPixels = new unsigned char[nWarpedBytes];	
	warpedIplImage = warpedImageOpenCV.getCvImage();
	cvSetImageROI(warpedIplImage, cvRect(0, 0, warpedW, warpedH));
	
	int nUnwarpedPixels = unwarpedW * unwarpedH;
	int nUnwarpedBytes  = unwarpedW * unwarpedH * 3;
	unwarpedImage.allocate(unwarpedW, unwarpedH, OF_IMAGE_COLOR);
	unwarpedPixels = new unsigned char[nUnwarpedBytes];
	unwarpedTexture.allocate(unwarpedW, unwarpedH,GL_RGB);
	
	unwarpedImageOpenCV.allocate(unwarpedW, unwarpedH);
	unwarpedImageOpenCV.setROI(0, 0, unwarpedW, unwarpedH);
	unwarpedIplImage = unwarpedImageOpenCV.getCvImage();
	
	srcxArrayOpenCV.allocate(unwarpedW, unwarpedH);
	srcyArrayOpenCV.allocate(unwarpedW, unwarpedH);
	srcxArrayOpenCV.setROI(0, 0, unwarpedW, unwarpedH);
	srcyArrayOpenCV.setROI(0, 0, unwarpedW, unwarpedH);
	
	xocvdata = (float*) srcxArrayOpenCV.getCvImage()->imageData; 
	yocvdata = (float*) srcyArrayOpenCV.getCvImage()->imageData; 
	
	playerScaleFactor = (float)(ofGetHeight() - unwarpedH)/(float)warpedH;
	savedWarpedCx = warpedCx = XML.getValue("CENTERX", warpedW / 2.0);
	savedWarpedCy = warpedCy = XML.getValue("CENTERY", warpedH / 2.0);
	savedAngularOffset = angularOffset;
	
	//if the XML element doesn't exist, create it.
	XML.setValue("CENTERX", warpedCx);
	XML.setValue("CENTERY", warpedCy);
	XML.setValue("ROTATION_DEGREES", angularOffset);
	
	
	//---------------------------
	// cylinder vizualization properties
	cylinderRes = 90;
	cylinderWedgeAngle = 360.0 / (cylinderRes-1);
	cylinderX = new float[cylinderRes];
	cylinderY = new float[cylinderRes];
	for (int i = 0; i < cylinderRes; i++) {
		cylinderX[i] = cos(ofDegToRad((float)i * cylinderWedgeAngle));
		cylinderY[i] = sin(ofDegToRad((float)i * cylinderWedgeAngle));
	}
	blurredMouseY = 0;
	blurredMouseX = 0;
	
	videoRecorder = new ofxQtVideoSaver();
	currentCodecId = 16;
	videoRecorder->setCodecType (currentCodecId);
	videoRecorder->setCodecQualityLevel (codecQuality); 
	

	//---------------------------
	// start it up
	computePanoramaProperties();
	computeInversePolarTransform(); 
	player.play();
}
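
// computeInversePolarTransform() is defined elsewhere in this app; the sketch
// below shows the idea under stated assumptions: each unwarped column maps to
// an angle around the donut center, each row to a radius between minR and
// maxR, with yWarpA/B/C applied as a quadratic radial correction. It also
// assumes the float map images are contiguous (widthStep == width*4); the
// function name and signature are illustrative, not the app's actual code.
static void fillPolarMapsSketch(float* xmap, float* ymap,
                                int unwarpedW, int unwarpedH,
                                float cx, float cy, float minR, float maxR,
                                float a2, float a1, float a0,   // yWarpA, yWarpB, yWarpC
                                float angularOffsetDeg)
{
	const float DEG2RAD = 3.14159265f / 180.0f;
	for (int y = 0; y < unwarpedH; y++) {
		float f = (float)y / (float)(unwarpedH - 1);   // 0..1 down the panorama
		f = a2 * f * f + a1 * f + a0;                  // radial correction polynomial
		float r = minR + f * (maxR - minR);
		for (int x = 0; x < unwarpedW; x++) {
			float ang = (angularOffsetDeg + 360.0f * x / unwarpedW) * DEG2RAD;
			xmap[y * unwarpedW + x] = cx + r * cosf(ang);
			ymap[y * unwarpedW + x] = cy + r * sinf(ang);
		}
	}
	// per frame, the remap would then be something like:
	//   cvRemap(warpedIplImage, unwarpedIplImage, srcX, srcY,
	//           CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, blackOpenCV);
}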