Example #1
int main( int argc, char** argv )
{
	

    /* per-color hit counters (globals defined elsewhere) */
    contadorBlue = 0;
    contadorGreen = 0;
    contadorRed = 0;

    CvCapture *capture = NULL;
    IplImage  *frame = NULL;
    IplImage  *result = NULL;
    int       key = 0;   /* must be initialized: tested before the first cvWaitKey() */
    char      *filename = (char*)"aGest.xml";


    /* load the classifier
       note that I put the file in the same directory with
       this code */
    cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );

    /* setup memory buffer; needed by the hand detector */
    storage = cvCreateMemStorage( 0 );

    /* initialize camera */
    capture = cvCaptureFromCAM( 0 );

    /* always check */
    assert( cascade && storage && capture );
    
    /* open and resize the images to be overlaid */
    IplImage *drumblue = cvLoadImage("./Drums/DrumBlue.png");
    IplImage *drumgreen = cvLoadImage("./Drums/DrumGreen.png");
    IplImage *drumred = cvLoadImage("./Drums/DrumRed.png");
    IplImage *lineblue = cvLoadImage("./Drums/BlueLine.png");
    IplImage *linegreen = cvLoadImage("./Drums/GreenLine.png");
    IplImage *linered = cvLoadImage("./Drums/RedLine.png");
    IplImage *step1 = cvLoadImage("./Drums/Step.png");
    IplImage *step2 = cvLoadImage("./Drums/Step2.png");
    IplImage *arrow1 = cvLoadImage("./Drums/Arrow1.png");
    IplImage *arrow2 = cvLoadImage("./Drums/Arrow2.png");
    IplImage *bien = cvLoadImage("./Drums/Bien.png");
    IplImage *buu = cvLoadImage("./Drums/Buu.png");


    IplImage *rdrumblue = cvCreateImage(cvSize(110,95),drumblue->depth, drumblue->nChannels);
    IplImage *rdrumgreen = cvCreateImage(cvSize(110,95),drumgreen->depth, drumgreen->nChannels);
    IplImage *rdrumred = cvCreateImage(cvSize(110,95),drumred->depth, drumred->nChannels);
    IplImage *rdrumblue2 = cvCreateImage(cvSize(110,95),drumblue->depth, drumblue->nChannels);
    IplImage *rdrumgreen2 = cvCreateImage(cvSize(110,95),drumgreen->depth, drumgreen->nChannels);
    IplImage *rdrumred2 = cvCreateImage(cvSize(110,95),drumred->depth, drumred->nChannels);
    IplImage *rlineblue = cvCreateImage(cvSize(230,80),lineblue->depth, lineblue->nChannels);
    IplImage *rlinegreen = cvCreateImage(cvSize(230,80),linegreen->depth, linegreen->nChannels);
    IplImage *rlinered = cvCreateImage(cvSize(230,80),linered->depth, linered->nChannels);
    IplImage *rlineblue2 = cvCreateImage(cvSize(230,80),lineblue->depth, lineblue->nChannels);
    IplImage *rlinegreen2 = cvCreateImage(cvSize(230,80),linegreen->depth, linegreen->nChannels);
    IplImage *rlinered2 = cvCreateImage(cvSize(230,80),linered->depth, linered->nChannels);
    IplImage *rstep1 = cvCreateImage(cvSize(100,100),step1->depth, step1->nChannels);
    IplImage *rstep2 = cvCreateImage(cvSize(100,100),step2->depth, step2->nChannels);
    IplImage *rarrow1 = cvCreateImage(cvSize(110,70),arrow1->depth, arrow1->nChannels);
    IplImage *rarrow2 = cvCreateImage(cvSize(110,70),arrow2->depth, arrow2->nChannels);
    IplImage *rbien = cvCreateImage(cvSize(60,25),bien->depth, bien->nChannels);
    IplImage *rbuu = cvCreateImage(cvSize(60,25),buu->depth, buu->nChannels);
    

    cvResize(drumblue, rdrumblue);
    cvResize(drumgreen, rdrumgreen);
    cvResize(drumred, rdrumred);
    cvResize(drumblue, rdrumblue2);
    cvResize(drumgreen, rdrumgreen2);
    cvResize(drumred, rdrumred2);
    cvResize(lineblue, rlineblue);
    cvResize(linegreen, rlinegreen);
    cvResize(linered, rlinered);
    cvResize(lineblue, rlineblue2);
    cvResize(linegreen, rlinegreen2);
    cvResize(linered, rlinered2);
    cvResize(step1, rstep1);
    cvResize(step2, rstep2);
    cvResize(arrow1, rarrow1);
    cvResize(arrow2, rarrow2);
    cvResize(bien, rbien);
    cvResize(buu, rbuu);

    cvFlip(rdrumblue2, rdrumblue2,1);
    cvFlip(rdrumgreen2, rdrumgreen2,1);
    cvFlip(rdrumred2, rdrumred2,1);
    cvFlip(rlineblue2, rlineblue2,1);
    cvFlip(rlinegreen2, rlinegreen2,1);
    cvFlip(rlinered2, rlinered2,1);

    /* release memory */
    cvReleaseImage( &drumblue);
    cvReleaseImage( &drumgreen);
    cvReleaseImage( &drumred);
    cvReleaseImage( &lineblue);
    cvReleaseImage( &linegreen);
    cvReleaseImage( &linered );
    cvReleaseImage( &step1 );
    cvReleaseImage( &step2 );
    cvReleaseImage( &arrow1 );
    cvReleaseImage( &arrow2 );
    cvReleaseImage( &bien);
    cvReleaseImage( &buu);

 
    /* create a window */
    cvNamedWindow( "video", 1 );
    
    /* set time and frame variables*/
    initGame = clock ();
    frameN = 0;

    /* set scores*/
    score1 = 0;
    score2 = 0;
    redb = false;
    greenb = false;
    blueb = false;
    redb2 = false;
    greenb2 = false;
    blueb2 = false;
    bienn = 0;
    maln = 0;

    std::list<int> lista;
    lista.push_front(1);
    lista.push_front(2);
    lista.push_front(3);
    lista.push_front(4);
    lista.push_front(5);


    while( key != 'q' ) {

        /* get a frame */
        //frame: 640,480
        frame = cvQueryFrame( capture );

        /* always check */
        if( !frame ) break;

        /* clone and 'fix' frame */
        cvFlip( frame, frame, 1 );

        GenerateScoreMessage(frame, score1, score2);

        /* detect hands and draw boxes */
        detectHands( frame, rlineblue2, rlinegreen2, rlinered2, false );
        detectHands( frame, rlineblue, rlinegreen, rlinered, true );

  
        /* overlay the gameplay buttons */
        cvLine(frame, cvPoint(320,0), cvPoint(320,480), cvScalar(255,255,0), 2);
        OverlayImage(frame,rdrumblue,cvPoint(0,240),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumgreen,cvPoint(0,315),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumred,cvPoint(0,390),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        OverlayImage(frame,rdrumblue2,cvPoint(530, 15),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumgreen2,cvPoint(530,90),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rdrumred2,cvPoint(530,165),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        OverlayImage(frame,rarrow1,cvPoint(0,23),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow1,cvPoint(0,98),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow1,cvPoint(0,173),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        OverlayImage(frame,rarrow2,cvPoint(530,248),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow2,cvPoint(530,323),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
        OverlayImage(frame,rarrow2,cvPoint(530,398),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

        drawAndAdvance(frame, rbien, rbuu, rstep1, rstep2);

//        OverlayImage(frame,rstep1,cvPoint(200,330),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
//        OverlayImage(frame,rstep2,cvPoint(400,330),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

//        OverlayImage(frame,rbien,cvPoint(200,200),cvScalar(1,1,1,1),cvScalar(1,1,1,1));
//        OverlayImage(frame,rbuu,cvPoint(400,200),cvScalar(1,1,1,1),cvScalar(1,1,1,1));

	
       /* display video */
        cvShowImage( "video", frame );

        /* quit if the user presses 'q' */
        key = cvWaitKey( 10 );

        frameN++;
    }
    
	
    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );
    return 0;

}
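The OverlayImage() helper used above is not part of this listing. Below is a minimal sketch of one plausible implementation (an assumption, not the original): it blends `overlay` into `src` at `location`, weighting the existing pixel by S and the overlay pixel by D per channel, which would match the cvScalar(1,1,1,1) arguments passed above.

/* Hypothetical stand-in for the missing OverlayImage() helper. */
void OverlayImage(IplImage* src, IplImage* overlay, CvPoint location,
                  CvScalar S, CvScalar D)
{
    int x, y, c;
    for (y = 0; y < overlay->height; y++) {
        if (y + location.y < 0 || y + location.y >= src->height) continue;
        for (x = 0; x < overlay->width; x++) {
            if (x + location.x < 0 || x + location.x >= src->width) continue;
            CvScalar over = cvGet2D(overlay, y, x);
            CvScalar base = cvGet2D(src, y + location.y, x + location.x);
            CvScalar mixed = cvScalarAll(0);
            for (c = 0; c < src->nChannels; c++)
                mixed.val[c] = S.val[c] * base.val[c] + D.val[c] * over.val[c];
            cvSet2D(src, y + location.y, x + location.x, mixed);
        }
    }
}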
IplImage* CvCaptureCAM_VFW::retrieveFrame(int)
{
    BITMAPINFO vfmt;
    memset( &vfmt, 0, sizeof(vfmt));
    BITMAPINFOHEADER& vfmt0 = vfmt.bmiHeader;
    int sz, prevWidth, prevHeight;

    if( !capWnd )
        return 0;

    sz = capGetVideoFormat( capWnd, &vfmt, sizeof(vfmt));
    prevWidth = frame ? frame->width : 0;
    prevHeight = frame ? frame->height : 0;

    if( !hdr || hdr->lpData == 0 || sz == 0 )
        return 0;

    if( !frame || frame->width != vfmt0.biWidth || frame->height != vfmt0.biHeight )
    {
        cvReleaseImage( &frame );
        frame = cvCreateImage( cvSize( vfmt0.biWidth, vfmt0.biHeight ), 8, 3 );
    }

    if( vfmt.bmiHeader.biCompression != BI_RGB ||
        vfmt.bmiHeader.biBitCount != 24 )
    {
        BITMAPINFOHEADER vfmt1 = icvBitmapHeader( vfmt0.biWidth, vfmt0.biHeight, 24 );

        if( hic == 0 || fourcc != vfmt0.biCompression ||
            prevWidth != vfmt0.biWidth || prevHeight != vfmt0.biHeight )
        {
            closeHIC();
            hic = ICOpen( MAKEFOURCC('V','I','D','C'),
                          vfmt0.biCompression, ICMODE_DECOMPRESS );
            if( hic )
            {
                if( ICDecompressBegin( hic, &vfmt0, &vfmt1 ) != ICERR_OK )
                {
                    closeHIC();
                    return 0;
                }
            }
        }

        if( !hic || ICDecompress( hic, 0, &vfmt0, hdr->lpData,
            &vfmt1, frame->imageData ) != ICERR_OK )
        {
            closeHIC();
            return 0;
        }

        cvFlip( frame, frame, 0 );
    }
    else
    {
        IplImage src;
        cvInitImageHeader( &src, cvSize(vfmt0.biWidth, vfmt0.biHeight),
            IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvFlip( &src, frame, 0 );
    }

    return frame;
}
Example #3
int run(const char *serverAddress, const int serverPort, char headless)
{
	int i, sockfd, show = ~0;
	int frames = 0;
	int returnValue = EXIT_SUCCESS;
	CvCapture *capture;
	CvMemStorage *storage;
	IplImage *grabbedImage;
	IplImage *imgThreshold;
	CvSeq *seq;
	CvFont font;
	SendQueue *queue;
	char strbuf[255];
	struct timeval oldTime, time, diff;
	float lastKnownFPS = 0;

	sockfd = initNetwork(serverAddress, serverPort);
	if (sockfd == -1) {
		fprintf(stderr, "ERROR: initNetwork returned -1\n");
		return EXIT_FAILURE;
	}
	queue = initSendQueue();

	capture = cvCaptureFromCAM(CV_CAP_ANY);
	if (capture == NULL) {
		fprintf( stderr, "ERROR: capture is NULL \n" );
		getchar();
		return EXIT_FAILURE;
	}

	// Create a window in which the captured images will be presented
	cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);

	storage = cvCreateMemStorage(0);

	// void cvInitFont(font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
	cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8);

	gettimeofday(&oldTime, NULL);
	// Show the image captured from the camera in the window and repeat
	while (1) {
		cvClearMemStorage(storage);

		grabbedImage = cvQueryFrame(capture);
		if (grabbedImage == NULL) {
			fprintf( stderr, "ERROR: frame is null...\n" );
			getchar();
			returnValue = EXIT_FAILURE;
			break;
		}

		//Create detection image
		imgThreshold = cvCreateImage(cvGetSize(grabbedImage), 8, 1);
		cvInRangeS(grabbedImage, min, max, imgThreshold);

		//Flip images to act as a mirror. 
		//TODO remove when camera faces screen
		if (show) {
			cvFlip(grabbedImage, grabbedImage, 1);
			cvFlip(imgThreshold, imgThreshold, 1);
		}

		//Find all dots in the image. This is where any calibration of dot detection is done, if needed, though it
		//should be fine as it is right now.
		/*
		 * image, circleStorage, method, double dp,	double minDist,	double param1, double param2, int minRadius, int maxRadius
		 */
		seq = cvHoughCircles(imgThreshold, storage, CV_HOUGH_GRADIENT, 2, 20, 20, 2, 0, 10);

		for (i = 0; i < seq->total; i++){
			// Get point
			float *p = (float*)cvGetSeqElem(seq, i);

			//Draw current circle to the original image
			if (show) paintCircle(p, grabbedImage);

			//Buffer current circle to be sent to the server
			addPointToSendQueue(p, queue);
		}
		
		//Print some statistics to the image
		if (show) {
			snprintf(strbuf, sizeof(strbuf), "Dots: %i", seq->total);
			cvPutText(grabbedImage, strbuf, cvPoint(10, 20), &font, cvScalar(WHITE));
			snprintf(strbuf, sizeof(strbuf), "FPS: %.1f", lastKnownFPS);
			cvPutText(grabbedImage, strbuf, cvPoint(10, 200), &font, cvScalar(WHITE));
		}

		//Show images 
		//TODO Commenting these out would probably improve performance quite a bit
		if (show) {
			cvShowImage("mywindow", imgThreshold);
			cvShowImage("mywindow", grabbedImage);
		}

		gettimeofday(&time, NULL);
		timeval_subtract(&diff, &time, &oldTime);
//		printf("Frames = %i\n", diff.tv_sec);
		if (diff.tv_sec >= 2) {
			lastKnownFPS = (float)frames / diff.tv_sec;
			oldTime = time;
			frames = 0;
		}

		//Add one to the frame rate counter
		frames++;
		
		//Send the dots detected this frame to the server
		sendQueue(sockfd, queue);
		clearSendQueue(queue);
		//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
		//remove higher bits using AND operator
		i = (cvWaitKey(10) & 0xff);
		if (i == 'v') show = ~show;
		if (i == 27) break;
	}

	// Release the capture device housekeeping
	cvReleaseCapture( &capture );
	cvDestroyWindow( "mywindow" );
	destroySendQueue(queue);
	close(sockfd);
	return returnValue;
}
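run() relies on a timeval_subtract() helper that is not included in this listing. A sketch of the classic implementation from the glibc manual: it computes *result = *x - *y and returns 1 if the difference is negative, 0 otherwise.

#include <sys/time.h>

int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry seconds so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec  += nsec;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int nsec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec  -= nsec;
    }

    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}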
void FkFingerKeyboard::programRun(){
	////////////////////////// for debugging only /////////////
	//FkCurrentMode::state = SET_KB_REGION_BOTTOM;
	///////////////////////////////////////////////
	int a = 135;   /* empirical threshold parameters, passed to isTouchKeyboard() below */
	int b = 180;
	int c = 80;
	int d = 160;
	while(true) {
#ifdef _WINDOWS	//Window
		if(!this->camera.getQueryFrame(&(this->dstImageTop), &(this->dstImageBottom))) break;
		switch(FkCurrentMode::state) {
		case PREPROCESS_TOP:
			if(preProcessor.paperKeyboardRecognizer.setPaperKeyboardCornerTop(dstImageTop)) {
				camera.setCameraPostprocessTop();
				preProcessor.paperKeyboardRecognizer.setKeyButton(dstImageTop);
				preProcessor.paperKeyboardRecognizer.setKeyButtonImage(dstImageTop);
				preProcessor.paperKeyboardRecognizer.showButtonImage();
				FkCurrentMode::state = CONFRIM_KB_BUTTON;
			}
			break;
		case CONFRIM_KB_BUTTON:
			break;
		case SET_KB_REGION_BOTTOM :
			if(mouseListener.isSettingROIBottom())
				imageProcessor.paperAreaDraggingImage(this->dstImageBottom, mouseListener);
			break;
		case CONFIRM_KB_REGION_BOTTOM : 
			preProcessor.paperKeyboardRecognizer.setSelectedPaperKeyboardBottom(mouseListener.getMouseDragArea());
			mouseListener.setBenchmarkPoint();
			imageProcessor.drawSelectedArea(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			camera.setCameraPreprocessBottom();
			break;
		case SET_KB_CONTOUR_BOTTOM:
			if(preProcessor.paperKeyboardRecognizer.setPaperKeyboardContourBottom(this->dstImageBottom, mouseListener) == -1){
				FkCurrentMode::state = SET_KB_REGION_BOTTOM;
				mouseListener.resetMouseDragArea();
				message->showMessage("MESSAGE : Bottom Incorrect Area.");  
			}
			else
				FkCurrentMode::state = CONFIRM_KB_CONTOUR_BOTTOM;
			camera.setCameraPostprocessBottom();
			break;
		case CONFIRM_KB_CONTOUR_BOTTOM:
			//cvSetImageROI(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			//imageProcessor.drawPaperKeyboardContour(this->dstImageBottom);
			//cvResetImageROI(this->dstImageBottom);
			imageProcessor.drawMaskBottom(this->dstImageBottom);
			break;
		case POSTPROCESS_INIT:
			//cvDestroyAllWindows();
			camera.setCameraPostprocessBottom();
			//postProcessor.fingerTipDetector.setBackgroundImageBottom(this->dstImageBottom); // grab the background image
			//postProcessor.fingerTipDetector.setkeyboardMaskBottom(this->dstImageBottom,FkPaperKeyboard::keyboardContour, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom()); // set the contour mask
			postProcessor.fingerTipDetector.setkeyboardMaskTop(this->dstImageTop,FkPaperKeyboard::keyboardMaskTop);
			postProcessor.fingerTipDetector.setkeyboardMaskBottom(this->dstImageBottom,FkPaperKeyboard::keyboardMaskBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			postProcessor.fingerTipDetector.setkeyboardMaskRectBottom();
			postProcessor.fingerTipDetector.setKeyButtonDivision(preProcessor.paperKeyboardRecognizer.getKeyButton());
			FkCurrentMode::state = INPUT_AVAILABLE;
			break;
		case INPUT_AVAILABLE:
			//test
			//cvSetImageROI(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			//imageProcessor.drawPaperKeyboardContour(this->dstImageBottom);
			//cvResetImageROI(this->dstImageBottom);
			//
			//imageProcessor.drawMaskTop(this->dstImageTop);
			//imageProcessor.drawMaskBottom(this->dstImageBottom);
			postProcessor.fingerTipDetector.initFingerTipPoint();
			if(postProcessor.fingerTipDetector.isTouchKeyboard(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom(),a,b,c,d)) {
				postProcessor.fingerTipDetector.setFingerXTop();
				postProcessor.fingerTipDetector.setFingerROITop();
				postProcessor.fingerTipDetector.detectFingerTip(this->dstImageTop);
				postProcessor.keyEventProcessing();
				postProcessor.fingerTipDetector.drawFingerTip(this->dstImageTop, this->dstImageBottom);
			}
			else 
				postProcessor.keyButtonEventListener.detachFinger();
			break;
		}	

		cvShowImage(WINDOW_NAME_TOP, this->dstImageTop);
		cvShowImage(WINDOW_NAME_BOTTOM, this->dstImageBottom);

		char chKey = cvWaitKey(1);
		
		if ( chKey == 27)
			break;
#endif
#ifndef _WINDOWS //Linux
		if ((vcos_semaphore_wait(&(camera0Set.complete_semaphore)) == VCOS_SUCCESS) && (vcos_semaphore_wait(&(camera1Set.complete_semaphore)) == VCOS_SUCCESS)  ) {
		
		memcpy(this->dstImageTop->imageData, camera0Set.image->imageData, CAMERA_VIEW_WIDTH * CAMERA_VIEW_HEIGHT * 3); //Img Copy
		memcpy(this->dstImageBottom->imageData, camera1Set.image->imageData, CAMERA_VIEW_WIDTH * CAMERA_VIEW_HEIGHT * 3); //Img Copy
		
		cvFlip(this->dstImageTop,this->dstImageTop,1);			//Mirror the image
		cvFlip(this->dstImageBottom,this->dstImageBottom,1);	//Mirror the image
		
		switch(FkCurrentMode::state) {

		case PREPROCESS_TOP:
			if(preProcessor.paperKeyboardRecognizer.setPaperKeyboardCornerTop(this->dstImageTop)) {
				preProcessor.paperKeyboardRecognizer.setKeyButton(this->dstImageTop);
				preProcessor.paperKeyboardRecognizer.setKeyButtonImage(this->dstImageTop);
				preProcessor.paperKeyboardRecognizer.showButtonImage();
				FkCurrentMode::state = CONFRIM_KB_BUTTON;
			}
			break;
		case CONFRIM_KB_BUTTON:
			break;
		case SET_KB_REGION_BOTTOM :
			if(mouseListener.isSettingROIBottom())
				imageProcessor.paperAreaDraggingImage(this->dstImageBottom, mouseListener);
			break;
		case CONFIRM_KB_REGION_BOTTOM : 
			preProcessor.paperKeyboardRecognizer.setSelectedPaperKeyboardBottom(mouseListener.getMouseDragArea());
			mouseListener.setBenchmarkPoint();
			imageProcessor.drawSelectedArea(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			break;
		case SET_KB_CONTOUR_BOTTOM:
			if(preProcessor.paperKeyboardRecognizer.setPaperKeyboardContourBottom(this->dstImageBottom, mouseListener) == -1){
				FkCurrentMode::state = SET_KB_REGION_BOTTOM;
				mouseListener.resetMouseDragArea();
				message->showMessage("MESSAGE : Bottom Incorrect Area.");  
			}
			else
				FkCurrentMode::state = CONFIRM_KB_CONTOUR_BOTTOM;
			break;
		case CONFIRM_KB_CONTOUR_BOTTOM:
			//cvSetImageROI(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			//imageProcessor.drawPaperKeyboardContour(this->dstImageBottom);
			//cvResetImageROI(this->dstImageBottom);
			imageProcessor.drawMaskBottom(this->dstImageBottom);
			break;
		case POSTPROCESS_INIT:
			//cvDestroyAllWindows();
			//postProcessor.fingerTipDetector.setBackgroundImageBottom(this->dstImageBottom); // grab the background image
			//postProcessor.fingerTipDetector.setkeyboardMaskBottom(this->dstImageBottom,FkPaperKeyboard::keyboardContour, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom()); // set the contour mask
			postProcessor.fingerTipDetector.setkeyboardMaskTop(this->dstImageTop,FkPaperKeyboard::keyboardMaskTop);
			postProcessor.fingerTipDetector.setkeyboardMaskBottom(this->dstImageBottom,FkPaperKeyboard::keyboardMaskBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			postProcessor.fingerTipDetector.setkeyboardMaskRectBottom();
			postProcessor.fingerTipDetector.setKeyButtonDivision(preProcessor.paperKeyboardRecognizer.getKeyButton());
			FkCurrentMode::state = INPUT_AVAILABLE;
			break;
		case INPUT_AVAILABLE:
			//test
			//cvSetImageROI(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom());
			//imageProcessor.drawPaperKeyboardContour(this->dstImageBottom);
			//cvResetImageROI(this->dstImageBottom);
			//
			//imageProcessor.drawMaskTop(this->dstImageTop);
			//imageProcessor.drawMaskBottom(this->dstImageBottom);
			postProcessor.fingerTipDetector.initFingerTipPoint();
			if(postProcessor.fingerTipDetector.isTouchKeyboard(this->dstImageBottom, preProcessor.paperKeyboardRecognizer.getSelectedPaperKeyboardBottom(),a,b,c,d)) {
				postProcessor.fingerTipDetector.setFingerXTop();
				postProcessor.fingerTipDetector.setFingerROITop();
				postProcessor.fingerTipDetector.detectFingerTip(this->dstImageTop);
				postProcessor.keyEventProcessing();
				postProcessor.fingerTipDetector.drawFingerTip(this->dstImageTop, this->dstImageBottom);
			}
			else
				postProcessor.keyButtonEventListener.detachFinger();
			break;
		}
		
		cvShowImage(WINDOW_NAME_TOP, this->dstImageTop);
		cvShowImage(WINDOW_NAME_BOTTOM, this->dstImageBottom);
		char chKey = cvWaitKey(500);
		
		if ( chKey == 27)
			break;

		
#endif
		}
	}
}
Example #5
/**
 * Flip the image around the selected axis (in place).
 *
 * \param flip_mode Specifies how to flip the array.\n
 * flip_mode = 0 means flipping around the x-axis, flip_mode > 0 (e.g. 1) means
 * flipping around the y-axis and flip_mode < 0 (e.g. -1) means flipping around both axes.
 */
void DiVAImage::flip(int flip_mode){
	cvFlip(image,NULL,flip_mode);
}
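For reference, a minimal self-contained sketch of the flip_mode semantics documented above ("lena.jpg" is just a placeholder path):

#include "cv.h"
#include "highgui.h"

int main(void)
{
    IplImage *img = cvLoadImage("lena.jpg", CV_LOAD_IMAGE_COLOR);
    if (!img) return -1;

    cvFlip(img, NULL,  0);   /* flip around the x-axis (upside down), in place */
    cvFlip(img, NULL,  1);   /* flip around the y-axis (mirror) */
    cvFlip(img, NULL, -1);   /* flip around both axes (180-degree rotation) */

    cvSaveImage("lena_flipped.jpg", img);
    cvReleaseImage(&img);
    return 0;
}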
CV_IMPL int
cvSaveImage( const char* filename, const CvArr* arr )
{
    int origin = 0;
    GrFmtWriter* writer = 0;
    CvMat *temp = 0, *temp2 = 0;

    CV_FUNCNAME( "cvSaveImage" );

    __BEGIN__;

    CvMat stub, *image;
    int channels, ipl_depth;

    if( !filename || strlen(filename) == 0 )
        CV_ERROR( CV_StsNullPtr, "null filename" );

    CV_CALL( image = cvGetMat( arr, &stub ));

    if( CV_IS_IMAGE( arr ))
        origin = ((IplImage*)arr)->origin;

    channels = CV_MAT_CN( image->type );
    if( channels != 1 && channels != 3 && channels != 4 )
        CV_ERROR( CV_BadNumChannels, "" );

    writer = g_Filters.FindWriter( filename );
    if( !writer )
        CV_ERROR( CV_StsError, "could not find a filter for the specified extension" );

    if( origin )
    {
        CV_CALL( temp = cvCreateMat(image->rows, image->cols, image->type) );
        CV_CALL( cvFlip( image, temp, 0 ));
        image = temp;
    }

    ipl_depth = cvCvToIplDepth(image->type);

    if( !writer->IsFormatSupported(ipl_depth) )
    {
        assert( writer->IsFormatSupported(IPL_DEPTH_8U) );
        CV_CALL( temp2 = cvCreateMat(image->rows,
            image->cols, CV_MAKETYPE(CV_8U,channels)) );
        CV_CALL( cvConvertImage( image, temp2 ));
        image = temp2;
        ipl_depth = IPL_DEPTH_8U;
    }

    if( !writer->WriteImage( image->data.ptr, image->step, image->width,
                             image->height, ipl_depth, channels ))
        CV_ERROR( CV_StsError, "could not save the image" );

    __END__;

    delete writer;
    cvReleaseMat( &temp );
    cvReleaseMat( &temp2 );

    return cvGetErrStatus() >= 0;
}
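A small usage sketch of the origin handling implemented above: cvSaveImage() flips bottom-left-origin images into a temporary before writing, so such frames come out of the file right side up.

#include "cv.h"
#include "highgui.h"

int main(void)
{
    /* A bottom-left-origin image, as some capture drivers deliver frames. */
    IplImage *img = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
    img->origin = IPL_ORIGIN_BL;
    cvSet(img, cvScalar(0, 0, 255, 0), NULL);   /* fill with red (BGR order) */

    /* The writer path above detects origin != 0 and flips a temporary copy. */
    cvSaveImage("snapshot.png", img);

    cvReleaseImage(&img);
    return 0;
}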
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{

    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
    {
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }
    else
    {
        fprintf( stderr,
        "Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n" );
        return -1;
        /*input_name = argc > 1 ? argv[1] : 0;*/
    }

    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    
    // Check whether the cascade has loaded successfully. Else report an error and quit
    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        return -1;
    }
    
    // Allocate the memory storage
    storage = cvCreateMemStorage(0);
    
    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name ); 

    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );

    // Find if the capture is loaded successfully or not.

    // If loaded successfully, then:
    if( capture )
    {
        // Capture from the camera.
        for(;;)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;
            
            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy. 
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
                cvFlip( frame, frame_copy, 0 );
            
            // Call the function to detect and draw the face
            detect_and_draw( frame_copy );

            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded successfully, then:
    else
    {
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );

        // If the image is loaded successfully, then:
        if( image )
        {
            // Detect and draw the face
            detect_and_draw( image );

            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded successfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
                        detect_and_draw( image );
                        
                        // Wait for the user input, and release the memory
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }
    
    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");

    // return 0 to indicate successful execution of the program
    return 0;
}
int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks;
	double tracker_fps, total_fps; 

	start_ticks         = total_ticks  = 0;
	tracker_total_ticks = 0;
		
	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			break;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		///////////////////////////////////////////////////////////////////
		// do face tracking and face recognition
		start_ticks = ticks = cvGetTickCount();	

        if( is_piclist )
            faceAnalyzer.detect(gray_image, prob_estimate, sState);
        else
		    faceAnalyzer.track(gray_image, prob_estimate, sState, image);   // track face in each frame but recognize by pthread
		//faceAnalyzer.detect(gray_image, prob_estimate, sState);// track and recognizer face in each frame 

		int face_num = faceAnalyzer.getFaceNum();

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

		
		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

		// blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		{
			// get face rect and id from face tracker
			CvRectItem rectItem = faceAnalyzer.getFaceRect(i);
			CvRect rect = rectItem.rc;
			int    face_trackid = rectItem.fid;
			float  probSmile = faceAnalyzer.getFaceSmileProb(i);
			int    bBlink  = faceAnalyzer.getFaceBlink(i);
			int    bSmile  = faceAnalyzer.getFaceSmile(i);
			int    bGender = faceAnalyzer.getFaceGender(i);
			int    nAgeID  = faceAnalyzer.getFaceAge(i);
			int    nFaceID = faceAnalyzer.getFaceID(i);
			float  fFaceProb= faceAnalyzer.getFaceProb(i);
			
			char *sFaceCaption = NULL;
			char sFaceNameBuff[256];
			char *sFaceName = faceAnalyzer.getFaceName(i);
			if(sFaceName[0] != '\0')
			{
				sprintf(sFaceNameBuff, "%s %.2f", sFaceName, fFaceProb);
				sFaceCaption = sFaceNameBuff;
			}

			if( ! quiet )
			{
				CvPoint2D32f *landmark6 = NULL;
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

				int trackid = -1; //face_trackid , don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
		}
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		///////////////////////////////////////////////////////////////////
		total_ticks += (cvGetTickCount() - start_ticks);
		
		// count this frame
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);

			CvRectItem *rects = faceAnalyzer.getFaceRects();
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
		
		//show Image
		if (image.width() <= 800)
			cvShowImage( str_title, image );
		else
		{   // display a scaled-down image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey(1);
		//int key = cvWaitKey(0);
		if( key == ' ' )     // press space bar to pause the video play
			cvWaitKey( 0 );                           
		else if( key == 27 ) // press 'esc' to exit
			break;	                                   
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = faceAnalyzer.getFaceRect(0).rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   //enable flag to collect face exemplars for the selected face name
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting face exemplars
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{
				// save faceset xml model
				faceAnalyzer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );
	
	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: %.1f ", tracker_fps);

	//save updated faceset model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceAnalyzer.saveFaceModelXML("faceset_model.xml");
	}

	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight =10; 
	faceAnalyzer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceAnalyzer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);

	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );

    return 0;
}
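The per-frame timing above follows a common OpenCV C-API pattern: cvGetTickCount() returns raw ticks and cvGetTickFrequency() returns ticks per microsecond, so ticks / cvGetTickFrequency() yields microseconds. A minimal sketch of the same arithmetic, factored out:

#include "cxcore.h"

/* Time one call of `stage` and return its throughput in frames per second. */
double fps_of(void (*stage)(void))
{
    int64 ticks = cvGetTickCount();
    stage();
    ticks = cvGetTickCount() - ticks;

    double ms = 1e-3 * ticks / cvGetTickFrequency();   /* elapsed milliseconds */
    return 1000.0 / ms;
}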
Example #9
HRESULT MyFlipFilter::myTransform(void* self, IMediaSample *pInSample, CMediaType* pInMT, IMediaSample *pOutSample, CMediaType* pOutMT)
{
	if (self == NULL || pInSample == NULL || pInMT == NULL || pOutSample == NULL || pOutMT == NULL)
	{
		return E_FAIL;
	}
	MyFlipFilter* pSelf = (MyFlipFilter*)(GSMuxFilter*)self;

	if (IsEqualGUID(*pInMT->Type(), *pOutMT->Type()) && IsEqualGUID(*pInMT->Subtype(), *pOutMT->Subtype()) 
		&& pInMT->IsTemporalCompressed() == pOutMT->IsTemporalCompressed())
	{
		if (pInMT->FormatType() == NULL || pOutMT->FormatType() == NULL || 
			!IsEqualGUID(*pInMT->FormatType(), *pOutMT->FormatType()))
		{
			return E_FAIL;
		}
		if (IsEqualGUID(*pInMT->FormatType(), FORMAT_VideoInfo))
		{
			VIDEOINFOHEADER* pInFormat = (VIDEOINFOHEADER*)pInMT->Format();
			VIDEOINFOHEADER* pOutFormat = (VIDEOINFOHEADER*)pOutMT->Format();
			if (pInFormat == NULL || pOutFormat == NULL)
				return E_FAIL;
			if (pInFormat->bmiHeader.biWidth != pOutFormat->bmiHeader.biWidth || 
				pInFormat->bmiHeader.biHeight != pOutFormat->bmiHeader.biHeight)
			{
				return E_FAIL;
			}
		}
		else
		{
			return E_FAIL;
		}


		int camChannel;
		GUID guidSubType = pInMT->subtype;
		if (IsEqualGUID(guidSubType, MEDIASUBTYPE_RGB24))
		{
			camChannel = 3;
		}
		else if(IsEqualGUID(guidSubType, MEDIASUBTYPE_RGB32) || IsEqualGUID(guidSubType, MEDIASUBTYPE_ARGB32))
		{
			camChannel = 4;
		}
		else
		{
			// unsupported subtype: camChannel would otherwise be used uninitialized
			return E_FAIL;
		}

		BYTE* pInBuffer = NULL;
		BYTE* pOutBuffer = NULL;
		pInSample->GetPointer(&pInBuffer);
		pOutSample->GetPointer(&pOutBuffer);
		if (pInBuffer == NULL || pOutBuffer == NULL)
			return E_FAIL;
		//memcpy((void*)pOutBuffer, (void*)pInBuffer, pOutSample->GetSize());

		VIDEOINFOHEADER* pInFormat = (VIDEOINFOHEADER*)pInMT->Format();

		IplImage* cvImgSrc = cvCreateImageHeader(cvSize(pInFormat->bmiHeader.biWidth , pInFormat->bmiHeader.biHeight), 8, camChannel);
		cvImgSrc->imageData = (char*)pInBuffer;

		IplImage* cvImgDst = cvCreateImageHeader(cvSize(pInFormat->bmiHeader.biWidth , pInFormat->bmiHeader.biHeight), 8, camChannel);
		cvImgDst->imageData = (char*)pOutBuffer;


		cvFlip(cvImgSrc,cvImgDst,0); // flip top-bottom

		cvReleaseImageHeader(&cvImgSrc);
		cvReleaseImageHeader(&cvImgDst);


		return S_OK;
	}
	else
	{
		return E_FAIL;
	}

}
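The core trick in this filter is wrapping the DirectShow sample buffers in IplImage headers so cvFlip() can operate without copying pixels. A standalone sketch of that pattern (buffer sizes and row alignment are the caller's responsibility):

#include "cv.h"

/* Flip a raw packed BGR24 buffer vertically into another buffer of the
   same size, allocating only the IplImage headers, never the pixels. */
void flip_raw_bgr24(unsigned char *inBuf, unsigned char *outBuf,
                    int width, int height)
{
    IplImage *src = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
    IplImage *dst = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
    src->imageData = (char*)inBuf;
    dst->imageData = (char*)outBuf;

    cvFlip(src, dst, 0);          /* top-bottom flip, input buffer untouched */

    cvReleaseImageHeader(&src);   /* frees the headers only, not the buffers */
    cvReleaseImageHeader(&dst);
}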
Example #10
int main(int argc,char **argv)
{
    if(argc != 2)
    {
        printf("usage: %s <mode>\n0 - integrate webcam\n1 - external webcam\n",argv[0]);
        exit(-1);
    }
    else
    {
        int web=atoi(argv[1]);
        if(web >= 0 && web <= 1)
        {
            CvCapture *cam = cvCaptureFromCAM(web);
    	    cvSetCaptureProperty(cam,CV_CAP_PROP_FRAME_WIDTH,640);
            cvSetCaptureProperty(cam,CV_CAP_PROP_FRAME_HEIGHT,480);
            IplImage *img = cvQueryFrame(cam);
            IplImage *copia = cvCreateImage(cvGetSize(img),8,3);
            IplImage *prima = NULL;
            IplImage *binary = cvCreateImage(cvGetSize(img),8,1);
            IplImage *ris = cvCreateImage(cvGetSize(img),8,3);

            cvNamedWindow(NOME,1);
            //Variables for reading the current time and date
            time_t tempo;
            struct tm *timeobj;
            time(&tempo);
            timeobj = localtime(&tempo);

            char nome[25];
            long int num=0;
            //Format the current time/date into nome
            strftime(nome,24,"%H-%M-%S_%F.avi",timeobj);
            //Create the writer that will save the captured frames as a DivX-compressed video
            CvVideoWriter *video = cvCreateVideoWriter(nome,CV_FOURCC('D','I','V','X'),15,cvSize(640,480),1);
            //Initialize the fonts
            CvFont scritta,info;
            cvInitFont(&scritta,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0,0,5,CV_AA);
            cvInitFont(&info,CV_FONT_HERSHEY_SIMPLEX,.6,.6,0,1,6);

            char tasto;
            int i,j,trovato=0,scelta,step = binary->widthStep/sizeof(uchar);
            uchar *target = (uchar*)binary->imageData;
            //Choose between dynamic and static mode
            do
            {
                printf("-- Mode selection --\n1)Dynamic -- detect variations between one frame and the next\n2)Static -- detect variations between a chosen frame and the current frame\nChoice: ");
                scanf("%1d",&scelta);
            }while(scelta < 1 || scelta > 2);

            while(img)
            {
                //Mirror the image
                cvFlip(img,img,1);
                //Read the current time
                time(&tempo);
                timeobj = localtime(&tempo);
                strftime(nome,24,"%H:%M:%S %F",timeobj);
                //Draw the info on screen
                cvPutText(img,nome,cvPoint(415,475),&info,CV_RGB(0,255,255));
                //Copy the frame
                cvCopy(img,copia);

                riduciNoise(img,img);
                //Dynamic mode
                if(scelta == 1)
                {
                    //If this is the first captured frame
                    if(prima == NULL)
                    {
                        prima = cvCreateImage(cvGetSize(img),8,3);
                        //Copy img into prima
                        cvCopy(img,prima);
                    }
                    else
                    {
                        //Otherwise check for differences against the previous frame
                        cvAbsDiff(img,prima,ris);
                        //Color to grayscale
                        cvCvtColor(ris,binary,CV_BGR2GRAY);
                        //Threshold the image
                        cvThreshold(binary,binary,62,255,CV_THRESH_BINARY);
                        riduciNoise(binary,binary);
                        cvCopy(img,prima);
                    }
                }
                //Static mode
                else
                {
                    //If the reference frame to monitor has been taken
                    if(prima != NULL)
                    {
                        cvAbsDiff(img,prima,ris);
                        cvCvtColor(ris,binary,CV_BGR2GRAY);
                        cvThreshold(binary,binary,62,255,CV_THRESH_BINARY);
                        riduciNoise(binary,binary);

                    }

                }

                //Check the image pixel by pixel
                for(i=0; i < binary->height; i++)
                {
                    for(j=0; j < binary->width; j++)
                    {
                        if(target[i*step+j] == 255)
                            trovato = 1;
                    }
                }

                //If a change was found
                if(trovato)
                {
                    num++;
                    //Draw "REC O" on the image
                    cvPutText(copia,"REC",cvPoint(10,25),&scritta,CV_RGB(255,0,0));
                    cvCircle(copia,cvPoint(100,15),5,CV_RGB(255,0,0),20,8);
                    //Save the frame
                    cvWriteFrame(video,copia);
                    trovato = 0;
                }
                //Show the image
                cvShowImage(NOME,copia);

                tasto = cvWaitKey(15);

                if(tasto == 'q')
                    break;
                //If 'v' is pressed, grab the reference frame to monitor
                else if(tasto == 'v' && scelta == 2)
                {
                    if(prima == NULL)   /* avoid leaking an image on repeated presses */
                        prima = cvCreateImage(cvGetSize(img),8,3);
                    cvCopy(img,prima);
                }

                img = cvQueryFrame(cam);
            }
            //If any frames were recorded
            if(num != 0)
            {
                //Finalize the video file
                cvReleaseVideoWriter(&video);
                printf("Video %s saved\n",nome);
            }
            }
        }
        else
            puts("webcam not found");
    }
    return 0;
}
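The riduciNoise() ("reduce noise") helper called above is not part of this listing. A plausible stand-in (an assumption, not the original): one morphological erode/dilate pass, which removes the isolated speckle pixels that would otherwise trigger the motion test.

void riduciNoise(IplImage *src, IplImage *dst)
{
    cvErode(src, dst, NULL, 1);    /* drop isolated bright pixels */
    cvDilate(dst, dst, NULL, 1);   /* restore the surviving blobs */
}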
int testfaceLib_sThread ( const char* str_video, int  trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster)
{
	int  faceimgID = 0;
	char driver[8];
	char dir[1024];
	char fname[1024];
	char ext[8];
	char sImgPath[1024];

	if(sfolder)
	{
		char sysCommand[128];
		sprintf (sysCommand, "mkdir %s", sfolder);
		system (sysCommand);

		sprintf(sImgPath, "%s//%s", sfolder,  "imaginfo.txt");
		sprintf(fname,   "%s//%s", sfolder,  "faceinfo.txt");
	}
	else
	{
		sprintf(sImgPath, "%s", "imaginfo.txt");
		sprintf(fname,   "%s", "faceinfo.txt");
	}

	FILE* fp_imaginfo = fopen( sImgPath, "wt" );
    FILE* fp_faceinfo = fopen( fname, "wt" );

    bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256]="";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);
	
	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}
	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );

	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType == TRA_HAAR ? "haar" : (trackerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// config face tracker
	const int  face_max = 16;
	CvRectItem rects[face_max];
	
	tagDetectConfig configParam;
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;

	CxlibFaceDetector detector;
	detector.init(viewAngle, (EnumFeaType)trackerType);
	detector.config( configParam );

	CxlibFaceTracker tracker;
	tracker.init(viewAngle, (EnumTrackerType)trackerType);
	tracker.config( configParam, TR_NLEVEL_3 );

	if( cvGetErrStatus() < 0 )
	{
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// config landmark detector
	CvPoint2D32f   landmark6[6+1]; // consider both 6-pt and 7-pt
	float          parameters[16];
	bool      bLandmark = false;
	CxlibLandmarkDetector landmarkDetector(LDM_6PT);

	int size_smallface = 64;
	int size_bigface   = 128;
	CxlibAlignFace cutFace(size_smallface, size_bigface);
	
	// config blink/smile/gender detector
	int    bBlink = 0, bSmile = 0, bGender = 0, bAge = 0;  //+1, -1, otherwise 0: no process 
	float  probBlink = 0, probSmile = 0, probGender = 0, probAge[4];
	int    nAgeID = 0;

	CxlibBlinkDetector  blinkDetector(size_smallface);
	CxlibSmileDetector  smileDetector(size_smallface);
	CxlibGenderDetector genderDetector(size_smallface);
	CxlibAgeDetector    ageDetector(size_bigface);

	// config face recognizer
	float probFaceID = 0;
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	CxlibFaceRecognizer faceRecognizer( size_bigface, recognizerType );
	if(recog) faceRecognizer.loadFaceModelXML(str_facesetxml);
	
	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play = true;
	mouse_faceparam.ret_online_collecting = 0;
		
	if(! quiet)
	{
		mouse_faceparam.face_num  = face_max;
		mouse_faceparam.rects     = rects;
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= cutFace.getBigCutFace();
		mouse_faceparam.typeRecognizer = 1;
		mouse_faceparam.faceRecognizer = &faceRecognizer;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks, landmark_total_ticks, align_total_ticks,
		   blink_total_ticks, smile_total_ticks, gender_total_ticks, age_total_ticks, recg_total_ticks;
	double frame_fps, tracker_fps, landmark_fps, align_fps, blink_fps, smile_fps, gender_fps, age_fps, recg_fps, total_fps; 

	start_ticks         = total_ticks          = 0;
	tracker_total_ticks = landmark_total_ticks = align_total_ticks  = 0;
	blink_total_ticks   = smile_total_ticks    = gender_total_ticks = age_total_ticks = recg_total_ticks = 0;

	tracker_fps = landmark_fps = align_fps = blink_fps = smile_fps = gender_fps = age_fps = recg_fps = total_fps = 0.0;        

	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;
	int   print_faceid=-1;
	float print_score = 0;
	std::string  print_facename;

	bool bRunLandmark = blink || smile|| gender|| age|| recog || saveface;
	IplImage *thumbnailImg   = cvCreateImage(cvSize(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT), IPL_DEPTH_8U, 3);   
	
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster = true;
	if( is_piclist ) bEnableAutoCluster = false;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			continue;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		// do face tracking
		start_ticks = ticks = cvGetTickCount();	
       
		int face_num = 0;
        if( is_piclist )
            face_num = detector.detect( gray_image, rects, face_max );
        else
            face_num = tracker.track( gray_image, rects, face_max, image ); // track in a video for faster speed
		  //face_num = tracker.detect( gray_image, rects, face_max ); // detect in an image

		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

        // blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		//for( int i=0; i< MIN(1,face_num); i++ )
		{
			// get face rect and id from face tracker
			CvRect rect = rects[i].rc;

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d %f", rect.x, rect.y, rect.width, rect.height, rects[i].prob );

			int    face_trackid = rects[i].fid;
			float  like = rects[i].prob;
			int    angle= rects[i].angle;

			// filter out faces that fall outside the image
			if( rect.x+rect.width  > gray_image.width()   || rect.x < 0 ) continue;
			if( rect.y+rect.height > gray_image.height() || rect.y < 0 ) continue;

			//tracker.getThumbnail(image, rect, thumbnailImg);

			// detect landmark points 
			ticks = cvGetTickCount();	

			if(bRunLandmark)
			{
                if( is_piclist )
				    bLandmark = landmarkDetector.detect( gray_image, &rect, landmark6, parameters, angle ); //detect in an image
                else
				    bLandmark = landmarkDetector.track( gray_image, &rect, landmark6, parameters, angle ); // track in a video for faster speed

				ticks = cvGetTickCount() - ticks;
				landmark_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
				landmark_total_ticks += ticks;
			}
			else
				bLandmark = false;

	
			if(quiet == false && bLandmark == false) 
			{
				//DrawFaceRect
				cxlibDrawFaceRect(image, rect);
				continue;
			}

			// warped align face and hist eq to delighting
			ticks = cvGetTickCount();	

			cutFace.init(gray_image, rect, landmark6);

			ticks = cvGetTickCount() - ticks;
			if(ticks > 1)
				align_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
			else
			{	align_fps = 0;
				ticks = 0;
			}
			align_total_ticks += ticks;

			if(saveface)   //save face icon for training later
			{
				//save cutfaces
				if(sfolder)
				{
#ifdef WIN32
					_splitpath(vidcap->filename(),driver,dir,fname,ext);
					sprintf(sImgPath, "%s//%s%s", sfolder, fname,ext);
#else
					sprintf(sImgPath, "%s//%06d.jpg", sfolder, faceimgID++);
#endif
				}
				else
					sprintf(sImgPath, "%s#.jpg", vidcap->filename());
				
				cvSaveImage(sImgPath, cutFace.getBigCutFace());
			}

			// detect blink
			bBlink = 0;	
			probBlink = 0;
			if(blink && bLandmark)
			{
				ticks = cvGetTickCount();	
				float blink_threshold = blinkDetector.getDefThreshold();//0.5;
				int ret = blinkDetector.predict( &cutFace, &probBlink);
			
				if(probBlink > blink_threshold )
					bBlink = 1;  //eye close
				else 
					bBlink = -1; //eye open

				ticks = cvGetTickCount() - ticks;
				blink_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				blink_total_ticks += ticks;

				print_score = probBlink;
			}
			else blink_fps = 0;

			// detect smile
			bSmile    = 0;	
			probSmile = 0;
			if ( smile && bLandmark )
			{	
				ticks = cvGetTickCount();
				float smile_threshold = smileDetector.getDefThreshold(); //0.48;  
				int ret = smileDetector.predict(&cutFace, &probSmile);

				if(probSmile > smile_threshold)
					bSmile = 1;  //smile
				else 
					bSmile = -1; //not smile

				ticks	  = cvGetTickCount() - ticks;
				smile_fps = 1000.0 /( 1e-3 * ticks / cvGetTickFrequency() );
				smile_total_ticks += ticks;

				print_score = probSmile;
			}
			else smile_fps = 0;

			//detect gender
			bGender    = 0;	
			probGender = 0;
			if(gender && bLandmark)
			{
				ticks = cvGetTickCount();	
				float gender_threshold = genderDetector.getDefThreshold(); // 0.42; 
				int ret = genderDetector.predict(&cutFace, &probGender);

				if(probGender > gender_threshold)
					bGender =  1; //female
				else
					bGender = -1; //male

				//bGender = -1:male, 1:female, 0: null
				// smooth prediction result
                if( ! is_piclist )
				    bGender = genderDetector.voteLabel(face_trackid, bGender);
				
				ticks = cvGetTickCount() - ticks;
				gender_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				gender_total_ticks += ticks;

				print_score = probGender; 
			}
			else gender_fps = 0;

			//detect age
			nAgeID  = -1;
			if(age && bLandmark && rect.width*rect.height > 40*40)
			{
				ticks = cvGetTickCount();	

				//nAgeID = 0:"Baby", 1:"Kid", 2:"Adult", 3:"Senior"
				nAgeID = ageDetector.predict(&cutFace, probAge);

				// smooth prediction result
                if( ! is_piclist )
				    nAgeID = ageDetector.voteLabel(face_trackid, nAgeID); 

				ticks = cvGetTickCount() - ticks;
				age_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				age_total_ticks += ticks;

				print_score = probAge[nAgeID]; 
				//if( ! quiet )	cxDrawAignFace2Image(image, pCutFace2);
			}
			else 
			{
				age_fps = 0;
			}

			// recognize the face id
			// we only do recognition once every 5 frames (recognition interval)
			char  *sFaceCaption = NULL;
			char  sFaceCaptionBuff[256];
            int face_id = 0;
			probFaceID = 0;
			if ( recog && bLandmark )
			{
				ticks = cvGetTickCount();
				float face_threshold = faceRecognizer.getDefThreshold(); 
				/////////////////////////////////////////////////////////////////////////////////////////
				face_id  = -1;
				if(bEnableAutoCluster && !is_piclist)
				{
					bool bAutocluster = true;
					if(mouse_faceparam.ret_online_collecting) bAutocluster = false;
					//face clustering
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID, bAutocluster, face_trackid, frames);
				}
				else//face recognition
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID);
				/////////////////////////////////////////////////////////////////////////////////////////

				ticks    = cvGetTickCount() - ticks;
				recg_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );
				recg_total_ticks += ticks;
				
				// smooth prediction result
                if( ! is_piclist && !bEnableAutoCluster)
                {
				    if(probFaceID > face_threshold*1.0)
					    face_id = faceRecognizer.voteLabel(face_trackid, face_id); 
				    else
					    face_id = faceRecognizer.voteLabel(face_trackid, -1);
                }
				else if(probFaceID <= face_threshold)
				{
					face_id = -1;
				}

				//set face name caption
				if(face_id >= 0)
				{
					// recognized face name
					const char* sFaceName = faceRecognizer.getFaceName(face_id);
					sprintf(sFaceCaptionBuff, "%s %.2f", sFaceName, probFaceID);
					//sprintf(sFaceCaptionBuff, "%s", sFaceName); // display the name only, without the score
					sFaceCaption = sFaceCaptionBuff;
					
					print_score  = probFaceID;
					print_faceid = face_id;
				}
				else
				{   // failed to recognize 
					//sprintf(sFaceCaptionBuff, "N/A %.2f", probFaceID);
					//sFaceCaption = sFaceCaptionBuff;
				}

				// collect and save unknown face exemplars
				if(probFaceID < face_threshold*0.9 || face_id != mouse_faceparam.ret_faceset_id )
				{
					if(mouse_faceparam.ret_online_collecting && (face_num ==1 || face_trackid == mouse_faceparam.ret_facetrack_id))
					{
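						// only harvest exemplars while the face sits in the middle
						// half of the frame (between the yellow guide lines drawn below)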
						if( rect.x > gray_image.width()/4 && rect.x+rect.width < gray_image.width()*3/4 ) 
						{
							mouse_faceparam.updated = true;
							int nFaceSetIdx = faceRecognizer.getFaceSetIdx(mouse_faceparam.ret_faceset_id);
							bool bflag = faceRecognizer.tryInsertFace(cutFace.getBigCutFace(), nFaceSetIdx);
							//printf("insert flag %d\n", bflag);
						}
					}
				}
			}
			else recg_fps = 0;

			if( ! quiet )
			{
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

				//sprintf(sFaceCaptionBuff, "%.2f", print_score);
				//sFaceCaption = sFaceCaptionBuff;

				int trackid = -1; //face_trackid. don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            // log file
            if( fp_faceinfo != NULL )
            {
                // index,  rect,  landmark6,  bBlink, probBlink, bSmile, probSmile, bGender, probGender, nAgeID, probAge[nAgeID], face_id, probFaceID
				//fprintf( fp_faceinfo, "#%s# @%s@ ",    vidcap->filename(), sImgPath);
				fprintf( fp_faceinfo, "#%s# ",    vidcap->filename());
                fprintf( fp_faceinfo, "faceidx=( %06d %02d )", vidcap->index(), i+1 );
				fprintf( fp_faceinfo, "   rect=( %3d %3d %3d %3d )", rect.x, rect.y, rect.width, rect.height );
                fprintf( fp_faceinfo, "   landmark6=(" );
                int l;
                for( l = 0; l < 6; l++ )
                    fprintf( fp_faceinfo, " %3.0f %3.0f", landmark6[l].x, landmark6[l].y );
                fprintf( fp_faceinfo, " )");
                fprintf( fp_faceinfo, "   blink=( %+d %f )", bBlink, probBlink );
                fprintf( fp_faceinfo, "   smile=( %+d %f )", bSmile, probSmile );
                fprintf( fp_faceinfo, "   gender=( %+d %f )", bGender, probGender );
                fprintf( fp_faceinfo, "   agegroup=( %+d %f )", nAgeID, (nAgeID >= 0 && nAgeID < 4) ? probAge[nAgeID] : 1.0f );
                fprintf( fp_faceinfo, "   identity=( %+d %f )", face_id, probFaceID );
                fprintf( fp_faceinfo, "\n" );
            }
        }
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		ticks    = cvGetTickCount() - start_ticks;
		total_ticks += (ticks);
		frame_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );

		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
	
		//show Image
		if (image.width() <= 800)
		{
			//show image
			cvShowImage( str_title, image );
		}
		else
		{   // show scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey( 30 );
		//int key = cvWaitKey( );
		if( key == ' ' ) // press the spacebar to pause the video play 
			cvWaitKey( 0 );                           
		else if( key == 27 )
			break;	    // press 'esc' to exit
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = rects[0].rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   // collect face exemplars for current selected facename
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			// turn on/off the autofocus flag
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting faces
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{   // save face models
				faceRecognizer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print speed info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );

	if (landmark_total_ticks != 0.0)
		landmark_fps = 1.0f  / ( landmark_total_ticks * temp / frames );

	if (align_total_ticks != 0.0)
		align_fps    = 1.0f  / ( align_total_ticks * temp / frames );

	if (blink_total_ticks != 0.0)
		blink_fps  = 1.0f  / (blink_total_ticks * temp / frames);

	if (smile_total_ticks != 0.0)
		smile_fps  = 1.0f  / (smile_total_ticks * temp / frames);

	if (gender_total_ticks != 0.0)
		gender_fps = 1.0f  / (gender_total_ticks * temp / frames);

	if (age_total_ticks != 0.0)
		age_fps = 1.0f  / (age_total_ticks * temp / frames);

	if (recg_total_ticks != 0.0)
		recg_fps   = 1.0f  / (recg_total_ticks  * temp / frames);

	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: Fd:%.1f Ld:%.1f Fa:%.1f Bl:%.1f Sm:%.1f Ge:%.1f Ag:%.1f Rc:%.1f\n",
		tracker_fps, landmark_fps, align_fps, 
		blink_fps,   smile_fps,    gender_fps, age_fps, recg_fps);

	//save updated face model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceRecognizer.saveFaceModelXML("faceset_model.xml");
	}

	
	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceRecognizer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceRecognizer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);
	//faceRecognizer.saveFaceModelXML("faceset_modelMerged#.xml");

	//release buff 
	
	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&thumbnailImg);
	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );
	
    if( fp_faceinfo != NULL )
        fclose( fp_faceinfo );

    return 0;
}
Example #12
0
void OpenCVManager::update(int timeScale)
{
	int stepTime = lastFrame + timeScale*16.f;

	///////////////////////////
	//  update vector field  //
	///////////////////////////

	//approach normal
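	// every field cell eases 1% per update toward a vector of constant
	// length m_vectorFieldNorm; cells are stored column-major,
	// index = (i * m_fieldHeight) + j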
	Particle* vect;
	for (int i = 0; i < m_fieldWidth; ++i)
	{
		for (int j = 0; j < m_fieldHeight; ++j)
		{
			vect = &m_vectorField[(i*m_fieldHeight) + j];

			ofVec3f target;
			if (vect->vel.lengthSquared() > 0.f)
				target = (vect->vel / vect->vel.length()) * m_vectorFieldNorm;
			else
				target = ofVec3f(m_vectorFieldNorm, 0.f);
			vect->vel += (target - vect->vel) * 0.01;
		}
	}

	//update from video
	if (m_vidGrabber.isInitialized())
	{
		bool isNewFrame = false;
		int id = 0;

		m_vidGrabber.update();
		isNewFrame = m_vidGrabber.isFrameNew();

		if (isNewFrame)
		{
			//grab new frame
			m_curImg.setFromPixels(m_vidGrabber.getPixels(), s_frameSize.width, s_frameSize.height);

			//populate image
			if (firstFrame)
			{
				cvCvtColor(m_curImg.getCvImage(), m_newFrame, CV_RGB2GRAY);
			}

			//cycle new to old
			IplImage* buff = m_pyramidOld;
			m_pyramidOld = m_pyramidNew;
			m_pyramidNew = buff;

			buff = m_oldFrame;
			m_oldFrame = m_newFrame;
			m_newFrame = buff;

			m_numFeatures = s_maxFeatures;

			//convert color
			cvCvtColor(m_curImg.getCvImage(), buff, CV_RGB2GRAY);

			//mirror image
			cvFlip(buff, m_newFrame, -1);

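			// pick up to m_numFeatures strong corners in the previous frame
			// (quality level 0.02, min distance 1 px) to seed the LK flow below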
			cvGoodFeaturesToTrack(m_oldFrame, m_eig_image, m_tmp_image, m_oldImgFeatures, &m_numFeatures, 0.02, 1.0);

			//run flow
			int level = 2;// num levels in pyramid
			m_pointFound = new char[m_numFeatures];
			float* err = new float[s_maxFeatures];

			cvCalcOpticalFlowPyrLK(m_oldFrame, m_newFrame, 
				m_pyramidOld, m_pyramidNew, 
				m_oldImgFeatures, m_newImgFeatures, m_numFeatures, 
				s_frameSize, level, m_pointFound, err, m_termCriteria, 0);

			//set flags if necessary
			if (firstFrame)
			{
				m_flowFlags = CV_LKFLOW_PYR_A_READY;
				firstFrame = false;
			}

			//affect vector field
			ofVec2f deltaVec;
			Particle* tmpVec;

			try
			{

				for (int i = 0; i < m_numFeatures; ++i)
				{
					if (!m_pointFound[i]
						|| m_newImgFeatures[i].x < 0
						|| m_newImgFeatures[i].y < 0
						|| m_newImgFeatures[i].x >= ofGetWidth()
						|| m_newImgFeatures[i].y >= ofGetHeight())
						continue;

					deltaVec = ofVec2f(m_newImgFeatures[i].x - m_oldImgFeatures[i].x, m_newImgFeatures[i].y - m_oldImgFeatures[i].y);

					if (deltaVec.lengthSquared() < m_vectorFieldNorm * m_vectorFieldNorm)
						continue;

					//closest field value
					int posX = (int)m_newImgFeatures[i].x * s_frameSizeInv.x * ofGetWidth() * s_vectorFieldDensityInv;
					int posY = (int)(s_frameSize.height - m_newImgFeatures[i].y) * s_frameSizeInv.y * ofGetHeight() * s_vectorFieldDensityInv;

					if (posX >= m_fieldWidth) continue;
					if (posY >= m_fieldHeight) continue;

					tmpVec = &m_vectorField[(posX * m_fieldHeight) + posY];

					//reverse for cv opposite y coord
					deltaVec.y *= -1;

					tmpVec->vel += deltaVec * timeScale * 0.5f;

					tmpVec->vel.limit(tmpVec->maxSpeed);
				}
			}
			catch (const exception& e)
			{
				cout << e.what() << endl;
			}

			// free the per-frame status buffer; note that m_pointFound is also
			// re-allocated every frame without freeing the previous block
			delete[] err;
		}
	}
	else
	{
		//no video camera, use noise
		Particle* vect;
		for (int i = 0; i < m_fieldWidth; ++i)
		{
			for (int j = 0; j < m_fieldHeight; ++j)
			{
				vect = &m_vectorField[(i*m_fieldHeight) + j];

				float noiseNum = ((i*m_fieldHeight) + j) + ofGetFrameNum() * 0.001f;
				vect->vel = ofVec2f(-1.f + ofNoise(noiseNum)*2.f, -1.f + 2.f * ofNoise(noiseNum + 1000)) * vect->maxSpeed;
			}
		}
	}

	//////////////////////
	//  simulate crowd  //
	//////////////////////

	//generate a person every 2 seconds
	if (stepTime - crowdLastGenerated > 2000
		&& pPeople->size() < s_maxPeopleParticles)
	{
		float y = ofGetWindowHeight();
		y = ofRandom(y * 0.25f, y * 0.75f);

		float x = -s_generationBuffer;
		if (ofRandom(1) > 0.5)
			x = ofGetWidth() + s_generationBuffer;

		//debug
		x = ofRandom(ofGetWidth());
		y = ofRandom(ofGetHeight());

		Particle* p = ppCurrentScene[0]->addParticleOfProperType(
			ofVec2f(x, y)
			);
		
		p->maxSpeed = s_vectorFieldDensity * 0.1f;
		p->vel = ofVec2f( (x < 0) ? 0.5f : -0.5f, 0.f);
		crowdLastGenerated = stepTime;
	}

	//move people across screen or remove them
	for (vector<Particle*>::iterator p = pPeople->begin(); p != pPeople->end();)
	{
		ofVec3f targetVel = ofVec3f(0.f, 0.f);

		//calculate vector field that's close

		int fieldX = (*p)->pos.x / s_vectorFieldDensity;
		int fieldY = (*p)->pos.y / s_vectorFieldDensity;

		if (fieldX < 2) fieldX = 2;
		else if (fieldX > m_fieldWidth - 3) fieldX = m_fieldWidth - 3;
		if (fieldY < 2) fieldY = 2;
		else if (fieldY > m_fieldHeight - 3) fieldY = m_fieldHeight - 3;

		for (int i = -2; i < 3; ++i)
		{
			for (int j = -2; j < 3; ++j)
			{
				int pos = ((fieldX + i) * m_fieldHeight) + (fieldY + j);
				targetVel += (3.f - std::max(abs(i), abs(j))) * m_vectorField[pos].vel;
			}
		}
		targetVel *= 0.029f;
		(*p)->accel += (targetVel - (*p)->vel) * timeScale * 0.1f;

		//update person
		(*p)->update(timeScale);// stepTimeDelta;

		if ((*p)->pos.x > ofGetWindowWidth() + s_generationBuffer*1.5f
			|| (*p)->pos.x < -s_generationBuffer * 1.5f
			|| (*p)->pos.y > ofGetHeight() + s_generationBuffer * 1.5f
			|| (*p)->pos.y < -s_generationBuffer * 1.5f)
		{
			p = pPeople->erase(p);
		}
		else
		{
			++p;
		}
	}
	lastFrame = stepTime;

}
Example #13
0
void KeyboardHandler(unsigned char Key, int x, int y){

	switch(Key){

	case 27: 	cvReleaseImage(&Image);
			cvDestroyWindow("Image");
			exit(0); break;


	case 'r': WorldRoll+=5; break;
	case 'R': WorldRoll-=5; break;

	case 'p': WorldPitch+=5; break;
	case 'P': WorldPitch-=5; break;

	case 'y': WorldYaw+=5; break;
	case 'Y': WorldYaw-=5; break;





	case '7': FaceRoll+=2; break;
	case '1': FaceRoll-=2; break;

	case '9': FacePitch+=2; break;
	case '3': FacePitch-=2; break;

	case '/': FaceYaw+=2; break;
	case '*': FaceYaw-=2; break;

	case '6': FaceTx+=0.5; break;
	case '4': FaceTx-=0.5; break;

	case '8': FaceTy+=0.5; break;
	case '2': FaceTy-=0.5; break;

	case '+': FaceTz+=0.5; break;
	case '-': FaceTz-=0.5; break;




	case 's':	cvFlip(Image,Image); 
			cvShowImage("Image",Image);
			cvWaitKey(50);
			char Buffer[50];
			static int FrameIndex=0;
			sprintf(Buffer,"Frame-%03d.jpg",FrameIndex++);
			cvSaveImage(Buffer,Image);
			break;



	}
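	// apply the accumulated world-rotation deltas once per key event, then
	// reset them below so they are not re-applied on the next redisplay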




	glRotatef(WorldRoll,0,0,1);
	glRotatef(WorldPitch,0,1,0);
	glRotatef(WorldYaw,1,0,0);


	WorldRoll=0;
	WorldPitch=0;
	WorldYaw=0;

	glutPostRedisplay();
}
Example #14
0
int main (int argc, char **argv)
{

	CvCapture *capture = 0;
	IplImage *frame, *frame_copy = 0;
	cascade = (CvHaarClassifierCascade *) cvLoad ("yolo.xml", 0, 0, 0);

	if (!cascade)
	{
		printf ("ERROR: Could not load classifier cascade\n");
		return -1;
	}

	storage = cvCreateMemStorage (0);

	capture = cvCaptureFromCAM (0);

	if (capture){
		int j = 0;

		for (;;){

			FILE *fin;
			int i = 0;
			flag = 0, f = 0;

			if(!cvGrabFrame (capture)){
				break;
			}

			frame = cvRetrieveFrame (capture);

			if (!frame){
				break;
			}

			if (!frame_copy){
				frame_copy = cvCreateImage(
					cvSize (frame->width, frame->height),
					IPL_DEPTH_8U, frame->nChannels);
			}

			system ("ps -e | grep totem > sample.txt");
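			// sample.txt now holds any "totem" process entries; the scan below
			// uses it to tell whether the media player is running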

			fin = fopen ("sample.txt", "r");

			if (!fin){
				break;
			}

			char a[40];
			while (fscanf (fin, "%39s", a) == 1){
				if (strncmp (a, "totem", 5) == 0){
					f = 1;
					break;
				}
				f = 0;
    		}

			fclose (fin);

			if (frame->origin == IPL_ORIGIN_TL){
				cvCopy (frame, frame_copy, 0);
			}
			else{
				cvFlip (frame, frame_copy, 0);
			}

  			flag = detect_and_draw (frame_copy);

			if (f == 0)
			{
				printf("no totem playing\n"
						"please switch off the application from the command centre\n"
						"or open a video file\n");
				sleep (5);
			}
			else if (flag == 0 && f == 1 && played == 1)
			{
				system ("totem --pause");
				played = 0;
			}
			else if (flag == 1 && f == 1 && played == 0)
			{
				system ("totem --play");
				played = 1;
			}

			if (cvWaitKey (10) >= 0)
				break;

		}
	}

	return 0;
}
Example #15
0
/// write a frame with FFMPEG
bool CvVideoWriter_FFMPEG::writeFrame( const IplImage * image )
{
	bool ret = false;

    CV_FUNCNAME("CvVideoWriter_FFMPEG::writeFrame");

	__BEGIN__;

	// typecast from opaque data type to implemented struct
#if LIBAVFORMAT_BUILD > 4628
    AVCodecContext *c = video_st->codec;
#else
	AVCodecContext *c = &(video_st->codec);
#endif

#if LIBAVFORMAT_BUILD < 5231
    // This is not needed in recent versions of ffmpeg
    if( c->codec_id == CODEC_ID_RAWVIDEO && image->origin != IPL_ORIGIN_BL )
    {
        if( !temp_image )
            temp_image = cvCreateImage( cvGetSize(image),
                                    image->depth, image->nChannels );
        cvFlip( image, temp_image, 0 );
        image = temp_image;
    }
#endif

    // check parameters
    if (input_pix_fmt == PIX_FMT_BGR24) {
        if (image->nChannels != 3 || image->depth != IPL_DEPTH_8U) {
            CV_ERROR(CV_StsUnsupportedFormat, "cvWriteFrame() needs images with depth = IPL_DEPTH_8U and nChannels = 3.");
        }
    }
	else if (input_pix_fmt == PIX_FMT_GRAY8) {
        if (image->nChannels != 1 || image->depth != IPL_DEPTH_8U) {
            CV_ERROR(CV_StsUnsupportedFormat, "cvWriteFrame() needs images with depth = IPL_DEPTH_8U and nChannels = 1.");
        }
    }
	else {
        assert(false);
    }

	// check if buffer sizes match, i.e. image has expected format (size, channels, bitdepth, alignment)
	assert (image->imageSize == avpicture_get_size( input_pix_fmt, image->width, image->height ));

	if ( c->pix_fmt != input_pix_fmt ) {
		assert( input_picture );
		// let input_picture point to the raw data buffer of 'image'
		avpicture_fill((AVPicture *)input_picture, (uint8_t *) image->imageData,
				(PixelFormat)input_pix_fmt, image->width, image->height);

#if !defined(HAVE_FFMPEG_SWSCALE)
		// convert to the color format needed by the codec
		if( img_convert((AVPicture *)picture, c->pix_fmt,
					(AVPicture *)input_picture, (PixelFormat)input_pix_fmt,
					image->width, image->height) < 0){
			CV_ERROR(CV_StsUnsupportedFormat, "FFMPEG::img_convert pixel format conversion from BGR24 not handled");
		}
#else
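		// note: this path creates and frees a swscale context on every frame
		// and hard-codes PIX_FMT_BGR24 as the source format even though
		// input_pix_fmt may be PIX_FMT_GRAY8 here; caching one context keyed
		// on input_pix_fmt would be the more general approach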
		img_convert_ctx = sws_getContext(image->width,
		             image->height,
		             PIX_FMT_BGR24,
		             c->width,
		             c->height,
		             c->pix_fmt,
		             SWS_BICUBIC,
		             NULL, NULL, NULL);

		    if ( sws_scale(img_convert_ctx, input_picture->data,
		             input_picture->linesize, 0,
		             image->height,
		             picture->data, picture->linesize) < 0 )
		    {
		      CV_ERROR(CV_StsUnsupportedFormat, "FFMPEG::img_convert pixel format conversion from BGR24 not handled");
		    }
		sws_freeContext(img_convert_ctx);
#endif
	}
	else{
		avpicture_fill((AVPicture *)picture, (uint8_t *) image->imageData,
				(PixelFormat)input_pix_fmt, image->width, image->height);
	}

	ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, picture) >= 0;

	__END__;
	return ret;
}
Example #16
0
void CV_SolvePolyTest::run( int )
{
    CvRNG rng = cvRNG();
    int fig = 100;
    double range = 50;
    double err_eps = 1e-4;

    for (int idx = 0, max_idx = 1000, progress = 0; idx < max_idx; ++idx)
    {
        progress = update_progress(progress, idx-1, max_idx, 0);
        int n = cvRandInt(&rng) % 13 + 1;
        std::vector<complex_type> r(n), ar(n), c(n + 1, 0);
        std::vector<double> a(n + 1), u(n * 2), ar1(n), ar2(n);

        int rr_odds = 3; // odds that we get a real root
        for (int j = 0; j < n;)
        {
            if (cvRandInt(&rng) % rr_odds == 0 || j == n - 1)
	            r[j++] = cvRandReal(&rng) * range;
            else
            {
	            r[j] = complex_type(cvRandReal(&rng) * range,
			    cvRandReal(&rng) * range + 1);
	            r[j + 1] = std::conj(r[j]);
	            j += 2;
            }
        }

        for (int j = 0, k = 1 << n, jj, kk; j < k; ++j)
        {
            int p = 0;
            complex_type v(1);
            for (jj = 0, kk = 1; jj < n && !(j & kk); ++jj, ++p, kk <<= 1)
                ;
            for (; jj < n; ++jj, kk <<= 1)
            {
	            if (j & kk)
	                v *= -r[jj];
	            else
	                ++p;
            }
            c[p] += v;
        }

        bool pass = false;
        double div = 0, s = 0;
        int cubic_case = idx & 1;
        for (int maxiter = 100; !pass && maxiter < 10000; maxiter *= 2, cubic_case = (cubic_case + 1) % 2)
        {
            for (int j = 0; j < n + 1; ++j)
	            a[j] = c[j].real();

            CvMat amat, umat;
            cvInitMatHeader(&amat, n + 1, 1, CV_64FC1, &a[0]);
            cvInitMatHeader(&umat, n, 1, CV_64FC2, &u[0]);
            cvSolvePoly(&amat, &umat, maxiter, fig);

            for (int j = 0; j < n; ++j)
	            ar[j] = complex_type(u[j * 2], u[j * 2 + 1]);

            sort(r.begin(), r.end(), pred_complex());
            sort(ar.begin(), ar.end(), pred_complex());

            pass = true;
            if( n == 3 )
            {
                ar2.resize(n);
                cv::Mat _umat2(3, 1, CV_64F, &ar2[0]), umat2 = _umat2;
                cvFlip(&amat, &amat, 0);
                int nr2;
                if( cubic_case == 0 )
                    nr2 = cv::solveCubic(cv::Mat(&amat),umat2);
                else
                    nr2 = cv::solveCubic(cv::Mat_<float>(cv::Mat(&amat)), umat2);
                cvFlip(&amat, &amat, 0);
                if(nr2 > 0)
                    sort(ar2.begin(), ar2.begin()+nr2, pred_double());
                ar2.resize(nr2);

                int nr1 = 0;
                for(int j = 0; j < n; j++)
                    if( fabs(r[j].imag()) < DBL_EPSILON )
                        ar1[nr1++] = r[j].real();

                pass = pass && nr1 == nr2;
                if( nr2 > 0 )
                {
                    div = s = 0;
                    for(int j = 0; j < nr1; j++)
                    {
                        s += fabs(ar1[j]);
                        div += fabs(ar1[j] - ar2[j]);
                    }
                    div /= s;
                    pass = pass && div < err_eps;
                }
            }

            div = s = 0;
            for (int j = 0; j < n; ++j)
            {
                s += fabs(r[j].real()) + fabs(r[j].imag());
                div += sqrt(pow(r[j].real() - ar[j].real(), 2) + pow(r[j].imag() - ar[j].imag(), 2));
            }
            div /= s;
            pass = pass && div < err_eps;
        }

        if (!pass)
        {
            ts->set_failed_test_info(CvTS::FAIL_INVALID_OUTPUT);
            ts->printf( CvTS::LOG, "too big diff = %g\n", div );

            for (size_t j=0;j<ar2.size();++j)
                ts->printf( CvTS::LOG, "ar2[%d]=%g\n", (int)j, ar2[j]);
            ts->printf(CvTS::LOG, "\n");

            for (size_t j=0;j<r.size();++j)
	            ts->printf( CvTS::LOG, "r[%d]=(%g, %g)\n", (int)j, r[j].real(), r[j].imag());
            ts->printf( CvTS::LOG, "\n" );
            for (size_t j=0;j<ar.size();++j)
	            ts->printf( CvTS::LOG, "ar[%d]=(%g, %g)\n", (int)j, ar[j].real(), ar[j].imag());
            break;
        }
    }
}
Example #17
0
///*****************************************************************
///                            UPDATE CVCINEMA
///*****************************************************************
void testApp::updateCvCinema() {
  // Calculate video framerate
    int currTime = ofGetElapsedTimeMillis();
    videoFPS = 1000.0/(currTime-lastTime);
    lastTime = currTime;
    if (bLiveVideo) {
        // set current frame number
        currFrameNum++;
        fullFrame.setRoiFromPixels(vidGrabber.getPixels(), inW, inH);
    }
    else {
        // get current frame number
        currFrameNum = vidPlayer.getCurrentFrame();
        fullFrame.setRoiFromPixels(vidPlayer.getPixels(), inW, inH);
    }
    // save last frame & update current frame
    lastFrame = colorImg;
    colorImg.scaleIntoMe(fullFrame, CV_INTER_NN);
    // for the advancedBGS
    RgbImage rgbImage = cvCloneImage(colorImg.getCvImage());
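    // note: cvCloneImage allocates a fresh copy on every update; unless
    // RgbImage takes ownership and releases it, this is a per-frame leak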
    if (bLearnBakground) {
        bgImage = colorImg;		// let this frame be the background image from now on
        bgs.bgsPtr->InitModel(rgbImage);
        bLearnBakground = false;
    }
    else {
        // Subtract the current frame from the background model and produce a binary foreground mask using
        // both a low and high threshold value.
        bgs.bgsPtr->Subtract(currFrameNum, rgbImage, bgs.low, bgs.high);
        // Update the background model. Only pixels set to background in update_mask are updated.
        bgs.bgsPtr->Update(currFrameNum, rgbImage, bgs.low );
    }
    switch (algorithm) {
       case 0:     // color frame differencing
                // take the abs value of the difference between last and current frame
                cvAbsDiff(colorImg.getCvImage(), lastFrame.getCvImage(), colorDiff.getCvImage());
                grayDiff  = colorDiff;
                // apply dilate to reduce pixel noise interference
                grayDiff.dilate_3x3();
                // apply the threshold
                grayDiff.threshold(threshold);
                break;
        case 1:     // grayscale frame differencing
                lastFrameGray = lastFrame;
                grayImage = colorImg;
                // take the abs value of the difference between last and current frame
                grayDiff.absDiff(lastFrameGray, grayImage);
                // apply dilate to reduce pixel noise interference
                grayDiff.dilate_3x3();
                // apply the threshold
                grayDiff.threshold(threshold);
                break;
        case 2:     //  background subtraction
                grayImage = colorImg;
                grayBgImage = bgImage;
                //grayImage.dilate_3x3();
                //grayBgImage.dilate_3x3();
                // take the abs value of the difference between background and incoming and then threshold:
                grayDiff.absDiff(grayBgImage, grayImage);
                // apply erode & dilate to reduce pixel noise interference
                grayDiff.erode_3x3();
                grayDiff.dilate_3x3();
                // apply the threshold
                grayDiff.threshold(threshold);
                break;
        case 3:     // brightness tracking
                grayDiff = colorImg;
                // get the brightest pixel and set the threshold to its brightness
                // NOTE: the variable cannot be initialized in its declaration here
                // (e.g. int max_brightness = 0;) because C++ forbids jumping over
                // an initialization into a later "case" label
                int max_brightness;
                max_brightness = 0;
                // Same NOTE here about the variable initialization
                unsigned char * gray_pixels;
                gray_pixels = grayDiff.getPixels();
                for (int i = 0; i < analysisW*analysisH; i++) {
                   if (gray_pixels[i] > max_brightness) max_brightness = gray_pixels[i];
                }
                // apply erode & dilate to reduce pixel noise interference
                grayDiff.erode_3x3();
                grayDiff.dilate_3x3();
                // apply the threshold
                grayDiff.threshold(max_brightness-threshold);
                break;
        case 4:     //  background subtraction + frame differencing
                // grayscale frame differencing
                lastFrameGray = lastFrame;
                grayImage = colorImg;
                // take the abs value of the difference between last and current frame
                grayTemp.absDiff(lastFrameGray, grayImage);
                // apply dilate to reduce pixel noise interference
                //grayTemp.dilate_3x3();
                // background subtraction
                grayImage = colorImg;
                grayBgImage = bgImage;
                // take the abs value of the difference between background and incoming and then threshold:
                grayDiff.absDiff(grayBgImage, grayImage);
                // apply erode & dilate to reduce pixel noise interference
                grayDiff.erode_3x3();
                grayDiff.dilate_3x3();
                // add: BGS + FD
                grayDiff += grayTemp;
                // apply the threshold
                grayDiff.threshold(threshold);
                break;
        case 5:     //  Adaptive GMM BG subtraction
                bgImage.setFromPixels((unsigned char *)(bgs.bgsPtr->Background()->Ptr()->imageData),analysisW,analysisH);
                grayDiff.setFromPixels((unsigned char *)bgs.low.Ptr()->imageData,analysisW,analysisH);
                break;
    }

    // the contourfinder raises an exception depending on the preprocessing
    // if there is not much difference within the thresholded image
    try { // find contours which are between the size of 20 pixels and 1/3 the w*h pixels.
        contourFinder.findContours(grayDiff, 20, (analysisW*analysisH)/3, MAX_BLOBS, false);	// find holes = false
    }
    catch( char * str ) { cout << "Exception raised: " << str << '\n'; }
    // if we find blobs we use them, if not we use the previous ones
    if (contourFinder.nBlobs != 0 ) lastBlobs = contourFinder.blobs;
    // Set to black the output pixels background
    memset( outputImg.getCvImage()->imageData, 0, outW*outH*3 );
    // -----------------  GENERATE OUTPUT IMAGE ----------------
    switch (cropping) {
        case '6': monoCmonoB(); break;
        case '7': monoCmultiB(); break;
        case '8': if (lastBlobs.size() > 1) multiC();
                  else monoCmonoB();
                  break;
    }
    if ((hFlip) || (vFlip)) {
        int flipMode;
        if ((hFlip) && (vFlip)) flipMode = -1;
        else if (hFlip) flipMode = 1;
        else if (vFlip) flipMode = 0;
        cvFlip( outputImg.getCvImage(), NULL, flipMode);
    }
}
Example #18
0
int main(int argc, char** argv)
{
	pthread_t 	thread_s;
	int			key = 0;

	if (argc == 2) {
		capture = cvCaptureFromFile(argv[1]);
	} else {
		capture = cvCaptureFromCAM(0);
	}

	if (!capture) {
		quit("cvCapture failed", 1);
	}

	img0 = cvQueryFrame(capture);
	img1 = cvCreateImage(cvGetSize(img0), IPL_DEPTH_8U, 1);

	cvZero(img1);
	cvNamedWindow("stream_server", CV_WINDOW_AUTOSIZE);

	/* print the width and height of the frame, needed by the client */
	fprintf(stdout, "width:  %d\nheight: %d\n\n", img0->width, img0->height);
	fprintf(stdout, "Press 'q' to quit.\n\n");

	/* run the streaming server as a separate thread */
	if (pthread_create(&thread_s, NULL, streamServer, NULL)) {
		quit("pthread_create failed.", 1);
	}

	while(key != 'q') {
		/* get a frame from camera */
		img0 = cvQueryFrame(capture);
		if (!img0) break;

		img0->origin = 0;
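		/* flip code -1 mirrors around both axes, i.e. a 180-degree rotation */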
		cvFlip(img0, img0, -1);

		/**
		 * convert to grayscale 
		 * note that the grayscaled image is the image to be sent to the client 
		 * so we enclose it with pthread_mutex_lock to make it thread safe 
		 */
		pthread_mutex_lock(&mutex);
		cvCvtColor(img0, img1, CV_BGR2GRAY);
		is_data_ready = 1;
		pthread_mutex_unlock(&mutex);

		/* also display the video here on server */
		cvShowImage("stream_server", img0);
		key = cvWaitKey(30);
	}

	/* user has pressed 'q', terminate the streaming server */
	if (pthread_cancel(thread_s)) {
		quit("pthread_cancel failed.", 1);
	}

	/* free memory */
	cvDestroyWindow("stream_server");
	quit(NULL, 0);
}
Example #19
0
void MainWindow::capture()
{
    float matchresult = 1;
    m_capWebcam = cvCaptureFromCAM(0);
    cvNamedWindow("Original", CV_WINDOW_AUTOSIZE);

    IplImage* tableauxImage[6];
    for (int i = 0; i < 6; i++)
    {
        QString b = QString("C:/Users/Madalina/Downloads/SignsLanguageRecognition-master/build-untitled-Desktop_Qt_5_7_0_MSVC2015_64bit-Release/release/%1").arg(m_alphabetTable.at(i));
        // load as an IplImage directly so the pixel data stays owned by the
        // image (wrapping a temporary cv::Mat would dangle) and so it matches
        // the cvReleaseImage calls below
        tableauxImage[i] = cvLoadImage(b.toStdString().c_str(), CV_LOAD_IMAGE_GRAYSCALE);
    }

    while (1)
    {
        m_imgOriginal = cvQueryFrame(m_capWebcam);
        m_gray= cvCreateImage(cvGetSize(m_imgOriginal), 8, 1);
        cvFlip(m_imgOriginal, m_imgOriginal, 1);
        cvCvtColor(m_imgOriginal, m_gray, CV_BGR2GRAY);
        cvSetImageROI(m_gray, cvRect(100,100,200,200));
        cvThreshold(m_gray, m_gray, 100, 255, CV_THRESH_BINARY_INV);
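        // the inverted threshold maps ROI pixels brighter than 100 to 0 and
        // darker ones to 255, so the darker foreground ends up white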
        MainWindow::drawBox(m_imgOriginal, cvRect(100,100,200,200));

        // when show Roi Button is clicked
        if (m_showGray)
        {
            cvNamedWindow("template gray", CV_WINDOW_AUTOSIZE);
            cvShowImage("template gray", m_gray);
        }

        cvShowImage("Original", m_imgOriginal);
        for( int i = 0; i < 6; i++)
        {
            if (m_startmatching)
            {
                matchresult = MainWindow::matchTwoShapes(tableauxImage[i], m_gray);
            }

            if (matchresult < 0.1)
            {
                m_ui->signname->setText(QString((m_alphabetTable.at(i))[0])); //good match
                m_timer->start(1000);
            }
            else
            {
                m_color = matchresult < 0.25 ? cvScalar(0x00,0xff,0x00) : cvScalar(0x00,0x00,0xff);
            }

            m_ui->matchresultlabel->setText(QString::number(matchresult));
        }

        m_charCheckForEscKey = cvWaitKey(m_ui->Delais->value());// delay (in ms), and get key press, if any
        cvReleaseImage(&m_gray); // m_gray is re-created each iteration, so release it here
        if((m_charCheckForEscKey == 27) || (m_stopCapture))
            break;
    }

    for (int i = 0; i < 6; i++)
    {
        cvReleaseImage(&tableauxImage[i]);
    }

    cvReleaseCapture(&m_capWebcam);
    cvDestroyAllWindows();
}
Example #20
0
void main()
{
	windage::Logger logger(&std::cout);

	IplImage* grabImage;
	IplImage* inputImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 4);
	IplImage* resizeImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* grayImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);

	FleaCamera* capture = new FleaCamera();
	capture->open();
	capture->start();
//	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
	cvNamedWindow("result");

	// create and initialize tracker
	//IMPORTANT
	windage::Frameworks::PlanarObjectTracking tracking;

	windage::Calibration* calibration;
	windage::Algorithms::FeatureDetector* detector;
	windage::Algorithms::SearchTree* searchtree;
	windage::Algorithms::OpticalFlow* opticalflow;
	windage::Algorithms::HomographyEstimator* estimator;
	windage::Algorithms::OutlierChecker* checker;
	windage::Algorithms::HomographyRefiner* refiner;

	calibration = new windage::Calibration();
	detector = new windage::Algorithms::WSURFdetector();
	searchtree = new windage::Algorithms::KDtree();
	opticalflow = new windage::Algorithms::OpticalFlow();
	estimator = new windage::Algorithms::RANSACestimator();
	checker = new windage::Algorithms::OutlierChecker();
	refiner = new windage::Algorithms::LMmethod();

	calibration->Initialize(INTRINSIC[0], INTRINSIC[1], INTRINSIC[2], INTRINSIC[3], INTRINSIC[4], INTRINSIC[5], INTRINSIC[6], INTRINSIC[7]);
	searchtree->SetRatio(0.7);
	opticalflow->Initialize(WIDTH, HEIGHT, cvSize(15, 15), 3);
	estimator->SetReprojectionError(REPROJECTION_ERROR);
	checker->SetReprojectionError(REPROJECTION_ERROR * 3);
	refiner->SetMaxIteration(10);

	tracking.AttatchCalibration(calibration);
	tracking.AttatchDetetor(detector);
	tracking.AttatchMatcher(searchtree);
	tracking.AttatchTracker(opticalflow);
	tracking.AttatchEstimator(estimator);
	tracking.AttatchChecker(checker);
	tracking.AttatchRefiner(refiner);
//	tracking.AttatchFilter(filter);

	tracking.SetDitectionRatio(1);
	tracking.Initialize(WIDTH, HEIGHT, (double)WIDTH, (double)HEIGHT);

	int keypointCount = 0;
	int matchingCount = 0;
	double threshold = detector->GetThreshold();
	double processingTime = 0.0;

	bool trained = false;

#if USE_TEMPLATE_IMAEG
	IplImage* sampleImage = cvLoadImage(TEMPLATE_IMAGE, 0);
	detector->SetThreshold(threshold);
	tracking.AttatchReferenceImage(sampleImage);
	tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
	detector->SetThreshold(threshold);
	trained = true;
#endif

	char message[100];
	bool flip = false;
	bool processing = true;
	while(processing)
	{
		// capture image
		capture->update();
		grabImage = capture->GetIPLImage();
//		inputImage = cvRetrieveFrame(capture);
		cvResize(grabImage, inputImage);
		cvCvtColor(inputImage, resultImage, CV_BGRA2BGR);
		cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
		if(flip)
			cvFlip(inputImage, inputImage);

		logger.updateTickCount();

		// track object
		if(trained)
		{
			//IMPORTANT
			tracking.UpdateCamerapose(grayImage);

			// adaptive threshold
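			// (raise or lower the detector threshold by 1 per frame so the
			// keypoint count drifts toward FEATURE_COUNT)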
#if USE_ADAPTIVE_THRESHOLD
			int localcount = detector->GetKeypointsCount();
			if(keypointCount != localcount)
			{
				if(localcount > FEATURE_COUNT)
					threshold += 1;
				if(localcount < FEATURE_COUNT)
					threshold -= 1;
				detector->SetThreshold(threshold);
				keypointCount = localcount;
			}
#endif
			// draw result
//			detector->DrawKeypoints(resultImage);

			tracking.DrawOutLine(resultImage, true);
			tracking.DrawDebugInfo(resultImage);

			windage::Calibration* result = tracking.GetCameraParameter();

			calibration->DrawInfomation(resultImage, 100);
		}
		matchingCount = tracking.GetMatchingCount();

		processingTime = logger.calculateProcessTime();
		logger.log("processingTime", processingTime);
		logger.logNewLine();

		sprintf_s(message, "Processing Time : %.2lf ms", processingTime);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 20), 0.6, message);
		sprintf_s(message, "Feature Count : %d, Threshold : %.0lf", keypointCount, threshold);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 40), 0.6, message);
		sprintf_s(message, "Matching Count : %d", matchingCount);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 60), 0.6, message);

		sprintf_s(message, "Press 'Space' to track the current image");
		windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-10), 0.5, message);
		sprintf_s(message, "Press 'F' to flip image");
		windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-270, HEIGHT-25), 0.5, message);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case 'q':
		case 'Q':
			processing = false;
			break;
		case 'f':
		case 'F':
			flip = !flip;
			break;
		case ' ':
		case 's':
		case 'S':
			detector->SetThreshold(30.0);
			tracking.AttatchReferenceImage(grayImage);
			tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
			detector->SetThreshold(threshold);
			trained = true;
			break;
		}		
	}

	capture->stop();
	capture->close();
	delete capture;
//	cvReleaseCapture(&capture);
	cvDestroyAllWindows();
}
Example #21
0
void LegsDetector::update(const std::vector< laser_t >& laserBuffer)
{
   // first remove high peaks due to absorbing materials
   laser_t laser[BUFFERLENGTH];
   for (int i = 0; i < _bufferLength; i++)
   {
      laser[i].range = DBL_MAX;
      double angle = laser[i].angle = laserBuffer[i].angle;
	  for (int k = max(0, i-_delta); k <= min( _bufferLength-1, i+_delta); k++)
      {
         double range;
         if (laserBuffer[k].range < laser[i].range)
         {
            range = laser[i].range = laserBuffer[k].range;
            laser[i].x = range * cos(angle);
            laser[i].y = range * sin(angle);
         }
      }
   }
   //                       (0)
   //                        |
   //                        |
   //                        |
   // (+90)------------------|-------------------(-90)
   // reading from right to left i.e. from -90 to +90
   //
   // start extracting all the vertical edges of interest
   // remembering the scan goes from right (-PI/2) to left (+PI/2)
   // left and right edges correspond to the robot's point of view
   //
   //                 -(p1)             (p1)-
   //                   |    (p1)-(p1)   |
   //                   |     |    |     |
   //                   |     |   l|     |r
   //                   |     |    |     |
   //                  L|     |R  (p2)--(p2)
   //                   |     |
   //                   |     |
   //                  (p2)--(p2)
   //
   vector< edge_t<point_t> > vEdge;
   double prevRange = laser[0].range;
   for (int id = 1; id < _bufferLength; id++)
   {
      double range = laser[id].range;

      //if ( range == MAXIMUM_RANGE  || prevRange == MAXIMUM_RANGE ) ;
	  if ((prevRange - range) > MIN_LONG_EDGE)      // possible left long edge
      {
		  edge_t<point_t> e = {Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle),
                              Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle), 'R'};
         vEdge.push_back(e);
      }
      else if ((range - prevRange) > MIN_LONG_EDGE) // possible right long edge
      {
         edge_t<point_t> e = {Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle),
                              Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle), 'L'};
         vEdge.push_back(e);
      }
      else if ((prevRange - range) > MIN_SHORT_EDGE) // possible left short edge
      {
         edge_t<point_t> e = {Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle),
                              Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle), 'r'};
         vEdge.push_back(e);
      }
      else if ((range - prevRange) > MIN_SHORT_EDGE) // possible right short edge
      {
         edge_t<point_t> e = {Point(laser[id].x, laser[id].y, laser[id].range, laser[id].angle),
                              Point(laser[id-1].x, laser[id-1].y, laser[id-1].range, laser[id-1].angle), 'l'};
         vEdge.push_back(e);
      }

      prevRange = range;
   }
   // remove edges too close to each other
   if ( vEdge.empty() ) return;
   vector<edge_t<point_t> >::iterator first = vEdge.begin();
   vector<edge_t<point_t> >::iterator second = first + 1;
   double d1, d2;
   char t1, t2;
   while (second < vEdge.end())
   {
	   t1 = toupper(first->type);
       t2 = toupper(second->type);
	   d1 = getDistance(second->p1, first->p2);
	   d2 = getDistance(first->p1, second->p2);
       if ( t1 == 'R' && t2 == 'R' && d1 < MIN_EDGE_DIST )
       {
		   first->p2 = second->p2;
           first->type = 'R';
           second = vEdge.erase(second);
        }
        else if ( t1 == 'L' && t2 == 'L' && d2 < MIN_EDGE_DIST )
        {
			first->p1 = second->p1;
            first->type = 'L';
            second = vEdge.erase(second);
	   }
       else
       {
		   first++;
           second++;
       }
   }
   if ( vEdge.empty() ) return;
   // draw some stuff for debugging... (must be done now, before vEdge is modified)
   if (_debug)
   {
      CvPoint start;
	  cvSet(_tmpImg, cvScalar(255,255,255));

	  start = cvPoint(DEBUG_WINDOW_WIDTH/2, 0);
	  cvCircle(_tmpImg, start, 1*DEBUG_WINDOW_WIDTH/80, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 1*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 2*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 3*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 4*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 5*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 6*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 7*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));
	  cvCircle(_tmpImg, start, 8*DEBUG_WINDOW_WIDTH/16, cvScalar(255,0,0));

      start = cvPoint(METER2PIXEL(laser[0].y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(laser[0].x));
      // draw the laser data
      for (int i = 1; i < _bufferLength; i++)
      {
         CvPoint end = cvPoint(METER2PIXEL(laser[i].y) + DEBUG_WINDOW_WIDTH/2,
                               METER2PIXEL(laser[i].x));

		 if (laser[i].range == MAXIMUM_RANGE && laser[i-1].range == MAXIMUM_RANGE)
			 cvLine(_tmpImg, start, end, cvScalar(0,0,0));
		 if (laser[i].range <  MAXIMUM_RANGE && laser[i-1].range <  MAXIMUM_RANGE)
			 cvLine(_tmpImg, start, end, cvScalar(0,0,0));

		 start = end;
      }
      // draw the extremes
      for (unsigned int i = 0; i < vEdge.size(); i++)
      {
         CvScalar color;
		 switch (vEdge[i].type)
         {
            case 'R':
               color = cvScalar(0,0,255); // red
               break;
            case 'L':
               color = cvScalar(255,0,0); // blue
               break;
            case 'r':
               color = cvScalar(0,196,255);  // yellow
               break;
            case 'l':
               color = cvScalar(64,255,0);  // green
               break;
         }
		 // draw min extremes
		 CvPoint center = cvPoint(METER2PIXEL(vEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
                                  METER2PIXEL(vEdge[i].p1.x));
         cvCircle(_tmpImg, center, 2, color);
         // draw max extremes
         CvPoint c1 = cvPoint(METER2PIXEL(vEdge[i].p2.y) - 3 + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(vEdge[i].p2.x) - 3);
         CvPoint c2 = cvPoint(METER2PIXEL(vEdge[i].p2.y) + 3 + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(vEdge[i].p2.x) + 3);
         cvRectangle(_tmpImg, c1, c2, color);
      }
   }

   // extract the horizontal lines of interest
   vector< edge_t<point_t> > hEdge;
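   // pair the vertical edges into U/P/O leg patterns; _selectivity controls
   // how permissive detection is (2 accepts only U, 1 adds P, 0 adds O too)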
   int temp = 1;
   while ( temp > 0 ) { temp = getUpattern(vEdge, hEdge); }
   temp = 1;
   while ( _selectivity < 2 && temp > 0 ) { temp = getPpattern(vEdge, hEdge);}
   temp = 1;
   while ( _selectivity < 1 && temp > 0 ) { temp = getOpattern(vEdge, hEdge);}

   // finally calculate distance and direction of each horizontal line
   _target.clear();
   vector< edge_t<point_t> >::iterator itend = hEdge.end();
   for (vector< edge_t<point_t> >::iterator it = hEdge.begin(); it < itend; it++)
   {
      target_t t;
      // the distance is an average between the two points
      double xm = ((it->p1).x + (it->p2).x) / 2;
      double ym = ((it->p1).y + (it->p2).y) / 2;
      t.distance = sqrt(sqr(xm) + sqr(ym));
      // left PI/2, right -PI/2
      t.bearing = atan2(ym, xm);
      // no height information of course...
      t.pattern = it->type;
      _target.push_back(t);
   }
   // final number of detected people
   _howMany = _target.size();
   // draw the last things for debugging
   if (_debug)
   {
      // draw horizontal edges
      for (unsigned int i = 0; i < hEdge.size(); i++)
      {
         CvPoint p1 = cvPoint(METER2PIXEL(hEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(hEdge[i].p1.x));
         CvPoint p2 = cvPoint(METER2PIXEL(hEdge[i].p2.y) + DEBUG_WINDOW_WIDTH/2,
                              METER2PIXEL(hEdge[i].p2.x));
//          cvLine(_tmpImg, p1, p2, cvScalar(0,128,255), 2);
         CvPoint pm = cvPoint((p1.x + p2.x) / 2, (p1.y + p2.y) / 2);
         int thick;
         if (hEdge[i].type == 'U')
            thick = 3;
         else if (hEdge[i].type == 'P')
            thick = 2;
         else
            thick = 1;
         cvLine(_tmpImg, cvPoint(DEBUG_WINDOW_WIDTH/2, 0), pm, cvScalar(0,128,255), thick);
      }

      cvFlip(_tmpImg, NULL, -1);
      cvResize(_tmpImg, _debugImage, CV_INTER_NN);
      cvShowImage("Legs detector", _debugImage);
 	  if (_delay)
        cvWaitKey(_delay);  // handles event processing of HIGHGUI library
   }
   return;
}
Example #22
0
void RenderVideoFrame::queryFrame() {
    cvCopy(frame, prev_frame);
    frame = cvQueryFrame(capture);
    //capture >> frame;

    if (!frame) {
        this->error = "Cannot get frame.";
        return;
    }

    //make mirror reflection
    //it's more natural
    cvFlip(frame, NULL, 1);

    // monochrome are not as good as true color images for tracking
    // but they speed up the algorithm a lot
    IplImage *frame1_mono = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1),
             *frame2_mono = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);

    if (point_x != -1 && point_y != -1) {
        // let's run optical flow!

        out.clear(); status.clear(); errors.clear();

        cvConvertImage(prev_frame, frame1_mono);
        cvConvertImage(frame, frame2_mono);

        Mat mat1(frame1_mono);
        Mat mat2(frame2_mono);
//        Mat mat1 = cvCreateMat(frame1_mono->height, frame1_mono->width, CV_32FC3);
//        Mat mat2 = cvCreateMat(frame2_mono->height, frame2_mono->width, CV_32FC3);
//        cvConvert(frame1_mono, mat1);
//        cvConvert(frame2_mono, mat2);

        calcOpticalFlowPyrLK(mat1, mat2, in, out, status, errors);
//        calcOpticalFlowPyrLK(frame1_mono, frame2_mono, in, out, status, errors);

        // if the point has been found by PyrLK algorithm
        // set the new point's values
        for (unsigned int i=0; i<status.size(); i++) {
            if (status[i]==1) {
                in[i] = out[i];
            }
        }
        point_x = in[2].x;
        point_y = in[2].y;

        // check if the point is within any circle
        double r = d / 2, x, y, dist; // circle's radius
        vector<Circle>::iterator it;
        for (it=circles.begin(); it!=circles.end(); it++) {
            x=(*it).x+r;
            y=(*it).y+r;
            dist = sqrt(pow(x-point_x, 2) + pow(y-point_y, 2));
            if (dist<=r) {
                (*it).setSelected(true);
                break;
            }
        }
    }

    image = QImage(
                (const uchar *)frame->imageData,
                frame->width,
                frame->height,
                QImage::Format_RGB888
            ).rgbSwapped();
    this->update();
}
Example #23
0
ColorChecker find_colorchecker(CvSeq * quads, CvSeq * boxes, CvMemStorage *storage, IplImage *image, IplImage *original_image)
{
    CvPoint2D32f box_corners[4];
    bool passport_box_flipped = false;
    bool rotated_box = false;
    
    CvMat* points = cvCreateMat( boxes->total , 1, CV_32FC2 );
    for(int i = 0; i < boxes->total; i++)
    {
        CvBox2D box = (*(CvBox2D*)cvGetSeqElem(boxes, i));
        cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
    }
    CvBox2D passport_box = cvMinAreaRect2(points,storage);
    fprintf(stderr,"Box:\n\tCenter: %f,%f\n\tSize: %f,%f\n\tAngle: %f\n",passport_box.center.x,passport_box.center.y,passport_box.size.width,passport_box.size.height,passport_box.angle);
    if(passport_box.angle < 0.0) {
      passport_box_flipped = true;
    }
    
    cvBoxPoints(passport_box, box_corners);
    // for(int i = 0; i < 4; i++)
    // {
    //   fprintf(stderr,"Box corner %d: %d,%d\n",i,cvPointFrom32f(box_corners[i]).x,cvPointFrom32f(box_corners[i]).y);
    // }
    
    // cvBox(passport_box, image, cvScalarAll(128), 10);
    
    if(euclidean_distance(cvPointFrom32f(box_corners[0]),cvPointFrom32f(box_corners[1])) <
       euclidean_distance(cvPointFrom32f(box_corners[1]),cvPointFrom32f(box_corners[2]))) {
        fprintf(stderr,"Box is upright, rotating\n");
        rotate_box(box_corners);
        rotated_box = passport_box_flipped;
    }

    double horizontal_spacing = euclidean_distance(
        cvPointFrom32f(box_corners[0]),cvPointFrom32f(box_corners[1]))/(double)(MACBETH_WIDTH-1);
    double vertical_spacing = euclidean_distance(
        cvPointFrom32f(box_corners[1]),cvPointFrom32f(box_corners[2]))/(double)(MACBETH_HEIGHT-1);
    double horizontal_slope = (box_corners[1].y - box_corners[0].y)/(box_corners[1].x - box_corners[0].x);
    double horizontal_mag = sqrt(1+pow(horizontal_slope,2));
    double vertical_slope = (box_corners[3].y - box_corners[0].y)/(box_corners[3].x - box_corners[0].x);
    double vertical_mag = sqrt(1+pow(vertical_slope,2));
    double horizontal_orientation = box_corners[0].x < box_corners[1].x ? -1 : 1;
    double vertical_orientation = box_corners[0].y < box_corners[3].y ? -1 : 1;
        
    fprintf(stderr,"Spacing is %f %f\n",horizontal_spacing,vertical_spacing);
    fprintf(stderr,"Slope is %f %f\n", horizontal_slope,vertical_slope);
    
    int average_size = 0;
    for(int i = 0; i < boxes->total; i++)
    {
        CvBox2D box = (*(CvBox2D*)cvGetSeqElem(boxes, i));
        
        CvRect rect = contained_rectangle(box);
        average_size += MIN(rect.width, rect.height);
    }
    average_size /= boxes->total;
    
    fprintf(stderr,"Average contained rect size is %d\n", average_size);
    
    CvMat * this_colorchecker = cvCreateMat(MACBETH_HEIGHT, MACBETH_WIDTH, CV_32FC3);
    CvMat * this_colorchecker_points = cvCreateMat( MACBETH_HEIGHT, MACBETH_WIDTH, CV_32FC2 );
    
    // calculate the averages for our oriented colorchecker
    for(int x = 0; x < MACBETH_WIDTH; x++) {
        for(int y = 0; y < MACBETH_HEIGHT; y++) {
            CvPoint2D32f row_start;
            
            if ( ((image->origin == IPL_ORIGIN_BL) || !rotated_box) && !((image->origin == IPL_ORIGIN_BL) && rotated_box) )
            {
                row_start.x = box_corners[0].x + vertical_spacing * y * (1 / vertical_mag);
                row_start.y = box_corners[0].y + vertical_spacing * y * (vertical_slope / vertical_mag);
            }
            else
            {
                row_start.x = box_corners[0].x - vertical_spacing * y * (1 / vertical_mag);
                row_start.y = box_corners[0].y - vertical_spacing * y * (vertical_slope / vertical_mag);
            }
            
            CvRect rect = cvRect(0,0,average_size,average_size);
            
            rect.x = row_start.x - horizontal_spacing * x * ( 1 / horizontal_mag ) * horizontal_orientation;
            rect.y = row_start.y - horizontal_spacing * x * ( horizontal_slope / horizontal_mag ) * vertical_orientation;
            
            cvSet2D(this_colorchecker_points, y, x, cvScalar(rect.x,rect.y));
            
            rect.x = rect.x - average_size / 2;
            rect.y = rect.y - average_size / 2;
            
            // cvRectangle(
            //     image,
            //     cvPoint(rect.x,rect.y),
            //     cvPoint(rect.x+rect.width, rect.y+rect.height),
            //     cvScalarAll(0),
            //     10
            // );
            
            CvScalar average_color = rect_average(rect, original_image);
            
            cvSet2D(this_colorchecker,y,x,average_color);
        }
    }
    
    double orient_1_error = check_colorchecker(this_colorchecker);
    cvFlip(this_colorchecker,NULL,-1);
    double orient_2_error = check_colorchecker(this_colorchecker);
    
    fprintf(stderr,"Orientation 1: %f\n",orient_1_error);
    fprintf(stderr,"Orientation 2: %f\n",orient_2_error);
    
    if(orient_1_error < orient_2_error) {
        cvFlip(this_colorchecker,NULL,-1);
    }
    else {
        cvFlip(this_colorchecker_points,NULL,-1);
    }
    
    // draw_colorchecker(this_colorchecker,this_colorchecker_points,image,average_size);
    
    ColorChecker found_colorchecker;
    
    found_colorchecker.error = MIN(orient_1_error,orient_2_error);
    found_colorchecker.values = this_colorchecker;
    found_colorchecker.points = this_colorchecker_points;
    found_colorchecker.size = average_size;
    
    return found_colorchecker;
}
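
// The helpers called above (euclidean_distance, rotate_box, contained_rectangle,
// rect_average, check_colorchecker) are defined elsewhere in the original file.
// A minimal sketch of the two geometric ones, reconstructed from their call
// sites (the originals may differ):

static double euclidean_distance( CvPoint a, CvPoint b )
{
    double dx = a.x - b.x, dy = a.y - b.y;
    return sqrt( dx*dx + dy*dy );
}

// shift the four corners one position so the long edge becomes the horizontal one
static void rotate_box( CvPoint2D32f box_corners[4] )
{
    CvPoint2D32f last = box_corners[3];
    for( int i = 3; i > 0; i-- )
        box_corners[i] = box_corners[i-1];
    box_corners[0] = last;
}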
Example #24
int main(int argc, char** argv)
{
	CvMemStorage* mstrg = cvCreateMemStorage();
	CvSeq* contours = 0;

	const char* filename = 0;
	IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method
	IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
	CvCapture* capture = 0;		

	int c, nframes = 0;
	int nframesToLearnBG = 300;

	model = cvCreateBGCodeBookModel();

	//Set color thresholds to default values
	model->modMin[0] = 3;
	model->modMin[1] = model->modMin[2] = 3;
	model->modMax[0] = 10;
	model->modMax[1] = model->modMax[2] = 10;
	model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;


	printf("Capturando de la camara...\n");
	capture = cvCaptureFromCAM( 0 );

	if( !capture )
	{
		printf( "No se pudo inicializar la captura de video\n\n" );
		return -1;
	}

	while (true)
	{

		rawImage = cvQueryFrame( capture );
		++nframes;
		if(!rawImage) 
			break;


		//First time:
		if( nframes == 1 && rawImage )
		{
			borde = cvLoadImage("Borde.png",0);
			if( !borde )
			{
				printf( "Could not load Borde.png\n" );
				return -1;
			}

			// CODEBOOK METHOD ALLOCATION
			yuvImage = cvCloneImage(rawImage);

			int w = yuvImage->width;
			cvSetImageROI(yuvImage, cvRect(w-250,0,250,250));
			IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels);
			cvCopy(yuvImage, tmp, NULL);
			cvReleaseImage(&yuvImage);	// drop the full-size clone, keep only the ROI copy
			yuvImage = tmp;

			ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
			ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );

			cvSet(ImaskCodeBook,cvScalar(255));

			cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);

			printf (">>Aprendiendo fondo\n");
		}

		// If we've got an rawImage and are good to go:                
		if( rawImage )
		{
			cvFlip(rawImage, NULL, 1);
			int w = rawImage->width;

			// contours of the overlay border (note: cvFindContours modifies its
			// input; since borde never changes, this could be computed once
			// outside the loop instead of every frame)
			cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);

			//Draw the ROI outline
			cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			//
			if(nframes - 1 < nframesToLearnBG)
			{
				char buffer [33];
				_itoa (nframesToLearnBG - nframes,buffer,10);
				CvFont font2;
				cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
				cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
			}

			cvSetImageROI(rawImage, cvRect(w-250,0,250,250));

			cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
			//YUV for the codebook method

			//Build the background model
			if(nframes-1 < nframesToLearnBG  )
				cvBGCodeBookUpdate( model, yuvImage );


			if( nframes-1 == nframesToLearnBG  )
			{
				cvBGCodeBookClearStale( model, model->t/2 );
				printf (">>Fondo aprendido\n");
			}

			//Find foreground objects with the codebook method
			if( nframes-1 >= nframesToLearnBG  )
			{
				cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );

				cvCopy(ImaskCodeBook,ImaskCodeBookCC);	
				cvSegmentFGMask( ImaskCodeBookCC );

				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

				//image detection
				detect(ImaskCodeBookCC,rawImage);

				//base for drawing the hand
				if(contours)
					cvDrawContours(rawImage,contours, cvScalar(255, 0, 0, 0), cvScalarAll(128), 1 );


			}
			//Display
			cvResetImageROI(rawImage);
			cvShowImage( "CapturaCam", rawImage );
			cvShowImage( "ForegroundCodeBook",ImaskCodeBook);

		}

		// User input:
		c = cvWaitKey(10)&0xFF;
		c = tolower(c);
		// End processing on ESC, q or Q
		if(c == 27 || c == 'q')
			break;
		//Else check for user input
		switch( c )
		{
		case 'c':
			saveLength = true;
			break;        
		case ' ':
			cvBGCodeBookClearStale( model, 0 );
			nframes = 0;
			break;            
		}

		if (c != 'c')
			saveLength=false;
	}		

	cvReleaseCapture( &capture );
	cvReleaseMemStorage(&mstrg);
	cvReleaseImage( &borde );
	cvReleaseImage( &yuvImage );
	cvReleaseImage( &ImaskCodeBook );
	cvReleaseImage( &ImaskCodeBookCC );
	cvDestroyWindow( "CapturaCam" );
	cvDestroyWindow( "ForegroundCodeBook");
	cvDestroyWindow( "CodeBook_ConnectComp");
	return 0;
}
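
// detect() is defined elsewhere in the original program; only its call site
// appears above. A minimal sketch of what such a helper could do with the
// segmented foreground mask (an assumption, not the original): find the
// largest blob and box it on the frame.
static void detect( IplImage *mask, IplImage *frame )
{
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contour = 0, *largest = 0;
	double maxArea = 0;
	IplImage *tmp = cvCloneImage(mask);	// cvFindContours modifies its input

	cvFindContours(tmp, storage, &contour, sizeof(CvContour), CV_RETR_EXTERNAL);
	for( ; contour; contour = contour->h_next )
	{
		double area = fabs(cvContourArea(contour, CV_WHOLE_SEQ));
		if( area > maxArea ) { maxArea = area; largest = contour; }
	}
	if( largest )
	{
		CvRect r = cvBoundingRect(largest, 0);
		cvRectangle(frame, cvPoint(r.x, r.y),
			cvPoint(r.x + r.width, r.y + r.height),
			CV_RGB(0,255,0), 2, 8, 0);
	}
	cvReleaseImage(&tmp);
	cvReleaseMemStorage(&storage);
}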
Example #25
IplImage* CvCaptureCAM_VFW::retrieveFrame(int)
{
    BITMAPINFO vfmt;
    memset( &vfmt, 0, sizeof(vfmt));
    BITMAPINFOHEADER& vfmt0 = vfmt.bmiHeader;

    if( !capWnd )
        return 0;

    const DWORD sz = capGetVideoFormat( capWnd, &vfmt, sizeof(vfmt));
    const int prevWidth = frame ? frame->width : 0;
    const int prevHeight = frame ? frame->height : 0;

    if( !hdr || hdr->lpData == 0 || sz == 0 )
        return 0;

    if( !frame || frame->width != vfmt0.biWidth || frame->height != vfmt0.biHeight )
    {
        cvReleaseImage( &frame );
        frame = cvCreateImage( cvSize( vfmt0.biWidth, vfmt0.biHeight ), 8, 3 );
    }

    if ( vfmt0.biCompression == MAKEFOURCC('N','V','1','2') )
    {
        // Frame is in YUV 4:2:0 NV12 format, convert to BGR color space
        // (see https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx#nv12)
        IplImage src;
        cvInitImageHeader( &src, cvSize( vfmt0.biWidth, vfmt0.biHeight * 3 / 2 ), IPL_DEPTH_8U, 1, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvCvtColor( &src, frame, CV_YUV2BGR_NV12 );
    }
    else if( vfmt0.biCompression != BI_RGB ||
             vfmt0.biBitCount != 24 )
    {
        BITMAPINFOHEADER vfmt1 = icvBitmapHeader( vfmt0.biWidth, vfmt0.biHeight, 24 );

        if( hic == 0 || fourcc != vfmt0.biCompression ||
            prevWidth != vfmt0.biWidth || prevHeight != vfmt0.biHeight )
        {
            closeHIC();
            hic = ICOpen( MAKEFOURCC('V','I','D','C'),
                          vfmt0.biCompression, ICMODE_DECOMPRESS );
            if( hic )
            {
                if( ICDecompressBegin( hic, &vfmt0, &vfmt1 ) != ICERR_OK )
                {
                    closeHIC();
                    return 0;
                }
            }
        }

        if( !hic || ICDecompress( hic, 0, &vfmt0, hdr->lpData,
            &vfmt1, frame->imageData ) != ICERR_OK )
        {
            closeHIC();
            return 0;
        }

        cvFlip( frame, frame, 0 );
    }
    else
    {
        IplImage src;
        cvInitImageHeader( &src, cvSize(vfmt0.biWidth, vfmt0.biHeight),
            IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvFlip( &src, frame, 0 );
    }

    return frame;
}
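
// icvBitmapHeader() is a small static helper defined in the same OpenCV
// source file; a reconstruction of what it must return, inferred from the
// call above (an approximation, not the original):
static BITMAPINFOHEADER icvBitmapHeader( int width, int height, int bpp )
{
    BITMAPINFOHEADER bmih;
    memset( &bmih, 0, sizeof(bmih) );
    bmih.biSize = sizeof(BITMAPINFOHEADER);
    bmih.biWidth = width;
    bmih.biHeight = height;
    bmih.biPlanes = 1;
    bmih.biBitCount = (WORD)bpp;
    bmih.biCompression = BI_RGB;
    return bmih;
}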
Example #26
int main()
{
	// communication
	UdpTransmitSocket transmitSocket(IpEndpointName( ADDRESS, PORT ));
	char buffer[OUTPUT_BUFFER_SIZE];
	osc::OutboundPacketStream PacketSender(buffer, OUTPUT_BUFFER_SIZE);

	// tracking
	windage::Logger logger(&std::cout);

	IplImage* inputImage;
	IplImage* resizeImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* grayImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 3);

	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
	cvNamedWindow("result");

	// create and initialize tracker
	double threshold = 50.0;

	windage::Frameworks::MultiplePlanarObjectTracking tracking;
	windage::Calibration* calibration;
	windage::Algorithms::FeatureDetector* detector;
	windage::Algorithms::OpticalFlow* opticalflow;
	windage::Algorithms::HomographyEstimator* estimator;
	windage::Algorithms::OutlierChecker* checker;
	windage::Algorithms::HomographyRefiner* refiner;

	calibration = new windage::Calibration();
	detector = new windage::Algorithms::SIFTGPUdetector();
	opticalflow = new windage::Algorithms::OpticalFlow();
	estimator = new windage::Algorithms::ProSACestimator();
	checker = new windage::Algorithms::OutlierChecker();
	refiner = new windage::Algorithms::LMmethod();

	calibration->Initialize(INTRINSIC[0], INTRINSIC[1], INTRINSIC[2], INTRINSIC[3], INTRINSIC[4], INTRINSIC[5], INTRINSIC[6], INTRINSIC[7]);
	detector->SetThreshold(50.0);
	opticalflow->Initialize(WIDTH, HEIGHT, cvSize(15, 15), 3);
	estimator->SetReprojectionError(REPROJECTION_ERROR);
	checker->SetReprojectionError(REPROJECTION_ERROR * 3);
	refiner->SetMaxIteration(10);

	tracking.AttatchCalibration(calibration);
	tracking.AttatchDetetor(detector);
	tracking.AttatchTracker(opticalflow);
	tracking.AttatchEstimator(estimator);
	tracking.AttatchChecker(checker);
	tracking.AttatchRefiner(refiner);
	
	tracking.Initialize(WIDTH, HEIGHT, (double)WIDTH, (double)HEIGHT);
	tracking.SetFilter(false);
	tracking.SetDitectionRatio(5);

	bool trained = false;
#if USE_TEMPLATE_IMAEG
	for(int i=0; i<TEMPLATE_IMAGE_COUNT; i++)
	{
		char message[100];
		sprintf_s(message, TEMPLATE_IMAGE, i+1);

		IplImage* sampleImage = cvLoadImage(message, 0);
		detector->SetThreshold(30.0);
		tracking.AttatchReferenceImage(sampleImage);

		cvReleaseImage(&sampleImage);
	}
	tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
	trained = true;
	detector->SetThreshold(threshold);
#endif
	
	int keypointCount = 0;
	int matchingCount = 0;
	double processingTime = 0.0;

	char message[100];
	bool fliping = true;
	bool processing = true;
	while(processing)
	{
		// capture image
		inputImage = cvQueryFrame(capture);	// grab + retrieve in one call
		cvResize(inputImage, resizeImage);
		if(fliping)
			cvFlip(resizeImage, resizeImage);

		cvCvtColor(resizeImage, grayImage, CV_BGR2GRAY);
		cvCopyImage(resizeImage, resultImage);

		logger.updateTickCount();

		// track object
		if(trained)
		{
			tracking.UpdateCamerapose(grayImage);
//			tracking.GetDetector()->DrawKeypoints(resultImage);

			// adaptive threshold
#if ADAPTIVE_THRESHOLD
			int localcount = detector->GetKeypointsCount();
			if(keypointCount != localcount)
			{
				if(localcount > FEATURE_COUNT)
					threshold += 1;
				if(localcount < FEATURE_COUNT)
					threshold -= 1;
				detector->SetThreshold(threshold);
				keypointCount = localcount;
			}
#endif
			// draw result
			std::vector<int> matchingCounts(tracking.GetObjectCount());	// distinct name: avoids shadowing the int matchingCount above
			for(int i=0; i<tracking.GetObjectCount(); i++)
			{
				matchingCounts[i] = tracking.GetMatchingCount(i);
				if(tracking.GetMatchingCount(i) > 10)
				{
//					tracking.DrawDebugInfo(resultImage, i);
					tracking.DrawOutLine(resultImage, i, true);
					windage::Calibration* calibrationTemp = tracking.GetCameraParameter(i);
					calibrationTemp->DrawInfomation(resultImage, 100);
					CvPoint centerPoint = calibrationTemp->ConvertWorld2Image(0.0, 0.0, 0.0);
					
					centerPoint.x += 5;
					centerPoint.y += 10;
					sprintf_s(message, "object #%d (%03d)", i+1, matchingCount[i]);
					windage::Utils::DrawTextToImage(resultImage, centerPoint, 0.6, message);
				}
			}

			// calculate the relation between the two markers
			if(tracking.GetMatchingCount(0) > 10 && tracking.GetMatchingCount(1) > 10)
			{
				windage::Matrix3 rotation = windage::Coordinator::MultiMarkerCoordinator::GetRotation(tracking.GetCameraParameter(0), tracking.GetCameraParameter(1));
				windage::Vector3 translation = windage::Coordinator::MultiMarkerCoordinator::GetTranslation(tracking.GetCameraParameter(0), tracking.GetCameraParameter(1));

				PacketSender << osc::BeginBundleImmediate << osc::BeginMessage("MultimarkerRelation")
					<< rotation.m[0][0] << rotation.m[0][1] << rotation.m[0][2]
					<< rotation.m[1][0] << rotation.m[1][1] << rotation.m[1][2]
					<< rotation.m[2][0] << rotation.m[2][1] << rotation.m[2][2]
					<< translation.x << translation.y << translation.z
					<< osc::EndMessage << osc::EndBundle;
				transmitSocket.Send( PacketSender.Data(), PacketSender.Size() );
				PacketSender.Clear();
			}
		}

		processingTime = logger.calculateProcessTime();
		logger.log("processingTime", processingTime);
		logger.logNewLine();

		sprintf_s(message, "Processing Time : %.2lf ms", processingTime);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 20), 0.6, message);
		sprintf_s(message, "Feature Count : %d, Threshold : %.0lf", keypointCount, threshold);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 40), 0.6, message);
		sprintf_s(message, "Matching Count : %d", matchingCount);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(10, 60), 0.6, message);

		sprintf_s(message, "Press 'Space' to add tracking the current image", keypointCount, threshold);
		windage::Utils::DrawTextToImage(resultImage, cvPoint(WIDTH-315, HEIGHT-10), 0.5, message);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case 'q':
		case 'Q':
			processing = false;
			break;
		case 'f':
		case 'F':
			fliping = !fliping;
			break;
		case ' ':
		case 's':
		case 'S':
			detector->SetThreshold(30.0);
			tracking.AttatchReferenceImage(grayImage);
			tracking.TrainingReference(SCALE_FACTOR, SCALE_STEP);
			detector->SetThreshold(threshold);
			trained = true;
			break;
		}		
	}

	cvReleaseCapture(&capture);
	cvDestroyAllWindows();

	return 0;
}
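
// The bundle sent above can be consumed with oscpack's listener API on the
// receiving side. A hedged sketch of a matching receiver (RelationListener is
// a local assumption, not part of the sample; windage's scalars are assumed
// to be doubles -- if the sender streamed floats, read with AsFloat() instead).
// Requires osc/OscPacketListener.h and ip/UdpSocket.h from oscpack.
class RelationListener : public osc::OscPacketListener
{
protected:
	virtual void ProcessMessage( const osc::ReceivedMessage& m,
	                             const IpEndpointName& remote )
	{
		(void)remote;
		if( std::strcmp(m.AddressPattern(), "MultimarkerRelation") != 0 )
			return;
		osc::ReceivedMessage::const_iterator arg = m.ArgumentsBegin();
		double r[9], t[3];
		for( int i = 0; i < 9; i++ ) r[i] = (arg++)->AsDouble();
		for( int i = 0; i < 3; i++ ) t[i] = (arg++)->AsDouble();
		printf("relation: rot row0 = %f %f %f, t = %f %f %f\n",
			r[0], r[1], r[2], t[0], t[1], t[2]);
	}
};
// usage:
//	RelationListener listener;
//	UdpListeningReceiveSocket s(IpEndpointName(IpEndpointName::ANY_ADDRESS, PORT), &listener);
//	s.Run();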
Example #27
int main( int argc, char** argv )
{
    CvSize board_size = {0,0};
    float square_size = 1.f, aspect_ratio = 1.f;
    const char* out_filename = "out_camera_data.yml";
    const char* input_filename = 0;
    int i, image_count = 10;
    int write_extrinsics = 0, write_points = 0;
    int flags = 0;
    CvCapture* capture = 0;
    FILE* f = 0;
    char imagename[1024];
    CvMemStorage* storage;
    CvSeq* image_points_seq = 0;
    int elem_size, flip_vertical = 0;
    int delay = 1000;
    clock_t prev_timestamp = 0;
    CvPoint2D32f* image_points_buf = 0;
    CvFont font = cvFont( 1, 1 );
    double _camera[9], _dist_coeffs[4];
    CvMat camera = cvMat( 3, 3, CV_64F, _camera );
    CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
    CvMat *extr_params = 0, *reproj_errs = 0;
    double avg_reproj_err = 0;
    int mode = DETECTION;
    int undistort_image = 0;
    CvSize img_size = {0,0};
    const char* live_capture_help = 
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";

    if( argc < 2 )
    {
  // calibration -w 6 -h 8 -s 2 -n 10 -o camera.yml -op -oe [<list_of_views.txt>]
      printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
	    "     [-di <disk_images>       # Number of disk images before triggering undistortion\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf( "%s", live_capture_help );
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
	else if( strcmp( s, "-di") == 0)
	{
	    if( sscanf( argv[++i], "%d", &images_from_file) != 1 || images_from_file < 3)
		return printf("Invalid di, must be >= 3\n"), -1;
	}
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            write_points = 1;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            write_extrinsics = 1;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flip_vertical = 1;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            out_filename = argv[++i];
        }
        else if( s[0] != '-' )
            input_filename = s;
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    if( input_filename )
    {
        capture = cvCreateFileCapture( input_filename );
        if( !capture )
        {
            f = fopen( input_filename, "rt" );
            if( !f )
                return fprintf( stderr, "The input file could not be opened\n" ), -1;
            image_count = -1;
        }
        mode = CAPTURING;
    }
    else
        capture = cvCreateCameraCapture(0);

    if( !capture && !f )
        return fprintf( stderr, "Could not initialize video capture\n" ), -2;

    if( capture )
        printf( "%s", live_capture_help );

    elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
    storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
    image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
    image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );

    cvNamedWindow( "Image View", 1 );
    cvNamedWindow( "Undistort",1);
    int disk_image_cnt = 0;

    for(;;)
    {
        IplImage *view = 0, *view_gray = 0;
        int count = 0, found, blink = 0;
        CvPoint text_origin;
        CvSize text_size = {0,0};
        int base_line = 0;
        char s[100];
        int key;
        
        if( f && fgets( imagename, sizeof(imagename)-2, f ))
        {
            int l = strlen(imagename);
            if( l > 0 && imagename[l-1] == '\n' )
                imagename[--l] = '\0';
            if( l > 0 )
            {
                if( imagename[0] == '#' )
                    continue;
                view = cvLoadImage( imagename, 1 );
                disk_image_cnt++;
           }
        }
        else if( capture )
        {
            IplImage* view0 = cvQueryFrame( capture );
            if( view0 )
            {
                view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
                if( view0->origin == IPL_ORIGIN_BL )
                    cvFlip( view0, view, 0 );
                else
                    cvCopy( view0, view );
            }
        }

        if( !view || (disk_image_cnt == images_from_file))
        {
            if( image_points_seq->total > 0 )
            {
                image_count = image_points_seq->total;
                goto calibrate;
            }
            break;
        }

        if( flip_vertical )
            cvFlip( view, view, 0 );

        img_size = cvGetSize(view);
        found = cvFindChessboardCorners( view, board_size,
            image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

#if 1
        // improve the found corners' coordinate accuracy
        view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
        cvCvtColor( view, view_gray, CV_BGR2GRAY );
        cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
            cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        cvReleaseImage( &view_gray );
#endif

        if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            cvSeqPush( image_points_seq, image_points_buf );
            prev_timestamp = clock();
            blink = !f;
#if 1
            if( capture )
            {
                sprintf( imagename, "view%03d.png", image_points_seq->total - 1 );
                cvSaveImage( imagename, view );
            }
#endif
        }

        cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );

        cvGetTextSize( "100/100", &font, &text_size, &base_line );
        text_origin.x = view->width - text_size.width - 10;
        text_origin.y = view->height - base_line - 10;

        if( mode == CAPTURING )
        {
            if( image_count > 0 )
                sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
            else
                sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
        }
        else if( mode == CALIBRATED )
            sprintf( s, "Calibrated" );
        else
            sprintf( s, "Press 'g' to start" );

        cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
                                   CV_RGB(255,0,0) : CV_RGB(0,255,0));

        if( blink )
            cvNot( view, view );
        //Rectify or Undistort the image
        if( mode == CALIBRATED && undistort_image )
        {
            IplImage* t = cvCloneImage( view );
            cvShowImage( "Image View", view );
            cvUndistort2( t, view, &camera, &dist_coeffs );
            cvReleaseImage( &t );
            cvShowImage( "Undistort", view );
            key = cvWaitKey(0);
        }
        else
        {
            cvShowImage( "Image View", view );
            key = cvWaitKey(capture ? 50 : 500);
        }
        if( key == 27 )
            break;
        
        if( key == 'u' && mode == CALIBRATED )
            undistort_image = !undistort_image;

        if( capture && key == 'g' )
        {
            mode = CAPTURING;
            cvClearMemStorage( storage );
            image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
        }

        if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
        {
calibrate:
            if(disk_image_cnt == images_from_file)
                 undistort_image = !undistort_image;
            cvReleaseMat( &extr_params );
            cvReleaseMat( &reproj_errs );
            int code = run_calibration( image_points_seq, img_size, board_size,
                square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
                &reproj_errs, &avg_reproj_err );
            // save camera parameters in any case, to catch Inf's/NaN's
            save_camera_params( out_filename, image_count, img_size,
                board_size, square_size, aspect_ratio, flags,
                &camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
                write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
            if( code )
                mode = CALIBRATED;
            else
                mode = DETECTION;
        }

        if( !view )
            break;
        cvReleaseImage( &view );
    }

    if( capture )
        cvReleaseCapture( &capture );
    return 0;
}
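
// run_calibration() and save_camera_params() are defined earlier in the full
// sample; only their call sites appear above. A condensed sketch of
// run_calibration(), reconstructed from the call site (the real function also
// fills reproj_errs with per-view reprojection errors, which is omitted here):
static int run_calibration( CvSeq* image_points_seq, CvSize img_size,
    CvSize board_size, float square_size, float aspect_ratio, int flags,
    CvMat* camera, CvMat* dist_coeffs, CvMat** extr_params,
    CvMat** reproj_errs, double* avg_reproj_err )
{
    int i, point_count = board_size.width*board_size.height;
    int image_count = image_points_seq->total;
    CvMat* image_points = cvCreateMat( 1, image_count*point_count, CV_32FC2 );
    CvMat* object_points = cvCreateMat( 1, image_count*point_count, CV_32FC3 );
    CvMat* point_counts = cvCreateMat( 1, image_count, CV_32SC1 );
    CvMat rot_vects, trans_vects;

    // flatten the collected corner arrays into one big row vector
    cvCvtSeqToArray( image_points_seq, image_points->data.fl, CV_WHOLE_SEQ );

    // the board's model points: a planar grid with square_size spacing
    for( i = 0; i < image_count*point_count; i++ )
    {
        CvPoint3D32f* pt = (CvPoint3D32f*)object_points->data.fl + i;
        int j = i % point_count;
        pt->x = (j / board_size.width)*square_size;
        pt->y = (j % board_size.width)*square_size;
        pt->z = 0.f;
    }
    cvSet( point_counts, cvScalar(point_count) );

    *extr_params = cvCreateMat( image_count, 6, CV_32FC1 );
    cvGetCols( *extr_params, &rot_vects, 0, 3 );
    cvGetCols( *extr_params, &trans_vects, 3, 6 );

    cvZero( camera );
    cvZero( dist_coeffs );
    if( flags & CV_CALIB_FIX_ASPECT_RATIO )
    {
        camera->data.db[0] = aspect_ratio;
        camera->data.db[4] = 1.;
    }

    cvCalibrateCamera2( object_points, image_points, point_counts,
                        img_size, camera, dist_coeffs,
                        &rot_vects, &trans_vects, flags );

    *reproj_errs = 0;       // per-view errors omitted in this sketch
    *avg_reproj_err = 0;    // average error omitted in this sketch

    cvReleaseMat( &image_points );
    cvReleaseMat( &object_points );
    cvReleaseMat( &point_counts );

    // intrinsics containing Inf/NaN signal a failed calibration
    return cvCheckArr( camera, CV_CHECK_QUIET, 0, 0 );
}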
Example #28
void comp_dsi (IplImage *left_img, IplImage *right_img, int maxdisp, int winSize, uchar *&dsi, int &dsi_width, int &dsi_height, int bordercosts, int pixdisthresh, int useBT, int scale)
{
	int imgW = left_img->width;
	int imgH = left_img->height;	

	int paddingpix = (int) winSize / 2;

	// The dsi is padded to handle border pixels
	dsi_width = imgW + 2 * paddingpix;
	dsi_height = imgH + 2 * paddingpix;

	// The dsi width must be divisible by 16 to allow fast float4 reads on the GPU
	// (CUDA is most efficient with 128-bit loads)
	int modulo_val = dsi_width % 16;
	if (modulo_val != 0)
		dsi_width += 16 - modulo_val;

	// compute dsi
	int *dsi_int = (int*) _mm_malloc (imgW * imgH * (maxdisp + 1) * sizeof(int), 16);

	#define MI	// defined, so the mutual-information (CHmiDsi) branch below is compiled
	#ifndef MI
		CDsi dsiobj (left_img->width, left_img->height, maxdisp, 4);
		dsiobj.Generate_DSI (left_img, right_img, TRUE, RGB_, RANK_TRANSFORM_35, L1, bordercosts, pixdisthresh, useBT, dsi_int);
	#else
		CHmiDsi dsiobj (imgW, imgH, maxdisp, 4);
	
		IplImage *loaddisp = 0;

		if (maxdisp == 16)
		{
			printf ("\nloading Tsukuba disp\n");
			loaddisp = cvLoadImage ("MI_disps\\tsukuba_prevdisp_it1.png");
		}
		else if (maxdisp == 31)
		{
			printf ("\nloading Venus disp\n");
			loaddisp = cvLoadImage ("MI_disps\\venus_prevdisp_it1.png");
		}
		else if (maxdisp == 59)
		{
			printf ("\nloading Cones disp\n");
			loaddisp = cvLoadImage ("MI_disps\\cones_prevdisp_it1.png");
		}
		else if (maxdisp == 60)
		{
			printf ("\nloading Teddy disp\n");
			loaddisp = cvLoadImage ("MI_disps\\teddy_prevdisp_it1.png");
		}
		else if (maxdisp == 89)
		{
			printf ("\nloading Art disp\n");
			loaddisp = cvLoadImage ("MI_disps\\moebius_prevdisp_it1.png");
		}

		if (!loaddisp)
			printf ("Could not load image for Mutual Information Score\n");

		IplImage *disp, *occmask;

		disp = cvCreateImage (cvSize(imgW, imgH), IPL_DEPTH_8U, 1);

		// one channel image, divide by scale
		for (int y = 0; y < imgH; y++)
			for (int x = 0; x < imgW; x++)
			{
				float origd = (float) (uchar) loaddisp->imageData[y * loaddisp->widthStep + 3 * x];
				disp->imageData[y * disp->widthStep + x] = (char) (int) (origd / (float) scale);
			}

		// check if left or right reference
		static int left_reference = 0;
		if (!left_reference)
		{
			IplImage *disp_transformed;
			Gererate_right_disp_and_occmask (disp, disp_transformed, occmask);
			cvReleaseImage (&disp);
			disp = disp_transformed;

			cvFlip (disp_transformed, disp_transformed, 1);
			cvFlip (occmask, occmask, 1);
		}
		else
			occmask = GenerateOccMask (disp);
		left_reference++;


/*		cvvNamedWindow ("disp_transformed", CV_WINDOW_AUTOSIZE);
		cvMoveWindow ("disp_transformed", 0, 0);
		cvShowImage ("disp_transformed", disp);

		cvvNamedWindow ("occmask", CV_WINDOW_AUTOSIZE);
		cvMoveWindow ("occmask", imgW, 0);
		cvShowImage ("occmask", occmask);

		cvvNamedWindow ("left_img", CV_WINDOW_AUTOSIZE);
		cvMoveWindow ("left_img", imgW, 0);
		cvShowImage ("left_img", left_img);

		cvvNamedWindow ("right_img", CV_WINDOW_AUTOSIZE);
		cvMoveWindow ("right_img", imgW, 0);
		cvShowImage ("right_img", right_img);

		cvWaitKey(0);*/

		dsiobj.Generate_DSI (left_img, right_img, left_img, right_img, disp, occmask, true, RGB, bordercosts, dsi_int);

		cvReleaseImage (&loaddisp);
		cvReleaseImage (&disp);
		cvReleaseImage (&occmask);
	#endif

	
	int padded_dsi_size = dsi_width * dsi_height * (maxdisp + 1) * sizeof(uchar);

	uchar *dsi_uchar = (uchar*) malloc (padded_dsi_size);
	// set zero
	memset (dsi_uchar, 0, padded_dsi_size);


	// we need to reorder the dsi and convert it to uchar
	int* read = dsi_int;

	for (int y = 0; y < imgH; y++)
	{
		for (int x = 0; x < imgW; x++)
		{
			for (int d = 0; d <= maxdisp; d++)
			{
				uchar *write = get_padded_dsi_ptr_at (dsi_uchar, x, y, d, dsi_width, dsi_height, maxdisp, paddingpix);		
				*write = (uchar) std::min (*read, std::min (pixdisthresh, 255));
				//*write = (uchar) min (x, 255);
				read++;
			}
		}
	}

	_mm_free (dsi_int);	// the int-valued dsi is no longer needed

	dsi = dsi_uchar;
}
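
// get_padded_dsi_ptr_at() is defined elsewhere; a plausible definition,
// assuming the volume is stored disparity-major with the image plane padded
// by paddingpix on each side (the original layout may differ):
static inline uchar *get_padded_dsi_ptr_at (uchar *dsi, int x, int y, int d,
		int dsi_width, int dsi_height, int maxdisp, int paddingpix)
{
	(void) maxdisp;	// kept only for signature compatibility with the call site
	return dsi + ((size_t) d * dsi_height + (y + paddingpix)) * dsi_width
	           + (x + paddingpix);
}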
Example #29
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{

    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Length of the "--cascade=" option prefix (not used in this trimmed sample)
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file (not used in this trimmed sample)
    const char* input_name;


    // The HaarClassifierCascades are loaded one at a time inside the main loop below

    // Allocate the memory storage
    storage = cvCreateMemStorage(0);


    capture = cvCaptureFromCAM( 0);
    // Create a new named window with title: result
    cvNamedWindow( "result", 1 );

  
    // If loaded successfully, then:
    if( capture )
    {
        // Capture from the camera.
        for(;;)
        {
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;
            
            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy. 
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
                cvFlip( frame, frame_copy, 0 );
            

            // Try each of the four cascades (face, eye, nose, mouth) on this frame
            for( int i = 0; i < 4; i++ )
            {
                cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name[i], 0, 0, 0 );

                // Check whether the cascade has loaded successfully. Else, report an error and quit
                if( !cascade )
                {
                    fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
                    return -1;
                }

                // Call the function to detect and draw the face
                if( detect_and_draw( frame_copy, cascade ) )
                {
                    std::cout << i << std::endl;
                    switch( i )
                    {
                    case 0:
                        std::cout << "face detected" << std::endl;
                        break;
                    case 1:
                        std::cout << "eye detected" << std::endl;
                        break;
                    case 2:
                        std::cout << "nose detected" << std::endl;
                        break;
                    case 3:
                        std::cout << "mouth detected" << std::endl;
                        break;
                    }
                }

                // Release this cascade before the next one is loaded;
                // without this, every reload leaks the previous cascade
                cvReleaseHaarClassifierCascade( &cascade );
            }
            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 1 ) >= 0 )
                break;
        }

        // Release the image, capture and memory storage
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
        cvReleaseMemStorage( &storage );


    }

    // If the capture could not be initialized, nothing was processed
    return 0;
}
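
// detect_and_draw(), cascade_name, cascade and storage are defined at file
// scope in the original. A minimal sketch of detect_and_draw(), reconstructed
// from how it is called above (the original may draw differently):
int detect_and_draw( IplImage* img, CvHaarClassifierCascade* cascade )
{
    CvSeq* objects = cvHaarDetectObjects( img, cascade, storage, 1.1, 2,
                                          CV_HAAR_DO_CANNY_PRUNING,
                                          cvSize(30, 30) );
    int i, found = objects != 0 && objects->total > 0;

    for( i = 0; objects && i < objects->total; i++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( objects, i );
        cvRectangle( img, cvPoint(r->x, r->y),
                     cvPoint(r->x + r->width, r->y + r->height),
                     CV_RGB(255,0,0), 2, 8, 0 );
    }
    cvShowImage( "result", img );
    cvClearMemStorage( storage );
    return found;
}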
Example #30
int videoFunc() {
	IplImage *frame = NULL; 
	IplImage *image = NULL, *image2 = NULL;
	IplImage *prev_img = NULL;

	CvPoint2D32f facePt1, facePt2;

	int isTrackingFace = FALSE;
	int initOptFlow = FALSE;

	while (!quitFlag) {
		char c;

		// New detected face position
		CvPoint2D32f newFacePt1, newFacePt2;

		int detectedFace = FALSE;

		if (numImgs == 0) {
			frame = cvQueryFrame(capture);
			//cvGrabFrame(capture);
			//img = cvRetrieveFrame(capture);
			if (frame == NULL) {
				fprintf(stderr, "Failed to grab frame\n");
				return -1;
			}
		}
		else {
			frame = cvLoadImage(imgPaths[videoTick % numImgs], CV_LOAD_IMAGE_UNCHANGED);

			if (frame == NULL) {
				fprintf(stderr, "Failed to load input image\n");
				return -1;
			}
		}

		image = cvCloneImage(frame);
		image2 = cvCloneImage(frame);
		if (image == NULL || image2 == NULL) {
			fprintf(stderr, "Failed to clone image\n");
			return -1;
		}

		if (videoTick % 10 == 0 && faceDetect(image, &newFacePt1, &newFacePt2) > -1) {
		//if (videoTick == 0 && faceDetect(image, &newFacePt1, &newFacePt2) > -1) {
			//if (((float)(newFacePos.x - facePos.x) / image->width < FACE_MOVEMENT_THRESHOLD && (float)(newFacePos.y - facePos.y) / image->height < FACE_MOVEMENT_THRESHOLD)
			//	|| !isTrackingFace) {
				detectedFace = TRUE;
				initOptFlow = TRUE;
				facePt1 = newFacePt1;
				facePt2 = newFacePt2;
			//}
		}

		if (!detectedFace && prev_img != NULL) {
			fprintf(stderr, "Failed to detect face, trying to track\n");

			if (opt_flow_find_points(prev_img, image, initOptFlow, &facePt1, &facePt2, &newFacePt1, &newFacePt2, image2) < 0) {
				fprintf(stderr, "Warning: couldn't track any flow points\n");
				isTrackingFace = FALSE;
			}
			else {
				//if ((float)(newFacePos.x - facePos.x) / image->width < FACE_MOVEMENT_THRESHOLD && (float)(newFacePos.y - facePos.y) / image->height < FACE_MOVEMENT_THRESHOLD) {
					isTrackingFace = TRUE;
					facePt1 = newFacePt1;
					facePt2 = newFacePt2;
				/*}
				else {
					isTrackingFace = FALSE;
				}*/
			}

			if (initOptFlow)
				initOptFlow = FALSE;
		}
		else
			isTrackingFace = TRUE;



		// Set previous image pointer and free the old previous image
		cvReleaseImage(&prev_img);
		prev_img = image;


		// Draw the rectangle in the input image
		if (isTrackingFace)
			draw_rect(image2, facePt1, facePt2, detectedFace);
		else {
			drawFullscreenRect(image2);
		}

		cvFlip(image2, NULL, 1);
		cvShowImage("preview", image2);

		cvReleaseImage(&image2);	// safe to release: cvShowImage keeps an internal copy

		if (numImgs > 0)
			cvReleaseImage(&frame);

		c = cvWaitKey(10);
		if(c == 27 || c == 'q')
			break;

		videoTick++;
	}

	cvReleaseImage(&prev_img);	// free the last retained frame

	return 0;
}
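
// faceDetect(), opt_flow_find_points(), drawFullscreenRect() and draw_rect()
// live elsewhere in the original. A guess at draw_rect(), inferred from its
// call site: outline the face, with the color signalling whether the position
// came from a fresh detection or only from optical-flow tracking.
static void draw_rect(IplImage *img, CvPoint2D32f pt1, CvPoint2D32f pt2, int detected)
{
	CvScalar color = detected ? CV_RGB(0, 255, 0) : CV_RGB(255, 255, 0);
	cvRectangle(img, cvPointFrom32f(pt1), cvPointFrom32f(pt2), color, 2, 8, 0);
}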