Example #1
// Convert a video to grayscale
// argv[1]: input video file
// argv[2]: name of new output file
//
#include "cv.h"        // legacy OpenCV C API types (IplImage, CvCapture, ...)
#include "highgui.h"   // capture, video writer, and window functions

int main( int argc, char* argv[] ) {
    CvCapture* capture = cvCreateFileCapture( argv[1] );
    if (!capture){
        return -1;
    }
    IplImage* bgr_frame;
    double fps = cvGetCaptureProperty (
        capture,
        CV_CAP_PROP_FPS
    );

    CvSize size = cvSize(
        (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH),
        (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT)
    );
    
    CvVideoWriter* writer = cvCreateVideoWriter(
        argv[2],
        CV_FOURCC('M','J','P','G'),
        fps,
        size
    );
    
    IplImage* logpolar_frame = cvCreateImage(
        size,
        IPL_DEPTH_8U,
        3
    );

    IplImage* gray_frame = cvCreateImage(
        size,
        IPL_DEPTH_8U,
        1
    );

    while( (bgr_frame=cvQueryFrame(capture)) != NULL ) {
        // cvConvertImage's third argument is a flip/swap flag, not a conversion
        // code; use cvCvtColor for the BGR -> gray conversion instead.
        cvCvtColor(
            bgr_frame,
            gray_frame,
            CV_BGR2GRAY
        );
        cvLogPolar( bgr_frame, logpolar_frame, 
                    cvPoint2D32f(bgr_frame->width/2,
                    bgr_frame->height/2), 
                    40, 
                    CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
        cvWriteToAVI( writer, logpolar_frame );
    }
    cvReleaseVideoWriter( &writer );
    cvReleaseImage( &gray_frame );
    cvReleaseImage( &logpolar_frame );
    // bgr_frame is owned by the capture structure; releasing it here would be an error
    cvReleaseCapture( &capture );
    return 0;
}
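For comparison, here is a minimal sketch of the same capture/convert/write loop against the C++ API (cv::VideoCapture / cv::VideoWriter), which supersedes the legacy CvCapture/CvVideoWriter calls above. It writes the grayscale frames directly and skips the log-polar warp; it assumes OpenCV 2.x headers and, like the original, that both file-name arguments are supplied. Whether a given backend accepts single-channel frames with isColor=false depends on the codec.

#include <opencv2/opencv.hpp>

int main( int argc, char* argv[] ) {
    cv::VideoCapture capture( argv[1] );
    if( !capture.isOpened() ) return -1;

    double fps = capture.get( CV_CAP_PROP_FPS );
    cv::Size size( (int)capture.get( CV_CAP_PROP_FRAME_WIDTH ),
                   (int)capture.get( CV_CAP_PROP_FRAME_HEIGHT ) );

    // isColor=false: the writer is fed single-channel frames
    cv::VideoWriter writer( argv[2], CV_FOURCC('M','J','P','G'), fps, size, false );

    cv::Mat bgr_frame, gray_frame;
    while( capture.read( bgr_frame ) ) {
        cv::cvtColor( bgr_frame, gray_frame, CV_BGR2GRAY );
        writer.write( gray_frame );
    }
    return 0;  // capture and writer are released by their destructors
}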
Example #2
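// FLTK timer callback: pick up slider-driven seeks, grab the next frame, run
// DetectAndDrawFaces(), draw it into the video window, optionally record to AVI,
// update the FPS label, and re-arm the timeout while playback is running.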
static void get_next_frame(void*)
{
    static int repositioning = 0;
    IplImage* frame = 0;
    double new_pos = video_pos->value();
    
    if( (new_pos-old_pos >= 1e-10 || new_pos-old_pos <= -1e-10) && !repositioning)
    {
        video_window->redraw();
        cvSetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO, new_pos );
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        printf("Repositioning\n");
        repositioning = 1;
    }
    else
    {
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        video_pos->value(new_pos);
        repositioning = 0;
    }
    old_pos = new_pos;
    frame = cvQueryFrame( capture );

    if( frame == 0 && is_avi )
    {
        cb_Stop(0,0);
        return;
    }

    if( video_window && frame )
    {
        if( video_window->w() < frame->width || video_window->h() < frame->height )
            root_window->size( (short)(frame->width + 40), (short)(frame->height + 150));

        CvRect rect = { video_window->x(), video_window->y(),
                        frame->width, frame->height };
        
        if( !video_image || video_image->width < rect.width ||
            video_image->height < rect.height )
        {
            cvReleaseImage( &video_image );
            video_image = cvCreateImage( cvSize( rect.width, rect.height ), 8, 3 );
        }

        cvSetImageROI( video_image, cvRect(0,0,rect.width, rect.height));
        if( frame->origin == 1 )
            cvFlip( frame, video_image, 0 );
        else
            cvCopy( frame, video_image, 0 );

        DetectAndDrawFaces( video_image );
        if( writer && is_recorded )
        {
            cvWriteToAVI( writer, video_image );
        }
        cvCvtColor( video_image, video_image, CV_RGB2BGR );

        uchar* data = 0;
        int step = 0;
        CvSize size;
        cvGetRawData( video_image, &data, &step, &size );

        video_window->redraw();
        fl_draw_image( (uchar*)data, video_window->x(), video_window->y(),
                       size.width, size.height, 3, step );
    }

    if( started )
    {
        double cur_frame_stamp = get_time_accurate();
        // update fps
        if( fps < 0 )
            fps = 1000/(cur_frame_stamp - prev_frame_stamp);
        else
            fps = (1-fps_alpha)*fps + fps_alpha*1000/(cur_frame_stamp - prev_frame_stamp);
        prev_frame_stamp = cur_frame_stamp;
        sprintf( fps_buffer, "FPS: %5.1f", fps );
        fps_box->label( fps_buffer );
        fps_box->redraw();
        if( total_frames > 0 )
        {
            if( --total_frames == 0 )
            {
                if( !is_loopy )
                    cb_Exit(0,0);
                else
                {
                    total_frames = total_frames0;
                    cvSetCaptureProperty( capture, CV_CAP_PROP_POS_FRAMES, start_pos );
                }
            }
        }
        Fl::add_timeout( timeout, get_next_frame, 0 );
    }
}
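The repositioning branch above drives cvSetCaptureProperty with a relative position and then reads the position back, since not every container seeks exactly to the request. A standalone sketch of that pattern (the function name seek_relative is ours; it assumes an already-opened CvCapture and omits error handling):

#include <stdio.h>
#include "highgui.h"

/* Seek by relative position (0.0 .. 1.0) and report where the backend landed. */
void seek_relative( CvCapture* capture, double ratio )
{
    cvSetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO, ratio );
    double actual = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
    printf( "requested %.3f, got %.3f\n", ratio, actual );
}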
Example #3
//
//USAGE:  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
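//Example invocation (the movie file name is only illustrative, matching the small tree.avi mentioned below):
//    ch9_background 1 30 tree.avi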
int main(int argc, char** argv)
{
 	IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
    IplImage *ImaskAVG = 0,*ImaskAVGCC = 0;
    IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

	int startcapture = 1;
	int endcapture = 30;
	int c,n;

	maxMod[0] = 3;  //Set color thresholds to default values
	minMod[0] = 10;
	maxMod[1] = 1;
	minMod[1] = 1;
	maxMod[2] = 1;
	minMod[2] = 1;
	float scalehigh = HIGH_SCALE_NUM;
	float scalelow = LOW_SCALE_NUM;

	if(argc < 3) {
		printf("ERROR: Too few parameters\n");
		help();
	}else{
		if(argc == 3){
			printf("Capture from Camera\n");
			capture = cvCaptureFromCAM( 0 );
		}
		else {
			printf("Capture from file %s\n",argv[3]);
	//		capture = cvCaptureFromFile( argv[3] );
			capture = cvCreateFileCapture( argv[3] );
			if(!capture) { printf("Couldn't open %s\n",argv[3]); return -1;}

			//Brace assignment to an existing array ("minMod = {42,24,33}") is not
			//valid C++; set the per-channel thresholds element by element.
			minMod[0] = 42; minMod[1] = 24; minMod[2] = 33;
			maxMod[0] = 14; maxMod[1] = 3;  maxMod[2] = 2;
		}
		if(isdigit(argv[1][0])) { //Start frame of background capture
			startcapture = atoi(argv[1]);
			printf("startcapture = %d\n",startcapture);
		}
		if(isdigit(argv[2][0])) { //End frame of background capture
			endcapture = atoi(argv[2]);
			printf("endcapture = %d\n",endcapture);
		}

		if(argc > 4){ //See if parameters are set from command line
			//FOR AVG MODEL
			if(argc >= 5){
				if(isdigit(argv[4][0])){
					scalehigh = (float)atoi(argv[4]);
				}
			}
			if(argc >= 6){
				if(isdigit(argv[5][0])){
					scalelow = (float)atoi(argv[5]);
				}
			}
			//FOR CODEBOOK MODEL, CHANNEL 0
			if(argc >= 7){
				if(isdigit(argv[6][0])){
					maxMod[0] = atoi(argv[6]);
				}
			}
			if(argc >= 8){
				if(isdigit(argv[7][0])){
					minMod[0] = atoi(argv[7]);
				}
			}
			//Channel 1
			if(argc >= 9){
				if(isdigit(argv[8][0])){
					maxMod[1] = atoi(argv[8]);
				}
			}
			if(argc >= 10){
				if(isdigit(argv[9][0])){
					minMod[1] = atoi(argv[9]);
				}
			}
			//Channel 2
			if(argc >= 11){
				if(isdigit(argv[10][0])){
					maxMod[2] = atoi(argv[10]);
				}
			}
			if(argc >= 12){
				if(isdigit(argv[11][0])){
					minMod[2] = atoi(argv[11]);
				}
			}

		}
	}

    /*dancer jiwei*/
    double vdfps = 0.0;
    CvSize vdsize = cvSize(0,0);
    //vdfps = cvGetCaptureProperty ( capture, CV_CAP_PROP_FPS);
    getVideoInfo( capture, vdfps, vdsize);
    CvVideoWriter* writer = cvCreateVideoWriter( "dancer.avi",
                                                CV_FOURCC('D','X','5','0'),
                                                vdfps,
                                                vdsize);
    //end dancer jiwei


	//MAIN PROCESSING LOOP:
	bool pause = false;
	bool singlestep = false;

    if( capture )
    {
      cvNamedWindow( "Raw", 1 );
		cvNamedWindow( "AVG_ConnectComp",1);
		cvNamedWindow( "ForegroundCodeBook",1);
		cvNamedWindow( "CodeBook_ConnectComp",1);
 		cvNamedWindow( "ForegroundAVG",1);
 		//Only dancer jiwei 2012.3.3
 		cvNamedWindow( "OnlyDancer",1);
        cvNamedWindow( "RectDancer",1);
        int i = -1;

        for(;;)
        {
    			if(!pause){
//        		if( !cvGrabFrame( capture ))
//                	break;
//            	rawImage = cvRetrieveFrame( capture );
				rawImage = cvQueryFrame( capture );
				++i;//count it
//				printf("%d\n",i);
				if(!rawImage)
					break;
				//REMOVE THIS FOR GENERAL OPERATION, JUST A CONVENIENCE WHEN RUNNING WITH THE SMALL tree.avi file
				//if(i == 56){
				if(i==0){
					pause = 1;
					printf("\n\nVideo paused for your convenience at the first frame so you can work with the demo\n"
					"You may adjust parameters, single step or continue running\n\n");
					help();
				}
			}
			if(singlestep){
				pause = true;
			}
			//First time:
			if(0 == i) {
				printf("\n . . . wait for it . . .\n"); //Just in case you wonder why the image is white at first
				//AVG METHOD ALLOCATION
				AllocateImages(rawImage);
				scaleHigh(scalehigh);
				scaleLow(scalelow);
				ImaskAVG = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskAVGCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskAVG,cvScalar(255));
				//CODEBOOK METHOD ALLOCATION:
				yuvImage = cvCloneImage(rawImage);
				ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskCodeBook,cvScalar(255));
				imageLen = rawImage->width*rawImage->height;
				cB = new codeBook [imageLen];
				for(int f = 0; f<imageLen; f++)
				{
 					cB[f].numEntries = 0;
				}
				for(int nc=0; nc<nChannels;nc++)
				{
					cbBounds[nc] = 10; //Learning bounds factor
				}
				ch[0] = true; //Allow threshold setting simultaneously for all channels
				ch[1] = true;
				ch[2] = true;
			}
			//If we've got an rawImage and are good to go:
        	if( rawImage )
        	{
				cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
				//This is where we build our background model
				if( !pause && i >= startcapture && i < endcapture  ){
					//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
					accumulateBackground(rawImage);
					//LEARNING THE CODEBOOK BACKGROUND
					pColor = (uchar *)((yuvImage)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
						pColor += 3;
					}
				}
				//When done, create the background model
				if(i == endcapture){
					createModelsfromStats();
				}
				//Find the foreground if any
				if(i >= endcapture) {
					//FIND FOREGROUND BY AVG METHOD:
					backgroundDiff(rawImage,ImaskAVG);
					cvCopy(ImaskAVG,ImaskAVGCC);
					cvconnectedComponents(ImaskAVGCC);
					//FIND FOREGROUND BY CODEBOOK METHOD
					uchar maskPixelCodeBook;
					pColor = (uchar *)((yuvImage)->imageData); //3 channel yuv image
					uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
					for(int c=0; c<imageLen; c++)
					{
						 maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
						*pMask++ = maskPixelCodeBook;
						pColor += 3;
					}
					//This part just to visualize bounding boxes and centers if desired
					cvCopy(ImaskCodeBook,ImaskCodeBookCC);
					cvconnectedComponents(ImaskCodeBookCC);
				}

				/* Only Dancer
                    jiwei 2012.3.3*/
				IplImage *ImaDancer = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
				cvZero(ImaDancer);
				cvCopy( rawImage, ImaDancer, ImaskCodeBookCC);
				cvShowImage( "OnlyDancer", ImaDancer);
				//cvWriteToAVI( writer,  ImaDancer);
				/*IplImage *ImaCBvideo = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
				cvConvertImage(ImaskCodeBook, ImaCBvideo, CV_GRAY2RGB);
				cvWriteToAVI( writer,  ImaCBvideo);*/
				IplImage * imgRect = cvCreateImage( cvGetSize( ImaDancer), ImaDancer->depth,
                                              ImaDancer->nChannels);
                CvPoint pntmin, pntmax;
                drawRect( ImaDancer, pntmin, pntmax);
                cvCopy( rawImage, imgRect);
                cvRectangle( imgRect, pntmin, pntmax, cvScalar(0,0,255), 1);
				CvFont font;
                double hScale=0.4;
                double vScale=0.4;
                int    lineWidth=1;
                cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth);
                cvPutText (imgRect,"The Dancer", pntmin, &font, cvScalar(255,255,255));
				cvShowImage( "RectDancer", imgRect);
                cvWriteToAVI( writer,  imgRect);
				/*end of Only Dancer*/
				//Display
           		cvShowImage( "Raw", rawImage );
				cvShowImage( "AVG_ConnectComp",ImaskAVGCC);
   				cvShowImage( "ForegroundAVG",ImaskAVG);
 				cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
 				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);


				//USER INPUT:
	         	c = cvWaitKey(10)&0xFF;
				//End processing on ESC, q or Q
				if(c == 27 || c == 'q' || c == 'Q')
					break;
				//Else check for user input
				switch(c)
				{
					case 'h':
						help();
						break;
					case 'p':
						pause ^= 1;
						break;
					case 's':
						singlestep = 1;
						pause = false;
						break;
					case 'r':
						pause = false;
						singlestep = false;
						break;
					//AVG BACKGROUND PARAMS
					case '-':
						if(i > endcapture){
							scalehigh += 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '=':
						if(i > endcapture){
							scalehigh -= 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '[':
						if(i > endcapture){
							scalelow += 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
					case ']':
						if(i > endcapture){
							scalelow -= 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
				//CODEBOOK PARAMS
                case 'y':
                case '0':
                        ch[0] = 1;
                        ch[1] = 0;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'u':
                case '1':
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'v':
                case '2':
                        ch[0] = 0;
                        ch[1] = 0;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'a': //All
                case '3':
                        ch[0] = 1;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'b':  //both u and v together
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
				case 'i': //modify max classification bounds (max bound goes higher)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							maxMod[n] += 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'o': //modify max classification bounds (max bound goes lower)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							maxMod[n] -= 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'k': //modify min classification bounds (min bound goes lower)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							minMod[n] += 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				case 'l': //modify min classification bounds (min bound goes higher)
					for(n=0; n<nChannels; n++){
						if(ch[n])
							minMod[n] -= 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				}

            }
		}
		cvReleaseVideoWriter( &writer );  //finalize dancer.avi
		cvReleaseCapture( &capture );
		cvDestroyWindow( "Raw" );
		cvDestroyWindow( "ForegroundAVG" );
		cvDestroyWindow( "AVG_ConnectComp");
		cvDestroyWindow( "ForegroundCodeBook");
		cvDestroyWindow( "CodeBook_ConnectComp");
		cvDestroyWindow( "RectDancer");
		DeallocateImages();
		if(yuvImage) cvReleaseImage(&yuvImage);
		if(ImaskAVG) cvReleaseImage(&ImaskAVG);
		if(ImaskAVGCC) cvReleaseImage(&ImaskAVGCC);
		if(ImaskCodeBook) cvReleaseImage(&ImaskCodeBook);
		if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
		delete [] cB;
		/*dancer*/
		cvDestroyWindow( "OnlyDancer");
		//if( ImaDancer) cvReleaseImage(&ImaDancer);
    }
	else{ printf("\n\nDarn, something is wrong with the parameters\n\n"); help();
	}
    return 0;
}
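The listing calls a helper getVideoInfo() that is not shown. A plausible definition, assuming it simply queries the capture properties (the C++ reference parameters are inferred from the call site, and the fallback fps value is our assumption, not part of the original):

//Hypothetical sketch of the getVideoInfo() helper used above; the real one is not in the listing.
void getVideoInfo( CvCapture* capture, double& fps, CvSize& size )
{
    if( !capture ) return;                      //leave the caller's defaults untouched
    fps = cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    if( fps <= 0 ) fps = 30.0;                  //assumed fallback when the container reports no fps
    size = cvSize(
        (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH ),
        (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT ) );
}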
Example #4
void VideoDisplay::OnTimerDrawImage()
{
    if (m_worker->m_IsCapturing)
    {
        _nCaptureTimeOut++;
        repaint();
    }
    else
        _nCaptureTimeOut = 0;

    if (_nCaptureTimeOut >= 500) // timeout 20 seconds
    {
        QApplication::quit();
//        _nCaptureTimeOut = 0;

//        QMessageBox::StandardButton resBtn = QMessageBox::question( this, "TrackCam",
//                                                                    tr("Video not found!\n Exit Application?\n"),
//                                                                    QMessageBox::No | QMessageBox::Yes,
//                                                                    QMessageBox::Yes);
//        if (resBtn == QMessageBox::Yes)
//        {
//            QApplication::quit();
//        }
    }


    if (!m_worker->m_pFrame)
    {
        repaint();
        return;
    }


    if (!_bufferFrame)
        _bufferFrame = cvCreateImage(cvSize(m_Config._config.frmWidth, m_Config._config.frmHeight), 8, 3);

    cvCopy(m_worker->m_pFrame, _bufferFrame);

    if (m_worker->m_IsTracking)
    {
        CvRect	nCvRectBox = cvRect(0, 0, 0, 0);
        utl_ConvertRectToBox(m_worker->m_rectCurrent, &nCvRectBox);
        m_centerX = ((nCvRectBox.x + nCvRectBox.width / 2)
                    - (m_Config._config.frmWidth / 2)) * 100 / m_Config._config.frmWidth;
        m_centerY = ((nCvRectBox.y + nCvRectBox.height / 2)
                    - (m_Config._config.frmHeight / 2)) * 100 / m_Config._config.frmHeight;
    }
    else
    {
        m_centerX = 0;
        m_centerY = 0;
    }



    cvInitFont(&cvTxtFont, CV_FONT_HERSHEY_SIMPLEX | CV_FONT_ITALIC, hScale, vScale, 0, lineWidth);

    if (m_strVideoFile == "")
    {
        szTime  = GetTimeString().toStdString();
        fn_ConvStrChar(szTime, szTmp);
        cvPutText(_bufferFrame, szTmp, cvPoint(3, m_Config._config.frmHeight - 15), &cvTxtFont, cvScalar(255, 255, 255));
    }

    repaint();


    if (m_Writer != NULL)
    {
        szDay   = GetDateString().toStdString();
        fn_ConvStrChar(szDay, szTmp);
        cvPutText(_bufferFrame, szTmp, cvPoint(m_Config._config.frmWidth - 100, 15),
                  &cvTxtFont, cvScalar(255, 255, 255));        

        cvWriteToAVI(m_Writer, _bufferFrame);
    }

}
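Condensed, the overlay-and-record step above boils down to the following sketch. The function name stampAndRecord is ours, and it assumes the CvFont and CvVideoWriter were initialized elsewhere, as in the class code; cvWriteToAVI used above is the older name for cvWriteFrame.

#include "cv.h"
#include "highgui.h"

//Stamp a text label onto the frame, then append the frame to the open AVI.
static void stampAndRecord( IplImage* frame, CvVideoWriter* writer,
                            CvFont* font, const char* label )
{
    if( !frame ) return;
    cvPutText( frame, label, cvPoint( 3, frame->height - 15 ),
               font, cvScalar( 255, 255, 255 ) );
    if( writer )
        cvWriteFrame( writer, frame );
}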