//--------------------------------------------------------------
void testApp::setup(){
    vidGrabber.setVerbose(true);
    vidGrabber.initGrabber(320,240);

    colorImg.allocate(320,240);
    gauss_foregroundImg.allocate(320,240);
    gauss_backgroundImg.allocate(320,240);
    fgd_foregroundImg.allocate(320,240);
    fgd_backgroundImg.allocate(320,240);

    CvGaussBGStatModelParams* params = new CvGaussBGStatModelParams;
    params->win_size      = 2;
    params->n_gauss       = 5;
    params->bg_threshold  = 0.7;
    params->std_threshold = 3.5;
    params->minArea       = 15;
    params->weight_init   = 0.05;
    params->variance_init = 30;

    //bgModel = cvCreateGaussianBGModel(colorImg.getCvImage(), params);
    gauss_bgModel = cvCreateGaussianBGModel(colorImg.getCvImage());
    fgd_bgModel   = cvCreateFGDStatModel(colorImg.getCvImage());
}
// Function cvCreateBGStatModel creates and returns an initialized BG model.
// Parameters:
//   first_frame - frame from the video sequence
//   model_type  - type of BG model (CV_BG_MODEL_MOG, CV_BG_MODEL_FGD, ...)
//   parameters  - (optional) if NULL, the default parameters of the algorithm are used
static CvBGStatModel* cvCreateBGStatModel( IplImage* first_frame, int model_type, void* params )
{
    CvBGStatModel* bg_model = NULL;

    if( model_type == CV_BG_MODEL_FGD || model_type == CV_BG_MODEL_FGD_SIMPLE )
        bg_model = cvCreateFGDStatModel( first_frame, (CvFGDStatModelParams*)params );
    else if( model_type == CV_BG_MODEL_MOG )
        bg_model = cvCreateGaussianBGModel( first_frame, (CvGaussBGStatModelParams*)params );

    return bg_model;
}
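As a quick illustration of how this dispatcher is meant to be used, here is a minimal sketch. It assumes it sits in the same translation unit as the dispatcher above, with the legacy cvaux/highgui headers already included, and "video.avi" is a placeholder file name; the standalone sample further below does the same thing with cvCreateGaussianBGModel directly.

// Minimal driver for the dispatcher above (sketch only; "video.avi" is a placeholder).
int demo_bg_stat_model(void)
{
    CvCapture* cap   = cvCaptureFromFile("video.avi");
    IplImage*  frame = cap ? cvQueryFrame(cap) : NULL;
    if( !frame ) return -1;

    // NULL params selects the algorithm's default parameters;
    // pass a CvGaussBGStatModelParams* / CvFGDStatModelParams* to override them.
    CvBGStatModel* bg_model = cvCreateBGStatModel( frame, CV_BG_MODEL_MOG, NULL );

    cvNamedWindow("FG", 1);
    while( (frame = cvQueryFrame(cap)) != NULL )
    {
        cvUpdateBGStatModel( frame, bg_model );     // refresh background/foreground masks
        cvShowImage("FG", bg_model->foreground);
        if( (cvWaitKey(10) & 0xFF) == 27 ) break;   // ESC quits
    }

    cvReleaseBGStatModel( &bg_model );
    cvReleaseCapture( &cap );
    return 0;
}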
CvBGStatModel* BGModelCvMoGFactory::createModel(const jderobotutil::ParamDict params, IplImage* firstFrame) const{
    CvGaussBGStatModelParams tmpParams;
    tmpParams.win_size      = params.getParamAsIntWithDefault("win_size", defaultParams.win_size);
    tmpParams.n_gauss       = params.getParamAsIntWithDefault("n_gauss", defaultParams.n_gauss);
    tmpParams.bg_threshold  = params.getParamAsFloatWithDefault("bg_threshold", defaultParams.bg_threshold);
    tmpParams.std_threshold = params.getParamAsFloatWithDefault("std_threshold", defaultParams.std_threshold);
    tmpParams.minArea       = params.getParamAsFloatWithDefault("minArea", defaultParams.minArea);
    tmpParams.weight_init   = params.getParamAsFloatWithDefault("weight_init", defaultParams.weight_init);
    tmpParams.variance_init = params.getParamAsFloatWithDefault("variance_init", defaultParams.variance_init);
    return cvCreateGaussianBGModel(firstFrame, &tmpParams);
}
void initBackgroundModel(CvBGStatModel** bgmodel, IplImage* tmp_frame, CvGaussBGStatModelParams* paramMoG){
    paramMoG->win_size      = 200;    // library default: 200
    paramMoG->n_gauss       = 3;      // library default: 5
    paramMoG->bg_threshold  = 0.1;    // library default: 0.7
    paramMoG->std_threshold = 5;      // library default: 2.5
    paramMoG->minArea       = 200.f;  // library default: 15.f
    paramMoG->weight_init   = 0.01;   // library default: 0.05
    paramMoG->variance_init = 30;     // library default: 30*30
    *bgmodel = cvCreateGaussianBGModel(tmp_frame, paramMoG);
}
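The helper above only fills the parameter struct and constructs the model; a caller still needs a first frame and a per-frame update loop. The wiring below is a hypothetical sketch (the camera index and variable names are illustrative, not taken from the original project); the standalone sample that follows shows the same pattern in full.

// Hypothetical caller for initBackgroundModel() (sketch only).
CvCapture* cap   = cvCaptureFromCAM(0);        // or cvCaptureFromFile(...)
IplImage*  frame = cap ? cvQueryFrame(cap) : NULL;
if( frame )
{
    CvGaussBGStatModelParams paramMoG;         // filled in by the helper
    CvBGStatModel* bgmodel = NULL;
    initBackgroundModel(&bgmodel, frame, &paramMoG);

    while( (frame = cvQueryFrame(cap)) != NULL )
    {
        cvUpdateBGStatModel(frame, bgmodel);   // bgmodel->foreground now holds the mask
        // ... consume bgmodel->foreground / bgmodel->background here ...
    }
    cvReleaseBGStatModel(&bgmodel);
}
cvReleaseCapture(&cap);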
//this is a sample for foreground detection functions
int main(int argc, char** argv)
{
    IplImage*  tmp_frame = NULL;
    CvCapture* cap = NULL;

    if( argc < 2 )
    {
        printf("please specify video file name \n");
        exit(0);
    }

    cap = cvCaptureFromFile(argv[1]);
    tmp_frame = cvQueryFrame(cap);
    if( !tmp_frame )
    {
        printf("bad video \n");
        exit(0);
    }

    cvNamedWindow("BG", 1);
    cvNamedWindow("FG", 1);

    //create BG model
    CvBGStatModel* bg_model = cvCreateGaussianBGModel( tmp_frame );

    for( int fr = 1; tmp_frame; tmp_frame = cvQueryFrame(cap), fr++ )
    {
        cvUpdateBGStatModel( tmp_frame, bg_model );
        cvShowImage("BG", bg_model->background);
        cvShowImage("FG", bg_model->foreground);
        int k = cvWaitKey(5);
        if( k == 27 ) break;
    }

    cvReleaseBGStatModel( &bg_model );
    cvReleaseCapture(&cap);
    return 0;
}
int main(int argc, char** argv)
{
    CvCapture* capture = cvCreateCameraCapture( 0 );
    assert(capture);

    /* print a welcome message, and the OpenCV version */
    // CV_VERSION,
    // CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    /* Capture 1 video frame for initialization */
    IplImage* videoFrame = NULL;
    videoFrame = cvQueryFrame(capture);
    if( !videoFrame )
    {
        printf("Bad frame \n");
        exit(0);
    }

    // Create windows
    cvNamedWindow("BG", 1);
    cvNamedWindow("FG", 1);

    // Select parameters for the Gaussian model.
    CvGaussBGStatModelParams* params = new CvGaussBGStatModelParams;
    params->win_size      = 2;
    params->n_gauss       = 3;
    params->bg_threshold  = 0.9;
    params->std_threshold = .5;
    params->minArea       = 15;
    params->weight_init   = 0.05;
    params->variance_init = 30;

    // Create the CvBGStatModel:
    //   cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parameters )
    // or
    //   cvCreateGaussianBGModel( IplImage* first_frame )
    CvBGStatModel* bgModel = cvCreateGaussianBGModel(videoFrame, params);

    int key = -1;
    // while(key != 'q')
    // {
        // Grab a frame
        videoFrame = cvQueryFrame(capture);
        //if( !videoFrame )
        //    break;

        int sv = 2;

        // Step 1: update the model (background subtraction)
        // cvUpdateBGStatModel(videoFrame, bgModel);

        // Scan the frame for skin-colored pixels and grow connected components from them.
        // checkForSkinColor(), connectedComp(), i and ConnectedComponent are assumed to be
        // defined elsewhere in this project.
        for(int x = 0; x < videoFrame->width; x++)
        {
            for(int y = 0; y < videoFrame->height; y++)
            {
                if( checkForSkinColor(x, y, videoFrame) )
                {
                    connectedComp(x, y, videoFrame);
                }
            }
        }

        for(int k = 0; k < i; k++)
        {
            printf("\n");
            for(int j = 0; j < 2; j++)
                printf("%d", ConnectedComponent[k][j]);
        }

        // Display results
        //cvShowImage("BG", bgModel->background);
        //cvShowImage("FG", bgModel->foreground);

        // key = cvWaitKey(100);
    // }

    cvDestroyWindow("BG");
    cvDestroyWindow("FG");
    cvReleaseBGStatModel( &bgModel );
    cvReleaseCapture(&capture);
    return 0;
}
int main(int argc, char** argv)
{
    IplImage* temp1 = NULL;
    IplImage* temp2 = NULL;
    IplImage* result  = NULL;
    IplImage* result1 = NULL;
    IplImage* result2 = NULL;
    CvBGStatModel* bg_model  = 0;
    CvBGStatModel* bg_model1 = 0;
    IplImage* rawImage  = 0;
    IplImage* yuvImage  = 0;
    IplImage* rawImage1 = 0;
    IplImage* pFrImg  = 0;
    IplImage* pFrImg1 = 0;
    IplImage* pFrImg2 = 0;
    IplImage* ImaskCodeBookCC = 0;
    CvCapture* capture = 0;
    int c, n;

    maxMod[0] = 25;  minMod[0] = 35;
    maxMod[1] = 8;   minMod[1] = 8;
    maxMod[2] = 8;   minMod[2] = 8;

    // Hard-coded input file (overrides the command line).
    argc = 2;
    argv[1] = "intelligentroom_raw.avi";

    if( argc > 2 )
    {
        fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
        return -1;
    }
    if( argc == 1 )
        if( !(capture = cvCaptureFromCAM(-1)) )
        {
            fprintf(stderr, "Can not open camera.\n");
            return -2;
        }
    if( argc == 2 )
        if( !(capture = cvCaptureFromFile(argv[1])) )
        {
            fprintf(stderr, "Can not open video file %s\n", argv[1]);
            return -2;
        }

    bool pause = false;
    bool singlestep = false;

    if( capture )
    {
        cvNamedWindow( "Original video frame", 1 );
        cvNamedWindow( "Codebook without real-time update [this paper]", 1 );
        cvNamedWindow( "Codebook with real-time update [this paper]", 1 );
        cvNamedWindow( "MOG-based method [Chris Stauffer'2001]", 1 );
        cvNamedWindow( "Three-frame differencing", 1 );
        cvNamedWindow( "Bayes-decision-based method [Liyuan Li'2003]", 1 );

        cvMoveWindow( "Original video frame", 0, 0 );
        cvMoveWindow( "Codebook without real-time update [this paper]", 360, 0 );
        cvMoveWindow( "Codebook with real-time update [this paper]", 720, 350 );
        cvMoveWindow( "MOG-based method [Chris Stauffer'2001]", 0, 350 );
        cvMoveWindow( "Three-frame differencing", 720, 0 );
        cvMoveWindow( "Bayes-decision-based method [Liyuan Li'2003]", 360, 350 );

        int nFrmNum = -1;
        for(;;)
        {
            if( !pause )
            {
                rawImage = cvQueryFrame( capture );
                ++nFrmNum;
                printf("Frame %d\n", nFrmNum);
                if( !rawImage ) break;
            }
            if( singlestep )
            {
                pause = true;
            }

            // First frame: allocate buffers and create the background models.
            if( 0 == nFrmNum )
            {
                printf(". . . wait for it . . .\n");
                temp1   = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                temp2   = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                result1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result  = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);

                bg_model  = cvCreateGaussianBGModel(rawImage);
                bg_model1 = cvCreateFGDStatModel(rawImage);

                rawImage1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
                yuvImage  = cvCloneImage(rawImage);
                pFrImg    = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                pFrImg1   = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                pFrImg2   = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );

                imageLen = rawImage->width * rawImage->height;
                cA = new codeBook [imageLen];
                cC = new codeBook [imageLen];
                cD = new codeBook [imageLen];
                for(int f = 0; f < imageLen; f++)
                {
                    cA[f].numEntries = 0;  cA[f].t = 0;
                    cC[f].numEntries = 0;  cC[f].t = 0;
                    cD[f].numEntries = 0;  cD[f].t = 0;
                }
                for(int nc = 0; nc < nChannels; nc++)
                {
                    cbBounds[nc] = 10;
                }
                ch[0] = true; ch[1] = true; ch[2] = true;
            }

            if( rawImage )
            {
                if( !pause )
                {
                    cvSmooth(rawImage, rawImage1, CV_GAUSSIAN, 3, 3);
                    // Three-frame differencing.
                    cvChangeDetection(temp1, temp2, result1);
                    cvChangeDetection(rawImage1, temp1, result2);
                    cvAnd(result1, result2, result, NULL);
                    cvCopy(temp1, temp2, NULL);
                    cvCopy(rawImage, temp1, NULL);
                    // MOG and Bayes-decision (FGD) models.
                    cvUpdateBGStatModel( rawImage, bg_model );
                    cvUpdateBGStatModel( rawImage, bg_model1 );
                }

                cvCvtColor( rawImage1, yuvImage, CV_BGR2YCrCb );

                // Codebook training phase.
                if( !pause && nFrmNum >= 1 && nFrmNum < T )
                {
                    pColor = (uchar *)((yuvImage)->imageData);
                    for(int c = 0; c < imageLen; c++)
                    {
                        update_codebook_model(pColor, cA[c], cbBounds, nChannels);
                        trainig_codebook(pColor, cC[c], cbBounds, nChannels);
                        pColor += 3;
                    }
                }

                if( nFrmNum == T )
                {
                    for(c = 0; c < imageLen; c++)
                    {
                        clear_stale_entries(cA[c]);
                        training_clear_stale_entries(cC[c]);
                    }
                }

                if( nFrmNum > T )
                {
                    pColor = (uchar *)((yuvImage)->imageData);
                    uchar maskPixelCodeBook;
                    uchar maskPixelCodeBook1;
                    uchar maskPixelCodeBook2;
                    uchar *pMask  = (uchar *)((pFrImg)->imageData);
                    uchar *pMask1 = (uchar *)((pFrImg1)->imageData);
                    uchar *pMask2 = (uchar *)((pFrImg2)->imageData);
                    for(int c = 0; c < imageLen; c++)
                    {
                        // Output of this paper's codebook algorithm without automatic background update.
                        maskPixelCodeBook1 = background_Diff(pColor, cA[c], nChannels, minMod, maxMod);
                        *pMask1++ = maskPixelCodeBook1;

                        // Output of this paper's codebook algorithm with automatic background update.
                        if( !pause && det_update_codebook_cC(pColor, cC[c], cbBounds, nChannels) )
                        {
                            det_update_codebook_cD(pColor, cD[c], cbBounds, nChannels, nFrmNum);
                            realtime_clear_stale_entries_cD(cD[c], nFrmNum);
                            cD_to_cC(cD[c], cC[c], (nFrmNum - T)/5);
                        }
                        else
                        {
                            realtime_clear_stale_entries_cC(cC[c], nFrmNum);
                        }
                        maskPixelCodeBook2 = background_Diff(pColor, cC[c], nChannels, minMod, maxMod);
                        *pMask2++ = maskPixelCodeBook2;
                        pColor += 3;
                    }
                    cvCopy(pFrImg2, ImaskCodeBookCC);
                    if( !pause )
                    {
                        count_Segmentation(cC, yuvImage, nChannels, minMod, maxMod);
                        forgratio = (float)(fgcount) / imageLen;
                    }
                }

                bg_model1->foreground->origin = 1;
                bg_model->foreground->origin = 1;
                pFrImg->origin = 1;
                pFrImg1->origin = 1;
                pFrImg2->origin = 1;
                ImaskCodeBookCC->origin = 1;
                result->origin = 1;

                //connected_Components(pFrImg1,1,40);
                //connected_Components(pFrImg2,1,40);

                cvShowImage( "MOG-based method [Chris Stauffer'2001]", bg_model->foreground );
                cvShowImage( "Original video frame", rawImage );
                cvShowImage( "Three-frame differencing", result );
                cvShowImage( "Codebook without real-time update [this paper]", pFrImg1 );
                cvShowImage( "Codebook with real-time update [this paper]", pFrImg2 );
                cvShowImage( "Bayes-decision-based method [Liyuan Li'2003]", bg_model1->foreground );

                c = cvWaitKey(1) & 0xFF;
                //End processing on ESC, q or Q
                if(c == 27 || c == 'q' || c == 'Q')
                    break;
                //Else check for user input
                switch(c)
                {
                case 'h':
                    help();
                    break;
                case 'p':
                    pause ^= 1;
                    break;
                case 's':
                    singlestep = 1;
                    pause = false;
                    break;
                case 'r':
                    pause = false;
                    singlestep = false;
                    break;
                //CODEBOOK PARAMS
                case 'y': case '0':
                    ch[0] = 1; ch[1] = 0; ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for(n = 0; n < nChannels; n++)
                        printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'u': case '1':
                    ch[0] = 0; ch[1] = 1; ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for(n = 0; n < nChannels; n++)
                        printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'v': case '2':
                    ch[0] = 0; ch[1] = 0; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n = 0; n < nChannels; n++)
                        printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'a': //All
                case '3':
                    ch[0] = 1; ch[1] = 1; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n = 0; n < nChannels; n++)
                        printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'b': //both u and v together
                    ch[0] = 0; ch[1] = 1; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n = 0; n < nChannels; n++)
                        printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'z':
                    printf(" Fadd +1 ");
                    Fadd += 1;
                    printf("Fadd=%.4d\n", Fadd);
                    break;
                case 'x':
                    printf(" Fadd -1 ");
                    Fadd -= 1;
                    printf("Fadd=%.4d\n", Fadd);
                    break;
                case 'n':
                    printf(" Tavgstale +1 ");
                    Tavgstale += 1;
                    printf("Tavgstale=%.4d\n", Tavgstale);
                    break;
                case 'm':
                    printf(" Tavgstale -1 ");
                    Tavgstale -= 1;
                    printf("Tavgstale=%.4d\n", Tavgstale);
                    break;
                case 'i': //modify max classification bounds (max bound goes higher)
                    for(n = 0; n < nChannels; n++)
                    {
                        if(ch[n])
                            maxMod[n] += 1;
                        printf("%.4d,", maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'o': //modify max classification bounds (max bound goes lower)
                    for(n = 0; n < nChannels; n++)
                    {
                        if(ch[n])
                            maxMod[n] -= 1;
                        printf("%.4d,", maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'k': //modify min classification bounds (min bound goes lower)
                    for(n = 0; n < nChannels; n++)
                    {
                        if(ch[n])
                            minMod[n] += 1;
                        printf("%.4d,", minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                case 'l': //modify min classification bounds (min bound goes higher)
                    for(n = 0; n < nChannels; n++)
                    {
                        if(ch[n])
                            minMod[n] -= 1;
                        printf("%.4d,", minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                }
            }
        }

        cvReleaseCapture( &capture );
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model1);

        cvDestroyWindow( "Original video frame" );
        cvDestroyWindow( "Codebook without real-time update [this paper]" );
        cvDestroyWindow( "Codebook with real-time update [this paper]" );
        cvDestroyWindow( "MOG-based method [Chris Stauffer'2001]" );
        cvDestroyWindow( "Three-frame differencing" );
        cvDestroyWindow( "Bayes-decision-based method [Liyuan Li'2003]" );

        cvReleaseImage(&temp1);
        cvReleaseImage(&temp2);
        cvReleaseImage(&result);
        cvReleaseImage(&result1);
        cvReleaseImage(&result2);
        cvReleaseImage(&pFrImg);
        cvReleaseImage(&pFrImg1);
        cvReleaseImage(&pFrImg2);
        if(yuvImage) cvReleaseImage(&yuvImage);
        // rawImage comes from cvQueryFrame() and is owned by the capture,
        // so it must not be released here.
        if(rawImage1) cvReleaseImage(&rawImage1);
        if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);

        delete [] cA;
        delete [] cC;
        delete [] cD;
    }
    else
    {
        printf("\n\nDarn, something is wrong with the parameters\n\n");
        help();
    }
    return 0;
}
// Update the Gaussian Mixture Model with the current frame.
void GMM::update(IplImage* curr){
    if(bgModel == NULL){
        bgModel = cvCreateGaussianBGModel(curr, NULL);
    }else{
        // Convert to a single-channel, histogram-equalized image if needed.
        IplImage* curr_gray = NULL;
        if(curr->nChannels == 3){
            curr_gray = cvCreateImage(cvGetSize(curr), IPL_DEPTH_8U, 1);
            cvCvtColor(curr, curr_gray, CV_RGB2GRAY);
            cvEqualizeHist(curr_gray, curr_gray);
        }else if(curr->nChannels == 1){
            curr_gray = curr;
            cvEqualizeHist(curr_gray, curr_gray);
        }else{
            // Unsupported channel count: nothing to update.
            return;
        }

        // Learning rate: learn the background for the first learnCnt frames,
        // then freeze the model (learning rate 0).
        if(frameCnt++ <= learnCnt){
            cvUpdateBGStatModel(curr_gray, bgModel, -1); // default learning rate
        }else{
            cvUpdateBGStatModel(curr_gray, bgModel, 0);
        }

        if(curr->nChannels == 3){
            cvReleaseImage(&curr_gray);
        }
    }

    if(fgclone){
        cvCopy(bgModel->foreground, fgclone, NULL);
    }else{
        fgclone = cvCloneImage(bgModel->foreground);
    }
    // Remove salt & pepper noise from the foreground mask.
    cvSmooth(fgclone, fgclone, CV_MEDIAN, 5);
}
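To show how this method is typically driven, here is a minimal per-frame loop. It assumes a GMM class with a default constructor and a publicly readable fgclone member; neither is shown in the snippet above, so treat this as a sketch rather than the project's actual driver.

// Hypothetical driver for GMM::update() (sketch only; the GMM constructor and the
// public visibility of fgclone are assumptions, not taken from the snippet above).
int main()
{
    CvCapture* cap = cvCaptureFromCAM(0);
    if( !cap ) return -1;

    GMM gmm;                                   // assumed default constructor
    cvNamedWindow("foreground", 1);

    IplImage* frame;
    while( (frame = cvQueryFrame(cap)) != NULL )
    {
        gmm.update(frame);                     // learn first, then classify
        if( gmm.fgclone )                      // median-filtered foreground mask
            cvShowImage("foreground", gmm.fgclone);
        if( (cvWaitKey(10) & 0xFF) == 27 ) break;
    }

    cvReleaseCapture(&cap);
    return 0;
}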