// Advance the Gaussian background model by one frame and return the
// model's binary foreground mask (owned by the model, not the caller).
IplImage* updateBackground(CvBGStatModel *bg_model, IplImage * tmp_frame){
    // One adaptation step of the statistical background model.
    cvUpdateBGStatModel(tmp_frame, bg_model);

    // Debug dumps of the model images, disabled by default:
    // if(!cvSaveImage("./data/background.jpg",bg_model->background)) printf("Could not save the background image\n");
    // if(!cvSaveImage("./data/foreground.jpg",bg_model->foreground)) printf("Could not save the foreground image\n");

    return bg_model->foreground;
}
//-------------------------------------------------------------- void testApp::update(){ ofBackground(100,100,100); vidGrabber.grabFrame(); if (vidGrabber.isFrameNew()){ colorImg.setFromPixels(vidGrabber.getPixels(), 320,240); // Update models cvUpdateBGStatModel(colorImg.getCvImage() ,gauss_bgModel); gauss_foregroundImg = gauss_bgModel->foreground; gauss_backgroundImg = gauss_bgModel->background; cvUpdateBGStatModel(colorImg.getCvImage() ,fgd_bgModel); fgd_foregroundImg = fgd_bgModel->foreground; fgd_backgroundImg = fgd_bgModel->background; } }
//this is a sample for foreground detection functions int main(int argc, char** argv) { IplImage* tmp_frame = NULL; CvCapture* cap = NULL; if( argc < 2 ) { printf("please specify video file name \n"); exit(0); } cap = cvCaptureFromFile(argv[1]); tmp_frame = cvQueryFrame(cap); if(!tmp_frame) { printf("bad video \n"); exit(0); } cvNamedWindow("BG", 1); cvNamedWindow("FG", 1); //create BG model CvBGStatModel* bg_model = cvCreateGaussianBGModel( tmp_frame ); for( int fr = 1;tmp_frame; tmp_frame = cvQueryFrame(cap), fr++ ) { cvUpdateBGStatModel( tmp_frame, bg_model ); cvShowImage("BG", bg_model->background); cvShowImage("FG", bg_model->foreground); int k = cvWaitKey(5); if( k == 27 ) break; } cvReleaseBGStatModel( &bg_model ); cvReleaseCapture(&cap); return 0; }
// ###################################################################### void ForegroundDetectionChannel::doInput(const InputFrame& inframe) { #ifdef HAVE_OPENCV ASSERT(inframe.colorByte().initialized()); LINFO("Input to Foreground Detection Channel ok."); //Convert the input frame to opencv format IplImage* inFrame_cv = img2ipl(inframe.colorByte()); //If the statistics model has not been created (i.e. this is the first frame), //then create it. if(itsStatModel_cv == NULL) itsStatModel_cv = cvCreateFGDStatModel( inFrame_cv ); //Update the statistics model cvUpdateBGStatModel( inFrame_cv, itsStatModel_cv ); //Assign the foreground and background maps //OpenCV clears out the foreground and background memory at the beginning //of every icvUpdateFGDStatModel, so let's do a deep copy of the image //data just to be safe. //Also, because the source image is byte-valued, let's divide by 255 to get //a true probability itsForegroundMap = ipl2gray(itsStatModel_cv->foreground).deepcopy() / 255.0; //Rescale the image to the correct dimensions itsMap = rescale(itsForegroundMap, this->getMapDims()); float mi, ma; getMinMax(itsMap,mi, ma); LINFO("FOREGROUND MAP RANGE: [%f .. %f]", mi, ma); //Free the memory allocated to the input frame - OpenCV makes it's own deep //copy of this data internally. cvReleaseImage( &inFrame_cv ); #endif }
/* Process current image: lazily create the background model from the
 * first frame (choosing the parameter block that matches the model
 * type), then feed every later frame through cvUpdateBGStatModel. */
virtual void Process(IplImage* pImg)
{
    if(m_pFG != NULL)
    {
        // Model already exists: run one adaptation step and return.
        cvUpdateBGStatModel( pImg, m_pFG );
        return;
    }

    // First frame: select the parameter struct for the chosen model.
    void* param = m_pFGParam;
    if( m_FGType == CV_BG_MODEL_FGD || m_FGType == CV_BG_MODEL_FGD_SIMPLE )
    {
        param = &m_ParamFGD;
    }
    else if( m_FGType == CV_BG_MODEL_MOG )
    {
        param = &m_ParamMOG;
    }
    // Any other type keeps the caller-supplied parameters.

    m_pFG = cvCreateBGStatModel( pImg, m_FGType, param);
    LoadState(0, 0);
};
// Main demo loop: grab frames from the current source, run the selected
// background-subtraction method (CPU or GPU), clean the mask, and display
// the composited result until the user quits.
void App::process()
{
    if (sources.empty())
    {
        std::cout << "Using default frames source...\n";
        sources.push_back(new VideoSource("data/bgfg/haut-640x480.avi"));
    }

    cv::Mat frame;
    cv::gpu::GpuMat d_frame;   // device-side copy of the current frame
    IplImage ipl_frame;        // legacy C-API view of `frame` for the FGD model
    cv::Mat fgmask;            // foreground mask (CPU side)
    cv::gpu::GpuMat d_fgmask;  // foreground mask (GPU side)
    cv::Mat buf;               // scratch buffer for filterSpeckles
    cv::Mat outImg;            // composited display image
    cv::Mat foreground;        // foreground-only pixels for the second window

    while (!exited)
    {
        int64 total_time = cv::getTickCount();

        sources[curSource]->next(frame);
        d_frame.upload(frame);
        ipl_frame = frame;     // NOTE: shares pixel data with `frame`, no copy
        frame.copyTo(outImg);

        double total_fps = 0.0;
        double proc_fps = 0.0;

        try
        {
            int64 proc_time = cv::getTickCount();

            switch (method)
            {
            case MOG:
                {
                    // `reinitialize` is expected to be set elsewhere when the
                    // source, method or device selection changes.
                    if (useGpu)
                    {
                        if (reinitialize)
                            mog_gpu.initialize(d_frame.size(), d_frame.type());
                        mog_gpu(d_frame, d_fgmask, 0.01f);
                    }
                    else
                    {
                        if (reinitialize)
                            mog_cpu.initialize(frame.size(), frame.type());
                        mog_cpu(frame, fgmask, 0.01);
                    }
                    break;
                }
            //case MOG2:
            //    {
            //        if (useGpu)
            //        {
            //            if (reinitialize)
            //                mog2_gpu.initialize(d_frame.size(), d_frame.type());
            //            mog2_gpu(d_frame, d_fgmask);
            //        }
            //        else
            //        {
            //            if (reinitialize)
            //            {
            //                mog2_cpu.set("detectShadows", false);
            //                mog2_cpu.initialize(frame.size(), frame.type());
            //            }
            //            mog2_cpu(frame, fgmask);
            //        }
            //        break;
            //    }
            case FGD:
                {
                    if (useGpu)
                    {
                        if (reinitialize)
                            fgd_gpu.create(d_frame);
                        fgd_gpu.update(d_frame);
                        fgd_gpu.foreground.copyTo(d_fgmask);
                    }
                    else
                    {
                        // Legacy C API: model is created from the first frame.
                        if (reinitialize)
                            fgd_cpu = cvCreateFGDStatModel(&ipl_frame);
                        cvUpdateBGStatModel(&ipl_frame, fgd_cpu);
                        cv::Mat(fgd_cpu->foreground).copyTo(fgmask);
                    }
                    break;
                }
            //case GMG:
            //    {
            //        if (useGpu)
            //        {
            //            if (reinitialize)
            //                gmg_gpu.initialize(d_frame.size());
            //            gmg_gpu(d_frame, d_fgmask);
            //        }
            //        else
            //        {
            //            if (reinitialize)
            //                gmg_cpu.initialize(frame.size(), 0, 255);
            //            gmg_cpu(frame, fgmask);
            //        }
            //        break;
            //    }
            case VIBE:
                {
                    // VIBE has no CPU fallback in this demo.
                    if (useGpu)
                    {
                        if (reinitialize)
                            vibe_gpu.initialize(d_frame);
                        vibe_gpu(d_frame, d_fgmask);
                    }
                    break;
                }
            }

            proc_fps = cv::getTickFrequency() / (cv::getTickCount() - proc_time);

            if (useGpu)
                d_fgmask.download(fgmask);

            // Remove small speckle blobs from the mask before compositing.
            cv::filterSpeckles(fgmask, 0, 100, 1, buf);

            // Tint the detected foreground regions in the display image.
            cv::add(outImg, cv::Scalar(100, 100, 0), outImg, fgmask);

            foreground.create(frame.size(), frame.type());
            foreground.setTo(0);
            frame.copyTo(foreground, fgmask);

            total_fps = cv::getTickFrequency() / (cv::getTickCount() - total_time);

            reinitialize = false;
        }
        catch (const cv::Exception&)
        {
            // Allocation failure (most likely on the GPU): overlay an
            // error banner instead of aborting the demo.
            std::string msg = "Can't allocate memory";

            int fontFace = cv::FONT_HERSHEY_DUPLEX;
            int fontThickness = 2;
            double fontScale = 0.8;

            cv::Size fontSize = cv::getTextSize("T[]", fontFace, fontScale, fontThickness, 0);

            cv::Point org(outImg.cols / 2, outImg.rows / 2);
            org.x -= fontSize.width;
            org.y -= fontSize.height / 2;

            // Dark halo first, red text on top for readability.
            cv::putText(outImg, msg, org, fontFace, fontScale, cv::Scalar(0,0,0,255), 5 * fontThickness / 2, 16);
            cv::putText(outImg, msg, org, fontFace, fontScale, CV_RGB(255, 0, 0), fontThickness, 16);
        }

        displayState(outImg, proc_fps, total_fps);

        cv::imshow("Background Subtraction Demo", outImg);
        cv::imshow("Foreground", foreground);

        processKey(cv::waitKey(30) & 0xff);
    }
}
bool FindPigBlobs(IplImage *srcImg, IplImage *maskImg,void* userdata) { assert(CV_IS_IMAGE(srcImg) && CV_IS_IMAGE(maskImg) && srcImg->nChannels==3 && maskImg->nChannels==1 && srcImg->width==maskImg->width && srcImg->height==maskImg->height && userdata); findpigblobs_userdata* param = (findpigblobs_userdata*)userdata; CvGaussBGModel *bg_model=param->bg_model; int *table=param->LUPTable; int rows=param->rows; int cols=param->cols; CvMemStorage *storage=param->storage; int& nModelFrames=param->modelframes; bool isRemoveShadow=param->isRemoveShadow; nModelFrames=nModelFrames< 0 ? 0 : nModelFrames; IplImage*mask=cvCreateImage(cvGetSize(srcImg),IPL_DEPTH_8U,1); IplImage *tmpImg = cvCreateImage(cvSize(srcImg->width, srcImg->height), srcImg->depth, srcImg->nChannels); IplImage *imgs[4]; for(int i=0; i<4; ++i) { imgs[i]= cvCreateImage(cvSize(srcImg->width, srcImg->height), srcImg->depth, 1); } cvZero(maskImg); cvCvtColor(srcImg,tmpImg,CV_BGR2YCrCb); cvSplit(tmpImg,imgs[0], imgs[1], imgs[2], 0); cvCvtColor(srcImg,tmpImg,CV_BGR2HSV); cvSplit(tmpImg, 0, imgs[3], 0, 0); cvLookUpTable(table,rows,cols,5,imgs,mask); cvErode(mask,mask,NULL,1); cvDilate(mask,mask,0,4); cvClearMemStorage(storage); CvSeq *contour=GetMaxContour(mask,storage); if (!contour){ return false; } cvDrawContours(maskImg,contour,cvScalarAll(255),cvScalarAll(255),-1,CV_FILLED, 8 ); CvRect maxcontourRc=((CvContour*)contour)->rect; cvUpdateBGStatModel( srcImg,(CvBGStatModel *)bg_model, -1.0 ); if (isRemoveShadow){ bool b=GetAvgBackgroudImg(srcImg,tmpImg,0.1);//平均背景法生成背景,tmpImg为背景 assert(b); } if (nModelFrames>20){//20帧之后才产生背景 nModelFrames=21; if (isRemoveShadow){ bool b=ShadowDetect(srcImg,tmpImg,bg_model->foreground,0.5,1,20,20,mask);//检测阴影 assert(b); cvSub(bg_model->foreground,mask,mask);//得到去除阴影后的二值图 }else{ cvCopy(bg_model->foreground,mask); } cvErode(mask,mask); cvDilate(mask,mask,0,5); cvClearMemStorage(storage); cvFindContours( mask, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE 
); for( ; contour != 0; contour = contour->h_next ){ CvRect rc=((CvContour*)contour)->rect; if(!CommonArea(rc,maxcontourRc,0.3) && rc.width*rc.height>mask->imageSize*0.008 && rc.x+rc.width>40 && rc.y+rc.height>40)//删除位于保温室的误检测 { cvDrawContours(maskImg,contour,cvScalarAll(255),cvScalarAll(255),-1,CV_FILLED, 8 ); } } }else{ nModelFrames++; } cvReleaseImage(&mask); cvReleaseImage(&tmpImg); for(int i=0; i<4; ++i) { cvReleaseImage(&imgs[i]); } return true; }
// Demo entry point comparing several foreground-segmentation methods on
// the same video: MOG (Stauffer 2001), FGD/Bayes (Liyuan Li 2003),
// three-frame differencing, and two codebook variants (with and without
// on-line background update).  Keyboard input tunes codebook parameters.
int main(int argc, char** argv)
{
    IplImage* temp1 = NULL;            // previous frame (smoothed)
    IplImage* temp2 = NULL;            // frame before previous
    IplImage* result = NULL;           // three-frame-difference output
    IplImage* result1 = NULL;
    IplImage* result2 = NULL;
    CvBGStatModel* bg_model=0;         // MOG model
    CvBGStatModel* bg_model1=0;        // FGD (Bayes) model
    IplImage* rawImage = 0;
    IplImage* yuvImage = 0;            // YCrCb copy fed to the codebooks
    IplImage* rawImage1 = 0;           // Gaussian-smoothed rawImage
    IplImage* pFrImg = 0;
    IplImage* pFrImg1= 0;              // codebook mask, no on-line update
    IplImage* pFrImg2= 0;              // codebook mask, with on-line update
    IplImage* ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

    int c,n;

    // Per-channel codebook classification bounds.
    maxMod[0] = 25;
    minMod[0] = 35;
    maxMod[1] = 8;
    minMod[1] = 8;
    maxMod[2] = 8;
    minMod[2] = 8;

    // NOTE(review): the command line is overridden with a hard-coded
    // video file here, which makes the argc checks below dead code.
    argc=2;
    argv[1]="intelligentroom_raw.avi";

    if( argc > 2 )
    {
        fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
        return -1;
    }

    if (argc ==1)
        if( !(capture = cvCaptureFromCAM(-1)))
        {
            fprintf(stderr, "Can not open camera.\n");
            return -2;
        }

    if(argc == 2)
        if( !(capture = cvCaptureFromFile(argv[1])))
        {
            fprintf(stderr, "Can not open video file %s\n", argv[1]);
            return -2;
        }

    bool pause = false;
    bool singlestep = false;

    if( capture )
    {
        // One window per method under comparison.
        cvNamedWindow( "原视频序列图像", 1 );
        cvNamedWindow("不实时更新的Codebook算法[本文]",1);
        cvNamedWindow("实时更新的Codebook算法[本文]",1);
        cvNamedWindow("基于MOG的方法[Chris Stauffer'2001]",1);
        cvNamedWindow("三帧差分", 1);
        cvNamedWindow("基于Bayes decision的方法[Liyuan Li'2003]", 1);

        cvMoveWindow("原视频序列图像", 0, 0);
        cvMoveWindow("不实时更新的Codebook算法[本文]", 360, 0);
        cvMoveWindow("实时更新的Codebook算法[本文]", 720, 350);
        cvMoveWindow("基于MOG的方法[Chris Stauffer'2001]", 0, 350);
        cvMoveWindow("三帧差分", 720, 0);
        cvMoveWindow("基于Bayes decision的方法[Liyuan Li'2003]",360, 350);

        int nFrmNum = -1;

        for(;;)
        {
            if(!pause)
            {
                rawImage = cvQueryFrame( capture );
                ++nFrmNum;
                printf("第%d帧\n",nFrmNum);
                if(!rawImage)
                    break;
            }
            if(singlestep)
            {
                pause = true;
            }

            // First frame: allocate all buffers and build the models.
            if(0 == nFrmNum)
            {
                printf(". . . wait for it . . .\n");
                temp1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                temp2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                result1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);

                bg_model = cvCreateGaussianBGModel(rawImage);
                bg_model1 = cvCreateFGDStatModel(rawImage);

                rawImage1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
                yuvImage = cvCloneImage(rawImage);
                pFrImg = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                pFrImg1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                pFrImg2 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
                ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );

                // One codebook per pixel for each of the three variants.
                imageLen = rawImage->width*rawImage->height;
                cA = new codeBook [imageLen];
                cC = new codeBook [imageLen];
                cD = new codeBook [imageLen];
                for(int f = 0; f<imageLen; f++)
                {
                    cA[f].numEntries = 0; cA[f].t = 0;
                    cC[f].numEntries = 0; cC[f].t = 0;
                    cD[f].numEntries = 0; cD[f].t = 0;
                }
                for(int nc=0; nc<nChannels;nc++)
                {
                    cbBounds[nc] = 10;
                }
                ch[0] = true;
                ch[1] = true;
                ch[2] = true;
            }

            if( rawImage )
            {
                if(!pause)
                {
                    cvSmooth(rawImage, rawImage1, CV_GAUSSIAN,3,3);

                    // Three-frame differencing: AND of two successive
                    // change masks.  NOTE(review): on the very first
                    // frames temp1/temp2 are still uninitialized images.
                    cvChangeDetection(temp1, temp2, result1);
                    cvChangeDetection(rawImage1, temp1, result2);
                    cvAnd(result1, result2, result, NULL);
                    cvCopy(temp1,temp2, NULL);
                    cvCopy(rawImage,temp1, NULL);

                    // Advance both statistical models.
                    cvUpdateBGStatModel( rawImage, bg_model );
                    cvUpdateBGStatModel( rawImage, bg_model1 );
                }

                cvCvtColor( rawImage1, yuvImage, CV_BGR2YCrCb );

                // Codebook training phase: frames 1 .. T-1.
                if( !pause && nFrmNum >= 1 && nFrmNum < T )
                {
                    pColor = (uchar *)((yuvImage)->imageData);
                    for(int c=0; c<imageLen; c++)
                    {
                        update_codebook_model(pColor, cA[c],cbBounds,nChannels);
                        trainig_codebook(pColor, cC[c],cbBounds,nChannels);
                        pColor += 3;
                    }
                }

                // End of training: prune stale codebook entries once.
                if( nFrmNum == T)
                {
                    for(c=0; c<imageLen; c++)
                    {
                        clear_stale_entries(cA[c]);
                        training_clear_stale_entries(cC[c]);
                    }
                }

                // Detection phase: frames after T.
                if(nFrmNum > T)
                {
                    pColor = (uchar *)((yuvImage)->imageData);
                    uchar maskPixelCodeBook;
                    uchar maskPixelCodeBook1;
                    uchar maskPixelCodeBook2;
                    uchar *pMask = (uchar *)((pFrImg)->imageData);
                    uchar *pMask1 = (uchar *)((pFrImg1)->imageData);
                    uchar *pMask2 = (uchar *)((pFrImg2)->imageData);
                    for(int c=0; c<imageLen; c++)
                    {
                        // Output of the codebook algorithm WITHOUT
                        // automatic background updating.
                        maskPixelCodeBook1=background_Diff(pColor, cA[c],nChannels,minMod,maxMod);
                        *pMask1++ = maskPixelCodeBook1;

                        // Output of the codebook algorithm WITH automatic
                        // background updating: matched pixels refresh cC
                        // via the candidate book cD, otherwise stale
                        // entries are aged out.
                        if ( !pause && det_update_codebook_cC(pColor, cC[c],cbBounds,nChannels))
                        {
                            det_update_codebook_cD(pColor, cD[c],cbBounds,nChannels, nFrmNum);
                            realtime_clear_stale_entries_cD(cD[c], nFrmNum);
                            cD_to_cC(cD[c], cC[c], (nFrmNum - T)/5);
                        }
                        else
                        {
                            realtime_clear_stale_entries_cC(cC[c], nFrmNum);
                        }

                        maskPixelCodeBook2=background_Diff(pColor, cC[c],nChannels,minMod,maxMod);
                        *pMask2++ = maskPixelCodeBook2;

                        pColor += 3;
                    }

                    cvCopy(pFrImg2,ImaskCodeBookCC);

                    if(!pause)
                    {
                        count_Segmentation(cC,yuvImage,nChannels,minMod,maxMod);
                        forgratio = (float) (fgcount)/ imageLen;
                    }
                }

                // Flip image origin so all windows display right side up.
                bg_model1->foreground->origin=1;
                bg_model->foreground->origin=1;
                pFrImg->origin=1;
                pFrImg1->origin=1;
                pFrImg2->origin=1;
                ImaskCodeBookCC->origin=1;
                result->origin=1;

                //connected_Components(pFrImg1,1,40);
                //connected_Components(pFrImg2,1,40);

                cvShowImage("基于MOG的方法[Chris Stauffer'2001]", bg_model->foreground);
                cvShowImage( "原视频序列图像", rawImage );
                cvShowImage("三帧差分", result);
                cvShowImage( "不实时更新的Codebook算法[本文]",pFrImg1);
                cvShowImage("实时更新的Codebook算法[本文]",pFrImg2);
                cvShowImage("基于Bayes decision的方法[Liyuan Li'2003]", bg_model1->foreground);

                c = cvWaitKey(1)&0xFF;
                //End processing on ESC, q or Q
                if(c == 27 || c == 'q' || c == 'Q')
                    break;
                //Else check for user input
                switch(c)
                {
                case 'h':
                    help();
                    break;
                case 'p':
                    pause ^= 1;
                    break;
                case 's':
                    singlestep = 1;
                    pause = false;
                    break;
                case 'r':
                    pause = false;
                    singlestep = false;
                    break;
                //CODEBOOK PARAMS
                case 'y':
                case '0':
                    ch[0] = 1;
                    ch[1] = 0;
                    ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++)
                        printf("%d, ",ch[n]);
                    printf("\n");
                    break;
                case 'u':
                case '1':
                    ch[0] = 0;
                    ch[1] = 1;
                    ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++)
                        printf("%d, ",ch[n]);
                    printf("\n");
                    break;
                case 'v':
                case '2':
                    ch[0] = 0;
                    ch[1] = 0;
                    ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++)
                        printf("%d, ",ch[n]);
                    printf("\n");
                    break;
                case 'a': //All
                case '3':
                    ch[0] = 1;
                    ch[1] = 1;
                    ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++)
                        printf("%d, ",ch[n]);
                    printf("\n");
                    break;
                case 'b': //both u and v together
                    ch[0] = 0;
                    ch[1] = 1;
                    ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for(n=0; n<nChannels; n++)
                        printf("%d, ",ch[n]);
                    printf("\n");
                    break;
                case 'z': // increment Fadd
                    printf(" Fadd加1 ");
                    Fadd += 1;
                    printf("Fadd=%.4d\n",Fadd);
                    break;
                case 'x': // decrement Fadd
                    printf(" Fadd减1 ");
                    Fadd -= 1;
                    printf("Fadd=%.4d\n",Fadd);
                    break;
                case 'n': // increment Tavgstale
                    printf(" Tavgstale加1 ");
                    Tavgstale += 1;
                    printf("Tavgstale=%.4d\n",Tavgstale);
                    break;
                case 'm': // decrement Tavgstale
                    printf(" Tavgstale减1 ");
                    Tavgstale -= 1;
                    printf("Tavgstale=%.4d\n",Tavgstale);
                    break;
                case 'i': //modify max classification bounds (max bound goes higher)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n])
                            maxMod[n] += 1;
                        printf("%.4d,",maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'o': //modify max classification bounds (max bound goes lower)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n])
                            maxMod[n] -= 1;
                        printf("%.4d,",maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'k': //modify min classification bounds (min bound goes lower)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n])
                            minMod[n] += 1;
                        printf("%.4d,",minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                case 'l': //modify min classification bounds (min bound goes higher)
                    for(n=0; n<nChannels; n++)
                    {
                        if(ch[n])
                            minMod[n] -= 1;
                        printf("%.4d,",minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                }
            }
        }

        // Tear down: models, windows, images, per-pixel codebooks.
        cvReleaseCapture( &capture );
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model1);
        cvDestroyWindow( "原视频序列图像" );
        cvDestroyWindow( "不实时更新的Codebook算法[本文]");
        cvDestroyWindow( "实时更新的Codebook算法[本文]");
        cvDestroyWindow( "基于MOG的方法[Chris Stauffer'2001]");
        cvDestroyWindow( "三帧差分" );
        cvDestroyWindow( "基于Bayes decision的方法[Liyuan Li'2003]");
        cvReleaseImage(&temp1);
        cvReleaseImage(&temp2);
        cvReleaseImage(&result);
        cvReleaseImage(&result1);
        cvReleaseImage(&result2);
        cvReleaseImage(&pFrImg);
        cvReleaseImage(&pFrImg1);
        cvReleaseImage(&pFrImg2);
        if(yuvImage)
            cvReleaseImage(&yuvImage);
        if(rawImage)
            cvReleaseImage(&rawImage);
        if(rawImage1)
            cvReleaseImage(&rawImage1);
        if(ImaskCodeBookCC)
            cvReleaseImage(&ImaskCodeBookCC);
        delete [] cA;
        delete [] cC;
        delete [] cD;
    }
    else
    {
        printf("\n\nDarn, Something wrong with the parameters\n\n");
        help();
    }
    return 0;
}
// update Gaussian Mixture Model void GMM::update(IplImage* curr){ if(bgModel == NULL){ bgModel = cvCreateGaussianBGModel(curr, NULL); }else{ // check channels IplImage* curr_gray; if(curr->nChannels == 3){ curr_gray = cvCreateImage(cvGetSize(curr), IPL_DEPTH_8U, 1); cvCvtColor(curr, curr_gray, CV_RGB2GRAY); cvEqualizeHist(curr_gray, curr_gray); }else if(curr->nChannels == 1){ curr_gray = curr; cvEqualizeHist(curr_gray, curr_gray); }else{ // exception } //learning rate if(frameCnt++ <= learnCnt){ cvUpdateBGStatModel(curr_gray, bgModel, -1); // learn }else{ cvUpdateBGStatModel(curr_gray, bgModel, 0); } if(curr->nChannels == 3){ cvReleaseImage(&curr_gray); } } if(fgclone){ cvCopy(bgModel->foreground, fgclone, NULL); // remove salt & pepper noise cvSmooth(fgclone, fgclone, CV_MEDIAN, 5); }else{ fgclone = cvCloneImage(bgModel->foreground); //remove salt & pepper noise cvSmooth(fgclone, fgclone, CV_MEDIAN, 5); } }
// chain function - this function does the actual processing:
// wraps the incoming buffer as an IplImage, advances the ACMMM2003 (FGD)
// background model, optionally emits mask/ROI custom events downstream,
// and pushes the (possibly annotated) buffer on.
static GstFlowReturn gst_bgfg_acmmm2003_chain(GstPad *pad, GstBuffer *buf)
{
    GstBgFgACMMM2003 *filter;

    // sanity checks
    g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
    g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);

    filter = GST_BGFG_ACMMM2003(GST_OBJECT_PARENT(pad));
    // Point the pre-allocated image header at the buffer's pixel data
    // (no copy is made here).
    filter->image->imageData = (gchar*) GST_BUFFER_DATA(buf);

    // the bg model must be initialized with a valid image; thus we delay its
    // creation until the chain function
    if (filter->model == NULL) {
        filter->model = cvCreateFGDStatModel(filter->image, NULL);

        // Override the model defaults with the element's properties.
        ((CvFGDStatModel*)filter->model)->params.minArea = filter->min_area;
        ((CvFGDStatModel*)filter->model)->params.erode_iterations  = filter->n_erode_iterations;
        ((CvFGDStatModel*)filter->model)->params.dilate_iterations = filter->n_dilate_iterations;

        // First frame only seeds the model; pass the buffer through.
        return gst_pad_push(filter->srcpad, buf);
    }

    cvUpdateBGStatModel(filter->image, filter->model, -1);

    // send mask event, if requested
    if (filter->send_mask_events) {
        GstStructure *structure;
        GstEvent     *event;
        GArray       *data_array;
        IplImage     *mask;

        // prepare and send custom event with the mask surface
        mask = filter->model->foreground;
        data_array = g_array_sized_new(FALSE, FALSE, sizeof(mask->imageData[0]), mask->imageSize);
        g_array_append_vals(data_array, mask->imageData, mask->imageSize);

        structure = gst_structure_new("bgfg-mask",
                                      "data",      G_TYPE_POINTER, data_array,
                                      "width",     G_TYPE_UINT,    mask->width,
                                      "height",    G_TYPE_UINT,    mask->height,
                                      "depth",     G_TYPE_UINT,    mask->depth,
                                      "channels",  G_TYPE_UINT,    mask->nChannels,
                                      "timestamp", G_TYPE_UINT64,  GST_BUFFER_TIMESTAMP(buf),
                                      NULL);

        event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
        gst_pad_push_event(filter->srcpad, event);
        g_array_unref(data_array);

        if (filter->display) {
            // shade the regions not selected by the acmmm2003 algorithm:
            // invert the mask, darken the background pixels, invert back.
            cvXorS(mask, CV_RGB(255, 255, 255), mask, NULL);
            cvSubS(filter->image, CV_RGB(191, 191, 191), filter->image, mask);
            cvXorS(mask, CV_RGB(255, 255, 255), mask, NULL);
        }
    }

    if (filter->send_roi_events) {
        CvSeq  *contour;
        CvRect *bounding_rects;
        guint   i, j, n_rects;

        // count # of contours, allocate array to store the bounding rectangles
        for (contour = filter->model->foreground_regions, n_rects = 0;
             contour != NULL;
             contour = contour->h_next, ++n_rects);

        bounding_rects = g_new(CvRect, n_rects);

        for (contour = filter->model->foreground_regions, i = 0; contour != NULL; contour = contour->h_next, ++i)
            bounding_rects[i] = cvBoundingRect(contour, 0);

        // Merge overlapping rectangles; a merged-away entry is marked as
        // collapsed (NULL_RECT) so later passes skip it.
        for (i = 0; i < n_rects; ++i) {
            // skip collapsed rectangles
            if ((bounding_rects[i].width == 0) || (bounding_rects[i].height == 0)) continue;

            for (j = (i + 1); j < n_rects; ++j) {
                // skip collapsed rectangles
                if ((bounding_rects[j].width == 0) || (bounding_rects[j].height == 0)) continue;

                if (rect_overlap(bounding_rects[i], bounding_rects[j])) {
                    bounding_rects[i] = rect_collapse(bounding_rects[i], bounding_rects[j]);
                    bounding_rects[j] = NULL_RECT;
                }
            }
        }

        // Emit one ROI event per surviving rectangle.
        for (i = 0; i < n_rects; ++i) {
            GstEvent     *event;
            GstStructure *structure;
            CvRect        r;

            // skip collapsed rectangles
            r = bounding_rects[i];
            if ((r.width == 0) || (r.height == 0)) continue;

            structure = gst_structure_new("bgfg-roi",
                                          "x",         G_TYPE_UINT,   r.x,
                                          "y",         G_TYPE_UINT,   r.y,
                                          "width",     G_TYPE_UINT,   r.width,
                                          "height",    G_TYPE_UINT,   r.height,
                                          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
                                          NULL);

            event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
            gst_pad_send_event(filter->sinkpad, event);

            if (filter->verbose)
                GST_INFO("[roi] x: %d, y: %d, width: %d, height: %d\n",
                         r.x, r.y, r.width, r.height);

            if (filter->display)
                cvRectangle(filter->image,
                            cvPoint(r.x, r.y),
                            cvPoint(r.x + r.width, r.y + r.height),
                            CV_RGB(0, 0, 255), 1, 0, 0);
        }

        g_free(bounding_rects);
    }

    // When drawing overlays, make sure the buffer sees the edited pixels.
    if (filter->display)
        gst_buffer_set_data(buf, (guchar*) filter->image->imageData, filter->image->imageSize);

    return gst_pad_push(filter->srcpad, buf);
}