// ==== Example #1 ====
//--------------------------------------------------------------
// One-time initialization: start the video grabber at QVGA and create
// the two background-statistics models (Gaussian mixture and FGD/Bayes)
// that setup()'s caller will update frame by frame.
void testApp::setup(){
	
	// Grab live video at 320x240; every processing image shares this size.
	vidGrabber.setVerbose(true);
	vidGrabber.initGrabber(320,240);
	
    colorImg.allocate(320,240);
	
	// Output images for the Gaussian-mixture (MOG) model.
	gauss_foregroundImg.allocate(320,240);
	gauss_backgroundImg.allocate(320,240);
	
	// Output images for the FGD (Bayes decision) model.
	fgd_foregroundImg.allocate(320,240);
	fgd_backgroundImg.allocate(320,240);
	
	// Tuning parameters for the Gaussian background model. Kept on the
	// stack — the original heap-allocated these with `new` and never
	// freed them (a leak), and never passed them to the model either:
	// the model below is created with its built-in defaults.
    CvGaussBGStatModelParams params;
    params.win_size=2;
    params.n_gauss=5;
    params.bg_threshold=0.7;
    params.std_threshold=3.5;
    params.minArea=15;
    params.weight_init=0.05;
    params.variance_init=30;
	
    //gauss_bgModel = cvCreateGaussianBGModel(colorImg.getCvImage(), &params);
	gauss_bgModel = cvCreateGaussianBGModel(colorImg.getCvImage());
	
	fgd_bgModel = cvCreateFGDStatModel(colorImg.getCvImage());
}
// Creates and returns an initialized background statistics model of the
// requested type.
// Parameters:
//      first_frame - frame from the video sequence, used to initialize the model
//      model_type  - type of BG model (CV_BG_MODEL_MOG, CV_BG_MODEL_FGD, ...)
//      params      - (optional) algorithm parameters; NULL selects the defaults
// Returns NULL for an unrecognized model type.
static CvBGStatModel* cvCreateBGStatModel( IplImage* first_frame, int model_type, void* params )
{
    switch( model_type )
    {
    case CV_BG_MODEL_FGD:
    case CV_BG_MODEL_FGD_SIMPLE:
        return cvCreateFGDStatModel( first_frame, (CvFGDStatModelParams*)params );
    case CV_BG_MODEL_MOG:
        return cvCreateGaussianBGModel( first_frame, (CvGaussBGStatModelParams*)params );
    default:
        return NULL;
    }
}
// ######################################################################
// Process one input frame: run OpenCV's FGD background-subtraction model
// on it and publish the resulting foreground-probability map as this
// channel's output map. Compiles to a no-op when OpenCV is unavailable.
void ForegroundDetectionChannel::doInput(const InputFrame& inframe)
{

#ifdef HAVE_OPENCV
  ASSERT(inframe.colorByte().initialized());
  LINFO("Input to Foreground Detection Channel ok.");

  //Convert the input frame to opencv format
  IplImage* inFrame_cv = img2ipl(inframe.colorByte());

  //If the statistics model has not been created (i.e. this is the first frame),
  //then create it.
  if(itsStatModel_cv == NULL)
    itsStatModel_cv = cvCreateFGDStatModel( inFrame_cv );

  //Update the statistics model
  cvUpdateBGStatModel( inFrame_cv, itsStatModel_cv );

  //Assign the foreground and background maps
  //OpenCV clears out the foreground and background memory at the beginning
  //of every icvUpdateFGDStatModel, so let's do a deep copy of the image
  //data just to be safe.
  //Also, because the source image is byte-valued, let's divide by 255 to get
  //a true probability
  itsForegroundMap = ipl2gray(itsStatModel_cv->foreground).deepcopy() / 255.0;

  //Rescale the image to the correct dimensions
  itsMap = rescale(itsForegroundMap, this->getMapDims());

  //Log the dynamic range of the rescaled map (diagnostics only).
  float mi, ma;
  getMinMax(itsMap,mi, ma);
  LINFO("FOREGROUND MAP RANGE: [%f .. %f]", mi, ma);

  //Free the memory allocated to the input frame - OpenCV makes it's own deep
  //copy of this data internally.
  cvReleaseImage( &inFrame_cv );

#endif


}
// ==== Example #4 ====
  // Builds an FGD background-statistics model for 'firstFrame', taking each
  // tuning parameter from the dictionary when present and falling back to
  // this factory's defaults otherwise.
  CvBGStatModel* BGModelCvFGDFactory::createModel(const jderobotutil::ParamDict params, IplImage* firstFrame) const{
    // Start from the factory defaults, then override field by field.
    CvFGDStatModelParams p = defaultParams;
    p.Lc   = params.getParamAsIntWithDefault("Lc",   p.Lc);
    p.N1c  = params.getParamAsIntWithDefault("N1c",  p.N1c);
    p.N2c  = params.getParamAsIntWithDefault("N2c",  p.N2c);
    p.Lcc  = params.getParamAsIntWithDefault("Lcc",  p.Lcc);
    p.N1cc = params.getParamAsIntWithDefault("N1cc", p.N1cc);
    p.N2cc = params.getParamAsIntWithDefault("N2cc", p.N2cc);
    p.is_obj_without_holes = params.getParamAsIntWithDefault("is_obj_without_holes",
							     p.is_obj_without_holes);
    p.perform_morphing = params.getParamAsIntWithDefault("perform_morphing",
							 p.perform_morphing);
    p.alpha1  = params.getParamAsFloatWithDefault("alpha1",  p.alpha1);
    p.alpha2  = params.getParamAsFloatWithDefault("alpha2",  p.alpha2);
    p.alpha3  = params.getParamAsFloatWithDefault("alpha3",  p.alpha3);
    p.delta   = params.getParamAsFloatWithDefault("delta",   p.delta);
    p.T       = params.getParamAsFloatWithDefault("T",       p.T);
    p.minArea = params.getParamAsFloatWithDefault("minArea", p.minArea);
    // cvCreateFGDStatModel copies the params, so a stack instance is safe.
    return cvCreateFGDStatModel(firstFrame,&p);
  }
// Main processing loop: pull frames from the current source, run the
// selected background-subtraction method (GPU or CPU path), post-process
// the foreground mask, and display the composited results until exit.
void App::process()
{
    if (sources.empty())
    {
        std::cout << "Using default frames source...\n";
        sources.push_back(new VideoSource("data/bgfg/haut-640x480.avi"));
    }

    cv::Mat frame;
    cv::gpu::GpuMat d_frame;
    IplImage ipl_frame;  // header-only view of 'frame' for the legacy C API

    cv::Mat fgmask;
    cv::gpu::GpuMat d_fgmask;
    cv::Mat buf;         // scratch buffer reused by cv::filterSpeckles

    cv::Mat outImg;
    cv::Mat foreground;

    while (!exited)
    {
        int64 total_time = cv::getTickCount();

        sources[curSource]->next(frame);
        d_frame.upload(frame);
        ipl_frame = frame;  // shares pixel data with 'frame'; no copy made
        frame.copyTo(outImg);

        double total_fps = 0.0;
        double proc_fps = 0.0;

        try
        {
            int64 proc_time = cv::getTickCount();

            switch (method) {
            case MOG:
                {
                    if (useGpu)
                    {
                        if (reinitialize)
                            mog_gpu.initialize(d_frame.size(), d_frame.type());
                        mog_gpu(d_frame, d_fgmask, 0.01f);  // 0.01 = learning rate
                    }
                    else
                    {
                        if (reinitialize)
                            mog_cpu.initialize(frame.size(), frame.type());
                        mog_cpu(frame, fgmask, 0.01);
                    }
                    break;
                }
            //case MOG2:
            //    {
            //        if (useGpu)
            //        {
            //            if (reinitialize)
            //                mog2_gpu.initialize(d_frame.size(), d_frame.type());
            //            mog2_gpu(d_frame, d_fgmask);
            //        }
            //        else
            //        {
            //            if (reinitialize)
            //            {
            //                mog2_cpu.set("detectShadows", false);
            //                mog2_cpu.initialize(frame.size(), frame.type());
            //            }
            //            mog2_cpu(frame, fgmask);
            //        }
            //        break;
            //    }
            case FGD:
                {
                    if (useGpu)
                    {
                        if (reinitialize)
                            fgd_gpu.create(d_frame);
                        fgd_gpu.update(d_frame);
                        fgd_gpu.foreground.copyTo(d_fgmask);
                    }
                    else
                    {
                        // NOTE(review): if 'fgd_cpu' already holds a model,
                        // reassigning here looks like it leaks the old one —
                        // consider cvReleaseBGStatModel before recreating, but
                        // verify fgd_cpu's declared type (raw pointer vs cv::Ptr)
                        // before changing.
                        if (reinitialize)
                            fgd_cpu = cvCreateFGDStatModel(&ipl_frame);
                        cvUpdateBGStatModel(&ipl_frame, fgd_cpu);
                        cv::Mat(fgd_cpu->foreground).copyTo(fgmask);
                    }
                    break;
                }
            //case GMG:
            //    {
            //        if (useGpu)
            //        {
            //            if (reinitialize)
            //                gmg_gpu.initialize(d_frame.size());
            //            gmg_gpu(d_frame, d_fgmask);
            //        }
            //        else
            //        {
            //            if (reinitialize)
            //                gmg_cpu.initialize(frame.size(), 0, 255);
            //            gmg_cpu(frame, fgmask);
            //        }
            //        break;
            //    }
            case VIBE:
                {
                    // VIBE has a GPU implementation only; CPU path is a no-op.
                    if (useGpu)
                    {
                        if (reinitialize)
                            vibe_gpu.initialize(d_frame);
                        vibe_gpu(d_frame, d_fgmask);
                    }
                    break;
                }
            }

            proc_fps = cv::getTickFrequency() / (cv::getTickCount() - proc_time);

            if (useGpu)
                d_fgmask.download(fgmask);

            // Remove small speckle blobs (area < 100) from the binary mask.
            cv::filterSpeckles(fgmask, 0, 100, 1, buf);

            // Tint the detected foreground regions in the display image.
            cv::add(outImg, cv::Scalar(100, 100, 0), outImg, fgmask);

            foreground.create(frame.size(), frame.type());
            foreground.setTo(0);
            frame.copyTo(foreground, fgmask);

            total_fps = cv::getTickFrequency() / (cv::getTickCount() - total_time);

            reinitialize = false;
        }
        catch (const cv::Exception&)
        {
            // Most likely an allocation failure (e.g. GPU out of memory):
            // overlay an error message instead of aborting the loop.
            std::string msg = "Can't allocate memory";

            int fontFace = cv::FONT_HERSHEY_DUPLEX;
            int fontThickness = 2;
            double fontScale = 0.8;

            cv::Size fontSize = cv::getTextSize("T[]", fontFace, fontScale, fontThickness, 0);

            cv::Point org(outImg.cols / 2, outImg.rows / 2);
            org.x -= fontSize.width;
            org.y -= fontSize.height / 2;

            // Draw a dark halo first, then the red text on top for contrast.
            cv::putText(outImg, msg, org, fontFace, fontScale, cv::Scalar(0,0,0,255), 5 * fontThickness / 2, 16);
            cv::putText(outImg, msg, org, fontFace, fontScale, CV_RGB(255, 0, 0), fontThickness, 16);
        }

        displayState(outImg, proc_fps, total_fps);

        cv::imshow("Background Subtraction Demo", outImg);
        cv::imshow("Foreground", foreground);

        processKey(cv::waitKey(30) & 0xff);
    }
}
int main(int argc, char** argv)
{
	IplImage* temp1 = NULL;
    IplImage* temp2 = NULL;
    IplImage* result = NULL;
    IplImage* result1 = NULL;
    IplImage* result2 = NULL;

	CvBGStatModel* bg_model=0;
	CvBGStatModel* bg_model1=0;

    IplImage* rawImage = 0; 
	IplImage* yuvImage = 0; 
	IplImage* rawImage1 = 0;
    IplImage* pFrImg = 0;
	IplImage* pFrImg1= 0;
	IplImage* pFrImg2= 0;
	IplImage* ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

	int c,n;

	maxMod[0] = 25; 
	minMod[0] = 35;
	maxMod[1] = 8;
	minMod[1] = 8;
	maxMod[2] = 8;
	minMod[2] = 8;

    argc=2;
    argv[1]="intelligentroom_raw.avi";
    if( argc > 2 )
	{
		fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
		return -1;
	}
 
    if (argc ==1)
		if( !(capture = cvCaptureFromCAM(-1)))
		{
			fprintf(stderr, "Can not open camera.\n");
			return -2;
		}

    if(argc == 2)
		if( !(capture = cvCaptureFromFile(argv[1])))
		{
			fprintf(stderr, "Can not open video file %s\n", argv[1]);
			return -2;
		}

	bool pause = false;
	bool singlestep = false;

    if( capture )
    {
        cvNamedWindow( "原视频序列图像", 1 );
		cvNamedWindow("不实时更新的Codebook算法[本文]",1);
		cvNamedWindow("实时更新的Codebook算法[本文]",1);
		cvNamedWindow("基于MOG的方法[Chris Stauffer'2001]",1);
		cvNamedWindow("三帧差分", 1);
		cvNamedWindow("基于Bayes decision的方法[Liyuan Li'2003]", 1);
        
		cvMoveWindow("原视频序列图像", 0, 0);
		cvMoveWindow("不实时更新的Codebook算法[本文]", 360, 0);
		cvMoveWindow("实时更新的Codebook算法[本文]", 720, 350);
		cvMoveWindow("基于MOG的方法[Chris Stauffer'2001]", 0, 350);
		cvMoveWindow("三帧差分", 720, 0);
        cvMoveWindow("基于Bayes decision的方法[Liyuan Li'2003]",360, 350);
        int nFrmNum = -1;
        for(;;)
        {
    		if(!pause)
			{
				rawImage = cvQueryFrame( capture );
				++nFrmNum;
				printf("第%d帧\n",nFrmNum);
				if(!rawImage) 
					break;
			}
			if(singlestep)
			{
				pause = true;
			}
			if(0 == nFrmNum) 
			{
				printf(". . . wait for it . . .\n"); 
				
				temp1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
				temp2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
				result1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
				result2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
				result = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);

				bg_model = cvCreateGaussianBGModel(rawImage);
                bg_model1 = cvCreateFGDStatModel(rawImage);
				rawImage1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );

				yuvImage = cvCloneImage(rawImage);
				pFrImg  = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				pFrImg1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				pFrImg2 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );

				imageLen = rawImage->width*rawImage->height;

				cA = new codeBook [imageLen];
				cC = new codeBook [imageLen];
				cD = new codeBook [imageLen];

				for(int f = 0; f<imageLen; f++)
				{
					cA[f].numEntries = 0; cA[f].t = 0;
					cC[f].numEntries = 0; cC[f].t = 0;
					cD[f].numEntries = 0; cD[f].t = 0;
				}
				for(int nc=0; nc<nChannels;nc++)
				{
					cbBounds[nc] = 10;
				}
				ch[0] = true;
				ch[1] = true;
				ch[2] = true;
			}
             
        	if( rawImage )
        	{
				if(!pause)
				{					
					cvSmooth(rawImage, rawImage1, CV_GAUSSIAN,3,3);

					cvChangeDetection(temp1, temp2, result1);
					cvChangeDetection(rawImage1, temp1, result2);
					cvAnd(result1, result2, result, NULL);
					cvCopy(temp1,temp2, NULL);
					cvCopy(rawImage,temp1, NULL);

					
					cvUpdateBGStatModel( rawImage, bg_model );
					cvUpdateBGStatModel( rawImage, bg_model1 );
				}

				cvCvtColor( rawImage1, yuvImage, CV_BGR2YCrCb );
				if( !pause && nFrmNum >= 1 && nFrmNum < T  )
				{
					pColor = (uchar *)((yuvImage)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						update_codebook_model(pColor, cA[c],cbBounds,nChannels);
					    trainig_codebook(pColor, cC[c],cbBounds,nChannels);
						pColor += 3;
					}
				}

				if( nFrmNum == T)
				{
					for(c=0; c<imageLen; c++)
					{
						clear_stale_entries(cA[c]);
						training_clear_stale_entries(cC[c]);
					}
				}

				if(nFrmNum > T) 
				{
					pColor = (uchar *)((yuvImage)->imageData);
					uchar maskPixelCodeBook;
					uchar maskPixelCodeBook1;
					uchar maskPixelCodeBook2;
					uchar *pMask = (uchar *)((pFrImg)->imageData);
					uchar *pMask1 = (uchar *)((pFrImg1)->imageData);
					uchar *pMask2 = (uchar *)((pFrImg2)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						//本文中不带自动背景更新的算法输出
						maskPixelCodeBook1=background_Diff(pColor, cA[c],nChannels,minMod,maxMod);
                        *pMask1++ = maskPixelCodeBook1;
						
						//本文中带自动背景更新的算法输出
						if ( !pause && det_update_codebook_cC(pColor, cC[c],cbBounds,nChannels))
						{	
							det_update_codebook_cD(pColor, cD[c],cbBounds,nChannels, nFrmNum); 
							realtime_clear_stale_entries_cD(cD[c], nFrmNum);
							cD_to_cC(cD[c], cC[c], (nFrmNum - T)/5);
							
						}
						else
						{
							realtime_clear_stale_entries_cC(cC[c], nFrmNum);
						
						} 

						maskPixelCodeBook2=background_Diff(pColor, cC[c],nChannels,minMod,maxMod);
						*pMask2++ = maskPixelCodeBook2;  
						pColor += 3;
					}

					cvCopy(pFrImg2,ImaskCodeBookCC);
					if(!pause)
					{
						count_Segmentation(cC,yuvImage,nChannels,minMod,maxMod);
						forgratio = (float) (fgcount)/ imageLen;
					}
				}
				bg_model1->foreground->origin=1;
				bg_model->foreground->origin=1;				
				pFrImg->origin=1;
                pFrImg1->origin=1;
				pFrImg2->origin=1;
				ImaskCodeBookCC->origin=1;
				result->origin=1;
				//connected_Components(pFrImg1,1,40);
				//connected_Components(pFrImg2,1,40);
				
                cvShowImage("基于MOG的方法[Chris Stauffer'2001]", bg_model->foreground);
           		cvShowImage( "原视频序列图像", rawImage );
				cvShowImage("三帧差分", result);
 				cvShowImage( "不实时更新的Codebook算法[本文]",pFrImg1);
				cvShowImage("实时更新的Codebook算法[本文]",pFrImg2);
				cvShowImage("基于Bayes decision的方法[Liyuan Li'2003]", bg_model1->foreground);

	         	c = cvWaitKey(1)&0xFF;
				//End processing on ESC, q or Q
				if(c == 27 || c == 'q' || c == 'Q')
					break;
				//Else check for user input
				switch(c)
				{
					case 'h':
						help();
						break;
					case 'p':
						pause ^= 1;
						break;
					case 's':
						singlestep = 1;
						pause = false;
						break;
					case 'r':
						pause = false;
						singlestep = false;
						break;
				//CODEBOOK PARAMS
                case 'y':
                case '0':
                        ch[0] = 1;
                        ch[1] = 0;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'u':
                case '1':
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'v':
                case '2':
                        ch[0] = 0;
                        ch[1] = 0;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'a': //All
                case '3':
                        ch[0] = 1;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'b':  //both u and v together
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'z': 
					printf(" Fadd加1 ");
					Fadd += 1;
					printf("Fadd=%.4d\n",Fadd);										
					break;
				case 'x':
					printf(" Fadd减1 "); 
					Fadd -= 1;					
					printf("Fadd=%.4d\n",Fadd);										
					break;
				case 'n': 
					printf(" Tavgstale加1 ");
					Tavgstale += 1;
					printf("Tavgstale=%.4d\n",Tavgstale);										
					break;
				case 'm': 
					printf(" Tavgstale减1 ");
					Tavgstale -= 1;
					printf("Tavgstale=%.4d\n",Tavgstale);										
					break;
				case 'i': //modify max classification bounds (max bound goes higher)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							maxMod[n] += 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'o': //modify max classification bounds (max bound goes lower)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							maxMod[n] -= 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'k': //modify min classification bounds (min bound goes lower)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							minMod[n] += 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				case 'l': //modify min classification bounds (min bound goes higher)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							minMod[n] -= 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				}
            }
		}		
		cvReleaseCapture( &capture );
		cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
		cvReleaseBGStatModel((CvBGStatModel**)&bg_model1);

        cvDestroyWindow( "原视频序列图像" );
		cvDestroyWindow( "不实时更新的Codebook算法[本文]");
		cvDestroyWindow( "实时更新的Codebook算法[本文]");
		cvDestroyWindow( "基于MOG的方法[Chris Stauffer'2001]");
		cvDestroyWindow( "三帧差分" );
		cvDestroyWindow( "基于Bayes decision的方法[Liyuan Li'2003]");

		cvReleaseImage(&temp1);
		cvReleaseImage(&temp2);
		cvReleaseImage(&result);
		cvReleaseImage(&result1);
		cvReleaseImage(&result2);
		cvReleaseImage(&pFrImg);
		cvReleaseImage(&pFrImg1);
		cvReleaseImage(&pFrImg2);

		if(yuvImage) cvReleaseImage(&yuvImage);
		if(rawImage) cvReleaseImage(&rawImage);
		if(rawImage1) cvReleaseImage(&rawImage1);
		if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
		delete [] cA;
		delete [] cC;
		delete [] cD;
    }
	else
	{ 
		printf("\n\nDarn, Something wrong with the parameters\n\n"); help();
	}
    return 0;
}
// ==== Example #7 ====
// chain function - this function does the actual processing
// Runs the ACMMM2003 (FGD) background-subtraction model on each incoming
// buffer, optionally emits custom downstream events carrying the foreground
// mask ("bgfg-mask") and per-region bounding boxes ("bgfg-roi"), and pushes
// the buffer to the source pad. Returns a GstFlowReturn from the push.
static GstFlowReturn
gst_bgfg_acmmm2003_chain(GstPad *pad, GstBuffer *buf)
{
    GstBgFgACMMM2003 *filter;

    // sanity checks
    g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
    g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);

    filter = GST_BGFG_ACMMM2003(GST_OBJECT_PARENT(pad));

    // Point the pre-allocated IplImage header at the buffer's pixel data
    // (no copy; the image borrows the buffer's memory for this call).
    filter->image->imageData = (gchar*) GST_BUFFER_DATA(buf);

    // the bg model must be initialized with a valid image; thus we delay its
    // creation until the chain function
    if (filter->model == NULL) {
        filter->model = cvCreateFGDStatModel(filter->image, NULL);

        // Override selected model parameters from the element's properties.
        ((CvFGDStatModel*)filter->model)->params.minArea           = filter->min_area;
        ((CvFGDStatModel*)filter->model)->params.erode_iterations  = filter->n_erode_iterations;
        ((CvFGDStatModel*)filter->model)->params.dilate_iterations = filter->n_dilate_iterations;

        // First frame only seeds the model; pass the buffer through as-is.
        return gst_pad_push(filter->srcpad, buf);
    }

    cvUpdateBGStatModel(filter->image, filter->model, -1);

    // send mask event, if requested
    if (filter->send_mask_events) {
        GstStructure *structure;
        GstEvent     *event;
        GArray       *data_array;
        IplImage     *mask;

        // prepare and send custom event with the mask surface
        mask = filter->model->foreground;
        data_array = g_array_sized_new(FALSE, FALSE, sizeof(mask->imageData[0]), mask->imageSize);
        g_array_append_vals(data_array, mask->imageData, mask->imageSize);

        structure = gst_structure_new("bgfg-mask",
                                      "data",      G_TYPE_POINTER, data_array,
                                      "width",     G_TYPE_UINT,    mask->width,
                                      "height",    G_TYPE_UINT,    mask->height,
                                      "depth",     G_TYPE_UINT,    mask->depth,
                                      "channels",  G_TYPE_UINT,    mask->nChannels,
                                      "timestamp", G_TYPE_UINT64,  GST_BUFFER_TIMESTAMP(buf),
                                      NULL);

        event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
        gst_pad_push_event(filter->srcpad, event);
        // Drop our reference; the array data was copied into the event's
        // consumers' hands via the pointer field above.
        g_array_unref(data_array);

        if (filter->display) {
            // shade the regions not selected by the acmmm2003 algorithm
            // (double XOR restores the mask to its original values afterwards)
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
            cvSubS(filter->image, CV_RGB(191, 191, 191), filter->image, mask);
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
        }
    }

    if (filter->send_roi_events) {
        CvSeq        *contour;
        CvRect       *bounding_rects;
        guint         i, j, n_rects;

        // count # of contours, allocate array to store the bounding rectangles
        for (contour = filter->model->foreground_regions, n_rects = 0;
             contour != NULL;
             contour = contour->h_next, ++n_rects);

        bounding_rects = g_new(CvRect, n_rects);

        for (contour = filter->model->foreground_regions, i = 0; contour != NULL; contour = contour->h_next, ++i)
            bounding_rects[i] = cvBoundingRect(contour, 0);

        // Merge overlapping rectangles in place; a merged-away rectangle is
        // marked collapsed (zero width/height) and skipped thereafter.
        for (i = 0; i < n_rects; ++i) {
            // skip collapsed rectangles
            if ((bounding_rects[i].width == 0) || (bounding_rects[i].height == 0)) continue;

            for (j = (i + 1); j < n_rects; ++j) {
                // skip collapsed rectangles
                if ((bounding_rects[j].width == 0) || (bounding_rects[j].height == 0)) continue;

                if (rect_overlap(bounding_rects[i], bounding_rects[j])) {
                    bounding_rects[i] = rect_collapse(bounding_rects[i], bounding_rects[j]);
                    bounding_rects[j] = NULL_RECT;
                }
            }
        }

        for (i = 0; i < n_rects; ++i) {
            GstEvent     *event;
            GstStructure *structure;
            CvRect        r;

            // skip collapsed rectangles
            r = bounding_rects[i];
            if ((r.width == 0) || (r.height == 0)) continue;

            structure = gst_structure_new("bgfg-roi",
                                          "x",         G_TYPE_UINT,   r.x,
                                          "y",         G_TYPE_UINT,   r.y,
                                          "width",     G_TYPE_UINT,   r.width,
                                          "height",    G_TYPE_UINT,   r.height,
                                          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
                                          NULL);

            // NOTE(review): ROI events are sent upstream via the sink pad,
            // unlike the mask events pushed downstream — presumably
            // intentional; confirm against the element's protocol.
            event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
            gst_pad_send_event(filter->sinkpad, event);

            if (filter->verbose)
                GST_INFO("[roi] x: %d, y: %d, width: %d, height: %d\n",
                         r.x, r.y, r.width, r.height);

            if (filter->display)
                cvRectangle(filter->image, cvPoint(r.x, r.y), cvPoint(r.x + r.width, r.y + r.height),
                            CV_RGB(0, 0, 255), 1, 0, 0);
        }

        g_free(bounding_rects);
    }

    if (filter->display)
        gst_buffer_set_data(buf, (guchar*) filter->image->imageData, filter->image->imageSize);

    return gst_pad_push(filter->srcpad, buf);
}