// Exemplo n.º 1
// 0
int main(int argc, char** argv)
{
	IplImage* temp1 = NULL;
    IplImage* temp2 = NULL;
    IplImage* result = NULL;
    IplImage* result1 = NULL;
    IplImage* result2 = NULL;

	CvBGStatModel* bg_model=0;
	CvBGStatModel* bg_model1=0;

    IplImage* rawImage = 0; 
	IplImage* yuvImage = 0; 
	IplImage* rawImage1 = 0;
    IplImage* pFrImg = 0;
	IplImage* pFrImg1= 0;
	IplImage* pFrImg2= 0;
	IplImage* ImaskCodeBookCC = 0;
    CvCapture* capture = 0;

	int c,n;

	maxMod[0] = 25; 
	minMod[0] = 35;
	maxMod[1] = 8;
	minMod[1] = 8;
	maxMod[2] = 8;
	minMod[2] = 8;

    argc=2;
    argv[1]="intelligentroom_raw.avi";
    if( argc > 2 )
	{
		fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
		return -1;
	}
 
    if (argc ==1)
		if( !(capture = cvCaptureFromCAM(-1)))
		{
			fprintf(stderr, "Can not open camera.\n");
			return -2;
		}

    if(argc == 2)
		if( !(capture = cvCaptureFromFile(argv[1])))
		{
			fprintf(stderr, "Can not open video file %s\n", argv[1]);
			return -2;
		}

	bool pause = false;
	bool singlestep = false;

    if( capture )
    {
        cvNamedWindow( "原视频序列图像", 1 );
		cvNamedWindow("不实时更新的Codebook算法[本文]",1);
		cvNamedWindow("实时更新的Codebook算法[本文]",1);
		cvNamedWindow("基于MOG的方法[Chris Stauffer'2001]",1);
		cvNamedWindow("三帧差分", 1);
		cvNamedWindow("基于Bayes decision的方法[Liyuan Li'2003]", 1);
        
		cvMoveWindow("原视频序列图像", 0, 0);
		cvMoveWindow("不实时更新的Codebook算法[本文]", 360, 0);
		cvMoveWindow("实时更新的Codebook算法[本文]", 720, 350);
		cvMoveWindow("基于MOG的方法[Chris Stauffer'2001]", 0, 350);
		cvMoveWindow("三帧差分", 720, 0);
        cvMoveWindow("基于Bayes decision的方法[Liyuan Li'2003]",360, 350);
        int nFrmNum = -1;
        for(;;)
        {
    		if(!pause)
			{
				rawImage = cvQueryFrame( capture );
				++nFrmNum;
				printf("第%d帧\n",nFrmNum);
				if(!rawImage) 
					break;
			}
			if(singlestep)
			{
				pause = true;
			}
			if(0 == nFrmNum) 
			{
				printf(". . . wait for it . . .\n"); 
				
				temp1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
				temp2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
				result1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
				result2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
				result = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);

				bg_model = cvCreateGaussianBGModel(rawImage);
                bg_model1 = cvCreateFGDStatModel(rawImage);
				rawImage1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );

				yuvImage = cvCloneImage(rawImage);
				pFrImg  = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				pFrImg1 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				pFrImg2 = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );

				imageLen = rawImage->width*rawImage->height;

				cA = new codeBook [imageLen];
				cC = new codeBook [imageLen];
				cD = new codeBook [imageLen];

				for(int f = 0; f<imageLen; f++)
				{
					cA[f].numEntries = 0; cA[f].t = 0;
					cC[f].numEntries = 0; cC[f].t = 0;
					cD[f].numEntries = 0; cD[f].t = 0;
				}
				for(int nc=0; nc<nChannels;nc++)
				{
					cbBounds[nc] = 10;
				}
				ch[0] = true;
				ch[1] = true;
				ch[2] = true;
			}
             
        	if( rawImage )
        	{
				if(!pause)
				{					
					cvSmooth(rawImage, rawImage1, CV_GAUSSIAN,3,3);

					cvChangeDetection(temp1, temp2, result1);
					cvChangeDetection(rawImage1, temp1, result2);
					cvAnd(result1, result2, result, NULL);
					cvCopy(temp1,temp2, NULL);
					cvCopy(rawImage,temp1, NULL);

					
					cvUpdateBGStatModel( rawImage, bg_model );
					cvUpdateBGStatModel( rawImage, bg_model1 );
				}

				cvCvtColor( rawImage1, yuvImage, CV_BGR2YCrCb );
				if( !pause && nFrmNum >= 1 && nFrmNum < T  )
				{
					pColor = (uchar *)((yuvImage)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						update_codebook_model(pColor, cA[c],cbBounds,nChannels);
					    trainig_codebook(pColor, cC[c],cbBounds,nChannels);
						pColor += 3;
					}
				}

				if( nFrmNum == T)
				{
					for(c=0; c<imageLen; c++)
					{
						clear_stale_entries(cA[c]);
						training_clear_stale_entries(cC[c]);
					}
				}

				if(nFrmNum > T) 
				{
					pColor = (uchar *)((yuvImage)->imageData);
					uchar maskPixelCodeBook;
					uchar maskPixelCodeBook1;
					uchar maskPixelCodeBook2;
					uchar *pMask = (uchar *)((pFrImg)->imageData);
					uchar *pMask1 = (uchar *)((pFrImg1)->imageData);
					uchar *pMask2 = (uchar *)((pFrImg2)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						//本文中不带自动背景更新的算法输出
						maskPixelCodeBook1=background_Diff(pColor, cA[c],nChannels,minMod,maxMod);
                        *pMask1++ = maskPixelCodeBook1;
						
						//本文中带自动背景更新的算法输出
						if ( !pause && det_update_codebook_cC(pColor, cC[c],cbBounds,nChannels))
						{	
							det_update_codebook_cD(pColor, cD[c],cbBounds,nChannels, nFrmNum); 
							realtime_clear_stale_entries_cD(cD[c], nFrmNum);
							cD_to_cC(cD[c], cC[c], (nFrmNum - T)/5);
							
						}
						else
						{
							realtime_clear_stale_entries_cC(cC[c], nFrmNum);
						
						} 

						maskPixelCodeBook2=background_Diff(pColor, cC[c],nChannels,minMod,maxMod);
						*pMask2++ = maskPixelCodeBook2;  
						pColor += 3;
					}

					cvCopy(pFrImg2,ImaskCodeBookCC);
					if(!pause)
					{
						count_Segmentation(cC,yuvImage,nChannels,minMod,maxMod);
						forgratio = (float) (fgcount)/ imageLen;
					}
				}
				bg_model1->foreground->origin=1;
				bg_model->foreground->origin=1;				
				pFrImg->origin=1;
                pFrImg1->origin=1;
				pFrImg2->origin=1;
				ImaskCodeBookCC->origin=1;
				result->origin=1;
				//connected_Components(pFrImg1,1,40);
				//connected_Components(pFrImg2,1,40);
				
                cvShowImage("基于MOG的方法[Chris Stauffer'2001]", bg_model->foreground);
           		cvShowImage( "原视频序列图像", rawImage );
				cvShowImage("三帧差分", result);
 				cvShowImage( "不实时更新的Codebook算法[本文]",pFrImg1);
				cvShowImage("实时更新的Codebook算法[本文]",pFrImg2);
				cvShowImage("基于Bayes decision的方法[Liyuan Li'2003]", bg_model1->foreground);

	         	c = cvWaitKey(1)&0xFF;
				//End processing on ESC, q or Q
				if(c == 27 || c == 'q' || c == 'Q')
					break;
				//Else check for user input
				switch(c)
				{
					case 'h':
						help();
						break;
					case 'p':
						pause ^= 1;
						break;
					case 's':
						singlestep = 1;
						pause = false;
						break;
					case 'r':
						pause = false;
						singlestep = false;
						break;
				//CODEBOOK PARAMS
                case 'y':
                case '0':
                        ch[0] = 1;
                        ch[1] = 0;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'u':
                case '1':
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 0;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'v':
                case '2':
                        ch[0] = 0;
                        ch[1] = 0;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'a': //All
                case '3':
                        ch[0] = 1;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'b':  //both u and v together
                        ch[0] = 0;
                        ch[1] = 1;
                        ch[2] = 1;
                        printf("CodeBook YUV Channels active: ");
                        for(n=0; n<nChannels; n++)
                                printf("%d, ",ch[n]);
                        printf("\n");
                        break;
                case 'z': 
					printf(" Fadd加1 ");
					Fadd += 1;
					printf("Fadd=%.4d\n",Fadd);										
					break;
				case 'x':
					printf(" Fadd减1 "); 
					Fadd -= 1;					
					printf("Fadd=%.4d\n",Fadd);										
					break;
				case 'n': 
					printf(" Tavgstale加1 ");
					Tavgstale += 1;
					printf("Tavgstale=%.4d\n",Tavgstale);										
					break;
				case 'm': 
					printf(" Tavgstale减1 ");
					Tavgstale -= 1;
					printf("Tavgstale=%.4d\n",Tavgstale);										
					break;
				case 'i': //modify max classification bounds (max bound goes higher)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							maxMod[n] += 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'o': //modify max classification bounds (max bound goes lower)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							maxMod[n] -= 1;
						printf("%.4d,",maxMod[n]);
					}
					printf(" CodeBook High Side\n");
					break;
				case 'k': //modify min classification bounds (min bound goes lower)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							minMod[n] += 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				case 'l': //modify min classification bounds (min bound goes higher)
					for(n=0; n<nChannels; n++)
					{
						if(ch[n])
							minMod[n] -= 1;
						printf("%.4d,",minMod[n]);
					}
					printf(" CodeBook Low Side\n");
					break;
				}
            }
		}		
		cvReleaseCapture( &capture );
		cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
		cvReleaseBGStatModel((CvBGStatModel**)&bg_model1);

        cvDestroyWindow( "原视频序列图像" );
		cvDestroyWindow( "不实时更新的Codebook算法[本文]");
		cvDestroyWindow( "实时更新的Codebook算法[本文]");
		cvDestroyWindow( "基于MOG的方法[Chris Stauffer'2001]");
		cvDestroyWindow( "三帧差分" );
		cvDestroyWindow( "基于Bayes decision的方法[Liyuan Li'2003]");

		cvReleaseImage(&temp1);
		cvReleaseImage(&temp2);
		cvReleaseImage(&result);
		cvReleaseImage(&result1);
		cvReleaseImage(&result2);
		cvReleaseImage(&pFrImg);
		cvReleaseImage(&pFrImg1);
		cvReleaseImage(&pFrImg2);

		if(yuvImage) cvReleaseImage(&yuvImage);
		if(rawImage) cvReleaseImage(&rawImage);
		if(rawImage1) cvReleaseImage(&rawImage1);
		if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
		delete [] cA;
		delete [] cC;
		delete [] cD;
    }
	else
	{ 
		printf("\n\nDarn, Something wrong with the parameters\n\n"); help();
	}
    return 0;
}
// Exemplo n.º 2
// 0
// Function icvUpdateFGDStatModel performs one update step of the FGD
// background model (Li, Huang, Gu & Tian, "Foreground object detection from
// videos containing complex background", ACM Multimedia 2003): it classifies
// pixels as FG/BG from the per-pixel color (Ct) and color-co-occurrence (CCt)
// statistic tables, segments the foreground mask, then updates the tables and
// the reference background image.
// parameters:
//      curr_frame  - current frame from video sequence
//      model       - pointer to CvFGDStatModel structure
//      (unnamed double) - learning-rate slot required by the callback
//                         signature; unused by this algorithm
// returns: number of foreground regions after segmentation.
static int CV_CDECL
icvUpdateFGDStatModel( IplImage* curr_frame, CvFGDStatModel*  model, double )
{
    int            mask_step = model->Ftd->widthStep;
    CvSeq         *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
    IplImage*      prev_frame = model->prev_frame;
    int            region_count = 0;
    int            FG_pixels_count = 0;
    // Quantization tolerances derived from the table resolutions Lc/Lcc.
    int            deltaC  = cvRound(model->params.delta * 256 / model->params.Lc);
    int            deltaCC = cvRound(model->params.delta * 256 / model->params.Lcc);
    int            i, j, k, l;

    // Clear storages.
    cvClearMemStorage(model->storage);
    cvZero(model->foreground);

    // Form foreground pixel candidates using image differencing
    // with adaptive thresholding.  The algorithm is from:
    //
    //    Thresholding for Change Detection
    //    Paul L. Rosin 1998 6p
    //    http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
    //
    cvChangeDetection( prev_frame, curr_frame, model->Ftd );
    cvChangeDetection( model->background, curr_frame, model->Fbd );

    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            if( ((uchar*)model->Fbd->imageData)[i*mask_step+j] || ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float Pb  = 0;
                float Pv  = 0;
                float Pvb = 0;

                CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;

                // ctable/cctable back the PV_C/V_C and PV_CC/V_CC macros.
                CvBGPixelCStatTable*   ctable = stat->ctable;
                CvBGPixelCCStatTable* cctable = stat->cctable;

                uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
                uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;

                int val = 0;

                // Is it a motion pixel?
                if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
                {
                    if( !stat->is_trained_dyn_model ) {

                        val = 1;

                    } else {

                        // Compare with stored CCt vectors.
                        // BUGFIX: the bounds check k < N1cc must come before
                        // the PV_CC(k) access; the original order read one
                        // element past the table when no entry was below
                        // alpha2.
                        for( k = 0;  k < model->params.N1cc && PV_CC(k) > model->params.alpha2;  k++ )
                        {
                            if ( abs( V_CC(k,0) - prev_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,1) - prev_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,2) - prev_data[2]) <=  deltaCC &&
                                 abs( V_CC(k,3) - curr_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,4) - curr_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,5) - curr_data[2]) <=  deltaCC)
                            {
                                Pv += PV_CC(k);
                                Pvb += PVB_CC(k);
                            }
                        }
                        Pb = stat->Pbcc;
                        // Bayes decision: foreground when 2*P(v|b)*P(b) > P(v).
                        if( 2 * Pvb * Pb <= Pv ) val = 1;
                    }
                }
                else if( stat->is_trained_st_model )
                {
                    // Compare with stored Ct vectors (bounds check first, as above).
                    for( k = 0;  k < model->params.N1c && PV_C(k) > model->params.alpha2;  k++ )
                    {
                        if ( abs( V_C(k,0) - curr_data[0]) <=  deltaC &&
                             abs( V_C(k,1) - curr_data[1]) <=  deltaC &&
                             abs( V_C(k,2) - curr_data[2]) <=  deltaC )
                        {
                            Pv += PV_C(k);
                            Pvb += PVB_C(k);
                        }
                    }
                    Pb = stat->Pbc;
                    if( 2 * Pvb * Pb <= Pv ) val = 1;
                }

                // Update foreground:
                ((uchar*)model->foreground->imageData)[i*mask_step+j] = (uchar)(val*255);
                FG_pixels_count += val;

            }		// end if( change detection...
        }		// for j...
    }			// for i...
    //end BG/FG classification

    // Foreground segmentation.
    // Smooth foreground map:
    if( model->params.perform_morphing ){
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN,  model->params.perform_morphing );
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
    }


    if( model->params.minArea > 0 || model->params.is_obj_without_holes ){

        // Discard under-size foreground regions:
        cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
        for( seq = first_seq; seq; seq = seq->h_next )
        {
            CvContour* cnt = (CvContour*)seq;
            if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
                (model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
            {
                // Unlink under-size contour from the h_next list:
                prev_seq = seq->h_prev;
                if( prev_seq )
                {
                    prev_seq->h_next = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }
        model->foreground_regions = first_seq;
        cvZero(model->foreground);
        cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);

    } else {

        model->foreground_regions = NULL;
    }

    // Check ALL BG update condition: when nearly the whole frame changed,
    // force every pixel stat to "trained" so the model can re-learn quickly.
    if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
    {
         for( i = 0; i < model->Ftd->height; i++ )
             for( j = 0; j < model->Ftd->width; j++ )
             {
                 CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
                 stat->is_trained_st_model = stat->is_trained_dyn_model = 1;
             }
    }


    // Update background model:
    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
            CvBGPixelCStatTable* ctable = stat->ctable;
            CvBGPixelCCStatTable* cctable = stat->cctable;

            uchar *curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
            uchar *prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;

            // Handle "motion" pixel: update the co-occurrence (CCt) table.
            if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] || !stat->is_trained_dyn_model )
            {
                float alpha = stat->is_trained_dyn_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                // Update Pb (probability that this pixel is background).
                stat->Pbcc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbcc += alpha;
                }

                // Find best Vi match (bounds check before table access):
                for( k = 0; k < model->params.N2cc && PV_CC(k); k++ )
                {
                    // Exponential decay of memory
                    PV_CC(k)  *= (1-alpha);
                    PVB_CC(k) *= (1-alpha);
                    if( PV_CC(k) < MIN_PV )
                    {
                        PV_CC(k) = 0;
                        PVB_CC(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_CC(k,l) - prev_data[l] );
                        if( val > deltaCC ) break;
                        dist += val;
                        val = abs( V_CC(k,l+3) - curr_data[l] );
                        if( val > deltaCC) break;
                        dist += val;
                    }
                    // l == 3 means all six components matched within deltaCC.
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }


                if( indx < 0 )
                {   // Replace N2th elem in the table by new feature:
                    indx = model->params.N2cc - 1;
                    PV_CC(indx) = alpha;
                    PVB_CC(indx) = alpha;
                    // Update Vt:
                    for( l = 0; l < 3; l++ )
                    {
                        V_CC(indx,l) = prev_data[l];
                        V_CC(indx,l+3) = curr_data[l];
                    }
                }
                else
                {   // Update matched entry:
                    PV_CC(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_CC(indx) += alpha;
                    }
                }

                // Re-sort CCt table by Pv (keep table in descending Pv order):
                for( k = 0; k < indx; k++ )
                {
                    if( PV_CC(k) <= PV_CC(indx) )
                    {
                        // Shift elements down and insert entry indx at k.
                        CvBGPixelCCStatTable tmp1, tmp2 = cctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = cctable[l];
                            cctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }


                float sum1=0, sum2=0;
                // Check "once-off" changes (bounds check before table access):
                for( k = 0; k < model->params.N1cc && PV_CC(k); k++ )
                {
                    sum1 += PV_CC(k);
                    sum2 += PVB_CC(k);
                }
                if( sum1 > model->params.T ) stat->is_trained_dyn_model = 1;

                diff = sum1 - stat->Pbcc * sum2;
                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at motion mode\n");
                    // New BG features are discovered:
                    for( k = 0; k < model->params.N1cc && PV_CC(k); k++ )
                    {
                        PVB_CC(k) =
                            (PV_CC(k)-stat->Pbcc*PVB_CC(k))/(1-stat->Pbcc);
                    }
                    assert(stat->Pbcc<=1 && stat->Pbcc>=0);
                }
            }

            // Handle "stationary" pixel: update the color (Ct) table.
            if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                // Update Pb:
                stat->Pbc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbc += alpha;
                }

                // Find best Vi match:
                for( k = 0; k < model->params.N2c; k++ )
                {
                    // Exponential decay of memory
                    PV_C(k) *= (1-alpha);
                    PVB_C(k) *= (1-alpha);
                    if( PV_C(k) < MIN_PV )
                    {
                        PV_C(k) = 0;
                        PVB_C(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_C(k,l) - curr_data[l] );
                        if( val > deltaC ) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }

                if( indx < 0 )
                {   // N2th elem in the table is replaced by a new feature:
                    indx = model->params.N2c - 1;
                    PV_C(indx) = alpha;
                    PVB_C(indx) = alpha;
                    // Update Vt:
                    for( l = 0; l < 3; l++ )
                    {
                        V_C(indx,l) = curr_data[l];
                    }
                } else
                {   // Update matched entry:
                    PV_C(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_C(indx) += alpha;
                    }
                }

                // Re-sort Ct table by Pv:
                for( k = 0; k < indx; k++ )
                {
                    if( PV_C(k) <= PV_C(indx) )
                    {
                        // Shift elements down and insert entry indx at k.
                        CvBGPixelCStatTable tmp1, tmp2 = ctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = ctable[l];
                            ctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }

                // Check "once-off" changes (bounds check before table access):
                float sum1=0, sum2=0;
                for( k = 0; k < model->params.N1c && PV_C(k); k++ )
                {
                    sum1 += PV_C(k);
                    sum2 += PVB_C(k);
                }
                diff = sum1 - stat->Pbc * sum2;
                if( sum1 > model->params.T ) stat->is_trained_st_model = 1;

                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at stat mode\n");
                    // New BG features are discovered:
                    for( k = 0; k < model->params.N1c && PV_C(k); k++ )
                    {
                        PVB_C(k) = (PV_C(k)-stat->Pbc*PVB_C(k))/(1-stat->Pbc);
                    }
                    stat->Pbc = 1 - stat->Pbc;
                }
            }		// if !(change detection) at pixel (i,j)

            // Update the reference BG image:
            if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
            {
                uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;

                if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
                    !((uchar*)model->Fbd->imageData)[i*mask_step+j] )
                {
                    // Apply IIR filter:
                    for( l = 0; l < 3; l++ )
                    {
                        int a = cvRound(ptr[l]*(1 - model->params.alpha1) + model->params.alpha1*curr_data[l]);
                        ptr[l] = (uchar)a;
                    }
                }
                else
                {
                    // Background change detected: adopt current pixel outright.
                    for( l = 0; l < 3; l++ )
                    {
                        ptr[l] = curr_data[l];
                    }
                }
            }
        }		// j
    }			// i

    // Keep previous frame:
    cvCopy( curr_frame, model->prev_frame );

    return region_count;
}
// Exemplo n.º 3
// 0
// Function updateBGMeanStatModel updates model and returns number of foreground regions
// parameters:
//      curr_frame  - current frame from video sequence
//      model       - pointer to BGMeanStatModel structure
//
// The background image is refreshed every params.bg_update_rate frames from a
// per-pixel circular buffer holding the last params.n_frames samples; the
// foreground mask is recomputed every params.fg_update_rate frames via
// adaptive-threshold change detection against that background.
int
updateBGMeanStatModel( IplImage* curr_frame, BGMeanStatModel*  model ){
  int region_count = 0;

  if (model->bg_frame_count >= model->params.bg_update_rate){
    model->bg_frame_count = 0;//reset counter
    //update model
    //insert curr_frame in circular buffer
    int i,j,k;
    // Layout of frame_cbuffer: row-major pixels, each pixel owning a
    // contiguous "cluster" of n_frames samples of nChannels bytes each.
    int frame_cbuffer_pixelcluster_step = model->params.n_frames*model->background->nChannels;
    // Byte offset of the slot to overwrite inside every pixel's cluster.
    int frame_cbuffer_cbufferpixel_offset = model->cbuffer_idx*model->background->nChannels;
    int frame_cbuffer_width_step = model->background->width*frame_cbuffer_pixelcluster_step;
    
    cv::Scalar mean, std_dev;

    // cbuffer_nentries_init counts how many cluster slots hold real samples,
    // so during warm-up the mean is never taken over uninitialized values.
    if (model->cbuffer_nentries_init < model->params.n_frames)
      model->cbuffer_nentries_init++;

    //bg and curr_frame have same size
    for (i = 0; i < model->background->height; i++){//rows
      uchar* frame_cbuffer_row_p = model->frame_cbuffer + i*frame_cbuffer_width_step;
      uchar* curr_frame_row_p = (uchar*)curr_frame->imageData + i*curr_frame->widthStep;
      uchar* bg_frame_row_p = (uchar*)model->background->imageData + i*model->background->widthStep;
      for (j = 0; j < model->background->width; j++){//cols
	uchar* pixel_cluster_p = frame_cbuffer_row_p + j*frame_cbuffer_pixelcluster_step;
	uchar* pixel_to_update_p = pixel_cluster_p + frame_cbuffer_cbufferpixel_offset;//pointer to pixel in circular buffer to update
	uchar* curr_pixel_p = curr_frame_row_p + j*model->background->nChannels;//pixel from curr_frame
	// Update circular buffer (the current frame's sample is written
	// first, so the statistics below include it).
	for (k=0; k<model->background->nChannels; k++)
	  pixel_to_update_p[k] = curr_pixel_p[k];
	
	// Calc mean and std dev over the valid samples of this pixel's
	// cluster (non-owning cv::Mat view, no copy).
	cv::Mat pixel_cluster(1, std::min(model->params.n_frames,model->cbuffer_nentries_init), 
			      CV_MAKETYPE(CV_8U,model->background->nChannels),
			      pixel_cluster_p);/*cv::Mat with pixel cluster with last n frames*/
	cv::meanStdDev(pixel_cluster, mean, std_dev);
	
	// NOTE(review): pixel_offset only feeds the commented-out mean/std_dev
	// bookkeeping below; it is currently unused.
	int pixel_offset = j*model->background->nChannels + (i * model->background->width * model->background->nChannels);
	//double* mean_p = model->mean + pixel_offset;
	//double* std_dev_p = model->std_dev + pixel_offset;
	uchar* bg_p = bg_frame_row_p + j*model->background->nChannels;
	// Blend the cluster mean into the background per channel; the blend
	// weight is a function of the channel's std dev (see get_mean_weight).
	for (k=0; k<model->background->nChannels; k++){
	  //mean_p[k] = mean[k];
	  //std_dev_p[k] = std_dev[k];
	  double mean_weight = get_mean_weight(std_dev[k]);
	  double a = ((double)bg_p[k])*(1-mean_weight);
	  double b = mean[k]*mean_weight;
	  bg_p[k] = cv::saturate_cast<uchar>(a+b);
	}
      }
    }
    // Advance the circular buffer write index (wraps at n_frames).
    model->cbuffer_idx++;
    if (model->cbuffer_idx >= model->params.n_frames)
      model->cbuffer_idx = 0;
  }
  
  if (model->fg_frame_count >= model->params.fg_update_rate){
    model->fg_frame_count = 0;//reset counter
    //clear fg
    cvZero(model->foreground);

    //difference bg - curr_frame. Adaptative threshold
    cvChangeDetection( model->background, curr_frame, model->foreground );//FIXME: just 3 channel support

    //segmentation if required
    if (model->params.perform_segmentation)
      region_count = bgfgSegmentation((CvBGStatModel*)model, &model->params.sg_params);
  }

  // Update counters (checked against the update rates on the next call).
  model->fg_frame_count++;
  model->bg_frame_count++;

  return region_count;
}