Example #1
void MixtureOfGaussianV1BGS::process(const cv::Mat &img_input, cv::Mat &img_output)
{
    if(img_input.empty())
        return;

    loadConfig();

    if(firstTime)
        saveConfig();

    //------------------------------------------------------------------
    // BackgroundSubtractorMOG
    // http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog
    //
    // Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
    //
    // The class implements the algorithm described in:
    //   P. KaewTraKulPong and R. Bowden,
    //   An improved adaptive background mixture model for real-time tracking with shadow detection,
    //   Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001
    //------------------------------------------------------------------

    mog(img_input, img_foreground, alpha);

    if(enableThreshold)
        cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);

    //if(showOutput)
    //  cv::imshow("Gaussian Mixture Model (KadewTraKuPong&Bowden)", img_foreground);

    img_foreground.copyTo(img_output);

    firstTime = false;
}
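
The process() call above is all the class needs per frame. Below is a minimal driver sketch, assuming MixtureOfGaussianV1BGS is default-constructible as in bgslibrary; the includes, the capture source, and the ESC handling are our own additions.

#include <opencv2/opencv.hpp>
// plus the header that declares MixtureOfGaussianV1BGS

int main()
{
    cv::VideoCapture cap(0);       // any video source
    MixtureOfGaussianV1BGS bgs;    // assumption: default-constructible
    cv::Mat frame, foreground;

    while(cap.read(frame))
    {
        bgs.process(frame, foreground);  // signature from the example above
        if(!foreground.empty())
            cv::imshow("foreground", foreground);
        if(cv::waitKey(30) == 27)        // ESC quits
            break;
    }
    return 0;
}
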
Example #2
void WorkerCPU::processFrame()
{
	cv::Mat sourceMogFrame;

	// Grayscaling
	if(preprocess == 1)
	{
		cv::cvtColor(srcFrame, sourceMogFrame, CV_BGR2GRAY);
	}
	// Bayer filter
	else if(preprocess == 2)
	{
		switch(bayer)
		{
		case Bayer_RG: cv::cvtColor(srcFrame, sourceMogFrame, CV_BayerRG2GRAY); break;
		case Bayer_BG: cv::cvtColor(srcFrame, sourceMogFrame, CV_BayerBG2GRAY); break;
		case Bayer_GR: cv::cvtColor(srcFrame, sourceMogFrame, CV_BayerGR2GRAY); break;
		case Bayer_GB: cv::cvtColor(srcFrame, sourceMogFrame, CV_BayerGB2GRAY); break;
		}
	}
	// Passthrough
	else
	{
		sourceMogFrame = srcFrame;
	}

	if(showIntermediateFrame && preprocess != 0)
		interFrame = sourceMogFrame;

	mog(sourceMogFrame, dstFrame, learningRate);
}
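
The CV_* conversion codes above come from OpenCV's legacy C headers; OpenCV 3 moved them into the cv::COLOR_* enum. Note also that the switch has no default branch, so an unrecognized bayer value would leave sourceMogFrame empty. A self-contained sketch of the grayscale branch with the modern constant names (toGray is our own name):

#include <opencv2/imgproc.hpp>

// Legacy code      -> modern equivalent
// CV_BGR2GRAY      -> cv::COLOR_BGR2GRAY
// CV_BayerRG2GRAY  -> cv::COLOR_BayerRG2GRAY
// CV_BayerBG2GRAY  -> cv::COLOR_BayerBG2GRAY
// CV_BayerGR2GRAY  -> cv::COLOR_BayerGR2GRAY
// CV_BayerGB2GRAY  -> cv::COLOR_BayerGB2GRAY
cv::Mat toGray(const cv::Mat& src)
{
    cv::Mat gray;
    cv::cvtColor(src, gray, cv::COLOR_BGR2GRAY);
    return gray;
}
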
Example #3
bool ForegroundExtractor::run(const cv::Mat& img, const cv::Mat& mot, int motionQ, bool wasAmbig, std::vector<std::vector<cv::Point> >& validContours){

    cv::Mat seeds;
    cv::Mat out;

    if(motionQ > 0){ // TODO: rationale
        this->updateTrainingRate();

        double modifier;
        if(!wasAmbig || roundTrained < m_roundToTrain){
            modifier = 1;
        }
        else{
            modifier = FOREGROUND_MODIF_MANY_BLOBS;
        }

        mog(img, out, trainingRate * modifier);

        cv::dilate(out, out, cv::Mat(), cv::Point(-1,-1), 1);
        cv::erode(out, out, cv::Mat(), cv::Point(-1,-1), 2);

        cv::bitwise_and(out, mot, seeds);
        std::vector<std::vector<cv::Point> > contours;
        cv::findContours(out, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        validContours.resize(contours.size());
        size_t idx = 0;
        for(size_t i = 0; i < contours.size(); i++){
            if(contours[i].size() > 5){
                cv::Rect boundRect = cv::boundingRect(contours[i]);
                cv::Mat miniMask(boundRect.height, boundRect.width, CV_8U, cv::Scalar(0));
                cv::approxPolyDP(contours[i], contours[i], 5, true);
                cv::drawContours(miniMask, contours, (int)i, cv::Scalar(255), -1, 8, cv::noArray(), INT_MAX, cv::Point(-boundRect.x, -boundRect.y));
                cv::bitwise_and(miniMask, seeds(boundRect), miniMask);
                if(cv::countNonZero(miniMask) > 0){
                    validContours[idx] = contours[i]; // TODO: copyTo
                    idx++;
                }
            }
        }

        validContours.resize(idx);

        this->mergeContours(seeds, validContours);
        size_t beforeLargeRemoval = validContours.size();
        this->removeLargeContours(validContours, (img.cols + img.rows) / 5); // magic number

        if(validContours.size() > 1 || beforeLargeRemoval > validContours.size())
            return false;
        else
            return true;
    }
    else{
        validContours.resize(0);
        return false;
    }
}
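
The core trick in run() is gating the MOG mask by the motion mask: a contour survives only if it overlaps at least one seed pixel. Below is a standalone sketch of that gating step; gateBySeeds is our own name, and the bounding-box test is coarser than the per-pixel miniMask check above, but the idea is the same.

#include <opencv2/opencv.hpp>
#include <vector>

// Keep only contours of fg whose bounding box contains a seed pixel.
std::vector<std::vector<cv::Point> > gateBySeeds(const cv::Mat& fg, const cv::Mat& seeds)
{
    std::vector<std::vector<cv::Point> > contours, kept;
    cv::Mat work = fg.clone();  // findContours may modify its input
    cv::findContours(work, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    for(size_t i = 0; i < contours.size(); i++)
    {
        cv::Rect box = cv::boundingRect(contours[i]);
        if(cv::countNonZero(seeds(box)) > 0)
            kept.push_back(contours[i]);
    }
    return kept;
}
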
Example #4
void vehicle_det::get_foreground(Mat &background, Mat &foreground)
{
    //cout<<__PRETTY_FUNCTION__<<endl;
    mog(background, foreground, -1);
    threshold(foreground, foreground, 175, 255, THRESH_BINARY);
    //threshold(foreground, foreground, 150, 255, THRESH_TOZERO);
    medianBlur(foreground, foreground, 9);
    erode(foreground, foreground, Mat());
    dilate(foreground, foreground, Mat());
}
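
The -1 passed to mog() is OpenCV's sentinel for "choose the learning rate automatically"; an explicit value in [0,1] would fix how quickly the model absorbs scene change. For reference, a sketch of the same pipeline against the OpenCV 3+ API, with MOG2 standing in for whatever subtractor this project binds mog to (get_foreground_sketch is our own name):

#include <opencv2/opencv.hpp>

void get_foreground_sketch(const cv::Mat& frame, cv::Mat& foreground)
{
    // The subtractor must persist across frames; static is the quick way here.
    static cv::Ptr<cv::BackgroundSubtractorMOG2> subtractor =
        cv::createBackgroundSubtractorMOG2();

    subtractor->apply(frame, foreground, -1);   // -1: auto learning rate
    cv::threshold(foreground, foreground, 175, 255, cv::THRESH_BINARY);
    cv::medianBlur(foreground, foreground, 9);  // despeckle
    cv::erode(foreground, foreground, cv::Mat());
    cv::dilate(foreground, foreground, cv::Mat());
}
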
Example #5
void MixtureOfGaussianV2BGS::process(const cv::Mat &img_input, cv::Mat &img_output)
{
  if(img_input.empty())
    return;

  loadConfig();

  if(firstTime)
    saveConfig();

  //------------------------------------------------------------------
  // BackgroundSubtractorMOG2
  // http://opencv.itseez.com/modules/video/doc/motion_analysis_and_object_tracking.html#backgroundsubtractormog2
  //
  // Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
  //
  // The class implements the Gaussian mixture model background subtraction described in:
  //  (1) Z. Zivkovic, Improved adaptive Gaussian mixture model for background subtraction, International Conference on Pattern Recognition, UK, August 2004.
  //  The code is very fast and also performs shadow detection. The number of Gaussian components is adapted per pixel.
  //
  //  (2) Z. Zivkovic, F. van der Heijden, Efficient Adaptive Density Estimation per Image Pixel for the Task of Background Subtraction,
  //  Pattern Recognition Letters, vol. 27, no. 7, pages 773-780, 2006.
  //  The algorithm is similar to the standard Stauffer & Grimson algorithm, with additional selection of the number of Gaussian components based on:
  //    Z. Zivkovic, F. van der Heijden, Recursive unsupervised learning of finite mixture models, IEEE Trans. on Pattern Analysis and Machine Intelligence,
  //    vol. 26, no. 5, pages 651-656, 2004.
  //------------------------------------------------------------------

  mog(img_input, img_foreground, alpha);

  if(enableThreshold)
    cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);

  if(showOutput)
    cv::imshow("Gaussian Mixture Model (Zivkovic&Heijden)", img_foreground);

  img_foreground.copyTo(img_output);

  firstTime = false;
}
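
MOG2 adds per-pixel adaptation of the component count and shadow detection: with shadows enabled, shadow pixels are written as gray (127 by default) rather than 255, so a binary threshold above 127, like the one in this example, strips them as well. A sketch with the OpenCV 3+ factory (subtractWithShadows is our own name):

#include <opencv2/opencv.hpp>

cv::Mat subtractWithShadows(const cv::Mat& img_input, double alpha)
{
    static cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 =
        cv::createBackgroundSubtractorMOG2(500,   // history
                                           16.0,  // varThreshold
                                           true); // detectShadows
    cv::Mat fg;
    mog2->apply(img_input, fg, alpha);                  // alpha as in the example
    cv::threshold(fg, fg, 128, 255, cv::THRESH_BINARY); // 128 > shadow value 127
    return fg;
}
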
Example #6
int CHuman::HumanAlarmRun(Mat &displayframe)
{
	int time_use = 0;
	struct timeval start;
	struct timeval end;

	gettimeofday(&start, NULL);


	Mat tmpframe;
	Mat blobdealFrame;
	vector< vector<Point> >  contours;
	Rect contoursRect;

	alarm = 0;

	displayframe.copyTo(tmpframe);
	displayframe.copyTo(blobdealFrame);

	vector<blobnode>().swap(humanlistpro);

	m_zoomRows = tmpframe.rows / m_rowsZoomRate;
	m_zoomCols = tmpframe.cols / m_colsZoomRate;

	w_Rate = (float)tmpframe.cols / m_zoomCols;
	h_Rate = (float)tmpframe.rows / m_zoomRows;

	Mat morph(tmpframe.rows, tmpframe.cols, CV_8UC1);

	mog(tmpframe, foregrondframe, 0.001);

	frameindex++;
	if(frameindex < 250) return 2;           // model still warming up
	if(frameindex >= 250) frameindex = 250;  // clamp so the counter cannot overflow
	foregrondframe.copyTo(mask);
	threshold(mask, mask, 200, 255, THRESH_BINARY);

	cv::erode(mask, mask, cv::Mat());

	cv::dilate(mask, mask, cv::Mat());

	algorithmMorphology_Operations(mask, mask);

	findContours(mask, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

	m_BlobRects.clear();
	for(size_t i = 0; i < contours.size(); i++)
	{
		contoursRect = boundingRect(contours[i]);
		if(fabs(contourArea(contours[i])) > 600.0)
		{
			//rectangle(displayframe, contoursRect, color_rect, 2, 8, 0);
			m_BlobRects.push_back(contoursRect);
		}
	}

	if((t_camera[m_index].t_Camvarparam.t_CamHumAlarm.Flag & 0x02) == 0x02){
		for(size_t i = 0; i < t_camera[m_index].t_Camvarparam.t_CamHumAlarm.DirectionLines.size(); i++)
		{
			line(displayframe, t_camera[m_index].t_Camvarparam.t_CamHumAlarm.DirectionLines[i].Start, t_camera[m_index].t_Camvarparam.t_CamHumAlarm.DirectionLines[i].End, Scalar(255));
		}
	}


	if((t_camera[m_index].t_Camvarparam.t_CamHumAlarm.Flag & 0x01) == 1){

		for(size_t ii = 0; ii < t_camera[m_index].t_Camvarparam.t_CamHumAlarm.MonitorZoneRects.size(); ii++)
		{
			rectangle(displayframe, t_camera[m_index].t_Camvarparam.t_CamHumAlarm.MonitorZoneRects[ii], Scalar(255, 0, 0), 2, 8, 0); // draw monitored zones
		}
	}

	human_detect(morph,displayframe);

	if((t_camera[m_index].t_Camvarparam.t_CamHumAlarm.Flag & 0x01)  == 1){
		census(displayframe);// for human statistics
	}

	if((t_camera[m_index].t_Camvarparam.t_CamHumAlarm.Flag & 0x02)  == 0x02){
		blobdeal(displayframe);
	}

	if(humanstatis.numAll < (humanstatis.doorin[0] + humanstatis.doorin[1] - humanstatis.doorout[0] - humanstatis.doorout[1]))
		humanstatis.numAll = humanstatis.doorin[0] + humanstatis.doorin[1] - humanstatis.doorout[0] - humanstatis.doorout[1];

	 //dbgprint("door1:in=%d,out=%d  door2:in=%d,out=%d\n",humanstatis.doorin[0],humanstatis.doorout[0],humanstatis.doorin[1],humanstatis.doorout[1]);

	if(humanstatis.numAll >= t_camera[m_index].t_Camvarparam.t_CamHumAlarm.MaxNum){
		//printf("humanstatis.numAll is %d\n",humanstatis.numAll);
		//printf("t_Camera.t_SinCam[m_index].t_Camvarparam.t_CamHumAlarm.MaxNum is %d\n",t_Camera.t_SinCam[m_index].t_Camvarparam.t_CamHumAlarm.MaxNum);
		alarm = 1;
	}

	char dstr[100];
	sprintf(dstr, "door1:in=%d,out=%d  door2:in=%d,out=%d",humanstatis.doorin[0],humanstatis.doorout[0],humanstatis.doorin[1],humanstatis.doorout[1]);
	putText(displayframe,dstr,cvPoint(200,25),CV_FONT_HERSHEY_COMPLEX, 0.5, cvScalar(0,0,255));


	//printf("humanstatis Num:%d, humanstatisIn:%d,humanstatisOut:%d\n",humanstatis.numAll,humanstatis.inAll,humanstatis.outAll);


	//char dstr[100];
  //sprintf(dstr,  "in=%d,out=%d",humanstatis.doorin,humanstatis.doorout);
  //putText(displayframe,dstr,cvPoint(25,25),CV_FONT_HERSHEY_COMPLEX, 1, cvScalar(0,0,255));
	//printf("doorin=%d,doorout=%d\n",humanstatis.doorin,humanstatis.doorout);

	morph.release();
	vector<Point>().swap(object); //vector<Point>
	vector<blobnode>().swap(humanlist);

	gettimeofday(&end, NULL);
	time_use = (end.tv_sec - start.tv_sec)*1000 + (end.tv_usec - start.tv_usec)/1000; // elapsed time in milliseconds
	//printf("time_use is %d\n",time_use);

	return 0;
}
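
The gettimeofday() pair at the top and bottom of HumanAlarmRun() measures per-frame latency in milliseconds. The same measurement, sketched portably with std::chrono (timedFrame is our own placeholder):

#include <chrono>

int timedFrame()
{
    auto t0 = std::chrono::steady_clock::now();
    // ... per-frame work goes here ...
    auto t1 = std::chrono::steady_clock::now();
    return (int)std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count();
}
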
Example #7
static int CV_CDECL
icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel*  bg_model, double learningRate )
{
    int region_count = 0;
    
    cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);
    
    cv::BackgroundSubtractorMOG mog;
    mog.bgmodel = *(cv::Mat*)bg_model->g_point;
    mog.frameSize = mog.bgmodel.data ? cv::Size(cvGetSize(curr_frame)) : cv::Size();
    mog.frameType = image.type();

    mog.nframes = bg_model->countFrames;
    mog.history = bg_model->params.win_size;
    mog.nmixtures = bg_model->params.n_gauss;
    mog.varThreshold = bg_model->params.std_threshold;
    mog.backgroundRatio = bg_model->params.bg_threshold;
    
    mog(image, mask, learningRate);
    
    bg_model->countFrames = mog.nframes;
    if( ((cv::Mat*)bg_model->g_point)->data != mog.bgmodel.data )
        *((cv::Mat*)bg_model->g_point) = mog.bgmodel;
    
    //foreground filtering
    
    //filter small regions
    cvClearMemStorage(bg_model->storage);
    
    //cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_OPEN, 1 );
    //cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_CLOSE, 1 );
    
    /*
    CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
    cvFindContours( bg_model->foreground, bg_model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
    for( seq = first_seq; seq; seq = seq->h_next )
    {
        CvContour* cnt = (CvContour*)seq;
        if( cnt->rect.width * cnt->rect.height < bg_model->params.minArea )
        {
            //delete small contour
            prev_seq = seq->h_prev;
            if( prev_seq )
            {
                prev_seq->h_next = seq->h_next;
                if( seq->h_next ) seq->h_next->h_prev = prev_seq;
            }
            else
            {
                first_seq = seq->h_next;
                if( seq->h_next ) seq->h_next->h_prev = NULL;
            }
        }
        else
        {
            region_count++;
        }
    }
    bg_model->foreground_regions = first_seq;
    cvZero(bg_model->foreground);
    cvDrawContours(bg_model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);*/
    CvMat _mask = mask;
    cvCopy(&_mask, bg_model->foreground);
    
    return region_count;
}
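
The commented-out block above removes small foreground regions with the legacy CvSeq contour API. A sketch of the same filter using the C++ API, with filterSmallRegions as our own name and minArea standing in for bg_model->params.minArea:

#include <opencv2/opencv.hpp>
#include <vector>

// Zero the mask, then redraw only components at least minArea in bounding-box area.
static void filterSmallRegions(cv::Mat& mask, double minArea)
{
    std::vector<std::vector<cv::Point> > contours;
    cv::Mat work = mask.clone();  // findContours may modify its input
    cv::findContours(work, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    mask.setTo(0);
    for(size_t i = 0; i < contours.size(); i++)
    {
        cv::Rect r = cv::boundingRect(contours[i]);
        if(r.width * r.height >= minArea)
            cv::drawContours(mask, contours, (int)i, cv::Scalar(255), CV_FILLED);
    }
}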