// Performs one iteration of background-subtraction motion detection on `image`.
// Converts the frame to grayscale, diffs it against the running background
// model `pBkMat`, thresholds the difference into a binary foreground mask
// (written to pFrImg), and folds the frame into the background with a slow
// running average.
// NOTE(review): relies on a file-scope frame counter `nFrmNum` declared elsewhere.
void detect_object(IplImage *image, IplImage *pBkImg, IplImage *pFrImg, CvMat *pFrameMat, CvMat *pBkMat, CvMat *pFrMat,int thre_limit)
{
	nFrmNum++;
	cvCvtColor(image, pFrImg, CV_BGR2GRAY);
	cvConvert(pFrImg, pFrameMat);
	// Gaussian smoothing to suppress pixel noise before differencing
	cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
	// Absolute difference between the current frame and the background model
	cvAbsDiff(pFrameMat, pBkMat, pFrMat);
	// Binarize the foreground: pixels differing by more than thre_limit become 255
	cvThreshold(pFrMat, pFrImg,thre_limit, 255.0, CV_THRESH_BINARY);

	/* Morphological filtering */
	//IplConvKernel* element = cvCreateStructuringElementEx(2, 2, 0, 0, CV_SHAPE_RECT);
	//cvErode(pFrImg, pFrImg,element, 1);	// erode
	//delete element;

	//element = cvCreateStructuringElementEx(2, 2, 1, 1, CV_SHAPE_RECT);
	//cvDilate(pFrImg, pFrImg, element, 1);	// dilate
	//delete element;
	cvErode(pFrImg, pFrImg,0, 1);	// erode with default 3x3 kernel to remove speckle
	cvDilate(pFrImg, pFrImg,0, 1);	// dilate to restore eroded object area

	// Update the background model with a slow running average (alpha = 0.004)
	cvRunningAvg(pFrameMat, pBkMat, 0.004, 0);
	// Convert the background matrix back to image format for display
	cvConvert(pBkMat, pBkImg);

	// NOTE(review): this shows the FOREGROUND mask in the window named
	// "background"; the commented-out line below shows the actual background
	// estimate. Confirm which is intended.
	cvShowImage("background", pFrImg);
//	cvShowImage("background", pBkImg);
}
// Exemple #2
// 0
// Runs webcam background-subtraction in a loop until ESC is pressed.
// Re-opens camera 0, then for each frame computes |frame - background|,
// thresholds it with the UI-controlled `lowThreshold`, updates the
// background model with running-average weight `alpha`, and hands the
// results to MainWindow::Display.
// NOTE(review): the images/matrices allocated when nFrameNum == 1 are member
// pointers that are never released here before a later call re-allocates
// them — confirm cleanup happens elsewhere (destructor?).
void MainWindow::BackgroundDiff()
{
    ui->alpha_slider->setEnabled(true);

    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);
  //  IplImage* pFrame=NULL;
    nFrameNum=0;

    while(pFrame = cvQueryFrame( pCapture ))
    {
        nFrameNum++;
        // First frame: allocate buffers and initialize the background model

        if(nFrameNum == 1)
        {
            pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),IPL_DEPTH_8U,1);
            pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
            pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

            // Convert to a single-channel (grayscale) image before processing
            cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            cvConvert(pFrImg, pFrMat);
            cvConvert(pFrImg, pBkMat);
        }
        else
        {
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            // Gaussian-blur first to smooth the image
            cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
            // Absolute difference between the current frame and the background
            cvAbsDiff(pFrameMat, pBkMat, pFrMat);
            // Morphological close (dilate then erode) on the raw difference
            // before thresholding into the binary foreground
            cvDilate(pFrMat,pFrMat);
            cvErode(pFrMat,pFrMat);

            cvThreshold(pFrMat, pFrImg, lowThreshold, 255.0, CV_THRESH_BINARY);
            // Update the background model (running average, UI-controlled alpha)
            cvRunningAvg(pFrameMat, pBkMat, alpha,0);
            // Convert the background matrix to image format for display
            cvConvert(pBkMat, pBkImg);
            // Flip origin to bottom-left for display
            pFrame->origin = IPL_ORIGIN_BL;
            pFrImg->origin = IPL_ORIGIN_BL;
            pBkImg->origin = IPL_ORIGIN_BL;

        }

        // ESC (27) exits; the 33 ms wait also paces capture/display
        if(27==cvWaitKey(33))
            break;

        MainWindow::Display(pFrame,pBkImg,pFrImg);
    }

}
// Feeds one eye image into the blink detector: measures the L2 distance of
// the current eye image from the running-average eye, updates the distance
// accumulator and the blink state machine, then folds the image into the
// average.
// NOTE(review): on the very first call the image is copied into _averageEye
// and execution CONTINUES, so `distance` is 0 and the state machine is fed
// 0 / accumulator value — confirm an early return is not intended here.
void BlinkDetector::update(const boost::scoped_ptr<IplImage> &eyeFloat) {
	if (!_initialized) {
		cvCopy(eyeFloat.get(), _averageEye.get());
		_initialized = true;
	}

	// L2 (Euclidean) distance between the current eye image and the average
	double distance = cvNorm(eyeFloat.get(), _averageEye.get(), CV_L2);
	_accumulator.update(distance);
	//cout << "update distance" << distance << " -> " << accumulator.getValue() << endl;
	// Normalized distance drives the blink state machine
	_states.updateState(distance / _accumulator.getValue());
	// Slowly adapt the average eye (weight 0.05) toward the current image
	cvRunningAvg(eyeFloat.get(), _averageEye.get(), 0.05);
}
// Exemple #4
// 0
int main() {
  IplImage *frame, *accumulator=NULL;

  cv_dc1394_init();

  while (cv_sdl_process_events()) {
    frame = cv_dc1394_capture_yuv(1)[0];

    if (!accumulator)
      accumulator = cvCreateImage(cvGetSize(frame), 32, 3);

    cvRunningAvg(frame, accumulator, 0.1);
    cvConvert(accumulator, frame);

    cv_sdl_show_yuv_image(frame);
  }
}
//由于高斯背景建模没返回背景,所以使用平均背景法自动生成背景
bool GetAvgBackgroudImg(const IplImage *pFrame, IplImage *bgImg,double alpha)
{  
	static bool isFirst=true;
	static IplImage *pAccImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_32F,3);
	if(isFirst){
		cvSetZero(pAccImg);
		isFirst=false;
	}
	if (!pFrame || !bgImg || !pAccImg ||
		pAccImg->width != pFrame->width || pAccImg->width != bgImg->width ||
		pAccImg->height != pFrame->height || pAccImg->height != bgImg->height|| 
		pFrame->nChannels!=3 || pFrame->nChannels!=3){
		return false;
	}
	cvRunningAvg(pFrame,pAccImg,alpha);
	cvConvertScale(pAccImg,bgImg,1.,0.);
	return true;
}
// Exemple #6
// 0
// Frame-difference background subtraction demo over "bike.avi".
// Maintains a float background model updated by running average and shows
// the source frame, the background estimate, and the binary foreground mask
// in three windows until a key is pressed or the video ends.
// Returns 0 on success, -2 if the video cannot be opened.
int diff2_main( int argc, char** argv )
{
	// Declare IplImage pointers
	IplImage* pFrame = NULL; 
	IplImage* pFrImg = NULL;
	IplImage* pBkImg = NULL;

	CvMat* pFrameMat = NULL;
	CvMat* pFrMat = NULL;
	CvMat* pBkMat = NULL;

	CvCapture* pCapture = NULL;

	int nFrmNum = 0;

	// Create display windows
	cvNamedWindow("video", 1);
	cvNamedWindow("background",1);
	cvNamedWindow("foreground",1);
	// Arrange the windows side by side
	cvMoveWindow("video", 30, 0);
	cvMoveWindow("background", 360, 0);
	cvMoveWindow("foreground", 690, 0);


	if( !(pCapture = cvCaptureFromAVI("bike.avi")))
	{
		//pCapture = cvCaptureFromCAM(-1))
		fprintf(stderr, "Can not open camera.\n");
		return -2;
	}


	// Read the video frame by frame
	while(pFrame = cvQueryFrame( pCapture ))
	{
		nFrmNum++;

		// First frame: allocate buffers and initialize the background model
		if(nFrmNum == 1)
		{
			pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
			pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);

			pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

			// Convert to a single-channel (grayscale) image before processing
			cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);

			cvConvert(pFrImg, pFrameMat);
			cvConvert(pFrImg, pFrMat);
			cvConvert(pFrImg, pBkMat);
		}
		else
		{
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			cvConvert(pFrImg, pFrameMat);
			// Optional Gaussian pre-smoothing (disabled)
			//cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);

			// Absolute difference between the current frame and the background
			cvAbsDiff(pFrameMat, pBkMat, pFrMat);

			// Binarize the foreground: difference > 60 becomes 255
			cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);

			// Morphological filtering to remove noise (disabled)
			//cvErode(pFrImg, pFrImg, 0, 1);
			//cvDilate(pFrImg, pFrImg, 0, 1);

			// Update the background model (running average, alpha = 0.003)
			cvRunningAvg(pFrameMat, pBkMat, 0.003, 0);
			// Convert the background matrix to image format for display
			cvConvert(pBkMat, pBkImg);

			// Show source, background and foreground
			cvShowImage("video", pFrame);
			cvShowImage("background", pBkImg);
			cvShowImage("foreground", pFrImg);

			// Break on any key press; the wait also gives cvShowImage time
			// to finish drawing (adjust the delay to CPU speed)
			if( cvWaitKey(20) >= 0 )
			{
				break;
			}
		}
	}
	cvWaitKey();

	// Destroy the windows
	cvDestroyWindow("video");
	cvDestroyWindow("background");
	cvDestroyWindow("foreground");

	// Release images and matrices
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pBkImg);

	cvReleaseMat(&pFrameMat);
	cvReleaseMat(&pFrMat);
	cvReleaseMat(&pBkMat);

	cvReleaseCapture(&pCapture);

	return 0;
}
// Exemple #7
// 0
// Background subtraction over "..\res\de.avi" (default) or a video file
// passed as argv[1].  Grayscales each frame, diffs it against a
// running-average background model, thresholds the difference into a binary
// foreground mask, and displays source/background/foreground windows.
// Returns 0 on success, -1 on bad usage, -2 if the input cannot be opened.
int main( int argc, char** argv )
{
  const char avi_name[]="..\\res\\de.avi";
  // Declare IplImage pointers
  IplImage* pFrame = NULL; 
  IplImage* pFrImg = NULL;
  IplImage* pBkImg = NULL;
 
  CvMat* pFrameMat = NULL;
  CvMat* pFrMat = NULL;
  CvMat* pBkMat = NULL;
 
  CvCapture* pCapture = NULL;
 
  int nFrmNum = 0;
 
  // Create display windows
  cvNamedWindow("video", 1);
  cvNamedWindow("background",1);
  cvNamedWindow("foreground",1);
  // Arrange the windows side by side
  cvMoveWindow("video", 30, 0);
  cvMoveWindow("background", 360, 0);
  cvMoveWindow("foreground", 690, 0);
 
 
 
  if( argc > 2 )
    {
      fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
      return -1;
    }
 
  // No argument: open the default AVI
  // NOTE(review): the error message says "camera" but this opens avi_name
  if (argc ==1)
    if( !(pCapture = cvCaptureFromAVI(avi_name)))
      {
	fprintf(stderr, "Can not open camera.\n");
	return -2;
      }
 
  // One argument: open the given video file
  if(argc == 2)
    if( !(pCapture = cvCaptureFromFile(argv[1])))
      {
	fprintf(stderr, "Can not open video file %s\n", argv[1]);
	return -2;
      }
 
  // Read the video frame by frame
  while(pFrame = cvQueryFrame( pCapture ))
    {
      nFrmNum++;
 
      // First frame: allocate buffers and initialize the background model
      if(nFrmNum == 1)
	{
	  pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
	  pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
 
	  pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
	  pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
	  pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
 
	  // Convert to a single-channel (grayscale) image before processing
	  cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
	  cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
 
	  cvConvert(pFrImg, pFrameMat);
	  cvConvert(pFrImg, pFrMat);
	  cvConvert(pFrImg, pBkMat);
	}
      else
	{
	  cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
	  cvConvert(pFrImg, pFrameMat);
	  // Optional Gaussian pre-smoothing (disabled)
	  //cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
 
	  // Absolute difference between the current frame and the background
	  cvAbsDiff(pFrameMat, pBkMat, pFrMat);
 
	  // Binarize the foreground: difference > 60 becomes 255
	  cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);
 
	  // Morphological filtering to remove noise (disabled)
	  //cvErode(pFrImg, pFrImg, 0, 1);
	  //cvDilate(pFrImg, pFrImg, 0, 1);
 
	  // Update the background model (running average, alpha = 0.003)
	  cvRunningAvg(pFrameMat, pBkMat, 0.003, 0);
	  // Convert the background matrix to image format for display
	  cvConvert(pBkMat, pBkImg);
 
	  // Show source, background and foreground
	  cvShowImage("video", pFrame);
	  cvShowImage("background", pBkImg);
	  cvShowImage("foreground", pFrImg);
 
	  // Break on any key press; the wait also gives cvShowImage time to
	  // finish drawing (adjust the delay to CPU speed)
	  if( cvWaitKey(2) >= 0 )
	    break;
 
 
	}
 
    }
 
 
 
 
  // Destroy the windows
  cvDestroyWindow("video");
  cvDestroyWindow("background");
  cvDestroyWindow("foreground");
 
  // Release images and matrices
  cvReleaseImage(&pFrImg);
  cvReleaseImage(&pBkImg);
 
  cvReleaseMat(&pFrameMat);
  cvReleaseMat(&pFrMat);
  cvReleaseMat(&pBkMat);
 
  cvReleaseCapture(&pCapture);
 
  return 0;
}
/**
 * main
 */
int main(int argc, const char **argv)
{
	// *** MODIFICATION: OpenCV modifications.
	// Load previous image.
	IplImage* prevImage = cvLoadImage("motion1.jpg", CV_LOAD_IMAGE_COLOR);
		
	// Create two arrays with the same number of channels than the original one.		
	avg1 = cvCreateMat(prevImage->height,prevImage->width,CV_32FC3);
	avg2 = cvCreateMat(prevImage->height,prevImage->width,CV_32FC3);
		
	// Create image of 32 bits.
	IplImage* image32 = cvCreateImage(cvSize(prevImage->width,prevImage->height), 32,3);
												
	// Convert image to 32 bits.
	cvConvertScale(prevImage,image32,1/255,0);
		
	// Set data from previous image into arrays.
	cvSetData(avg1,image32->imageData,image32->widthStep);
	cvSetData(avg2,image32->imageData,image32->widthStep);
	// *** MODIFICATION end
	
   // Our main data storage vessel..
   RASPISTILL_STATE state;

   MMAL_STATUS_T status = MMAL_SUCCESS;
   MMAL_PORT_T *camera_preview_port = NULL;
   MMAL_PORT_T *camera_video_port = NULL;
   MMAL_PORT_T *camera_still_port = NULL;
   MMAL_PORT_T *preview_input_port = NULL;
   MMAL_PORT_T *encoder_input_port = NULL;
   MMAL_PORT_T *encoder_output_port = NULL;

   bcm_host_init();

   // Register our application with the logging system
   vcos_log_register("fast", VCOS_LOG_CATEGORY);

   signal(SIGINT, signal_handler);

   default_status(&state);     
   
   if (state.verbose)
   {
      fprintf(stderr, "\n%s Camera App %s\n\n", basename(argv[0]), VERSION_STRING);      
   }

   // OK, we have a nice set of parameters. Now set up our components
   // We have three components. Camera, Preview and encoder.
   // Camera and encoder are different in stills/video, but preview
   // is the same so handed off to a separate module

   if ((status = create_camera_component(&state)) != MMAL_SUCCESS)
   {
      vcos_log_error("%s: Failed to create camera component", __func__);
   }
   else if ((status = raspipreview_create(&state.preview_parameters)) != MMAL_SUCCESS)
   {
      vcos_log_error("%s: Failed to create preview component", __func__);
      destroy_camera_component(&state);
   }
   else if ((status = create_encoder_component(&state)) != MMAL_SUCCESS)
   {
      vcos_log_error("%s: Failed to create encode component", __func__);
      raspipreview_destroy(&state.preview_parameters);
      destroy_camera_component(&state);
   }
   else
   {
      PORT_USERDATA callback_data;

      if (state.verbose)
         fprintf(stderr, "Starting component connection stage\n");
         
      camera_preview_port = state.camera_component->output[MMAL_CAMERA_PREVIEW_PORT];
      camera_video_port   = state.camera_component->output[MMAL_CAMERA_VIDEO_PORT];
      camera_still_port   = state.camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
      preview_input_port  = state.preview_parameters.preview_component->input[0];
      encoder_input_port  = state.encoder_component->input[0];
      encoder_output_port = state.encoder_component->output[0];

      if (state.preview_parameters.wantPreview )
      {
         if (state.verbose)
         {
            fprintf(stderr, "Connecting camera preview port to preview input port\n");
            fprintf(stderr, "Starting video preview\n");
         }

         // *** USER: remove preview
         // Connect camera to preview
         //status = connect_ports(camera_preview_port, preview_input_port, &state.preview_connection);

      }
      else
      {
         status = MMAL_SUCCESS;
      }

      if (status == MMAL_SUCCESS)
      {
         VCOS_STATUS_T vcos_status;

         if (state.verbose)
            fprintf(stderr, "Connecting camera stills port to encoder input port\n");

         // Now connect the camera to the encoder
         status = connect_ports(camera_still_port, encoder_input_port, &state.encoder_connection);
         

         if (status != MMAL_SUCCESS)
         {
            vcos_log_error("%s: Failed to connect camera video port to encoder input", __func__);
            goto error;
         }

         // Set up our userdata - this is passed though to the callback where we need the information.
         // Null until we open our filename
         callback_data.file_handle = NULL;
         callback_data.pstate = &state;
         vcos_status = vcos_semaphore_create(&callback_data.complete_semaphore, "RaspiStill-sem", 0);

         vcos_assert(vcos_status == VCOS_SUCCESS);

         if (status != MMAL_SUCCESS)
         {
            vcos_log_error("Failed to setup encoder output");
            goto error;
         }
         
         FILE *output_file = NULL;
         
         int frame = 1;
         
         // Enable the encoder output port
         encoder_output_port->userdata = (struct MMAL_PORT_USERDATA_T *)&callback_data;
         
         if (state.verbose)
			fprintf(stderr, "Enabling encoder output port\n");
			
		// Enable the encoder output port and tell it its callback function
		status = mmal_port_enable(encoder_output_port, encoder_buffer_callback);
		
		// Create an empty matrix with the size of the buffer.
		CvMat* buf = cvCreateMat(1,60000,CV_8UC1);
		
		// Keep buffer that gets frames from queue.
		MMAL_BUFFER_HEADER_T *buffer;
		
		// Image to be displayed.
		IplImage* image;
		
		// Keep number of buffers and index for the loop.
		int num, q; 
		
		while(1) 
		{
			// Send all the buffers to the encoder output port
			num = mmal_queue_length(state.encoder_pool->queue);
			
			for (q=0;q<num;q++)
			{
				buffer = mmal_queue_get(state.encoder_pool->queue);
				
				if (!buffer)
					vcos_log_error("Unable to get a required buffer %d from pool queue", q);
					
				if (mmal_port_send_buffer(encoder_output_port, buffer)!= MMAL_SUCCESS)
					vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
			} // for
			
			if (mmal_port_parameter_set_boolean(camera_still_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS)
				vcos_log_error("%s: Failed to start capture", __func__);
			
			else
			{
				// Wait for capture to complete
				// For some reason using vcos_semaphore_wait_timeout sometimes returns immediately with bad parameter error
				// even though it appears to be all correct, so reverting to untimed one until figure out why its erratic
				vcos_semaphore_wait(&callback_data.complete_semaphore);
				if (state.verbose)
					fprintf(stderr, "Finished capture %d\n", frame);
			} // else
			
			// Copy buffer from camera to matrix.
			buf->data.ptr = buffer->data;
			
			// This workaround is needed for the code to work
			// *** TODO: investigate why.
			printf("Until here works\n");
			
			// Decode the image and display it.
			image = cvDecodeImage(buf, CV_LOAD_IMAGE_COLOR);
		
			// Destinations
			CvMat* res1 = cvCreateMat(image->height,image->width,CV_8UC3);
			CvMat* res2 = cvCreateMat(image->height,image->width,CV_8UC3);
		
			// Update running averages and then scale, calculate absolute values
			// and convert the result 8-bit.
			// *** USER:change the value of the weight.
			cvRunningAvg(image,avg2,0.0001, NULL);		
			cvConvertScaleAbs(avg2, res2, 1,0);
		
			cvRunningAvg(image,avg1,0.1, NULL);
			cvConvertScaleAbs(avg1, res1, 1,0);
				
			// Show images
			cvShowImage("img",image);
			cvShowImage("avg1",res1);
			cvShowImage("avg2",res2);
			cvWaitKey(20);
		
			// Update previous image.
			cvSaveImage("motion1.jpg", image, 0);
		} // end while 
		
		vcos_semaphore_delete(&callback_data.complete_semaphore);
         
      }
      else
      {
         mmal_status_to_int(status);
         vcos_log_error("%s: Failed to connect camera to preview", __func__);
      }

error:

      mmal_status_to_int(status);

      if (state.verbose)
         fprintf(stderr, "Closing down\n");

      // Disable all our ports that are not handled by connections
      check_disable_port(camera_video_port);
      check_disable_port(encoder_output_port);

      if (state.preview_parameters.wantPreview )
         mmal_connection_destroy(state.preview_connection);

      mmal_connection_destroy(state.encoder_connection);

      /* Disable components */
      if (state.encoder_component)
         mmal_component_disable(state.encoder_component);

      if (state.preview_parameters.preview_component)
         mmal_component_disable(state.preview_parameters.preview_component);

      if (state.camera_component)
         mmal_component_disable(state.camera_component);

      destroy_encoder_component(&state);
      raspipreview_destroy(&state.preview_parameters);
      destroy_camera_component(&state);

      if (state.verbose)
         fprintf(stderr, "Close down completed, all components disconnected, disabled and destroyed\n\n");
   }

   if (status != MMAL_SUCCESS)
      raspicamcontrol_check_configuration(128);
      
   return 0;
}
// Scans `videoname`, writes only the frames that contain motion to
// "ccfgvideo.avi", and prints the number of motion frames kept.
//
// Algorithm: grayscale each frame, diff it against a running-average
// background, threshold at T, clean the mask with erode/dilate, and count
// foreground pixels (Mt).  Frames with Mt == 0 are skipped and not counted;
// the background adaptation rate is raised to 0.8 when motion is large
// (Mt >= 100) so the model recovers quickly after big scene changes.
//
// Fixes vs. previous revision: the key-press abort path duplicated the
// entire release block (and printed the frame-count message twice); it now
// just breaks and lets the shared cleanup below run once.
detectMotion::detectMotion(char *_videoname, int Threshold)
{
	this->videoname= _videoname;
	T = Threshold;

	//IplImage pointers
	IplImage* pFrame = NULL;
	IplImage* pFrImg = NULL;
	IplImage* pBkImg = NULL;
	//CvMat pointers
	CvMat* pFrameMat = NULL;
	CvMat* pFrMat = NULL;
	CvMat* pBkMat = NULL;
	//CvVideoWriter pointers
	CvVideoWriter* vWriter = NULL;
	CvCapture* pCapture = NULL;

	int nFrmNum = 0;
	int Mt = 0; //Motion: foreground pixel count of the current frame

	if( !(pCapture = cvCaptureFromFile(videoname)))
	{
		fprintf(stderr, "Can not open video file %s\n", videoname);
		return ;
	}

	//For each frame
	while(pFrame = cvQueryFrame( pCapture ))
	{
		nFrmNum++;

		//memory alloc and variable initialization for the first frame
		if(nFrmNum == 1)
		{
			//8 bit unsigned int arrays of size width*height
			pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
			pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),  IPL_DEPTH_8U,1);
			//32-bit float matrices for the background model and differences
			pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

			//save to gray image
			cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			//seed the background model with the first gray frame
			cvConvert(pFrImg, pFrameMat);
			cvConvert(pFrImg, pFrMat);
			cvConvert(pFrImg, pBkMat);

			//init the writer with the source's codec and frame rate
			//(explicit cast: cvGetCaptureProperty returns double, fourcc is int)
			vWriter = cvCreateVideoWriter ("ccfgvideo.avi", (int)cvGetCaptureProperty(pCapture, CV_CAP_PROP_FOURCC), cvGetCaptureProperty(pCapture, CV_CAP_PROP_FPS ),cvSize (pFrame->width,pFrame->height),1);
			cvWriteFrame (vWriter, pFrame);
		}
		else
		{
			//convert to gray image
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			cvConvert(pFrImg, pFrameMat);
			//optional gaussian filter (disabled)
			//cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);

			//difference against the background model
			cvAbsDiff(pFrameMat, pBkMat, pFrMat);

			//binarize at T (e.g. 60)
			cvThreshold(pFrMat, pFrImg, T, 255.0, CV_THRESH_BINARY);

			//erode and dilate to reduce holes in objects
			cvErode(pFrImg, pFrImg, 0, 1);
			cvDilate(pFrImg, pFrImg, 0, 1);

			//Mt = number of foreground pixels (mask values are 0 or 255)
			Mt = (cvSum(pFrImg).val[0]/255);
			if(Mt>0)
			{
				cvWriteFrame (vWriter, pFrame); //keep this frame
				if (Mt<100)
				{
					//small motion: adapt the background slowly
					cvRunningAvg(pFrameMat, pBkMat, 0.05, 0);
				}
				else
				{
					//large motion: adapt quickly so the model recovers
					cvRunningAvg(pFrameMat, pBkMat, 0.8, 0);
				}
			}
			else
			{
				//no motion: drop the frame and do not count it
				cvRunningAvg(pFrameMat, pBkMat, 0.05, 0);
				nFrmNum-=1;
			}

			//renew the background display image
			cvConvert(pBkMat, pBkImg);

			//key press aborts the scan; the shared cleanup below runs once
			if( cvWaitKey(2) >= 0 )
			{
				break;
			}
		}
	}

	//Release pointers (cvRelease* functions tolerate NULL)
	cvReleaseVideoWriter(&vWriter);
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pBkImg);
	cvReleaseMat(&pFrameMat);
	cvReleaseMat(&pFrMat);
	cvReleaseMat(&pBkMat);
	cvReleaseCapture(&pCapture);
	std::cout<<"Frame number after deleting zero motion frames" <<nFrmNum<<std::endl;
	return;
}
//--------------------------------------------------------------
// Grabs a camera frame, runs the processing pipeline
// (warp -> background subtraction -> threshold -> invert -> erode/dilate)
// into binaryImage, and updates the running-average background model.
// Returns true when a new frame was processed this call.
bool CameraInput::update(){

	vidGrabber.update();
	
	bool newFrame = vidGrabber.isFrameNew();
	if (newFrame){
		
        calculateCaptureFramerate();
		camImage->setFromPixels( vidGrabber.getPixels(), camWidth, camHeight );

		// NOTE(review): in the RGB case the warp flag only picks which gray
		// buffer receives the frame and no warp is performed — and warp==true
		// fills grayImage while everything below reads grayWarpImage.  This
		// looks inverted; confirm against the GREYSCALE branch below.
		if (camFormat == VID_FORMAT_RGB){
			if (warp)
				grayImage = rgbImage;
			else
				grayWarpImage = rgbImage;
		}
		
		if (camFormat == VID_FORMAT_GREYSCALE){
			if (warp)
				grayWarpImage.warpIntoMe( grayImage, warper.dstPoints , warper.srcPoints);
			else
				grayWarpImage = grayImage;
		}
		
		//now grayWarpImage has the image
		
		if (bgSubtraction){			
			//absDiffWithCutoffInverted( &bgImage, &grayWarpImage, &diffImage, bgSubtractionCutOff );
			// |background - frame| into diffImage, which seeds binaryImage
			cvAbsDiff(bgImage.getCvImage(), grayWarpImage.getCvImage(), diffImage.getCvImage());
			diffImage.flagImageChanged();
			binaryImage = diffImage;
		}else {
			binaryImage = grayWarpImage;
		}

		if (threshold)
			binaryImage.threshold(thresholdVal, false);

		if (invert)
			binaryImage.invert();
		
		// Morphological clean-up of the binary mask
		if ( numErosions > 0 ){				
			for (int i=0; i<numErosions; i++)
				binaryImage.erode();
		}		

		if ( numDilations > 0 ){				
			for (int i=0; i<numDilations; i++)
				binaryImage.dilate();
		}
			
		// Fold the (pre-threshold) warped gray frame into the background
		// accumulator at rate bgAccumRate, then refresh bgImage from it
		if (bgSubtraction){
			cvRunningAvg(grayWarpImage.getCvImage(), accImage.getCvImage(), bgAccumRate);
			cvConvert( accImage.getCvImage(), bgImage.getCvImage() );
			bgImage.flagImageChanged();
		}
	}
	
	// Keep whichever settings panel is active in sync
	if (uiState == SET_PROCESSING_SETTINGS){
		processingPanel.update();
	}
	
	if (uiState == SET_IMAGE_WARP){
		warpingPanel.update();
	}
	
	return newFrame;

}
// Captures camera frames and drives the sine generator `s` with either raw
// brightness (useMotion == false) or frame-difference motion energy.
// Runs until 'q'/'Q' is pressed.
//
// Fix vs. previous revision: the camera capture, the working images and the
// (otherwise unused) CvMemStorage were all leaked on exit; they are now
// released before returning.  Frames returned by cvQueryFrame are owned by
// the capture and are deliberately NOT released.
FrameAcquirer::FrameAcquirer(SinGen *s) 
{
	this->sg = s;
    
    // create all necessary instances
    cvNamedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE);
    CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
    CvMemStorage* storage = cvCreateMemStorage(0);
    assert (storage);

    // you do own an iSight, don't you ?!?
    if (! camera)
        abort ();


    if(useMotion == false){
      // get an initial frame and duplicate it for later work
      IplImage *  current_frame = cvQueryFrame (camera);
      IplImage *  gray_image    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  gray_image2   = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      assert (current_frame && gray_image);

      // as long as there are images ...
      while (current_frame = cvQueryFrame (camera))
      {
          // convert to gray
          cvCvtColor (current_frame, gray_image, CV_BGR2GRAY);
          cvConvert( gray_image, gray_image2);

          // map brightness to oscillator amplitudes
          float vals[NUM_WAVES];
          pixelate(gray_image,vals);
          this->sg->setAmplitudes(vals);

          // blend the two gray buffers and show the result
          cvAddWeighted( gray_image2, 0.5, gray_image, 0.5, 0.5, gray_image);
          cvShowImage (WINDOW_NAME, gray_image);

          // wait for keypress and window drawing
          int key = cvWaitKey (30);
          if (key == 'q' || key == 'Q')
              break;
      }

      cvReleaseImage(&gray_image);
      cvReleaseImage(&gray_image2);

    }else{

      IplImage *  current_frame = cvQueryFrame (camera);
      IplImage *  gray_image    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  avg_img = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_32F, 1);
      IplImage *  gavg_img = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  diff_img = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  diff_img2 = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      IplImage *  diff_img3 = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 1);
      
      // as long as there are images ...
      while (current_frame = cvQueryFrame (camera))
      {
          // convert to gray and smooth
          cvCvtColor (current_frame, gray_image, CV_BGR2GRAY);
  
          cvSmooth( gray_image, gray_image);
          
          // running average of the gray stream -> background estimate
          cvRunningAvg( gray_image, avg_img, .250, NULL);
  
          cvConvert( avg_img, gavg_img );
  
          // motion energy: |frame - background|
          cvAbsDiff( gray_image, gavg_img, diff_img );    
       
          cvConvert( diff_img, diff_img2 );

          // map motion energy to oscillator amplitudes
          float vals[NUM_WAVES];
          pixelate(diff_img,vals);
          this->sg->setAmplitudes(vals);

          if(useMotionAndLight){
            // additionally map brightness (scaled by C8) to frequencies
            pixelate(gray_image,vals);
            for(int i = 0; i < NUM_WAVES; i++){
              vals[i] *= C8;
            }
            this->sg->setFrequencies(vals);
          
            cvAddWeighted( diff_img2, 0.5, gray_image, 0.5, 0.5, diff_img);
            cvShowImage ( WINDOW_NAME, diff_img);
          }else{
            cvAddWeighted( diff_img, 0.5, diff_img2, 0.5, 0.5, diff_img);
            cvShowImage ( WINDOW_NAME, diff_img);

          }
          

          int key = cvWaitKey (30);
          if (key == 'q' || key == 'Q')
              break;
      }

      cvReleaseImage(&gray_image);
      cvReleaseImage(&avg_img);
      cvReleaseImage(&gavg_img);
      cvReleaseImage(&diff_img);
      cvReleaseImage(&diff_img2);
      cvReleaseImage(&diff_img3);
  
    }
    
    // previously leaked: release the capture and the unused storage
    cvReleaseMemStorage(&storage);
    cvReleaseCapture(&camera);
}
// Interactive per-frame image-processing pipeline driven by keyboard toggles
// stored in `userdata->key`:
//   g: cycle color mode (gray/hue/saturation/brightness/color)
//   s: Gaussian smoothing   o: Otsu threshold   e: morphological close
//   c: Canny edges          b: draw contours    h: HSV filter toggle
//   H: histogram overlay    S: save output image
// Reads input/output images and tuning values from `userdata`, writes the
// processed frame to userdata->output[0] (previous results shifted to [1]).
// Always returns 0.
//
// Fix vs. previous revision: the CvMemStorage allocated for cvFindContours
// was never released, leaking once per call in contour mode.
char operateImage(Userdata *userdata) {
	if (!userdata) {
		return 0;
	}

	IplImage *image1 = userdata->input[0];
	IplImage *image2 = userdata->input[1];
	IplImage *imageOut = userdata->output[0];
	IplImage *imageOut2 = userdata->output[1];

	// Sticky UI toggles, flipped by the key handled below
	static int color_mode = 4;
	static int smooth_mode = 0;
	static int otsu_mode = 0;
	static int close_mode = 0;
	static int canny_mode = 0;
	static int contour_mode = 0;
	static int hsv_mode = 0;
	static int save_img = 0;
	static int history_mode = 0;

	int key = userdata->key;
	switch (key) {
	case 'g':
		color_mode++;
		color_mode %= 5;
		break;
	case 's':
		smooth_mode = !smooth_mode;
		break;
	case 'o':
		otsu_mode = !otsu_mode;
		break;
	case 'e':
		close_mode = !close_mode;
		break;
	case 'c':
		canny_mode = !canny_mode;
		break;
	case 'b':
		contour_mode = !contour_mode;
		break;
	case 'h':
		hsv_mode = !hsv_mode;
		break;
	case 'H':
		history_mode = !history_mode;
		break;
	case 'S':
		save_img = 1;
		break;
	default:
		//cout << key << "\n";
		break;
	}

	int value = userdata->value;
	int kernelSize = userdata->kernelSize;
	kernelSize += 1 - (kernelSize % 2); // force an odd kernel size
	int lowThreshold = userdata->lowThreshold;
	int highThreshold = userdata->highThreshold;
	CvScalar minScalar = cvScalar(userdata->minScalar0, userdata->minScalar1, userdata->minScalar2);
	CvScalar maxScalar = cvScalar(userdata->maxScalar0, userdata->maxScalar1, userdata->maxScalar2);

	// Scratch buffers, allocated once and reused across calls
	static IplImage *tmp1d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 1);
	static IplImage *tmp3d = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);
	static IplImage *tmp3d2 = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);

	static IplImage *backgroundAcc = cvCreateImage(cvGetSize(image1), IPL_DEPTH_32F, 3);
	static IplImage *background = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 3);

	COND_PRINTF("                                                                                                 \r");

	// Pick the working channel: modes 0-3 extract a single channel into
	// tmp1d; mode 4 keeps the full-color image.
	char img_full_channel = 0;
	switch (color_mode) {
	case 0:
		COND_PRINTF("Gray");
		cvCvtColor(image1, tmp1d, CV_BGR2GRAY);
		break;
	case 1: // Hue mode
		COND_PRINTF("Hue");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, tmp1d, NULL, NULL, NULL);
		break;
	case 2: // Saturation mode
		COND_PRINTF("Saturation");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, tmp1d, NULL, NULL);
		break;
	case 3: // Brightness mode
		COND_PRINTF("Brightness");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, NULL, tmp1d, NULL);
		break;
	case 4: // Full color
		COND_PRINTF("Color");
		img_full_channel = 1;
		break;
	}

	//filterByVolume(tmp1d, tmp1d, value);
	if (img_full_channel) { // Image has 3 channels
#if 0
		cvRunningAvg(image1, backgroundAcc, (double)userdata->accValue / 1024, NULL);
		cvConvertScale(backgroundAcc, background, 1, 0);
		cvNamedWindow(CONTROL_WINDOW "41", 0);
		cvResizeWindow(CONTROL_WINDOW "41", 640 / 2, 480 / 2);
		cvShowImage(CONTROL_WINDOW "41", background);
		cvCreateTrackbar("accValue", CONTROL_WINDOW "41", &(userdata->accValue), 1024, trackbarCallback);

#endif
		filterByHSV(image1, minScalar, maxScalar, tmp3d);
		if (history_mode) {
			cvCopy(image1, tmp3d, NULL);
			cvCopy(image1, tmp3d2, NULL);
			//cvCvtColor(image1, tmp3d, CV_BGR2HSV);

			// Histogram is computed over the user-selected square region only
			//CvRect rect = cvRect(userdata->size.width * 3 / 4 - 40, userdata->size.height / 2 - 40, 80, 80);
			//CvRect rect = cvRect(userdata->size.width * 1 / 4 - 40, userdata->size.height / 2 - 40, userdata->size.width * 3 / 4, 80);
			CvRect rect = cvRect(userdata->square.origin.x, userdata->square.origin.y, userdata->square.size.width, userdata->square.size.height);
			cvSetImageROI(tmp3d, rect);
			GraficarHistograma(tmp3d, tmp3d2);
			cvResetImageROI(tmp3d);

			cvCopy(tmp3d2, tmp3d, NULL);
		}
		else {
			cvCopy(image1, tmp3d, NULL);
		}
	}
	else { // Image has 1 channel

		cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);

		if (otsu_mode) { // Apply Otsu's method
			COND_PRINTF(", Otsu");
			cvThreshold(tmp1d, tmp1d, 0, 255, CV_THRESH_OTSU);
		}

		if (smooth_mode) { // Apply Gaussian smoothing
			COND_PRINTF(", Gauss");
			cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);
		}

		if (close_mode) { // Morphological open-like clean-up (erode then dilate)
			COND_PRINTF(", closE");
			int n = kernelSize;
			cvErode(tmp1d, tmp1d, NULL, n);
			cvDilate(tmp1d, tmp1d, NULL, n);
		}

		if (canny_mode) { // Apply Canny's method
			COND_PRINTF(", Canny");
			cvCanny(tmp1d, tmp1d, lowThreshold, highThreshold, 3);
			cvDilate(tmp1d, tmp1d, NULL, 1);
			cvErode(tmp1d, tmp1d, NULL, 1);
		}

		// Replicate the single channel into a displayable BGR image
		cvMerge(tmp1d, tmp1d, tmp1d, NULL, tmp3d);

		if (contour_mode) {
			COND_PRINTF(", contours(b)");
			CvMemStorage *storage = cvCreateMemStorage(0);
			CvSeq *contours = NULL;
			int n = cvFindContours(tmp1d, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));
			//COND_PRINTF(", (" << n <<","<< contours->total <<")contours");
			for (int i = 0; contours != NULL; contours = contours->h_next, i++) {
				int cc = (int)((float)(255 * i) / contours->total);
				CvScalar colorpx = CV_RGB((cc) % 256, (cc + 256 / 3) % 256, (cc + 256 * 2 / 3) % 256);
				cvDrawContours(tmp3d, contours, colorpx, CV_RGB(0, 0, 0), -1, CV_FILLED, 8, cvPoint(0, 0));
			}
			// Free the contour storage (it previously leaked once per call)
			cvReleaseMemStorage(&storage);
		}

	}

	COND_PRINTF("\r");

	// Shift history: previous input/output slide into slot [1]
	cvCopy(image1, image2, NULL);
	cvCopy(imageOut, imageOut2, NULL);
	cvCopy(tmp3d, imageOut, NULL);

	//cvReleaseImage(&tmp1d);
	//cvReleaseImage(&tmp3d);
	//cvReleaseImage(&tmp3d2);

	afterProcess(userdata);

	if (save_img) {
		save_img = 0;
		cvSaveImage(RESOURCES "output.png", imageOut);
	}

	return 0;
}
// Exemple #13
// 0
int main(int argc, char **argv)
{
  bool isStop = false;
  const int INIT_TIME = 50;
  const double BG_RATIO = 0.02; // 背景領域更新レート
  const double OBJ_RATIO = 0.005; // 物体領域更新レート
  const double Zeta = 10.0;
  IplImage *img = NULL;

  CvCapture *capture = NULL;
  capture = cvCreateCameraCapture(0);
  //capture = cvCaptureFromAVI("test.avi");
  if(capture == NULL){
    printf("capture device not found!!");
    return -1;
  }

  img = cvQueryFrame(capture);
  int w = img->width;
  int h = img->height;

  IplImage *imgAverage = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSgm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgTmp = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_lower = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_upper = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSilhouette = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgSilhouetteInv = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgResult = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);

  printf("背景初期化中...\n");
  cvSetZero(imgAverage);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    cvAcc(img, imgAverage);
    printf("輝度平均 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgAverage, imgAverage, 1.0 / INIT_TIME);
  cvSetZero(imgSgm);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    cvConvert(img, imgTmp);
    cvSub(imgTmp, imgAverage, imgTmp);
    cvPow(imgTmp, imgTmp, 2.0);
    cvConvertScale(imgTmp, imgTmp, 2.0);
    cvPow(imgTmp, imgTmp, 0.5);
    cvAcc(imgTmp, imgSgm);
    printf("輝度振幅 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgSgm, imgSgm, 1.0 / INIT_TIME);
  printf("背景初期化完了\n");

  char winNameCapture[] = "Capture";
  char winNameSilhouette[] = "Silhouette";
  cvNamedWindow(winNameCapture, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameSilhouette, CV_WINDOW_AUTOSIZE);

  while(1){
    if(!isStop){
      img = cvQueryFrame(capture);
      if(img == NULL) break;
      cvConvert(img, imgTmp);

      // 輝度範囲
      cvSub(imgAverage, imgSgm, img_lower);
      cvSubS(img_lower, cvScalarAll(Zeta), img_lower);
      cvAdd(imgAverage, imgSgm, img_upper);
      cvAddS(img_upper, cvScalarAll(Zeta), img_upper);
      cvInRange(imgTmp, img_lower, img_upper, imgSilhouette);

      // 輝度振幅
      cvSub(imgTmp, imgAverage, imgTmp);
      cvPow(imgTmp, imgTmp, 2.0);
      cvConvertScale(imgTmp, imgTmp, 2.0);
      cvPow(imgTmp, imgTmp, 0.5);

      // 背景領域を更新
      cvRunningAvg(img, imgAverage, BG_RATIO, imgSilhouette);
      cvRunningAvg(imgTmp, imgSgm, BG_RATIO, imgSilhouette);

      // 物体領域を更新
      cvNot(imgSilhouette, imgSilhouetteInv);
      cvRunningAvg(imgTmp, imgSgm, OBJ_RATIO, imgSilhouetteInv);

      cvErode(imgSilhouette, imgSilhouette, NULL, 1); // 収縮
      cvDilate(imgSilhouette, imgSilhouette, NULL, 2); // 膨張
      cvErode(imgSilhouette, imgSilhouette, NULL, 1); // 収縮

      cvMerge(imgSilhouette, imgSilhouette, imgSilhouette, NULL, imgResult);
      cvShowImage(winNameCapture, img);
      cvShowImage(winNameSilhouette, imgResult);
    }
    int waitKey = cvWaitKey(33);
    if(waitKey == 'q') break;
    if(waitKey == ' '){
      isStop = !isStop;
      if(isStop) printf("stop\n");
      else printf("start\n");
    }
  }

  cvReleaseCapture(&capture);
  cvDestroyWindow(winNameCapture);
  cvDestroyWindow(winNameSilhouette);

  return 0;
}
int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	IplImage *running_average_background=NULL;

	IplImage *static_background_image=NULL;
	IplImage *static_moving_mask_image=NULL;
	IplImage *running_average_background_image=NULL;
	IplImage *running_average_moving_mask_image=NULL;
	IplImage *running_gaussian_average_background_average=NULL;
	IplImage *running_gaussian_average_background_sd=NULL;
	IplImage *running_gaussian_average_sd_image=NULL;
	IplImage *running_gaussian_average_background_image=NULL;
	IplImage *running_gaussian_average_moving_mask_image=NULL;

	IplImage *change_and_remain_changed_background_image=NULL;
	IplImage *subtracted_image=NULL;
	IplImage *moving_mask_image=NULL;

    int user_clicked_key=0;
	int show_ch = 'm';
	bool paused = false;
    
    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( " " );   //Add here the inputh video path
    // Ensure AVI opened properly
    if( !capture )
		return 1;    
    
    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    
	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
    cvNamedWindow( "Static Background", 0 );
    cvNamedWindow( "Running Average Background", 0 );
    cvNamedWindow( "Running Gaussian Average Background", 0 );
    cvNamedWindow( "Running Gaussian Average Stan. Dev.", 0 );
    cvNamedWindow( "Moving Points - Static", 0 );
    cvNamedWindow( "Moving Points - Running Average", 0 );
    cvNamedWindow( "Moving Points - Running Gaussian Average", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
        if( !current_frame ) // No new frame available
			break;
		image_for_on_mouse_show_values = current_frame; // Assign image for mouse callback
		cvShowImage( "Input video", current_frame );

		if (static_background_image == NULL)
		{	// The first time around the loop create the images for processing
			// General purpose images
			subtracted_image = cvCloneImage( current_frame );
			// Static backgound images
			static_background_image = cvCloneImage( current_frame );
			static_moving_mask_image = cvCreateImage( cvGetSize(current_frame), 8, 3 );
			cvShowImage( "Static Background", static_background_image );
			// Running average images
			running_average_background = cvCreateImage( cvGetSize(current_frame), IPL_DEPTH_32F, 3 );
			//cvZero(running_average_background);
			cvConvert(current_frame, running_average_background);
			running_average_background_image = cvCloneImage( current_frame );
			running_average_moving_mask_image = cvCreateImage( cvGetSize(current_frame), 8, 3 );
			// Running Gaussian average images
			running_gaussian_average_background_image = cvCloneImage( current_frame );
			running_gaussian_average_sd_image = cvCloneImage( current_frame );
			running_gaussian_average_moving_mask_image = cvCreateImage( cvGetSize(current_frame), 8, 3 );
			running_gaussian_average_background_average = cvCreateImage( cvGetSize(current_frame), IPL_DEPTH_32F, 3 );
			cvConvert(current_frame, running_gaussian_average_background_average);
			running_gaussian_average_background_sd = cvCreateImage( cvGetSize(current_frame), IPL_DEPTH_32F, 3 );
			cvZero(running_gaussian_average_background_sd);
		}
		// Static Background Processing
		cvAbsDiff( current_frame, static_background_image, subtracted_image );
		cvThreshold( subtracted_image, static_moving_mask_image, 30, 255, CV_THRESH_BINARY );
        cvShowImage( "Moving Points - Static", static_moving_mask_image );

		// Running Average Background Processing
		cvRunningAvg( current_frame, running_average_background, 0.01 /*, moving_mask_image*/ );
		cvConvert( running_average_background, running_average_background_image );
		cvAbsDiff( current_frame, running_average_background_image, subtracted_image );
		cvThreshold( subtracted_image, running_average_moving_mask_image, 30, 255, CV_THRESH_BINARY );
		cvShowImage( "Running Average Background", running_average_background_image );
        cvShowImage( "Moving Points - Running Average", running_average_moving_mask_image );
		
		
		// Running Gaussian Average Background Processing
		
		update_running_gaussian_averages( current_frame, running_gaussian_average_background_average, running_gaussian_average_background_sd );
		cvConvertScaleAbs( running_gaussian_average_background_average, running_gaussian_average_background_image, 1.0, 0 );
		cvShowImage( "Running Gaussian Average Background", running_gaussian_average_background_image );
		cvConvertScaleAbs( running_gaussian_average_background_sd, running_gaussian_average_sd_image, 10.0, 0 );
		cvShowImage( "Running Gaussian Average Stan. Dev.", running_gaussian_average_sd_image );
		determine_moving_points_using_running_gaussian_averages( current_frame, running_gaussian_average_background_average, running_gaussian_average_background_sd, running_gaussian_average_moving_mask_image );
        cvShowImage( "Moving Points - Running Gaussian Average", running_gaussian_average_moving_mask_image );

        // Deal with user input, and wait for the delay between frames
		do {
			if( user_clicked_key == ' ' )
			{
				paused = !paused;
			}
			if (paused)
				user_clicked_key = cvWaitKey(0);
			else user_clicked_key = cvWaitKey( 1000 / fps );
		} while (( user_clicked_key != ESC ) && ( user_clicked_key != -1 ));
	}
    
    /* free memory */
    cvReleaseCapture( &capture );
 	cvDestroyWindow( "Input video" );
    cvDestroyWindow( "Static Background" );
    cvDestroyWindow( "Running Average Background" );
    cvDestroyWindow( "Running Gaussian Average Background" );
    cvDestroyWindow( "Running Gaussian Average Stan. Dev." );
    cvDestroyWindow( "Moving Points - Static" );
    cvDestroyWindow( "Moving Points - Running Average" );
    cvDestroyWindow( "Moving Points - Running Gaussian Average" );

    return 0;
}