Example #1
static void
median(GwyContainer *data, GwyRunType run)
{
    GwyDataField *dfield, *background = NULL;
    MedianBgArgs args;
    gint oldid, newid;
    GQuark dquark;
    gdouble xr, yr;
    gboolean ok = TRUE;

    g_return_if_fail(run & MEDIANBG_RUN_MODES);
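    /* Get the data field currently selected in the data browser, together
     * with its container key and channel id. */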
    gwy_app_data_browser_get_current(GWY_APP_DATA_FIELD, &dfield,
                                     GWY_APP_DATA_FIELD_KEY, &dquark,
                                     GWY_APP_DATA_FIELD_ID, &oldid,
                                     0);
    g_return_if_fail(dfield && dquark);

    median_load_args(gwy_app_settings_get(), &args);

    /* FIXME: this is bogus for non-square pixels anyway */
    xr = gwy_data_field_get_xreal(dfield)/gwy_data_field_get_xres(dfield);
    yr = gwy_data_field_get_yreal(dfield)/gwy_data_field_get_yres(dfield);
    args.pixelsize = hypot(xr, yr);
    args.valform
        = gwy_data_field_get_value_format_xy(dfield,
                                             GWY_SI_UNIT_FORMAT_VFMARKUP, NULL);
    gwy_debug("pixelsize = %g, vf = (%g, %d, %s)",
              args.pixelsize, args.valform->magnitude, args.valform->precision,
              args.valform->units);

    if (run == GWY_RUN_INTERACTIVE) {
        ok = median_dialog(&args);
        median_save_args(gwy_app_settings_get(), &args);
    }

    gwy_si_unit_value_format_free(args.valform);
    if (!ok)
        return;

    gwy_app_wait_start(gwy_app_find_window_for_channel(data, oldid),
                       _("Median-leveling..."));
    background = median_background(GWY_ROUND(args.size), dfield);
    gwy_app_wait_finish();
    if (!background)
        return;

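    /* Save an undo checkpoint, then subtract the background from the data. */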
    gwy_app_undo_qcheckpointv(data, 1, &dquark);
    gwy_data_field_subtract_fields(dfield, dfield, background);
    gwy_data_field_data_changed(dfield);
    gwy_app_channel_log_add_proc(data, oldid, oldid);

    if (!args.do_extract) {
        g_object_unref(background);
        return;
    }

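    /* The background was requested as a separate output: add it as a new channel. */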
    newid = gwy_app_data_browser_add_data_field(background, data, TRUE);
    g_object_unref(background);
    gwy_app_sync_data_items(data, data, oldid, newid, FALSE,
                            GWY_DATA_ITEM_GRADIENT,
                            0);
    gwy_app_set_data_field_title(data, newid, _("Background"));
    gwy_app_channel_log_add(data, oldid, newid, NULL, NULL);
}
Example #2
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
	Mat previous_gray_frame, optical_flow, optical_flow_display;
	Mat current_frame, thresholded_image, closed_image, first_frame;
	Mat current_frame_gray, running_average_background;
	Mat temp_running_average_background, running_average_difference;
	Mat running_average_foreground_mask, running_average_foreground_image;
	Mat selective_running_average_background;
	Mat temp_selective_running_average_background, selective_running_average_difference;
	Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
	double running_average_learning_rate = 0.01;
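	// Seek to the requested starting frame; the first frame grabbed is kept as the static background.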
	surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
	surveillance_video >> current_frame;
	first_frame = current_frame.clone();
	cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
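	// accumulateWeighted() needs a floating point accumulator, so initialise both running averages from a CV_32F copy of the first frame.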
	current_frame.convertTo(running_average_background, CV_32F);
	selective_running_average_background = running_average_background.clone();
	int rad = running_average_background.depth();
	MedianBackground median_background( current_frame, (float) 1.005, 1 );
	Mat median_background_image, median_foreground_image;

	int codec = static_cast<int>(surveillance_video.get(CV_CAP_PROP_FOURCC));
	// V3.0.0 update on next line.  OLD CODE was    BackgroundSubtractorMOG2 gmm; //(50,16,true);
    Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
	Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);

	double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
	double time_between_frames = 1000.0/frame_rate;
	Timestamper* timer = new Timestamper();
	int frame_count = 0;
	while ((!current_frame.empty()) && (frame_count++ < 1000))//1800))
    {
 		double duration = static_cast<double>(getTickCount());
		vector<Mat> input_planes(3);
		split(current_frame,input_planes);
		cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);

		if (frame_count%2 == 0)  // Only compute optical flow on every second frame so the motion between processed frames is larger.
		{
			if ( previous_gray_frame.data )
			{
				Mat lucas_kanade_flow;
				timer->ignoreTimeSinceLastRecorded();
				LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
				timer->recordTime("Lucas Kanade Optical Flow");
				calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
				cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
				drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
				timer->recordTime("Farneback Optical Flow");
				char frame_str[100];
				sprintf( frame_str, "Frame = %d", frame_count);
 				Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
				Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
				imshow("Optical Flow", optical_flow_output );
			}
			std::swap(previous_gray_frame, current_frame_gray);
		}
	
		// Static background image
		Mat difference_frame, binary_difference;
		Mat structuring_element(3,3,CV_8U,Scalar(1));
		timer->ignoreTimeSinceLastRecorded();
		absdiff(current_frame,first_frame,difference_frame);
		cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
		threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
		if (clean_binary_images)
		{
			// Clean the binary mask with a closing followed by an opening before using it.
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,thresholded_image,MORPH_OPEN,structuring_element);
		}
		binary_difference.setTo(Scalar(0,0,0));
		current_frame.copyTo(binary_difference, thresholded_image);
		timer->recordTime("Static difference");

		// Running Average (three channel version)
		vector<Mat> running_average_planes(3);
		split(running_average_background,running_average_planes);
		accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
		accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
		accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
		merge(running_average_planes,running_average_background);
		running_average_background.convertTo(temp_running_average_background,CV_8U);
		absdiff(temp_running_average_background,current_frame,running_average_difference);
		split(running_average_difference,running_average_planes);
		// Determine foreground points as any point with a difference of more than 30 on any one channel:
		threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
		split(running_average_foreground_mask,running_average_planes);
		bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
		bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
		if (clean_binary_images)
		{
			morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
		timer->recordTime("Running Average");

		// Running Average with selective update
		vector<Mat> selective_running_average_planes(3);
		// Find Foreground mask
		selective_running_average_background.convertTo(temp_selective_running_average_background,CV_8U);
		absdiff(temp_selective_running_average_background,current_frame,selective_running_average_difference);
		split(selective_running_average_difference,selective_running_average_planes);
		// Determine foreground points as any point with an average difference of more than 30 over all channels:
		Mat temp_sum = (selective_running_average_planes[0]/3 + selective_running_average_planes[1]/3 + selective_running_average_planes[2]/3);
		threshold(temp_sum,selective_running_average_foreground_mask,30,255,THRESH_BINARY_INV);
		// Update background
		split(selective_running_average_background,selective_running_average_planes);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate,selective_running_average_foreground_mask);
    	invertImage(selective_running_average_foreground_mask,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		merge(selective_running_average_planes,selective_running_average_background);
		if (clean_binary_images)
		{
			morphologyEx(selective_running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,selective_running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
 		selective_running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(selective_running_average_foreground_image, selective_running_average_foreground_mask);
		timer->recordTime("Selective Running Average");

		// Median background
		timer->ignoreTimeSinceLastRecorded();
		median_background.UpdateBackground( current_frame );
		timer->recordTime("Median");
		median_background_image = median_background.GetBackgroundImage();
		Mat median_difference;
		absdiff(median_background_image,current_frame,median_difference);
		cvtColor(median_difference, median_difference, CV_BGR2GRAY);
		threshold(median_difference,median_difference,30,255,THRESH_BINARY);
		median_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(median_foreground_image, median_difference);

		// Update the Gaussian Mixture Model
 		// V3.0.0 update on next line.  OLD CODE was  gmm(current_frame, foreground_mask);
        gmm->apply(current_frame, foreground_mask);
		// MOG2 labels shadow pixels with an intermediate grey value, so threshold high to keep
		// only definite moving pixels and lower to keep moving pixels plus shadows.
		threshold(foreground_mask,thresholded_image,150,255,THRESH_BINARY);
		Mat moving_incl_shadows, shadow_points;
		threshold(foreground_mask,moving_incl_shadows,50,255,THRESH_BINARY);
		absdiff( thresholded_image, moving_incl_shadows, shadow_points );
		Mat cleaned_foreground_mask;
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,cleaned_foreground_mask,MORPH_OPEN,structuring_element);
		}
		else cleaned_foreground_mask = thresholded_image.clone();
 		foreground_image.setTo(Scalar(0,0,0));
        current_frame.copyTo(foreground_image, cleaned_foreground_mask);
		timer->recordTime("Gaussian Mixture Model");
		// Create an average background image (just for information)
        Mat mean_background_image;
		timer->ignoreTimeSinceLastRecorded();
		// V3.0.0 update on next line.  OLD CODE was   gmm.getBackgroundImage(mean_background_image);
        gmm->getBackgroundImage(mean_background_image);

		// Estimate how long this frame took to process and delay so that playback
		// approximately matches the source frame rate.
		duration = static_cast<double>(getTickCount())-duration;
		duration /= getTickFrequency()/1000.0;
		int delay = (time_between_frames>duration) ? ((int) (time_between_frames-duration)) : 1;
		char c = (char) waitKey(delay);
		
		// Display each background model side by side with the foreground image it extracts.
		char frame_str[100];
		sprintf( frame_str, "Frame = %d", frame_count);
		Mat temp_static_output = JoinImagesHorizontally( current_frame, frame_str, first_frame, "Static Background", 4 );
		Mat static_output = JoinImagesHorizontally( temp_static_output, "", binary_difference, "Foreground", 4 );
        imshow("Static Background Model", static_output );
 		Mat temp_running_output = JoinImagesHorizontally( current_frame, frame_str, temp_running_average_background, "Running Average Background", 4 );
		Mat running_output = JoinImagesHorizontally( temp_running_output, "", running_average_foreground_image, "Foreground", 4 );
		imshow("Running Average Background Model", running_output );
 		Mat temp_selective_output = JoinImagesHorizontally( current_frame, frame_str, temp_selective_running_average_background, "Selective Running Average Background", 4 );
		Mat selective_output = JoinImagesHorizontally( temp_selective_output, "", selective_running_average_foreground_image, "Foreground", 4 );
        imshow("Selective Running Average Background Model", selective_output );
 		Mat temp_median_output = JoinImagesHorizontally( current_frame, frame_str, median_background_image, "Median Background", 4 );
		Mat median_output = JoinImagesHorizontally( temp_median_output, "", median_foreground_image, "Foreground", 4 );
        imshow("Median Background Model", median_output );
  		Mat temp_gaussian_output = JoinImagesHorizontally( current_frame, frame_str, mean_background_image, "GMM Background", 4 );
		Mat gaussian_output = JoinImagesHorizontally( temp_gaussian_output, "", foreground_image, "Foreground", 4 );
        imshow("Gaussian Mixture Model", gaussian_output );
		timer->putTimes( current_frame );
		imshow( "Computation Times", current_frame );
	 	surveillance_video >> current_frame;
	}
	delete timer;
	destroyAllWindows();
}