// Example no. 1
void ImagesDemos( Mat& image1, Mat& image2, Mat& logo_image, Mat& people_image )
{
	// Demonstrates a sequence of basic image-processing operations, each
	// shown in its own window and advanced with a key press:
	//  1. colour inversion
	//  2. sampling & quantisation (grey scale and colour)
	//  3. colour models (RGB, YUV, HLS, Lab) via channel splitting
	//  4. skin and red-eye pixel detection
	//  5. noise generation and smoothing filters
	//  6. watermarking using image ROIs and weighted addition
	// NOTE: logo_image is resized in place, so the caller's Mat is modified.
	// (An unused, leaked "new Timestamper()" has been removed from this demo.)

	// Basic colour image access (demonstration using invert)
	Mat output_image;
	InvertColour( image1, output_image );
	Mat output1 = JoinImagesHorizontally(image1,"Original Image",output_image,"Inverted Image",4);
	imshow("Basic Image Processing", output1);
	cvWaitKey();
	cvDestroyAllWindows();

	// Sampling & Quantisation (Grey scale): half-size then re-enlarge to show
	// sampling artefacts, and reduce to 2 bits to show quantisation artefacts.
	Mat image1_gray, smaller_image, resized_image, two_bit_image;
	cvtColor(image1, image1_gray, CV_BGR2GRAY);
	resize(image1_gray, smaller_image, Size( image1.cols/2, image1.rows/2 ));
	resize(smaller_image, resized_image, image1.size() );
	two_bit_image = image1_gray.clone();
	ChangeQuantisationGrey( two_bit_image, 2 );
	// Convert the single-channel results back to BGR so they can be tiled
	// together with the colour images below.
	Mat image1_gray_display, smaller_image_display, resized_image_display, two_bit_image_display;
	cvtColor(image1_gray, image1_gray_display, CV_GRAY2BGR);
	cvtColor(smaller_image, smaller_image_display, CV_GRAY2BGR);
	cvtColor(resized_image, resized_image_display, CV_GRAY2BGR);
	cvtColor(two_bit_image, two_bit_image_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(two_bit_image_display,"Quantisation 8->2 bits",image1_gray_display,"Original Greyscale Image",4);
	Mat output2 = JoinImagesHorizontally(output1,"",smaller_image_display,"Half sized image",4);
	Mat output3 = JoinImagesHorizontally(output2,"",resized_image_display,"Resized image",4);
	// Sampling & Quantisation (colour).  Nearest-neighbour interpolation is
	// used on the re-enlargement so the sampling blockiness stays visible.
	Mat quantised_frame;
	quantised_frame = image1.clone();
	resize(image1, smaller_image, Size( image1.cols/2, image1.rows/2 ));
	resize(smaller_image, resized_image, image1.size(), 0.0, 0.0, INTER_NEAREST );
	changeQuantisation(quantised_frame, 2);
	output1 = JoinImagesHorizontally(quantised_frame,"Quantisation 8->2 bits",image1,"Original Colour Image",4);
	output2 = JoinImagesHorizontally(output1,"",smaller_image,"Half sized image",4);
	Mat output4 = JoinImagesHorizontally(output2,"",resized_image,"Resized image",4);
	Mat output5 = JoinImagesVertically(output3,"",output4,"",4);
	imshow("Sampling & Quantisation", output5);
	cvWaitKey();
	cvDestroyAllWindows();

	// Colour channels.  Split the image in four colour models and display
	// every channel as a greyscale image.  Note OpenCV's BGR channel order:
	// plane index 2 is Red, 1 is Green, 0 is Blue.
	resize(image2, smaller_image, Size( image2.cols/2, image2.rows/2 ));
	vector<Mat> input_planes(3);
	split(smaller_image,input_planes);
	Mat channel1_display, channel2_display, channel3_display;
	cvtColor(input_planes[2], channel1_display, CV_GRAY2BGR);
	cvtColor(input_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(input_planes[0], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Red",channel2_display,"Green",4);
	output2 = JoinImagesHorizontally(output1,"",channel3_display,"Blue",4);

	Mat yuv_image;
	cvtColor(smaller_image, yuv_image, CV_BGR2YUV);
	split(yuv_image,input_planes);
	cvtColor(input_planes[0], channel1_display, CV_GRAY2BGR);
	cvtColor(input_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(input_planes[2], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Y",channel2_display,"U",4);
	output3 = JoinImagesHorizontally(output1,"",channel3_display,"V",4);
	output4 = JoinImagesVertically(output2,"",output3,"",4);

	Mat hls_image;
	cvtColor(smaller_image, hls_image, CV_BGR2HLS);
	vector<Mat> hls_planes(3);
	split(hls_image,hls_planes);
	cvtColor(hls_planes[0], channel1_display, CV_GRAY2BGR);
	cvtColor(hls_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(hls_planes[2], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Hue",channel2_display,"Luminance",4);
	output2 = JoinImagesHorizontally(output1,"",channel3_display,"Saturation",4);
	output3 = JoinImagesVertically(output4,"",output2,"",4);
	Mat lab_image;
	cvtColor(smaller_image, lab_image, CV_BGR2Lab);
	vector<Mat> lab_planes(3);
	split(lab_image,lab_planes);
	cvtColor(lab_planes[0], channel1_display, CV_GRAY2BGR);
	cvtColor(lab_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(lab_planes[2], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Luminance",channel2_display,"A",4);
	output2 = JoinImagesHorizontally(output1,"",channel3_display,"B",4);
	output4 = JoinImagesVertically(output3,"",output2,"",4);
	output3 = JoinImagesHorizontally(smaller_image,"",output4,"",4);
	imshow("Colour Models - RGB, YUV, HLS, Lab", output3);
	cvWaitKey();
	cvDestroyAllWindows();

	// Skin and red-eye detection.  Skin selection works in HLS space;
	// red-eye selection works directly on the BGR image.
	Mat hls_people_image, hls_skin_image, skin_image, redeye_image;
	cvtColor(people_image, hls_people_image, CV_BGR2HLS);
	SelectSkin( hls_people_image, hls_skin_image );
	SelectRedEyePixels( people_image, redeye_image );
	cvtColor(hls_skin_image, skin_image, CV_HLS2BGR);
	output1 = JoinImagesHorizontally(people_image,"Original Image",skin_image,"Possible skin pixels",4);
	output2 = JoinImagesHorizontally(output1,"",redeye_image,"Possible Red-Eye pixels",4);
	imshow("Skin & Redeye detection", output2);
	cvWaitKey();
	cvDestroyAllWindows();

	// Noise & Smoothing: corrupt a copy of the image with Gaussian noise and
	// with salt-and-pepper noise, then compare local-average, Gaussian and
	// median filtering on each.
	resize(image1, smaller_image, Size( image1.cols*3/4, image1.rows*3/4 ));
	Mat noise_test = smaller_image.clone();
	addGaussianNoise(noise_test, 0.0, 20.0);
	Mat noise_test1 = noise_test.clone();
	Mat noise_test2 = noise_test.clone();
	Mat noise_test3 = noise_test.clone();
	blur(noise_test1,noise_test1,Size(5,5));
	GaussianBlur(noise_test2,noise_test2,Size(5,5),1.5);
	medianBlur(noise_test3,noise_test3,5);
	output1 = JoinImagesHorizontally(noise_test,"Gaussian Noise (0, 20)",noise_test1,"Local Average",4);
	output2 = JoinImagesHorizontally(output1,"",noise_test2,"Gaussian filtered",4);
	output3 = JoinImagesHorizontally(output2,"",noise_test3,"Median filtered",4);
	noise_test = smaller_image.clone();
	addSaltAndPepperNoise(noise_test, 5.0);
	noise_test1 = noise_test.clone();
	noise_test2 = noise_test.clone();
	noise_test3 = noise_test.clone();
	blur(noise_test1,noise_test1,Size(5,5));
	GaussianBlur(noise_test2,noise_test2,Size(5,5),1.5);
	medianBlur(noise_test3,noise_test3,5);
	output1 = JoinImagesHorizontally(noise_test,"Salt and Pepper Noise (5%)",noise_test1,"Local Average",4);
	output2 = JoinImagesHorizontally(output1,"",noise_test2,"Gaussian filtered",4);
	output4 = JoinImagesHorizontally(output2,"",noise_test3,"Median filtered",4);
	output5 = JoinImagesVertically(output3,"",output4,"",4);
	output1 = JoinImagesHorizontally(smaller_image,"Original Image",output5,"",4);
	imshow("Noise and Smoothing", output1);
	cvWaitKey();
	cvDestroyAllWindows();

	// Regions of Interest and weighted image addition (watermarking).
	Mat watermarked_image = image1.clone();
	// Scale the logo so that its limiting dimension is half the
	// corresponding dimension of image1.
	double scale = (((double)logo_image.cols)/((double)image1.cols)) > (((double)logo_image.rows)/((double)image1.rows)) ?
		             0.5/(((double)logo_image.cols)/((double)image1.cols)) : 0.5/(((double)logo_image.rows)/((double)image1.rows));
	resize(logo_image,logo_image,Size(((int) (((double) logo_image.cols)*scale)),((int) (((double) logo_image.rows)*scale))));
	// The ROI is a view into watermarked_image, so the weighted addition
	// below writes directly into the centre of the watermarked image.
	Mat imageROI;
	imageROI = watermarked_image(cv::Rect((image1.cols-logo_image.cols)/2,(image1.rows-logo_image.rows)/2,logo_image.cols,logo_image.rows));
	addWeighted(imageROI,1.0,logo_image,0.1,0.0,imageROI);
	output1 = JoinImagesHorizontally(image1,"Original Image",logo_image,"Watermark",4);
	output2 = JoinImagesHorizontally(output1,"",watermarked_image,"Watermarked Image",4);
	imshow("Watermarking (Demo of Image ROIs & weighted addition)", output2);
	cvWaitKey();
	cvDestroyAllWindows();
}
// Example no. 2
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
	// Demonstrates moving-object detection on a surveillance video using
	// several background models side by side:
	//  - static (first-frame) background
	//  - running average (per-channel accumulateWeighted)
	//  - selective running average (foreground updated at a reduced rate)
	//  - median background (MedianBackground helper)
	//  - Gaussian Mixture Model (BackgroundSubtractorMOG2)
	// plus dense (Farneback) and sparse (Lucas-Kanade) optical flow on every
	// second frame.  Processing stops at end of video or after 1000 frames.
	//
	// Parameters:
	//  surveillance_video  - opened capture; read position is moved to
	//                        starting_frame and frames are consumed from it.
	//  starting_frame      - first frame index to process.
	//  clean_binary_images - if true, clean each binary foreground mask with
	//                        a morphological closing followed by an opening.
	Mat previous_gray_frame, optical_flow, optical_flow_display;
	Mat current_frame, thresholded_image, closed_image, first_frame;
	Mat current_frame_gray, running_average_background;
	Mat temp_running_average_background, running_average_difference;
	Mat running_average_foreground_mask, running_average_foreground_image;
	Mat selective_running_average_background;
	Mat temp_selective_running_average_background, selective_running_average_difference;
	Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
	double running_average_learning_rate = 0.01;
	surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
	surveillance_video >> current_frame;
	first_frame = current_frame.clone();
	cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
	// Running-average backgrounds are kept in 32-bit float so that
	// accumulateWeighted can update them incrementally.
	current_frame.convertTo(running_average_background, CV_32F);
	selective_running_average_background = running_average_background.clone();
	MedianBackground median_background( current_frame, (float) 1.005, 1 );
	Mat median_background_image, median_foreground_image;

	// V3.0.0 update on next line.  OLD CODE was    BackgroundSubtractorMOG2 gmm; //(50,16,true);
	Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
	Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);

	double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
	if (frame_rate <= 0.0)
		frame_rate = 25.0;  // Some containers report no FPS; avoid division by zero below.
	double time_between_frames = 1000.0/frame_rate;
	Timestamper* timer = new Timestamper();
	// 3x3 structuring element used by every morphological clean-up; it is
	// never modified, so it is created once outside the frame loop.
	Mat structuring_element(3,3,CV_8U,Scalar(1));
	int frame_count = 0;
	while ((!current_frame.empty()) && (frame_count++ < 1000))//1800))
	{
		double duration = static_cast<double>(getTickCount());
		vector<Mat> input_planes(3);
		split(current_frame,input_planes);
		cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);

		if (frame_count%2 == 0)  // Skip every second frame so the flow is greater.
		{
			if ( previous_gray_frame.data )
			{
				Mat lucas_kanade_flow;
				timer->ignoreTimeSinceLastRecorded();
				LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
				timer->recordTime("Lucas Kanade Optical Flow");
				calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
				cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
				drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
				timer->recordTime("Farneback Optical Flow");
				char frame_str[100];
				snprintf( frame_str, sizeof(frame_str), "Frame = %d", frame_count);
				Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
				Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
				imshow("Optical Flow", optical_flow_output );
			}
			std::swap(previous_gray_frame, current_frame_gray);
		}

		// Static background image: difference against the first frame,
		// thresholded at 30 on the greyscale difference.
		Mat difference_frame, binary_difference;
		timer->ignoreTimeSinceLastRecorded();
		absdiff(current_frame,first_frame,difference_frame);
		cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
		threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,binary_difference,MORPH_OPEN,structuring_element);
			current_frame.copyTo(binary_difference, thresholded_image);
		}
		else
		{
			binary_difference.setTo(Scalar(0,0,0));
			current_frame.copyTo(binary_difference, thresholded_image);
		}
		timer->recordTime("Static difference");

		// Running Average (three channel version)
		vector<Mat> running_average_planes(3);
		split(running_average_background,running_average_planes);
		accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
		accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
		accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
		merge(running_average_planes,running_average_background);
		running_average_background.convertTo(temp_running_average_background,CV_8U);
		absdiff(temp_running_average_background,current_frame,running_average_difference);
		split(running_average_difference,running_average_planes);
		// Determine foreground points as any point with a difference of more than 30 on any one channel:
		threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
		split(running_average_foreground_mask,running_average_planes);
		bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
		bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
		if (clean_binary_images)
		{
			morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		running_average_foreground_image.setTo(Scalar(0,0,0));
		current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
		timer->recordTime("Running Average");

		// Running Average with selective update
		vector<Mat> selective_running_average_planes(3);
		// Find Foreground mask
		selective_running_average_background.convertTo(temp_selective_running_average_background,CV_8U);
		absdiff(temp_selective_running_average_background,current_frame,selective_running_average_difference);
		split(selective_running_average_difference,selective_running_average_planes);
		// Determine foreground points as any point with an average difference of more than 30 over all channels:
		Mat temp_sum = (selective_running_average_planes[0]/3 + selective_running_average_planes[1]/3 + selective_running_average_planes[2]/3);
		threshold(temp_sum,selective_running_average_foreground_mask,30,255,THRESH_BINARY_INV);
		// Update background: full learning rate on background points, then
		// (after inverting the mask) one third of the rate on foreground
		// points, so moving objects are absorbed only slowly.
		split(selective_running_average_background,selective_running_average_planes);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate,selective_running_average_foreground_mask);
		invertImage(selective_running_average_foreground_mask,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		merge(selective_running_average_planes,selective_running_average_background);
		if (clean_binary_images)
		{
			morphologyEx(selective_running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,selective_running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		selective_running_average_foreground_image.setTo(Scalar(0,0,0));
		current_frame.copyTo(selective_running_average_foreground_image, selective_running_average_foreground_mask);
		timer->recordTime("Selective Running Average");

		// Median background
		timer->ignoreTimeSinceLastRecorded();
		median_background.UpdateBackground( current_frame );
		timer->recordTime("Median");
		median_background_image = median_background.GetBackgroundImage();
		Mat median_difference;
		absdiff(median_background_image,current_frame,median_difference);
		cvtColor(median_difference, median_difference, CV_BGR2GRAY);
		threshold(median_difference,median_difference,30,255,THRESH_BINARY);
		median_foreground_image.setTo(Scalar(0,0,0));
		current_frame.copyTo(median_foreground_image, median_difference);

		// Update the Gaussian Mixture Model
		// V3.0.0 update on next line.  OLD CODE was  gmm(current_frame, foreground_mask);
		gmm->apply(current_frame, foreground_mask);
		// Clean the resultant binary (moving pixel) mask using an opening.
		threshold(foreground_mask,thresholded_image,150,255,THRESH_BINARY);
		// Shadow pixels have an intermediate value in the MOG2 mask; the
		// shadow_points image is computed for illustration only and is not
		// displayed below.
		Mat moving_incl_shadows, shadow_points;
		threshold(foreground_mask,moving_incl_shadows,50,255,THRESH_BINARY);
		absdiff( thresholded_image, moving_incl_shadows, shadow_points );
		Mat cleaned_foreground_mask;
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,cleaned_foreground_mask,MORPH_OPEN,structuring_element);
		}
		else cleaned_foreground_mask = thresholded_image.clone();
		foreground_image.setTo(Scalar(0,0,0));
		current_frame.copyTo(foreground_image, cleaned_foreground_mask);
		timer->recordTime("Gaussian Mixture Model");
		// Create an average background image (just for information)
		Mat mean_background_image;
		timer->ignoreTimeSinceLastRecorded();
		// V3.0.0 update on next line.  OLD CODE was   gmm.getBackgroundImage(mean_background_image);
		gmm->getBackgroundImage(mean_background_image);

		// Wait the remainder of the inter-frame period (at least 1 ms) so
		// playback runs at roughly the video's native frame rate.
		duration = static_cast<double>(getTickCount())-duration;
		duration /= getTickFrequency()/1000.0;
		int delay = (time_between_frames>duration) ? ((int) (time_between_frames-duration)) : 1;
		cvWaitKey(delay);

		char frame_str[100];
		snprintf( frame_str, sizeof(frame_str), "Frame = %d", frame_count);
		Mat temp_static_output = JoinImagesHorizontally( current_frame, frame_str, first_frame, "Static Background", 4 );
		Mat static_output = JoinImagesHorizontally( temp_static_output, "", binary_difference, "Foreground", 4 );
		imshow("Static Background Model", static_output );
		Mat temp_running_output = JoinImagesHorizontally( current_frame, frame_str, temp_running_average_background, "Running Average Background", 4 );
		Mat running_output = JoinImagesHorizontally( temp_running_output, "", running_average_foreground_image, "Foreground", 4 );
		imshow("Running Average Background Model", running_output );
		Mat temp_selective_output = JoinImagesHorizontally( current_frame, frame_str, temp_selective_running_average_background, "Selective Running Average Background", 4 );
		Mat selective_output = JoinImagesHorizontally( temp_selective_output, "", selective_running_average_foreground_image, "Foreground", 4 );
		imshow("Selective Running Average Background Model", selective_output );
		Mat temp_median_output = JoinImagesHorizontally( current_frame, frame_str, median_background_image, "Median Background", 4 );
		Mat median_output = JoinImagesHorizontally( temp_median_output, "", median_foreground_image, "Foreground", 4 );
		imshow("Median Background Model", median_output );
		Mat temp_gaussian_output = JoinImagesHorizontally( current_frame, frame_str, mean_background_image, "GMM Background", 4 );
		Mat gaussian_output = JoinImagesHorizontally( temp_gaussian_output, "", foreground_image, "Foreground", 4 );
		imshow("Gaussian Mixture Model", gaussian_output );
		timer->putTimes( current_frame );
		imshow( "Computation Times", current_frame );
		surveillance_video >> current_frame;
	}
	delete timer;  // Timestamper was leaked in the original code.
	cvDestroyAllWindows();
}