// Example #1
void ImagesDemos( Mat& image1, Mat& image2, Mat& logo_image, Mat& people_image )
{
	Timestamper* timer = new Timestamper();

	// Basic colour image access (demonstration using invert)
	Mat output_image;
	InvertColour( image1, output_image );
	Mat output1 = JoinImagesHorizontally(image1,"Original Image",output_image,"Inverted Image",4);
	imshow("Basic Image Processing", output1);
	char c = cvWaitKey();
    cvDestroyAllWindows();

	// Sampling & Quantisation (Grey scale)
	Mat image1_gray, smaller_image, resized_image, two_bit_image;
	cvtColor(image1, image1_gray, CV_BGR2GRAY);
	resize(image1_gray, smaller_image, Size( image1.cols/2, image1.rows/2 ));
	resize(smaller_image, resized_image, image1.size() );
	two_bit_image = image1_gray.clone();
	ChangeQuantisationGrey( two_bit_image, 2 );
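	// ChangeQuantisationGrey() is a helper from the accompanying library; reducing the image to
	// 2 bits per pixel leaves only 4 grey levels, which makes the quantisation banding obvious.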
	Mat image1_gray_display, smaller_image_display, resized_image_display, two_bit_image_display;
	cvtColor(image1_gray, image1_gray_display, CV_GRAY2BGR);
	cvtColor(smaller_image, smaller_image_display, CV_GRAY2BGR);
	cvtColor(resized_image, resized_image_display, CV_GRAY2BGR);
	cvtColor(two_bit_image, two_bit_image_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(two_bit_image_display,"Quantisation 8->2 bits",image1_gray_display,"Original Greyscale Image",4);
	Mat output2 = JoinImagesHorizontally(output1,"",smaller_image_display,"Half sized image",4);
	Mat output3 = JoinImagesHorizontally(output2,"",resized_image_display,"Resized image",4);
	// Sampling & Quantisation (Colour)
	Mat quantised_frame;
	quantised_frame = image1.clone();
	resize(image1, smaller_image, Size( image1.cols/2, image1.rows/2 ));
	resize(smaller_image, resized_image, image1.size(), 0.0, 0.0, INTER_NEAREST );
	changeQuantisation(quantised_frame, 2);
	output1 = JoinImagesHorizontally(quantised_frame,"Quantisation 8->2 bits",image1,"Original Colour Image",4);
	output2 = JoinImagesHorizontally(output1,"",smaller_image,"Half sized image",4);
	Mat output4 = JoinImagesHorizontally(output2,"",resized_image,"Resized image",4);
	Mat output5 = JoinImagesVertically(output3,"",output4,"",4);
	imshow("Sampling & Quantisation", output5);
	c = cvWaitKey();
    cvDestroyAllWindows();

	// Colour channels.
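	// Note that OpenCV stores colour images in B,G,R channel order, so after split() plane 0 is
	// Blue, plane 1 is Green and plane 2 is Red.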
	resize(image2, smaller_image, Size( image2.cols/2, image2.rows/2 ));
	vector<Mat> input_planes(3);
	split(smaller_image,input_planes);
	Mat channel1_display, channel2_display, channel3_display;
	cvtColor(input_planes[2], channel1_display, CV_GRAY2BGR);
	cvtColor(input_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(input_planes[0], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Red",channel2_display,"Green",4);
	output2 = JoinImagesHorizontally(output1,"",channel3_display,"Blue",4);

	Mat yuv_image;
	cvtColor(smaller_image, yuv_image, CV_BGR2YUV);
	split(yuv_image,input_planes);
	cvtColor(input_planes[0], channel1_display, CV_GRAY2BGR);
	cvtColor(input_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(input_planes[2], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Y",channel2_display,"U",4);
	output3 = JoinImagesHorizontally(output1,"",channel3_display,"V",4);
	output4 = JoinImagesVertically(output2,"",output3,"",4);

	Mat hls_image;
	cvtColor(smaller_image, hls_image, CV_BGR2HLS);
	vector<Mat> hls_planes(3);
	split(hls_image,hls_planes);
	Mat& hue_image = hls_planes[0];
	cvtColor(hls_planes[0], channel1_display, CV_GRAY2BGR);
	cvtColor(hls_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(hls_planes[2], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Hue",channel2_display,"Luminance",4);
	output2 = JoinImagesHorizontally(output1,"",channel3_display,"Saturation",4);
	output3 = JoinImagesVertically(output4,"",output2,"",4);
	Mat lab_image;
	cvtColor(smaller_image, lab_image, CV_BGR2Lab);
	vector<Mat> lab_planes(3);
	split(lab_image,lab_planes);
	cvtColor(lab_planes[0], channel1_display, CV_GRAY2BGR);
	cvtColor(lab_planes[1], channel2_display, CV_GRAY2BGR);
	cvtColor(lab_planes[2], channel3_display, CV_GRAY2BGR);
	output1 = JoinImagesHorizontally(channel1_display,"Luminance",channel2_display,"A",4);
	output2 = JoinImagesHorizontally(output1,"",channel3_display,"B",4);
	output4 = JoinImagesVertically(output3,"",output2,"",4);
	output3 = JoinImagesHorizontally(smaller_image,"",output4,"",4);
	imshow("Colour Models - RGB, YUV, HLS, Lab", output3);
	c = cvWaitKey();
    cvDestroyAllWindows();

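	// Skin & red-eye detection.  SelectSkin() and SelectRedEyePixels() are helpers from the
	// accompanying library; presumably SelectSkin() thresholds on hue/saturation in HLS space and
	// SelectRedEyePixels() labels pixels whose red channel dominates the others.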
	Mat hls_people_image, hls_skin_image, skin_image, redeye_image;
	cvtColor(people_image, hls_people_image, CV_BGR2HLS);
	SelectSkin( hls_people_image, hls_skin_image );
	SelectRedEyePixels( people_image, redeye_image );
	cvtColor(hls_skin_image, skin_image, CV_HLS2BGR);
	output1 = JoinImagesHorizontally(people_image,"Original Image",skin_image,"Possible skin pixels",4);
	output2 = JoinImagesHorizontally(output1,"",redeye_image,"Possible Red-Eye pixels",4);
	imshow("Skin & Redeye detection", output2);
	c = cvWaitKey();
    cvDestroyAllWindows();

	// Noise & Smoothing
	resize(image1, smaller_image, Size( image1.cols*3/4, image1.rows*3/4 ));
	Mat noise_test = smaller_image.clone();
	addGaussianNoise(noise_test, 0.0, 20.0);
	Mat noise_test1 = noise_test.clone();
	Mat noise_test2 = noise_test.clone();
	Mat noise_test3 = noise_test.clone();
	blur(noise_test1,noise_test1,Size(5,5));
	GaussianBlur(noise_test2,noise_test2,Size(5,5),1.5);
	medianBlur(noise_test3,noise_test3,5);
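	// A 5x5 local average and a Gaussian (sigma = 1.5) both suppress additive Gaussian noise at the
	// cost of blurred edges, whereas the median filter is far more effective on impulse (salt and
	// pepper) noise because extreme outliers never enter the output.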
	output1 = JoinImagesHorizontally(noise_test,"Gaussian Noise (0, 20)",noise_test1,"Local Average",4);
	output2 = JoinImagesHorizontally(output1,"",noise_test2,"Gaussian filtered",4);
	output3 = JoinImagesHorizontally(output2,"",noise_test3,"Median filtered",4);
	noise_test = smaller_image.clone();
	addSaltAndPepperNoise(noise_test, 5.0);
	noise_test1 = noise_test.clone();
	noise_test2 = noise_test.clone();
	noise_test3 = noise_test.clone();
	blur(noise_test1,noise_test1,Size(5,5));
	GaussianBlur(noise_test2,noise_test2,Size(5,5),1.5);
	medianBlur(noise_test3,noise_test3,5);
	output1 = JoinImagesHorizontally(noise_test,"Salt and Pepper Noise (5%)",noise_test1,"Local Average",4);
	output2 = JoinImagesHorizontally(output1,"",noise_test2,"Gaussian filtered",4);
	output4 = JoinImagesHorizontally(output2,"",noise_test3,"Median filtered",4);
	output5 = JoinImagesVertically(output3,"",output4,"",4);
	output1 = JoinImagesHorizontally(smaller_image,"Original Image",output5,"",4);
	imshow("Noise and Smoothing", output1);
	c = cvWaitKey();
    cvDestroyAllWindows();

	// Regions of Interest and weighted image addition.
	Mat watermarked_image = image1.clone();
	// Scale the logo so that its larger dimension is half of the corresponding image dimension.
	double scale_x = ((double) image1.cols) / ((double) logo_image.cols);
	double scale_y = ((double) image1.rows) / ((double) logo_image.rows);
	double scale = 0.5 * ((scale_x < scale_y) ? scale_x : scale_y);
	resize(logo_image,logo_image,Size((int) (((double) logo_image.cols)*scale),(int) (((double) logo_image.rows)*scale)));
	Mat imageROI;
	imageROI = watermarked_image(cv::Rect((image1.cols-logo_image.cols)/2,(image1.rows-logo_image.rows)/2,logo_image.cols,logo_image.rows));
	addWeighted(imageROI,1.0,logo_image,0.1,0.0,imageROI);
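	// addWeighted() computes dst = alpha*src1 + beta*src2 + gamma.  Because imageROI is only a
	// header onto watermarked_image's pixel data, the blended logo is written directly into the
	// centre of the watermarked image.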
	output1 = JoinImagesHorizontally(image1,"Original Image",logo_image,"Watermark",4);
	output2 = JoinImagesHorizontally(output1,"",watermarked_image,"Watermarked Image",4);
    imshow("Watermarking (Demo of Image ROIs & weighted addition)", output2);
	c = cvWaitKey();
    cvDestroyAllWindows();
}
void RecognitionDemos( Mat& full_image, Mat& template1, Mat& template2, Mat& template1locations, Mat& template2locations, VideoCapture& bicycle_video, Mat& bicycle_background, Mat& bicycle_model, VideoCapture& people_video, CascadeClassifier& cascade, Mat& numbers, Mat& good_orings, Mat& bad_orings, Mat& unknown_orings )
{
	Timestamper* timer = new Timestamper();

	// Principal Components Analysis
	PCASimpleExample();
    char ch = cvWaitKey();
	cvDestroyAllWindows();

	PCAFaceRecognition();
    ch = cvWaitKey();
	cvDestroyAllWindows();

	// Statistical Pattern Recognition
	Mat gray_numbers,binary_numbers;
	cvtColor(numbers, gray_numbers, CV_BGR2GRAY);
	threshold(gray_numbers,binary_numbers,128,255,THRESH_BINARY_INV);
    vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary_numbers,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
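	// With CV_RETR_TREE each hierarchy entry is a Vec4i of contour indices:
	// [next sibling, previous sibling, first child (hole), parent], with -1 marking the end of a chain.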
	Mat contours_image = Mat::zeros(binary_numbers.size(), CV_8UC3);
	contours_image = Scalar(255,255,255);
	// Do some processing on all contours (objects and holes!)
	vector<RotatedRect> min_bounding_rectangle(contours.size());
	vector<vector<Point>> hulls(contours.size());
	vector<vector<int>> hull_indices(contours.size());
	vector<vector<Vec4i>> convexity_defects(contours.size());
	vector<Moments> contour_moments(contours.size());
	for (int contour_number=0; (contour_number<(int)contours.size()); contour_number++)
	{
		if (contours[contour_number].size() > 10)
		{
			min_bounding_rectangle[contour_number] = minAreaRect(contours[contour_number]);
			convexHull(contours[contour_number], hulls[contour_number]);
			convexHull(contours[contour_number], hull_indices[contour_number]);
			convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
			contour_moments[contour_number] = moments( contours[contour_number] );
		}
	}
	for (int contour_number=0; (contour_number>=0); contour_number=hierarchy[contour_number][0])
	{
		if (contours[contour_number].size() > 10)
		{
		Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
		drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
		char output[500];
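		// contourArea() measures the area inside the polygonal boundary, so perimeter/2 + 1 is added
		// to approximately include the boundary pixels themselves in the object area.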
		double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
		// Process any holes (removing each hole's area from the area of the enclosing contour)
		for (int hole_number=hierarchy[contour_number][2]; (hole_number>=0); hole_number=hierarchy[hole_number][0])
		{
			area -= (contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
 			drawContours( contours_image, contours, hole_number, colour, CV_FILLED, 8, hierarchy );
			sprintf(output,"Area=%.0f", contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Point location( contours[hole_number][0].x +20, contours[hole_number][0].y +5 );
			putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
		// Draw the minimum bounding rectangle
		Point2f bounding_rect_points[4];
		min_bounding_rectangle[contour_number].points(bounding_rect_points);
		line( contours_image, bounding_rect_points[0], bounding_rect_points[1], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[1], bounding_rect_points[2], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[2], bounding_rect_points[3], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[3], bounding_rect_points[0], Scalar(0, 0, 127));
		float bounding_rectangle_area = min_bounding_rectangle[contour_number].size.area();
		// Draw the convex hull
        drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
		// Highlight any significant convexity defects
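		// Each convexity defect is a Vec4i: [start index, end index, farthest-point index, depth],
		// where depth is fixed point with 8 fractional bits, so 256*2 corresponds to a 2 pixel depth.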
		int largest_convexity_depth=0;
		for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
		{
			if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
				largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
			if (convexity_defects[contour_number][convexity_index][3] > 256*2)
			{
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
			}
		}
		double hu_moments[7];
		HuMoments( contour_moments[contour_number], hu_moments );
		sprintf(output,"Perimeter=%d, Area=%.0f, BArea=%.0f, CArea=%.0f", contours[contour_number].size(),area,min_bounding_rectangle[contour_number].size.area(),contourArea(hulls[contour_number]));
		Point location( contours[contour_number][0].x, contours[contour_number][0].y-3 );
		putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		sprintf(output,"HuMoments = %.2f, %.2f, %.2f", hu_moments[0],hu_moments[1],hu_moments[2]);
		Point location2( contours[contour_number][0].x+100, contours[contour_number][0].y-3+15 );
		putText( contours_image, output, location2, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
	}
	imshow("Shape Statistics", contours_image );
	char c = cvWaitKey();
	cvDestroyAllWindows();

	// Support Vector Machine
	imshow("Good - original",good_orings);
	imshow("Defective - original",bad_orings);
	imshow("Unknown - original",unknown_orings);
	SupportVectorMachineDemo(good_orings,"Good",bad_orings,"Defective",unknown_orings);
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Template Matching
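	// matchTemplate() slides the template across the image and scores every position; with
	// CV_TM_CCORR_NORMED the result is a (W-w+1)x(H-h+1) map of normalised cross-correlations,
	// so good matches are local maxima close to the global maximum (here, within 99% of it).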
	Mat display_image, correlation_image;
	full_image.copyTo( display_image );
	double min_correlation, max_correlation;
	Mat matched_template_map;
	int result_columns =  full_image.cols - template1.cols + 1;
	int result_rows = full_image.rows - template1.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->reset();
	double before_tick_count = static_cast<double>(getTickCount());
	matchTemplate( full_image, template1, correlation_image, CV_TM_CCORR_NORMED );
	double after_tick_count = static_cast<double>(getTickCount());
	double duration_in_ms = 1000.0*(after_tick_count-before_tick_count)/getTickFrequency();
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (1)");
	Mat matched_template_display1;
	cvtColor(matched_template_map, matched_template_display1, CV_GRAY2BGR);
	Mat correlation_window1 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template1, Scalar(0,0,255) );
	double precision, recall, accuracy, specificity, f1;
	Mat template1locations_gray;
	cvtColor(template1locations, template1locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template1locations_gray, precision, recall, accuracy, specificity, f1 );
	char results[400];
	Scalar colour( 255, 255, 255);
	sprintf( results, "precision=%.2f", precision);
	Point location( 7, 213 );
	putText( display_image, "Results (1)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
  
	result_columns =  full_image.cols - template2.cols + 1;
	result_rows = full_image.rows - template2.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->ignoreTimeSinceLastRecorded();
	matchTemplate( full_image, template2, correlation_image, CV_TM_CCORR_NORMED );
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (2)");
	Mat matched_template_display2;
	cvtColor(matched_template_map, matched_template_display2, CV_GRAY2BGR);
	Mat correlation_window2 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template2, Scalar(0,0,255) );
	timer->putTimes(display_image);
	Mat template2locations_gray;
	cvtColor(template2locations, template2locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template2locations_gray, precision, recall, accuracy, specificity, f1 );
	sprintf( results, "precision=%.2f", precision);
	location.x = 123;
	location.y = 213;
	putText( display_image, "Results (2)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	Mat correlation_display1, correlation_display2;
	cvtColor(correlation_window1, correlation_display1, CV_GRAY2BGR);
	cvtColor(correlation_window2, correlation_display2, CV_GRAY2BGR);

	Mat output1 = JoinImagesVertically(template1,"Template (1)",correlation_display1,"Correlation (1)",4);
	Mat output2 = JoinImagesVertically(output1,"",matched_template_display1,"Local maxima (1)",4);
	Mat output3 = JoinImagesVertically(template2,"Template (2)",correlation_display2,"Correlation (2)",4);
	Mat output4 = JoinImagesVertically(output3,"",matched_template_display2,"Local maxima (2)",4);
	Mat output5 = JoinImagesHorizontally( full_image, "Original Image", output2, "", 4 );
	Mat output6 = JoinImagesHorizontally( output5, "", output4, "", 4 );
	Mat output7 = JoinImagesHorizontally( output6, "", display_image, "", 4 );
	imshow( "Template matching result", output7 );
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Chamfer Matching
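	// Chamfer matching: the frame's edge image is converted to a distance transform (each pixel
	// holds the distance to the nearest edge) and the model's edge points are summed over that
	// chamfer image at every offset; low totals mark positions where the model fits the edges well.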
    Mat model_gray,model_edges,model_edges2;
	cvtColor(bicycle_model, model_gray, CV_BGR2GRAY);
	threshold(model_gray,model_edges,127,255,THRESH_BINARY);
	Mat current_frame;
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,400);  // Just in case the video has already been used.
	bicycle_video >> current_frame;
	bicycle_background = current_frame.clone();
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,500); 
	timer->reset();
	int count = 0;
	while (!current_frame.empty() && (count < 8))
    {
		Mat result_image = current_frame.clone();
		count++;
		Mat difference_frame, difference_gray, current_edges;
		absdiff(current_frame,bicycle_background,difference_frame);
		cvtColor(difference_frame, difference_gray, CV_BGR2GRAY);
		Canny(difference_gray, current_edges, 100, 200, 3);

		Mat matching_image, chamfer_image, local_minima;
		timer->ignoreTimeSinceLastRecorded();
		threshold(current_edges,current_edges,127,255,THRESH_BINARY_INV);
		distanceTransform( current_edges, chamfer_image, CV_DIST_L2 , 3);
		timer->recordTime("Chamfer Image");
		ChamferMatching( chamfer_image, model_edges, matching_image );
		timer->recordTime("Matching");
		FindLocalMinima( matching_image, local_minima, 500.0 );
		timer->recordTime("Find Minima");
		DrawMatchingTemplateRectangles( result_image, local_minima, model_edges, Scalar( 255, 0, 0 ) );
		Mat chamfer_display_image = convert_32bit_image_for_display( chamfer_image );
		Mat matching_display_image = convert_32bit_image_for_display( matching_image );
		//timer->putTimes(result_image);
		Mat current_edges_display, local_minima_display, model_edges_display, colour_matching_display_image, colour_chamfer_display_image;
		cvtColor(current_edges, current_edges_display, CV_GRAY2BGR);
		cvtColor(local_minima, local_minima_display, CV_GRAY2BGR);
		cvtColor(model_edges, model_edges_display, CV_GRAY2BGR);
		cvtColor(matching_display_image, colour_matching_display_image, CV_GRAY2BGR);
		cvtColor(chamfer_display_image, colour_chamfer_display_image, CV_GRAY2BGR);

		Mat output1 = JoinImagesVertically(current_frame,"Video Input",current_edges_display,"Edges from difference", 4);
		Mat output2 = JoinImagesVertically(output1,"",model_edges_display,"Model", 4);
		Mat output3 = JoinImagesVertically(bicycle_background,"Static Background",colour_chamfer_display_image,"Chamfer image", 4);
		Mat output4 = JoinImagesVertically(output3,"",colour_matching_display_image,"Degree of fit", 4);
		Mat output5 = JoinImagesVertically(difference_frame,"Difference",result_image,"Result", 4);
		Mat output6 = JoinImagesVertically(output5,"",local_minima_display,"Local minima", 4);
		Mat output7 = JoinImagesHorizontally( output2, "", output4, "", 4 );
		Mat output8 = JoinImagesHorizontally( output7, "", output6, "", 4 );
		imshow("Chamfer matching", output8);
		c = waitKey(1000);  // This makes the image appear on screen
		bicycle_video >> current_frame;
	}
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Cascade of Haar classifiers (most often shown for face detection).
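	// detectMultiScale() rescales its search window by a factor of 1.1 per step, requires at least
	// 2 overlapping candidate detections (minNeighbors) before accepting a face, and ignores
	// candidates smaller than 30x30 pixels.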
    VideoCapture camera;
	camera.open(1);
	camera.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	camera.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    if( camera.isOpened() )
	{
		timer->reset();
		Mat current_frame;
		do {
			camera >> current_frame;
			if( current_frame.empty() )
				break;
			vector<Rect> faces;
			timer->ignoreTimeSinceLastRecorded();
			Mat gray;
			cvtColor( current_frame, gray, CV_BGR2GRAY );
			equalizeHist( gray, gray );
			cascade.detectMultiScale( gray, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
			timer->recordTime("Haar Classifier");
			for( int count = 0; count < (int)faces.size(); count++ )
				rectangle(current_frame, faces[count], cv::Scalar(255,0,0), 2);
			//timer->putTimes(current_frame);
			imshow( "Cascade of Haar Classifiers", current_frame );
			c = waitKey(10);  // This makes the image appear on screen
        } while (c == -1);
	}
}
// Example #3
void MeanShiftDemo( VideoCapture& video, Rect& starting_position, int starting_frame_number, int end_frame)
{
	bool half_size = true;
	video.set(CV_CAP_PROP_POS_FRAMES,starting_frame_number);
	Mat current_frame, hls_image;
	std::vector<cv::Mat> hls_planes(3);
	video >> current_frame;
	Rect current_position(starting_position);
	if (half_size)
	{
		resize(current_frame, current_frame, Size( current_frame.cols/2, current_frame.rows/2 ));
		current_position.height /= 2;
		current_position.width /= 2;
		current_position.x /= 2;
		current_position.y /= 2;
	}
	cvtColor(current_frame, hls_image, CV_BGR2HLS);
	split(hls_image,hls_planes);
    int chosen_channel = 0;  // Hue channel
	Mat image1ROI = hls_planes[chosen_channel](current_position);

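	// The tracking model is a histogram of the chosen channel within the starting rectangle.  Note
	// that OpenCV stores 8-bit hue in the range 0..180, so with a 0..255 histogram range the upper
	// bins simply remain empty.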
	float channel_range[2] = { 0.0, 255.0 };
    int channel_numbers[1] = { 0 };
	int number_bins[1] = { 32 };
	MatND histogram[1];
    const float* channel_ranges = channel_range;
	calcHist(&(image1ROI), 1, channel_numbers, Mat(), histogram[0], 1 , number_bins, &channel_ranges);
    normalize(histogram[0],histogram[0],1.0);
	rectangle(current_frame,current_position,Scalar(0,255,0),2);
	Mat starting_frame = current_frame.clone();
	int frame_number = starting_frame_number;
	while (!current_frame.empty() && (frame_number < end_frame))
    {
		// Calculate back projection
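		// calcBackProject() replaces each pixel with the histogram value of its channel value,
		// producing a per-pixel likelihood image; meanShift() then repeatedly moves the window to
		// the centroid of that likelihood (here for at most 5 iterations) until it converges.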
		Mat back_projection_probabilities;
        calcBackProject(&(hls_planes[chosen_channel]),1,channel_numbers,*histogram,back_projection_probabilities,&channel_ranges,255.0);
		// Remove low saturation points from consideration
		Mat saturation_mask;
        inRange( hls_image, Scalar(0,10,50,0),Scalar(180,256,256,0), saturation_mask );
		bitwise_and( back_projection_probabilities, back_projection_probabilities,back_projection_probabilities, saturation_mask );
		// Mean shift
		TermCriteria criteria(cv::TermCriteria::MAX_ITER,5,0.01);
		meanShift(back_projection_probabilities,current_position,criteria);
		// Output to screen
		rectangle(current_frame,current_position,Scalar(0,255,0),2);
		Mat chosen_channel_image, back_projection_image;
		cvtColor(hls_planes[chosen_channel], chosen_channel_image, CV_GRAY2BGR);
		cvtColor(back_projection_probabilities, back_projection_image, CV_GRAY2BGR);
		Mat row1_output = JoinImagesHorizontally( starting_frame, "Starting position", chosen_channel_image, "Chosen channel (Hue)", 4 );
		Mat row2_output = JoinImagesHorizontally( back_projection_image, "Back projection", current_frame, "Current position", 4 );
		Mat mean_shift_output = JoinImagesVertically(row1_output,"",row2_output,"", 4);
        imshow("Mean Shift Tracking", mean_shift_output );
		// Advance to next frame
		video >> current_frame;
		if (current_frame.empty())
			break;
		if (half_size)
			resize(current_frame, current_frame, Size( current_frame.cols/2, current_frame.rows/2 ));
		cvtColor(current_frame, hls_image, CV_BGR2HLS);
		split(hls_image,hls_planes);
		frame_number++;
	    cvWaitKey(1000);
	}
	char c = cvWaitKey();
    cvDestroyAllWindows();
}
void PCAFaceRecognition() {
#define NUMBER_OF_FACES 10
#define NUMBER_OF_IMAGES_PER_FACE 3
    vector<Mat> known_face_images;
    vector<int> known_labels;
    vector<Mat> unknown_face_images;
    vector<int> unknown_labels;
    // Load greyscale face images (which are from http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html)
	char file_name[40];
	Mat original_images,row_so_far,image_so_far,temp_image1;
	int face_number = 1;
	for (; face_number<=NUMBER_OF_FACES; face_number++)
	{
		for (int image_number = 1; image_number<=NUMBER_OF_IMAGES_PER_FACE; image_number++)
		{
			sprintf(file_name,"Media/att_faces/s%d/%d.pgm",face_number,image_number);
			Mat current_image = imread(file_name,0);
			if (image_number>1)
			{
				known_face_images.push_back(current_image);
				known_labels.push_back(face_number);
			}
			else
			{
				// Keep the first image of each face as a test case.
				unknown_face_images.push_back(current_image);
				unknown_labels.push_back(face_number);
			}
			cvtColor(current_image, current_image, CV_GRAY2BGR);
			if (image_number == 2)
			{
				if (face_number%10 == 1)
				{
					if (face_number > 1)
						if (face_number == 11)
							original_images = row_so_far.clone();
						else original_images = JoinImagesVertically( original_images, "", row_so_far, "", 1 );
					row_so_far = current_image.clone();
				}
				else
				{
					char image_number_string[10],previous_image_number_string[10];
					sprintf(previous_image_number_string,"%d",face_number-1);
					sprintf(image_number_string,"%d",face_number);
					row_so_far = JoinImagesHorizontally( row_so_far, (face_number%10==2)?previous_image_number_string:"", current_image, image_number_string, 1 );
				}
			}
		}
	}
	if (face_number <= 11)
		original_images = row_so_far.clone();
	else original_images = JoinImagesVertically( original_images, "", row_so_far, "", 1 );
	imshow("Known face images", original_images);
	imwrite("pca_unknown_faces.bmp",original_images);
    Ptr<FaceRecognizer> face_recogniser = createEigenFaceRecognizer();
    face_recogniser->train(known_face_images, known_labels);
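	// createEigenFaceRecognizer() learns a PCA subspace from the training faces; predict() projects
	// a probe face into that subspace and returns the label of the nearest training projection,
	// with the confidence value being the distance (smaller means a closer match).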
	char previous_face_number_string[100]="";
	char face_number_string[100]="";
	int correct_count = 0;
	for (face_number = 0; face_number < (int)unknown_face_images.size(); face_number++)
	{
		int predicted_face_number = 0;
		double recognition_confidence = 0.0;
		face_recogniser->predict(unknown_face_images[face_number],predicted_face_number,recognition_confidence);
		if (unknown_labels[face_number]==predicted_face_number)
			correct_count++;
		strcpy(previous_face_number_string,face_number_string);
		cvtColor(unknown_face_images[face_number], temp_image1, CV_GRAY2BGR);
		sprintf(face_number_string,"%d (%.0f)",predicted_face_number,recognition_confidence);
		Point location(2,15);
		putText( temp_image1, face_number_string, location, FONT_HERSHEY_SIMPLEX, 0.4, unknown_labels[face_number]==predicted_face_number?Scalar( 0,255,0 ):Scalar( 0,0,255 ) );
		if (face_number%10 == 0)
		{
			if (face_number > 10)
				image_so_far = JoinImagesVertically( image_so_far, "", row_so_far, "", 1 );
			else image_so_far = row_so_far.clone();
			row_so_far = temp_image1.clone();
		}
		else 
		{
			row_so_far = JoinImagesHorizontally( row_so_far, "", temp_image1, "", 1 );
		}
	}
	if (face_number > 10)
		image_so_far = JoinImagesVertically( image_so_far, "", row_so_far, "", 1 );
	else image_so_far = row_so_far.clone();
	char output[300];
	sprintf(output,"OVERALL Recognised %d/%d (with %d training image%s of %d subjects)",correct_count,unknown_face_images.size(),NUMBER_OF_IMAGES_PER_FACE-1,(NUMBER_OF_IMAGES_PER_FACE-1==1)?"":"s",NUMBER_OF_FACES);
	Point location(10,image_so_far.rows-10);
	putText( image_so_far, output, location, FONT_HERSHEY_SIMPLEX, 0.4, Scalar( 255,0,0 ) );
	imshow("Recognised faces using PCA (Eigenfaces)", image_so_far);
}
// Example #5
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
	Mat previous_gray_frame, optical_flow, optical_flow_display;
	Mat current_frame, thresholded_image, closed_image, first_frame;
	Mat current_frame_gray, running_average_background;
	Mat temp_running_average_background, running_average_difference;
	Mat running_average_foreground_mask, running_average_foreground_image;
	Mat selective_running_average_background;
	Mat temp_selective_running_average_background, selective_running_average_difference;
	Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
	double running_average_learning_rate = 0.01;
	surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
	surveillance_video >> current_frame;
	first_frame = current_frame.clone();
	cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
	current_frame.convertTo(running_average_background, CV_32F);
	selective_running_average_background = running_average_background.clone();
	MedianBackground median_background( current_frame, (float) 1.005, 1 );
	Mat median_background_image, median_foreground_image;

	int codec = static_cast<int>(surveillance_video.get(CV_CAP_PROP_FOURCC));
	// V3.0.0 update on next line.  OLD CODE was    BackgroundSubtractorMOG2 gmm; //(50,16,true);
    Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
	Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);

	double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
	double time_between_frames = 1000.0/frame_rate;
	Timestamper* timer = new Timestamper();
	int frame_count = 0;
	while ((!current_frame.empty()) && (frame_count++ < 1000))  // Process up to 1000 frames (originally 1800)
    {
 		double duration = static_cast<double>(getTickCount());
		vector<Mat> input_planes(3);
		split(current_frame,input_planes);
		cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);

		if (frame_count%2 == 0)  // Compute optical flow only on every second frame so that the inter-frame motion is larger.
		{
			if ( previous_gray_frame.data )
			{
				Mat lucas_kanade_flow;
				timer->ignoreTimeSinceLastRecorded();
				LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
				timer->recordTime("Lucas Kanade Optical Flow");
				calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
				cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
				drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
				timer->recordTime("Farneback Optical Flow");
				char frame_str[100];
				sprintf( frame_str, "Frame = %d", frame_count);
 				Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
				Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
				imshow("Optical Flow", optical_flow_output );
			}
			std::swap(previous_gray_frame, current_frame_gray);
		}
	
		// Static background image
		Mat difference_frame, binary_difference;
		Mat structuring_element(3,3,CV_8U,Scalar(1));
		timer->ignoreTimeSinceLastRecorded();
		absdiff(current_frame,first_frame,difference_frame);
		cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
		threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,thresholded_image,MORPH_OPEN,structuring_element);
			binary_difference.setTo(Scalar(0,0,0));
			current_frame.copyTo(binary_difference, thresholded_image);
		}
		else
		{
			binary_difference.setTo(Scalar(0,0,0));
		    current_frame.copyTo(binary_difference, thresholded_image);
		}
		timer->recordTime("Static difference");

		// Running Average (three channel version)
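		// accumulateWeighted() updates each background channel as bg = (1-alpha)*bg + alpha*frame,
		// so with a learning rate of 0.01 a stationary change takes on the order of a hundred
		// frames to be absorbed into the background.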
		vector<Mat> running_average_planes(3);
		split(running_average_background,running_average_planes);
		accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
		accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
		accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
		merge(running_average_planes,running_average_background);
		running_average_background.convertTo(temp_running_average_background,CV_8U);
		absdiff(temp_running_average_background,current_frame,running_average_difference);
		// Determine foreground points as any point with a difference of more than 30 on any one channel:
		threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
		split(running_average_foreground_mask,running_average_planes);
		bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
		bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
		if (clean_binary_images)
		{
			morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
		timer->recordTime("Running Average");

		// Running Average with selective update
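		// Selective update: pixels classified as background are updated at the full learning rate,
		// while pixels classified as foreground are updated at a third of that rate (below), so real
		// scene changes are eventually absorbed without moving objects quickly corrupting the model.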
		vector<Mat> selective_running_average_planes(3);
		// Find Foreground mask
		selective_running_average_background.convertTo(temp_selective_running_average_background,CV_8U);
		absdiff(temp_selective_running_average_background,current_frame,selective_running_average_difference);
		split(selective_running_average_difference,selective_running_average_planes);
		// Determine foreground points as any point with an average difference of more than 30 over all channels:
		Mat temp_sum = (selective_running_average_planes[0]/3 + selective_running_average_planes[1]/3 + selective_running_average_planes[2]/3);
		threshold(temp_sum,selective_running_average_foreground_mask,30,255,THRESH_BINARY_INV);
		// Update background
		split(selective_running_average_background,selective_running_average_planes);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate,selective_running_average_foreground_mask);
    	invertImage(selective_running_average_foreground_mask,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		merge(selective_running_average_planes,selective_running_average_background);
		if (clean_binary_images)
		{
			morphologyEx(selective_running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,selective_running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
 		selective_running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(selective_running_average_foreground_image, selective_running_average_foreground_mask);
		timer->recordTime("Selective Running Average");

		// Median background
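		// MedianBackground is a helper class from the accompanying library; presumably it maintains
		// an approximation of the per-pixel temporal median, which is robust to transient foreground.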
		timer->ignoreTimeSinceLastRecorded();
		median_background.UpdateBackground( current_frame );
		timer->recordTime("Median");
		median_background_image = median_background.GetBackgroundImage();
		Mat median_difference;
		absdiff(median_background_image,current_frame,median_difference);
		cvtColor(median_difference, median_difference, CV_BGR2GRAY);
		threshold(median_difference,median_difference,30,255,THRESH_BINARY);
		median_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(median_foreground_image, median_difference);

		// Update the Gaussian Mixture Model
 		// V3.0.0 update on next line.  OLD CODE was  gmm(current_frame, foreground_mask);
        gmm->apply(current_frame, foreground_mask);
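		// MOG2 labels detected shadows with an intermediate value (127 by default), so thresholding
		// at 150 keeps only definite foreground, thresholding at 50 also includes shadows, and the
		// absolute difference of the two masks isolates the shadow points.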
		// Clean the resultant binary (moving pixel) mask using an opening.
		threshold(foreground_mask,thresholded_image,150,255,THRESH_BINARY);
		Mat moving_incl_shadows, shadow_points;
		threshold(foreground_mask,moving_incl_shadows,50,255,THRESH_BINARY);
		absdiff( thresholded_image, moving_incl_shadows, shadow_points );
		Mat cleaned_foreground_mask;
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,cleaned_foreground_mask,MORPH_OPEN,structuring_element);
		}
		else cleaned_foreground_mask = thresholded_image.clone();
 		foreground_image.setTo(Scalar(0,0,0));
        current_frame.copyTo(foreground_image, cleaned_foreground_mask);
		timer->recordTime("Gaussian Mixture Model");
		// Create an average background image (just for information)
        Mat mean_background_image;
		timer->ignoreTimeSinceLastRecorded();
		// V3.0.0 update on next line.  OLD CODE was   gmm.getBackgroundImage(mean_background_image);
        gmm->getBackgroundImage(mean_background_image);

		duration = static_cast<double>(getTickCount())-duration;
		duration /= getTickFrequency()/1000.0;
		int delay = (time_between_frames>duration) ? ((int) (time_between_frames-duration)) : 1;
		char c = cvWaitKey(delay);
		
		char frame_str[100];
		sprintf( frame_str, "Frame = %d", frame_count);
		Mat temp_static_output = JoinImagesHorizontally( current_frame, frame_str, first_frame, "Static Background", 4 );
		Mat static_output = JoinImagesHorizontally( temp_static_output, "", binary_difference, "Foreground", 4 );
        imshow("Static Background Model", static_output );
 		Mat temp_running_output = JoinImagesHorizontally( current_frame, frame_str, temp_running_average_background, "Running Average Background", 4 );
		Mat running_output = JoinImagesHorizontally( temp_running_output, "", running_average_foreground_image, "Foreground", 4 );
		imshow("Running Average Background Model", running_output );
 		Mat temp_selective_output = JoinImagesHorizontally( current_frame, frame_str, temp_selective_running_average_background, "Selective Running Average Background", 4 );
		Mat selective_output = JoinImagesHorizontally( temp_selective_output, "", selective_running_average_foreground_image, "Foreground", 4 );
        imshow("Selective Running Average Background Model", selective_output );
 		Mat temp_median_output = JoinImagesHorizontally( current_frame, frame_str, median_background_image, "Median Background", 4 );
		Mat median_output = JoinImagesHorizontally( temp_median_output, "", median_foreground_image, "Foreground", 4 );
        imshow("Median Background Model", median_output );
  		Mat temp_gaussian_output = JoinImagesHorizontally( current_frame, frame_str, mean_background_image, "GMM Background", 4 );
		Mat gaussian_output = JoinImagesHorizontally( temp_gaussian_output, "", foreground_image, "Foreground", 4 );
        imshow("Gaussian Mixture Model", gaussian_output );
		timer->putTimes( current_frame );
		imshow( "Computation Times", current_frame );
	 	surveillance_video >> current_frame;
	}
	cvDestroyAllWindows();
}