Example 1
//===========================================================================
// Compute the patch expert response map over an area of interest by
// combining the responses of the individual CCNF neurons
void CCNF_patch_expert::Response(cv::Mat_<float> &area_of_interest, cv::Mat_<float> &response)
{
	int response_height = area_of_interest.rows - height + 1;
	int response_width = area_of_interest.cols - width + 1;

	// (Re)allocate the response map if its size does not match the expected one
	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}
		
	response.setTo(0);
	
	// Placeholders for the DFT of the image, the integral image, and the squared integral image, so they do not get recalculated for every neuron response
	cv::Mat_<double> area_of_interest_dft;
	cv::Mat integral_image, integral_image_sq;
	
	cv::Mat_<float> neuron_response;

	// Accumulate the responses of the individual neurons
	for(size_t i = 0; i < neurons.size(); i++)
	{		
		// Skip the neuron response if alpha is tiny, as it would contribute little to the overall result
		if(neurons[i].alpha > 1e-4)
		{
			neurons[i].Response(area_of_interest, area_of_interest_dft, integral_image, integral_image_sq, neuron_response);
			response = response + neuron_response;						
		}
	}

	int s_to_use = -1;

	// Find the Sigma matrix matching this response window size
	for(size_t i = 0; i < window_sizes.size(); ++i)
	{
		if(window_sizes[i] == response_height)
		{
			// Found the correct sigma
			s_to_use = (int)i;
			break;
		}
	}

	// Only apply the Sigma transform if a matching window size was found;
	// indexing Sigmas with -1 would read out of bounds
	if(s_to_use != -1)
	{
		// Multiply the vectorised response by the precomputed Sigma matrix
		cv::Mat_<float> resp_vec_f = response.reshape(1, response_height * response_width);

		cv::Mat out = Sigmas[s_to_use] * resp_vec_f;

		response = out.reshape(1, response_height);
	}

	// Make sure the response does not contain negative values
	double min;

	cv::minMaxIdx(response, &min, 0);
	if(min < 0)
	{
		response = response - min;
	}

}
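
A minimal usage sketch (not from the original source) of how this response computation might be invoked, assuming a trained CCNF_patch_expert with its width and height fields set; the caller name, image, and roi here are hypothetical:

// Hypothetical caller: compute the response map over an area of interest
// around a landmark estimate and locate its peak.
void sketch_peak_response(CCNF_patch_expert& patch_expert,
                          const cv::Mat_<float>& image, const cv::Rect& roi)
{
	// The area of interest must be larger than the patch, so that the
	// (rows - height + 1) x (cols - width + 1) response map is non-empty
	cv::Mat_<float> area_of_interest = image(roi).clone();

	cv::Mat_<float> response;
	patch_expert.Response(area_of_interest, response);

	// The maximum of the response map marks the most likely landmark position
	double max_val;
	cv::Point max_loc;
	cv::minMaxLoc(response, 0, &max_val, 0, &max_loc);
}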
Example 2
	// Aligning a face to a common reference frame
	void AlignFaceMask(cv::Mat& aligned_face, const cv::Mat& frame, const cv::Mat_<float>& detected_landmarks, cv::Vec6f params_global, const LandmarkDetector::PDM& pdm, const cv::Mat_<int>& triangulation, bool rigid, double sim_scale, int out_width, int out_height)
	{
		// Will warp to scaled mean shape
		cv::Mat_<float> similarity_normalised_shape = pdm.mean_shape * sim_scale;
	
		// Discard the z component
		similarity_normalised_shape = similarity_normalised_shape(cv::Rect(0, 0, 1, 2*similarity_normalised_shape.rows/3)).clone();

		cv::Mat_<float> source_landmarks = detected_landmarks.reshape(1, 2).t();
		cv::Mat_<float> destination_landmarks = similarity_normalised_shape.reshape(1, 2).t();

		// Aligning only the more rigid points
		if(rigid)
		{
			extract_rigid_points(source_landmarks, destination_landmarks);
		}

		cv::Matx22f scale_rot_matrix = AlignShapesWithScale(source_landmarks, destination_landmarks);
		cv::Matx23f warp_matrix;

		warp_matrix(0,0) = scale_rot_matrix(0,0);
		warp_matrix(0,1) = scale_rot_matrix(0,1);
		warp_matrix(1,0) = scale_rot_matrix(1,0);
		warp_matrix(1,1) = scale_rot_matrix(1,1);

		float tx = params_global[4];
		float ty = params_global[5];

		cv::Vec2f T(tx, ty);
		T = scale_rot_matrix * T;

		// Make sure centering is correct
		warp_matrix(0,2) = -T(0) + out_width/2;
		warp_matrix(1,2) = -T(1) + out_height/2;
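
		// The full mapping is therefore x_out = (sR) * x_in + (c - (sR) * t): the face
		// centre t = (tx, ty) in the original frame lands on the output image centre c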

		cv::warpAffine(frame, aligned_face, warp_matrix, cv::Size(out_width, out_height), cv::INTER_LINEAR);

		// Warp the detected landmarks into the aligned image frame as well
		cv::Matx22f warp_matrix_2d(warp_matrix(0,0), warp_matrix(0,1), warp_matrix(1,0), warp_matrix(1,1));
		
		destination_landmarks = cv::Mat(detected_landmarks.reshape(1, 2).t()) * cv::Mat(warp_matrix_2d).t();

		destination_landmarks.col(0) = destination_landmarks.col(0) + warp_matrix(0,2);
		destination_landmarks.col(1) = destination_landmarks.col(1) + warp_matrix(1,2);
		
		// Move the outer jaw points and the eyebrows up to include more of the upper face
		destination_landmarks.at<float>(0,1) -= (30 / 0.7) * sim_scale;
		destination_landmarks.at<float>(16,1) -= (30 / 0.7) * sim_scale;

		for (int p = 17; p <= 26; ++p)
		{
			destination_landmarks.at<float>(p,1) -= (30 / 0.7) * sim_scale;
		}

		destination_landmarks = cv::Mat(destination_landmarks.t()).reshape(1, 1).t();

		LandmarkDetector::PAW paw(destination_landmarks, triangulation, 0, 0, aligned_face.cols-1, aligned_face.rows-1);
		
		// Mask each of the channels (a bit of a roundabout way, but OpenCV 3.1 in debug mode doesn't seem to be able to handle a more direct way using split and merge)
		std::vector<cv::Mat> aligned_face_channels(aligned_face.channels());
		
		for (int c = 0; c < aligned_face.channels(); ++c)
		{
			cv::extractChannel(aligned_face, aligned_face_channels[c], c);
		}

		for(size_t i = 0; i < aligned_face_channels.size(); ++i)
		{
			cv::multiply(aligned_face_channels[i], paw.pixel_mask, aligned_face_channels[i], 1.0, CV_8U);
		}

		if(aligned_face.channels() == 3)
		{
			cv::Mat planes[] = { aligned_face_channels[0], aligned_face_channels[1], aligned_face_channels[2] };
			cv::merge(planes, 3, aligned_face);
		}
		else
		{
			aligned_face = aligned_face_channels[0];
		}
	}
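
A hedged sketch of a call site for AlignFaceMask; the model fields (clnf_model.detected_landmarks, params_global, pdm) and the constants (0.7 similarity scale, 112x112 output) are assumptions modelled on typical OpenFace usage, not taken from this section:

	// Hypothetical call site; all names and constants here are assumptions
	cv::Mat aligned_face;
	AlignFaceMask(aligned_face,
	              captured_image,                  // input frame
	              clnf_model.detected_landmarks,   // 2n x 1 vector of stacked (x..., y...) coordinates
	              clnf_model.params_global,        // global scale, rotation, and translation
	              clnf_model.pdm,                  // the point distribution model
	              triangulation,                   // triangulation of the mean shape
	              true,                            // rigid: align on the more rigid points only
	              0.7,                             // sim_scale
	              112, 112);                       // out_width, out_height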