Example #1
static void Normalize(vector<cv::Point2d> * shape, const TrainingParameters &tp)
{

	// Translate the shape so that its centroid sits at the origin.
	cv::Point2d center;
	for (const cv::Point2d &p : *shape)
		center += p;
	center *= 1.0 / shape->size();
	for (cv::Point2d &p : *shape)
		p -= center;

	// Scale so the inter-eye distance becomes 1 and rotate so the eyes are
	// horizontal. atan2 is used instead of atan to avoid a division by zero
	// when the eyes happen to be vertically aligned.
	cv::Point2d left_eye = (*shape).at(tp.left_eye_index);
	cv::Point2d right_eye = (*shape).at(tp.right_eye_index);
	double eyes_distance = cv::norm(left_eye - right_eye);
	double scale = 1 / eyes_distance;

	double theta = -atan2(right_eye.y - left_eye.y, right_eye.x - left_eye.x);

	// The translation (centering) must be applied before the scale/rotation,
	// so it was done separately above; the transform below therefore carries
	// no translation component.
	Transform t;
	t.scale_rotation(0, 0) = scale * cos(theta);
	t.scale_rotation(0, 1) = -scale * sin(theta);
	t.scale_rotation(1, 0) = scale * sin(theta);
	t.scale_rotation(1, 1) = scale * cos(theta);

	t.Apply(shape, false);
}
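A minimal usage sketch for Normalize, assuming a toy 3-point shape whose landmarks 0 and 1 are the eyes, and assuming TrainingParameters exposes the two eye indices as assignable fields (all values below are illustrative):

	TrainingParameters tp;
	tp.left_eye_index = 0;
	tp.right_eye_index = 1;
	vector<cv::Point2d> shape{ {100, 120}, {160, 118}, {130, 170} };
	Normalize(&shape, tp);
	// The shape now has its centroid at the origin, an inter-eye distance
	// of 1, and the eyes lying on a horizontal line.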
Example #2
// Note: `shapes` is deliberately taken by value; the local copies are
// re-aligned in place on every iteration.
vector<cv::Point2d> MeanShape(vector<vector<cv::Point2d>> shapes,
	const TrainingParameters &tp)
{
	const int kIterationCount = 10;
	vector<cv::Point2d> mean_shape = shapes[0];
	
	for (int i = 0; i < kIterationCount; ++i)
	{
		// Align every shape to the current mean estimate.
		for (vector<cv::Point2d> &shape : shapes)
		{
			Transform t = Procrustes(mean_shape, shape);
			t.Apply(&shape);
		}
		
		// Recompute the mean from the aligned shapes.
		for (cv::Point2d &p : mean_shape)
			p.x = p.y = 0;
		
		for (const vector<cv::Point2d> & shape : shapes)
			for (int j = 0; j < mean_shape.size(); ++j)
			{
				mean_shape[j].x += shape[j].x;
				mean_shape[j].y += shape[j].y;
			}

		for (cv::Point2d &p : mean_shape)
			p *= 1.0 / shapes.size();

		// Re-normalize so the mean stays in the canonical reference frame.
		Normalize(&mean_shape, tp);
	}

	return mean_shape;
}
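Procrustes is used throughout these examples but never shown. Below is a minimal sketch of a least-squares 2-D similarity fit of the same flavor, under the assumption that Procrustes(to, from) returns a transform mapping `from` onto `to`; the struct and function names are illustrative, not the project's API:

	#include <opencv2/core.hpp>
	#include <vector>

	// A 2-D similarity transform: p' = (a*p.x - b*p.y, b*p.x + a*p.y) + translation.
	struct SimilarityFit { double a, b; cv::Point2d translation; };

	SimilarityFit FitSimilarity(const std::vector<cv::Point2d> &to,
		const std::vector<cv::Point2d> &from)
	{
		// Center both point sets on their centroids.
		cv::Point2d mt(0, 0), mf(0, 0);
		for (size_t i = 0; i < to.size(); ++i) { mt += to[i]; mf += from[i]; }
		mt *= 1.0 / to.size();
		mf *= 1.0 / from.size();

		// Closed-form least-squares scale/rotation: a = s*cos(theta), b = s*sin(theta).
		double a = 0, b = 0, norm = 0;
		for (size_t i = 0; i < to.size(); ++i)
		{
			cv::Point2d x = from[i] - mf, y = to[i] - mt;
			a += x.x * y.x + x.y * y.y;   // dot product
			b += x.x * y.y - x.y * y.x;   // 2-D cross product
			norm += x.x * x.x + x.y * x.y;
		}
		a /= norm;
		b /= norm;

		// The translation maps the transformed source centroid onto the target centroid.
		cv::Point2d t(mt.x - (a * mf.x - b * mf.y), mt.y - (b * mf.x + a * mf.y));
		return { a, b, t };
	}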
Example #3
vector<cv::Point2d> FaceX::Alignment(cv::Mat image,
	vector<cv::Point2d> initial_landmarks) const
{
	vector<vector<double>> all_results(test_init_shapes_[0].size() * 2);
	for (int i = 0; i < test_init_shapes_.size(); ++i)
	{
		Transform t = Procrustes(initial_landmarks, test_init_shapes_[i]);
		vector<cv::Point2d> init_shape = test_init_shapes_[i];
		t.Apply(&init_shape);
		for (int j = 0; j < stage_regressors_.size(); ++j)
		{
			vector<cv::Point2d> offset =
				stage_regressors_[j].Apply(image, mean_shape_, init_shape);
			Transform t = Procrustes(init_shape, mean_shape_);
			t.Apply(&offset, false);
			init_shape = ShapeAdjustment(init_shape, offset);
		}

		// Record this run's estimate; `k` avoids shadowing the outer `i`.
		for (int k = 0; k < init_shape.size(); ++k)
		{
			all_results[k * 2].push_back(init_shape[k].x);
			all_results[k * 2 + 1].push_back(init_shape[k].y);
		}
	}

	// Aggregate the runs by taking the per-coordinate median, which is robust
	// to a few badly converged initializations.
	vector<cv::Point2d> result(test_init_shapes_[0].size());
	for (int i = 0; i < result.size(); ++i)
	{
		nth_element(all_results[i * 2].begin(),
			all_results[i * 2].begin() + test_init_shapes_.size() / 2,
			all_results[i * 2].end());
		result[i].x = all_results[i * 2][test_init_shapes_.size() / 2];
		nth_element(all_results[i * 2 + 1].begin(),
			all_results[i * 2 + 1].begin() + test_init_shapes_.size() / 2,
			all_results[i * 2 + 1].end());
		result[i].y = all_results[i * 2 + 1][test_init_shapes_.size() / 2];
	}
	return result;
}
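The nth_element calls above take the per-coordinate median across the different initializations. In isolation the idea looks like this (a generic helper, not part of the original API):

	#include <algorithm>
	#include <vector>

	// Median via std::nth_element: only the element at the middle position is
	// guaranteed to end up where a full sort would place it, which costs O(n)
	// on average instead of O(n log n).
	double Median(std::vector<double> v)
	{
		std::nth_element(v.begin(), v.begin() + v.size() / 2, v.end());
		return v[v.size() / 2];
	}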
Example #4
 void SetTransform(const Transform &inTrans)
 {
    int points = mCount;
    // Recompute the cached points only when the point count or the
    // transform itself has changed.
    if (points != mTransformed.size() || inTrans != mTransform)
    {
       mTransform = inTrans;
       mTransformed.resize(points);
       UserPoint *src = (UserPoint *)&mData[mData0];
       for (int i = 0; i < points; i++)
       {
          mTransformed[i] = mTransform.Apply(src[i].x, src[i].y);
       }
    }
 }
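SetTransform caches the transformed points and recomputes them only when the input changes. The same idea in isolation, assuming the transform type provides operator!= (names are illustrative, not from the original code base):

	#include <cstddef>
	#include <vector>

	template <typename TransformT, typename PointT, typename ApplyFn>
	void UpdateTransformedCache(const TransformT &t, const std::vector<PointT> &src,
		TransformT &cached, std::vector<PointT> &out, ApplyFn apply)
	{
		// Skip the work when the cache is still valid.
		if (out.size() == src.size() && !(t != cached))
			return;
		cached = t;
		out.resize(src.size());
		for (std::size_t i = 0; i < src.size(); ++i)
			out[i] = apply(t, src[i]);
	}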
Example #5
vector<cv::Point2d> Regressor::Apply(const vector<cv::Point2d> &mean_shape,
	cv::Mat image_infrared, cv::Mat image_depth, const vector<cv::Point2d> &init_shape) const
{
	cv::Mat pixels_val(1, pixels_.size(), CV_64FC1);
	// Map the sampling offsets from the mean-shape frame into the frame of the
	// current shape estimate (scale/rotation only, no translation).
	Transform t = Procrustes(init_shape, mean_shape);
	vector<cv::Point2d> offsets(pixels_.size());
	for (int j = 0; j < pixels_.size(); ++j)
		offsets[j] = pixels_[j].offset;
	t.Apply(&offsets, false);

	vector<cv::Point> pixels_pos;
	for (int j = 0; j < pixels_.size(); ++j)
		pixels_pos.push_back(init_shape[pixels_[j].base_landmark] + offsets[j]);

	// Plane coefficients used below to compensate the depth samples for the
	// head's global tilt.
	pair<double, double> coeffs = ComputePose(image_depth, pixels_pos);

	double *p = pixels_val.ptr<double>(0);
	for (int j = 0; j < pixels_.size(); ++j)
	{
		if (pixels_pos[j].inside(cv::Rect(0, 0, image_infrared.cols, image_infrared.rows)))
		{
			if (pixels_[j].type == 0)	// infrared intensity sample
				p[j] = image_infrared.at<ushort>(pixels_pos[j]);
			else
			{
				// Depth sample, corrected by the fitted plane to remove tilt.
				p[j] = image_depth.at<ushort>(pixels_pos[j])
					- alpha_ * (coeffs.first * pixels_pos[j].x + coeffs.second * pixels_pos[j].y);
			}
		}
		else
			p[j] = 0;	// out-of-image samples read as 0
	}

	// Each fern accumulates its vote into the coefficient vector, which is
	// then projected through the base_ matrix to get per-landmark offsets.
	cv::Mat base_coeffs = cv::Mat::zeros(base_.cols, 1, CV_64FC1);
	for (int i = 0; i < ferns_.size(); ++i)
		ferns_[i].ApplyMini(pixels_val, base_coeffs);

	cv::Mat result_mat = base_ * base_coeffs;

	vector<cv::Point2d> result(mean_shape.size());
	for (int i = 0; i < result.size(); ++i)
	{
		result[i].x = result_mat.at<double>(i * 2);
		result[i].y = result_mat.at<double>(i * 2 + 1);
	}
	return result;
}
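ComputePose is not shown in the snippet. Judging only from its call site, it appears to fit a plane d ≈ c1*x + c2*y to the depth image at the sampled positions so the regressor can subtract the global tilt; a hypothetical sketch under that assumption (not the original implementation):

	#include <opencv2/core.hpp>
	#include <utility>
	#include <vector>

	std::pair<double, double> ComputePoseSketch(const cv::Mat &depth,
		const std::vector<cv::Point> &positions)
	{
		// Build the least-squares system A * [c1, c2]^T = d over in-image samples.
		cv::Mat A(0, 2, CV_64FC1), d(0, 1, CV_64FC1);
		for (const cv::Point &pos : positions)
		{
			if (!pos.inside(cv::Rect(0, 0, depth.cols, depth.rows)))
				continue;
			cv::Mat row = (cv::Mat_<double>(1, 2) << pos.x, pos.y);
			A.push_back(row);
			d.push_back(static_cast<double>(depth.at<ushort>(pos)));
		}
		if (A.rows < 2)
			return { 0.0, 0.0 };	// not enough samples to fit a plane
		cv::Mat coeffs;
		cv::solve(A, d, coeffs, cv::DECOMP_SVD);	// least-squares fit
		return { coeffs.at<double>(0), coeffs.at<double>(1) };
	}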
Example #6
vector<cv::Point2d> FaceX::Alignment(cv::Mat image_infrared, cv::Mat image_depth, cv::Rect face_rect) const
{
	CV_Assert(is_loaded_);

	vector<vector<double>> all_results(test_init_shapes_[0].size() * 2);
	for (int i = 0; i < test_init_shapes_.size(); ++i)
	{
		// Map the canonical initial shape, expressed in the unit square,
		// into the detected face rectangle.
		vector<cv::Point2d> init_shape = MapShape(cv::Rect(0, 0, 1, 1),
			test_init_shapes_[i], face_rect);
		for (int j = 0; j < stage_regressors_.size(); ++j)
		{
			vector<cv::Point2d> offset =
				stage_regressors_[j].Apply(mean_shape_, image_infrared, image_depth, init_shape);
			Transform t = Procrustes(init_shape, mean_shape_);
			t.Apply(&offset, false);
			init_shape = ShapeAdjustment(init_shape, offset);
		}

		// Record this run's estimate; `k` avoids shadowing the outer `i`.
		for (int k = 0; k < init_shape.size(); ++k)
		{
			all_results[k * 2].push_back(init_shape[k].x);
			all_results[k * 2 + 1].push_back(init_shape[k].y);
		}
	}

	// Median-aggregate across the initializations, as in Example #3.
	vector<cv::Point2d> result(test_init_shapes_[0].size());
	for (int i = 0; i < result.size(); ++i)
	{
		nth_element(all_results[i * 2].begin(),
			all_results[i * 2].begin() + test_init_shapes_.size() / 2,
			all_results[i * 2].end());
		result[i].x = all_results[i * 2][test_init_shapes_.size() / 2];
		nth_element(all_results[i * 2 + 1].begin(),
			all_results[i * 2 + 1].begin() + test_init_shapes_.size() / 2,
			all_results[i * 2 + 1].end());
		result[i].y = all_results[i * 2 + 1][test_init_shapes_.size() / 2];
	}
	return result;
}
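MapShape is also not shown. Its call site, which maps test_init_shapes_[i] from the unit square cv::Rect(0, 0, 1, 1) into face_rect, suggests a linear rectangle-to-rectangle mapping; a hypothetical sketch under that assumption:

	#include <opencv2/core.hpp>
	#include <vector>

	std::vector<cv::Point2d> MapShapeSketch(cv::Rect from_rect,
		const std::vector<cv::Point2d> &shape, cv::Rect to_rect)
	{
		std::vector<cv::Point2d> result;
		for (const cv::Point2d &p : shape)
		{
			// Normalize into [0, 1] relative to from_rect, then scale into to_rect.
			double x = (p.x - from_rect.x) / from_rect.width;
			double y = (p.y - from_rect.y) / from_rect.height;
			result.emplace_back(to_rect.x + x * to_rect.width,
				to_rect.y + y * to_rect.height);
		}
		return result;
	}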
Example #7
void RTree::Apply(int num_trees, int num_lm, int index_tree, int index_lm,
	const cv::Mat &image, const vector<cv::Point2d> &mean_shape,
	const vector<cv::Point2d> &init_shape, cv::Mat &bin_feat) const
{
	int num_nodes_split = (num_nodes - 1) / 2;
	int idx_node = 0;
	// The pixel-pair offsets are stored in the mean-shape frame; map them into
	// the current shape's frame. The transform does not depend on the node, so
	// compute it once, outside the descent loop.
	Transform t = Procrustes(init_shape, mean_shape);
	while (idx_node < num_nodes_split)
	{
		// Compute the feature value and descend to the corresponding child.
		double feat;
		vector<cv::Point2d> offset_pair(2);
		offset_pair[0] = feats[idx_node].first;
		offset_pair[1] = feats[idx_node].second;
		t.Apply(&offset_pair, false);

		cv::Point feat_pos_first = init_shape[index_lm] + offset_pair[0];
		cv::Point feat_pos_second = init_shape[index_lm] + offset_pair[1];
		if (feat_pos_first.inside(cv::Rect(0, 0, image.cols, image.rows))
			&& feat_pos_second.inside(cv::Rect(0, 0, image.cols, image.rows)))
		{
			feat = image.at<uchar>(feat_pos_first)
				- image.at<uchar>(feat_pos_second);
		}
		else
			feat = 0;

		// The tree is a complete binary tree stored in an array: the children
		// of node n are nodes 2n + 1 and 2n + 2.
		if (feat < thresholds[idx_node]) idx_node = 2 * idx_node + 1;
		else idx_node = 2 * idx_node + 2;
	}
	if (idx_node >= num_nodes)
		throw out_of_range("idx_node is greater than or equal to num_nodes while applying the tree");
	// Calculate the index of this leaf node in the binary feature vector.
	int num_leaves = num_nodes - num_nodes_split;
	int bool_index = index_lm * num_trees * num_leaves
		+ index_tree * num_leaves + idx_node - (num_nodes - num_leaves);
	if (bool_index >= num_lm * num_trees * num_leaves)
		throw out_of_range("bool_index is out of the range of bin_feat while applying the tree");
	bin_feat.at<float>(0, bool_index) = 1;
}
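A worked instance of the leaf indexing above, assuming a complete tree with num_nodes = 7 (3 split nodes, 4 leaves), 2 trees per landmark, and a descent that ends at node 5; all concrete numbers are illustrative:

	int num_nodes = 7;
	int num_nodes_split = (num_nodes - 1) / 2;    // 3 split nodes (0..2)
	int num_leaves = num_nodes - num_nodes_split; // 4 leaves (nodes 3..6)
	int idx_node = 5;                             // descent ended here
	int leaf = idx_node - (num_nodes - num_leaves);          // 5 - 3 = 2
	int num_trees = 2, index_lm = 1, index_tree = 0;
	int bool_index = index_lm * num_trees * num_leaves
		+ index_tree * num_leaves + leaf;                    // 8 + 0 + 2 = 10
	// bin_feat.at<float>(0, 10) would be set to 1 for this tree.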