Code Example #1
	//=============================================================================
	// Essentially Kabsch's algorithm, but it also allows the two point collections to differ in scale
	cv::Matx22f AlignShapesWithScale(cv::Mat_<float>& src, cv::Mat_<float> dst)
	{
		int n = src.rows;

		// First we mean normalise both src and dst
		float mean_src_x = cv::mean(src.col(0))[0];
		float mean_src_y = cv::mean(src.col(1))[0];

		float mean_dst_x = cv::mean(dst.col(0))[0];
		float mean_dst_y = cv::mean(dst.col(1))[0];

		cv::Mat_<float> src_mean_normed = src.clone();
		src_mean_normed.col(0) = src_mean_normed.col(0) - mean_src_x;
		src_mean_normed.col(1) = src_mean_normed.col(1) - mean_src_y;

		cv::Mat_<float> dst_mean_normed = dst.clone();
		dst_mean_normed.col(0) = dst_mean_normed.col(0) - mean_dst_x;
		dst_mean_normed.col(1) = dst_mean_normed.col(1) - mean_dst_y;

		// Find the scaling factor of each
		cv::Mat src_sq;
		cv::pow(src_mean_normed, 2, src_sq);

		cv::Mat dst_sq;
		cv::pow(dst_mean_normed, 2, dst_sq);

		float s_src = sqrt(cv::sum(src_sq)[0] / n);
		float s_dst = sqrt(cv::sum(dst_sq)[0] / n);

		src_mean_normed = src_mean_normed / s_src;
		dst_mean_normed = dst_mean_normed / s_dst;

		float s = s_dst / s_src;

		// Get the rotation
		cv::Matx22f R = AlignShapesKabsch2D(src_mean_normed, dst_mean_normed);

		cv::Matx22f	A;
		cv::Mat(s * R).copyTo(A);

		// Apply the scaled rotation to src and measure the remaining offset;
		// the translation (t_x, t_y) is computed here but not returned
		cv::Mat_<float> aligned = (cv::Mat(cv::Mat(A) * src.t())).t();
		cv::Mat_<float> offset = dst - aligned;

		float t_x = cv::mean(offset.col(0))[0];
		float t_y = cv::mean(offset.col(1))[0];

		return A;

	}
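A minimal usage sketch for the function above (illustrative, not from the original project). It assumes AlignShapesKabsch2D, which the snippet calls, is also in scope. The destination shape is a rotated, scaled and shifted copy of the source, so the returned 2x2 matrix should recover the rotation and scale, while the mean-normalisation discards the translation.

#include <opencv2/core.hpp>

int main()
{
	// Source shape: one 2D point per row
	cv::Mat_<float> src = (cv::Mat_<float>(3, 2) << 0, 0,
	                                                1, 0,
	                                                0, 1);

	// Destination: rotate by 90 degrees, scale by 2, then shift
	cv::Matx22f T(0.f, -2.f,
	              2.f,  0.f);
	cv::Mat_<float> dst = (cv::Mat(cv::Mat(T) * src.t())).t();
	dst += 5.0f;   // translation is removed by the mean-normalisation step

	cv::Matx22f A = AlignShapesWithScale(src, dst);
	// A should be approximately equal to T; the translation is not part of the result
	return 0;
}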
Code Example #2
/** Orthogonal matching pursuit
* x: input signal, N * 1 
* D: dictionary, N * M 
* L: number of non-zero elements in output
* coeff: coefficient of each atom in the dictionary, M * 1
*/
void OMP(const cv::Mat_<double>& x, const cv::Mat_<double>& D, int L, cv::Mat_<double>& coeff){
    int dim = x.rows;
    int atom_num = D.cols;
    coeff = cv::Mat::zeros(atom_num, 1, CV_64FC1);
    cv::Mat_<double> residual = x.clone();
    cv::Mat_<int> selected_index(L, 1);
    cv::Mat_<double> a;
    for (int i = 0; i < L; i++){
        // Pick the atom most correlated with the current residual
        cv::Mat_<double> dot_p = D.t() * residual;
        cv::Point max_index;
        cv::minMaxLoc(cv::abs(dot_p), NULL, NULL, NULL, &max_index);
        selected_index(i) = max_index.y;
        // Re-fit the signal on all atoms selected so far (least squares via the pseudo-inverse)
        cv::Mat_<double> temp(dim, i + 1);
        for (int j = 0; j < i + 1; j++){
            D.col(selected_index(j)).copyTo(temp.col(j));
        }
        cv::Mat_<double> invert_temp;
        cv::invert(temp, invert_temp, cv::DECOMP_SVD);
        a = invert_temp * x;
        residual = x - temp * a;
    }

    // Scatter the final coefficients back to their positions in the dictionary
    for (int i = 0; i < L; i++){
        coeff(selected_index(i)) = a(i);
    }
}
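A minimal usage sketch (illustrative): build a random dictionary with unit-norm atoms, synthesise a 2-sparse signal from two of them, and check that OMP recovers the two coefficients. The dictionary size and the chosen atom indices are arbitrary.

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    const int N = 16, M = 32;
    cv::Mat_<double> D(N, M);
    cv::randn(D, 0.0, 1.0);
    for (int j = 0; j < M; j++){
        cv::Mat_<double> atom = D.col(j);   // header view into D
        atom /= cv::norm(atom);             // the correlation step assumes unit-norm atoms
    }

    // Signal built from two atoms
    cv::Mat_<double> x = 1.5 * D.col(3) + 0.7 * D.col(20);

    cv::Mat_<double> coeff;
    OMP(x, D, 2, coeff);
    std::cout << "coeff(3) = " << coeff(3) << ", coeff(20) = " << coeff(20) << std::endl;
    return 0;
}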
Code Example #3
	// Extract summary statistics (mean, stdev, and max-min range) from each dimension of a set of descriptors; each row is a descriptor
	void ExtractSummaryStatistics(const cv::Mat_<double>& descriptors, cv::Mat_<double>& sum_stats, bool use_mean, bool use_stdev, bool use_max_min)
	{
		// Up to three summary statistics are used per dimension:
		// mean, stdev, and range (max - min)
		int num_stats = 0;

		if(use_mean)
			num_stats++;

		if(use_stdev)
			num_stats++;

		if(use_max_min)
			num_stats++;

		sum_stats = cv::Mat_<double>(1, descriptors.cols * num_stats, 0.0);
		for(int i = 0; i < descriptors.cols; ++i)
		{
			cv::Scalar mean, stdev;
			cv::meanStdDev(descriptors.col(i), mean, stdev);

			int add = 0;

			if(use_mean)
			{
				sum_stats.at<double>(0, i*num_stats + add) = mean[0];
				add++;
			}

			if(use_stdev)
			{
				sum_stats.at<double>(0, i*num_stats + add) = stdev[0];
				add++;
			}

			if(use_max_min)
			{
				double min, max;
				cv::minMaxIdx(descriptors.col(i), &min, &max);
				sum_stats.at<double>(0, i*num_stats + add) = max - min;
				add++;
			}
		}		
	}
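A minimal usage sketch (illustrative): summarise ten 5-dimensional descriptors into a single 1 x 15 feature vector of per-dimension statistics.

#include <opencv2/core.hpp>

int main()
{
	cv::Mat_<double> descriptors(10, 5);
	cv::randu(descriptors, 0.0, 1.0);   // one descriptor per row

	cv::Mat_<double> sum_stats;
	ExtractSummaryStatistics(descriptors, sum_stats, true, true, true);
	// sum_stats is 1 x 15, laid out per dimension as [mean, stdev, range]
	return 0;
}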
Code Example #4
File: model.cpp  Project: 201312201401019/DeepID1
void cModel::UpDate(cv::Mat_<int>&binary, cv::Rect &bbox, cv::Mat_<float>& shape, int stage)
{	
	int num_point = m_Model.__head.__num_point;
	int w_cols = 2 * num_point;
	int w_rows = binary.cols;	

	cv::Mat_<float> deltashapes = cv::Mat::zeros(1, w_cols, CV_32FC1);

	// Accumulate the regression outputs (rows of __w) of every binary feature that fires at this stage
	for (int i = 0; i < w_rows; i++){
		if (binary(0, i) == 1){
			deltashapes += m_Model.__w.row(stage * w_rows + i);
		}
	}

	// Split the accumulated delta into x and y parts and scale it from the
	// normalised frame to pixels using the face bounding box
	cv::Mat_<float> deltax = deltashapes.colRange(0, num_point).t();
	deltax *= bbox.width;
	cv::Mat_<float> deltay = deltashapes.colRange(num_point, w_cols).t();
	deltay *= bbox.height;
		
	shape.col(0) += deltax;
	shape.col(1) += deltay;
}
Code Example #5
/**
 * Find the 3D rotation and translation from a homography (i.e. the calibration board's pose in the camera coordinate system)
 * @param intrisinc: [Input] Camera intrinsic matrix
 * @param homo: [Input] Estimated homography
 * @param rot: [Output] Rotation result
 * @param trans: [Output] Translation result
 */
void AugmentationEnvironment::H2Rt(const cv::Mat_<double> intrisinc, const cv::Mat_<double>& homo, cv::Mat_<double>& rot, cv::Mat_<double> &trans)
{
	double coefH;

	cv::Mat_<double> invcam = intrisinc.inv();
	rot = invcam * homo;

	//Scale the rotation matrix to the right order, i.e. recover the scale coefficient lost when estimating the homography
	/**
	 * [Cam] * [Rot] = [Homo] * coefH
	 *
	 *         |fx  0  u0|
	 * [Cam] = | 0 fy  v0|
	 *         | 0  0   1|
	 *
	 *         |R11 R12 t1|
	 * [Rot] = |R21 R22 t2|
	 *         |R31 R32 t3|
	 *
	 *          |H11 H12 H13|
	 * [Homo] = |H21 H22 H23|
	 *          |H31 H32   1|
	 */
	coefH = 2/(cv::norm(rot.col(0)) + cv::norm(rot.col(1)));
	rot = rot * coefH;
	trans = rot.col(2).clone();

	//The third column of the rotation matrix is produced as the cross product of the other two columns
	cv::Mat tmp = rot.col(2);
	rot.col(0).cross(rot.col(1)).copyTo(tmp);

	//Make the rotation matrix a "real" (orthonormal) rotation matrix, as in Zhang's calibration method
	/**
	 * rot = UDVt
	 * rot_real = UVt
	 */
	cv::Mat_<double> D,U,Vt;
	cv::SVD::compute(rot,D,U,Vt);
	rot = U*Vt;
}
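A minimal usage sketch (illustrative): estimate the homography from planar board-to-image correspondences with cv::findHomography and pass it to H2Rt together with a camera intrinsic matrix. The wrapper function name, the example intrinsics, and the assumption that an AugmentationEnvironment instance is accessible are all hypothetical.

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

void poseFromHomography(AugmentationEnvironment& env,
                        const std::vector<cv::Point2f>& board_pts,   // planar board points (z = 0)
                        const std::vector<cv::Point2f>& image_pts)   // detected image points
{
	// Example intrinsics; in practice these come from camera calibration
	cv::Mat_<double> K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
	                                                  0, 800, 240,
	                                                  0,   0,   1);

	cv::Mat_<double> H = cv::findHomography(board_pts, image_pts);   // 3x3, CV_64F

	cv::Mat_<double> R, t;
	env.H2Rt(K, H, R, t);   // R: 3x3 rotation, t: 3x1 translation in camera coordinates
}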