ClusteredLinearRegression::ClusteredLinearRegression(const cv::Mat_<double>& X, const cv::Mat_<double>& Y, int minClusterSize) {
	int N = X.rows;

	vector<cv::Mat_<float> > clusterX, clusterY;
	vector<cv::Mat_<float> > floatCentroids;
	{
		cv::Mat floatX, floatY;
		X.convertTo(floatX, CV_32F);
		Y.convertTo(floatY, CV_32F);
		
		cv::Mat_<float> centroid;
		X.convertTo(centroid, CV_32F);
		cv::reduce(centroid, centroid, 0, CV_REDUCE_AVG);
		partition(floatX, floatY, centroid, minClusterSize, clusterX, clusterY, floatCentroids);
	}

	clusterCentroids.resize(clusterX.size());
	W.resize(clusterX.size());

	for (int i = 0; i < clusterX.size(); ++i) {
		floatCentroids[i].convertTo(clusterCentroids[i], CV_64F);

		cv::Mat_<double> X2;
		clusterX[i].convertTo(X2, CV_64F);
		ml::addBias(X2);
		cv::Mat_<double> Y2;
		clusterY[i].convertTo(Y2, CV_64F);

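		// least-squares fit of this cluster's mapping via the SVD pseudo-inverse (solves X2 * W = Y2)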
		W[i] = X2.inv(cv::DECOMP_SVD) * Y2;
	}
}
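A minimal sketch of how the fitted clusters might be used at prediction time follows; the helper below is not part of the original class and only illustrates routing a query to its nearest centroid before applying that cluster's weights.

// Hypothetical usage sketch (not from the original source): centroids and weights are the
// clusterCentroids / W members filled by the constructor above, and ml::addBias is assumed
// to append the same constant bias column used at training time.
static cv::Mat_<double> predictClustered(const std::vector<cv::Mat_<double> >& centroids,
                                         const std::vector<cv::Mat_<double> >& weights,
                                         const cv::Mat_<double>& x /* 1 x dims_in */)
{
	size_t best = 0;
	double bestDist = cv::norm(x - centroids[0]);
	for (size_t i = 1; i < centroids.size(); ++i) {
		double d = cv::norm(x - centroids[i]);
		if (d < bestDist) { bestDist = d; best = i; }
	}
	cv::Mat_<double> x2 = x.clone();
	ml::addBias(x2);              // same bias augmentation as in the constructor
	return x2 * weights[best];    // 1 x dims_out
}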
Example #2
EDTFeature::EDTFeature( const cv::Mat_<byte> img, const cv::Size fvDims)
    : RFeatures::FeatureOperator( img.size(), fvDims)
{
    const cv::Mat_<int> sedt = RFeatures::DistanceTransform::calcdt( img);   // Distance map to edges
    cv::Mat fsedt;  // convert to 64 bit float for sqrt
    sedt.convertTo( fsedt, CV_64F);
    cv::sqrt( fsedt, _dtimg);
    cv::integral( _dtimg, _iimg, CV_64F);
}   // end ctor
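The squared distance transform is square-rooted and then accumulated into an integral image, so the total edge distance inside any window can later be read with four lookups. A standalone sketch of that standard lookup (illustrative, not taken from RFeatures) follows.

// Illustration only: cv::integral produces a (rows+1) x (cols+1) sum image, so the sum of
// the sqrt-distance values inside rectangle r is obtained from four corner reads.
static double rectSum(const cv::Mat_<double>& iimg, const cv::Rect& r)
{
    return iimg(r.y + r.height, r.x + r.width)
         - iimg(r.y,            r.x + r.width)
         - iimg(r.y + r.height, r.x)
         + iimg(r.y,            r.x);
}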
Example #3
/**
* Root mean square (RMS) contrast
*/
double rmsContrast(cv::Mat_<unsigned char> grayscale)
{
    cv::Mat I;
    grayscale.convertTo(I, CV_32F, 1.0f / 255.0f);

    cv::Mat normalized = (I - cv::mean(I).val[0]);

    double sum = cv::sum(normalized.mul(normalized)).val[0];
    double totalPixels = grayscale.rows * grayscale.cols;

    return sqrt(sum / totalPixels);
}
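A short usage sketch follows (illustrative only; the image path is a placeholder).

// Usage sketch: intensities are scaled to [0, 1] inside rmsContrast, so the result is the
// standard deviation of the normalized image (0 for a flat image, larger for higher contrast).
static double exampleRmsContrast()
{
    cv::Mat_<unsigned char> gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    return rmsContrast(gray);
}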
/**
 * Hierarchically splits the data X with k-means clustering,
 * and splits the data Y along the same partition.
 * X and Y must have the same number of rows.
 *
 * @param X						data X
 * @param Y						data Y
 * @param centroid				centroid of the current subset of X
 * @param minSize				minimum cluster size
 * @param clusterX [OUT]		clusters of data X
 * @param clusterY [OUT]		clusters of data Y
 * @param clusterCentroids [OUT]	centroid of each resulting cluster
 */
void ClusteredLinearRegression::partition(const cv::Mat_<float>& X, const cv::Mat_<float>& Y, const cv::Mat_<float>& centroid, int minSize, vector<cv::Mat_<float> >&clusterX, vector<cv::Mat_<float> >&clusterY, vector<cv::Mat_<float> >& clusterCentroids) {
	// Repeatedly split X and Y until a cluster would have fewer than minSize samples.
	cv::Mat samples;
	X.convertTo(samples, CV_32F);
	cv::Mat_<float> centroids;
	cv::Mat labels;
	//cv::TermCriteria cri(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 50, FLT_EPSILON);
	cv::TermCriteria cri(cv::TermCriteria::COUNT, 200, FLT_EPSILON);
	double compactness = cv::kmeans(samples, 2, labels, cri, 200, cv::KMEANS_PP_CENTERS, centroids);
		
	int nClass1 = cv::countNonZero(labels);
	int nClass0 = X.rows - nClass1;

	if (nClass1 < minSize || nClass0 < minSize) {
		clusterX.push_back(X);
		clusterY.push_back(Y);
		clusterCentroids.push_back(centroid); 
		return;
	}

	cv::Mat_<float> classX0(nClass0, X.cols);
	cv::Mat_<float> classX1(nClass1, X.cols);
	cv::Mat_<float> classY0(nClass0, Y.cols);
	cv::Mat_<float> classY1(nClass1, Y.cols);

	int index0 = 0;
	int index1 = 0;
	for (int r = 0; r < X.rows; ++r) {
		if (labels.at<int>(r, 0) == 0) {	// cv::kmeans outputs CV_32S labels
			for (int c = 0; c < X.cols; ++c) {
				classX0(index0, c) = X(r, c);
			}
			for (int c = 0; c < Y.cols; ++c) {
				classY0(index0, c) = Y(r, c);
			}

			index0++;
		} else {
			for (int c = 0; c < X.cols; ++c) {
				classX1(index1, c) = X(r, c);
			}
			for (int c = 0; c < Y.cols; ++c) {
				classY1(index1, c) = Y(r, c);
			}

			index1++;
		}
	}

	partition(classX0, classY0, centroids.row(0), minSize, clusterX, clusterY, clusterCentroids);
	partition(classX1, classY1, centroids.row(1), minSize, clusterX, clusterY, clusterCentroids);
}
Example #5
// mex function call:
// x = mexFGS(input_image, guidance_image = NULL, sigma, lambda, fgs_iteration = 3, fgs_attenuation = 4);
void FGS(const cv::Mat_<float> &in, const cv::Mat_<cv::Vec3b> &color_guide, cv::Mat_<float> &out, double sigma, double lambda, int solver_iteration, int solver_attenuation)
{
	
	// image resolution
	W = in.cols;
	H = in.rows;

    nChannels = 1;
    nChannels_guide = 3;
	
	cv::Mat_<cv::Vec3f> image_guidance;
	color_guide.convertTo(image_guidance,CV_32FC3);

	cv::Mat_<float> image_filtered;
	in.copyTo(image_filtered);

	// run FGS
	sigma *= 255.0;
    
	FGS_simple(image_filtered, image_guidance, sigma, lambda, solver_iteration, solver_attenuation);
	image_filtered.copyTo(out);
}
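A hedged usage sketch of the wrapper follows; the file names and parameter values are placeholders, and FGS_simple plus the W/H/nChannels globals are assumed to be defined as in this file.

// Usage sketch (illustrative only): filter a single-channel float image with a color image
// as the smoothing guide. Note sigma is given in [0, 1] scale; FGS rescales it by 255.
static void exampleFGS()
{
	cv::Mat_<cv::Vec3b> guide = cv::imread("guide.png", cv::IMREAD_COLOR);
	cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
	cv::Mat_<float> src, dst;
	gray.convertTo(src, CV_32F);
	FGS(src, guide, dst, 0.03 /*sigma*/, 30.0 * 30.0 /*lambda*/, 3 /*iterations*/, 4 /*attenuation*/);
}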
void NormalAdaptiveSuperpixel::SetParametor(int rows, int cols, cv::Mat_<double> intrinsic){
	//number of clusters
	ClusterNum.x = cols;
	ClusterNum.y = rows;
	//grid(window) size
	Window_Size.x = width/cols;
	Window_Size.y = height/rows;
	//Init GPU memory
	initMemory();						
	//Random colors
	for(int i=0; i<ClusterNum.x*ClusterNum.y; i++){
		int3 tmp;
		tmp.x = rand()%255;
		tmp.y = rand()%255;
		tmp.z = rand()%255;
		RandomColors[i] = tmp;
	}
	////////////////////////////////Virtual//////////////////////////////////////////
	//set intrinsic mat
	cv::Mat_<float> intr;
	intrinsic.convertTo(intr, CV_32F);
	Intrinsic_Device.upload(intr);
}
cv::Mat_<double> cRegression::GlobalRegression(const cv::Mat_<int>&binaryfeatures, int stage)
{
	cv::Mat_<double> deltashapes = cv::Mat::zeros(m_TrainData->__dbsizes, 2 * m_TrainData->__numlandmarks, CV_64FC1);
	std::cout << "compute residual." << std::endl;
	for (int i = 0;i < m_TrainData->__dbsizes; i++){
		cv::Mat_<double> residua_col0 = m_TrainData->__data[i].__shapes_residual.col(0).t();
		cv::Mat_<double> residua_col1 = m_TrainData->__data[i].__shapes_residual.col(1).t();

		residua_col0.copyTo(deltashapes.row(i).colRange(0, m_TrainData->__numlandmarks));
		residua_col1.copyTo(deltashapes.row(i).colRange(m_TrainData->__numlandmarks, 2 * m_TrainData->__numlandmarks));
	}		
	std::cout << "compute residual finished." << std::endl;

	std::cout << "linear regression." << std::endl;
	cv::Mat_<double> W_liblinear = cv::Mat::zeros(binaryfeatures.cols, deltashapes.cols,CV_64FC1);
	// std::cout << "convert to sparse feature." << std::endl;
	cv::SparseMat_<int> binaryfeatures_sparse(binaryfeatures);
	// std::cout << binaryfeatures_sparse.size(1) << std::endl;
	// std::cout << "convert to sparse feature finished." << std::endl;
    #pragma omp parallel for
	for (int i = 0; i < deltashapes.cols; i++){
		cv::Mat_<double> tmp = __train_regressor(deltashapes.col(i), binaryfeatures_sparse);
		tmp.copyTo(W_liblinear.col(i));
	}
	std::cout << "linear regression finished." << std::endl;

	cv::Mat_<double> binaryfeatures_d;
	binaryfeatures.convertTo(binaryfeatures_d,CV_64FC1);
	cv::Mat_<double> deltashapes_bar = binaryfeatures_d * W_liblinear;

	cv::Mat_<double> deltashapes_bar_x = deltashapes_bar.colRange(0, deltashapes_bar.cols/2);//54 X 68
	cv::Mat_<double> deltashapes_bar_y = deltashapes_bar.colRange(deltashapes_bar.cols / 2, deltashapes_bar.cols);

	std::vector<cv::Mat_<double>> preshapes(0);
	std::vector<cv::Mat_<double>> gtshapes(0);

	std::cout << "update stage." << std::endl;
	for (int i=0; i < m_TrainData->__dbsizes; i++){		
		cv::Mat_<double> delta_shape_interm_coord = cv::Mat::zeros(deltashapes_bar_x.cols,2,CV_64FC1);//68 X 2
		cv::Mat_<double> deltashapes_bar_x_t, deltashapes_bar_y_t;

		deltashapes_bar_x_t = deltashapes_bar_x.t();
		deltashapes_bar_y_t = deltashapes_bar_y.t();

		deltashapes_bar_x_t.col(i).copyTo(delta_shape_interm_coord.col(0));
		deltashapes_bar_y_t.col(i).copyTo(delta_shape_interm_coord.col(1));		

		delta_shape_interm_coord.col(0) *= m_TrainData->__data[i].__intermediate_bboxes[stage].width;
		delta_shape_interm_coord.col(1) *= m_TrainData->__data[i].__intermediate_bboxes[stage].height;	

		cv::Mat_<double> shape_newstage = m_TrainData->__data[i].__intermediate_shapes[stage] + delta_shape_interm_coord;		

		m_TrainData->__data[i].__intermediate_shapes[stage + 1] = shape_newstage.clone();
		m_TrainData->__data[i].__intermediate_bboxes[stage + 1] = m_TrainData->__data[i].__intermediate_bboxes[stage];

		preshapes.push_back(m_TrainData->__data[i].__intermediate_shapes[stage + 1].clone());
		gtshapes.push_back(m_TrainData->__data[i].__shape_gt.clone());

		/*
		cv::Mat img = m_TrainData->__data[i].__img_gray.clone();
		for (int k = 0; k < preshapes[i].rows; ++k) {
			cv::circle(img, cv::Point(m_TrainData->__data[i].__intermediate_shapes[stage](k, 0), 
				m_TrainData->__data[i].__intermediate_shapes[stage](k, 1)), 
				1, CV_RGB(128, 128, 128), 1);
			cv::circle(img, cv::Point(preshapes[i](k, 0), preshapes[i](k, 1)), 1, CV_RGB(255, 255, 255), 1);
			cv::circle(img, cv::Point(gtshapes[i](k, 0), gtshapes[i](k, 1)), 1, CV_RGB(0, 0, 0), 1);
		}
		cv::rectangle(img, m_TrainData->__data[i].__bbox_gt, CV_RGB(255, 255, 255), 1);
		cv::imshow("img", img);
		cv::waitKey(0);
		*/

		//calculate transform
		/*cv::Mat_<double> shape_newstage_x = shape_newstage.col(0);
		cv::Mat_<double> shape_newstage_y = shape_newstage.col(1);
		cv::Scalar cx = cv::mean(shape_newstage_x);
		cv::Scalar cy = cv::mean(shape_newstage_y);
		shape_newstage_x = shape_newstage_x - cx[0];
		shape_newstage_y = shape_newstage_y - cy[0];		

		cv::Rect         bbox   = m_TrainData->__data[i].__intermediate_bboxes[stage + 1];
		cv::Mat_<double> mean = m_Utils.Reshape(m_TrainData->__meanface_one_row, bbox, m_Params);
		cv::Mat_<double> mean_x = mean.colRange(0, mean.cols / 2).t();
		cv::Mat_<double> mean_y = mean.colRange(mean.cols / 2, mean.cols).t();

		cv::Scalar mx = cv::mean(mean_x);
		cv::Scalar my = cv::mean(mean_y);
		mean_x = mean_x - mx[0];
		mean_y = mean_y - my[0];
		cv::Mat_<double> mean_newstage = cv::Mat::zeros(mean_x.rows,2,CV_64FC1);
		mean_x.copyTo(mean_newstage.col(0));
		mean_y.copyTo(mean_newstage.col(1));

		m_TrainData->__data[i].__tf2meanshape = m_Utils.Calc_Point_Transform(shape_newstage, mean_newstage);
		m_TrainData->__data[i].__meanshape2tf = m_Utils.Calc_Point_Transform(mean_newstage, shape_newstage);*/		

		cv::Mat_<double> residual_col0, residual_col1, residual;
		residual = m_TrainData->__data[i].__shape_gt - shape_newstage;
		
		residual.col(0) = residual.col(0) / (double)m_TrainData->__data[i].__intermediate_bboxes[stage + 1].width;
		residual.col(1) = residual.col(1) / (double)m_TrainData->__data[i].__intermediate_bboxes[stage + 1].height;	

		//residual.col(0) = residual.col(0) * m_TrainData->__data[i].__tf2meanshape.at<double>(0, 0) + residual.col(1) * m_TrainData->__data[i].__tf2meanshape.at<double>(0, 1) + m_TrainData->__data[i].__tf2meanshape.at<double>(0, 2);
		//residual.col(1) = residual.col(1) * m_TrainData->__data[i].__tf2meanshape.at<double>(1, 0) + residual.col(1) * m_TrainData->__data[i].__tf2meanshape.at<double>(1, 1) + m_TrainData->__data[i].__tf2meanshape.at<double>(1, 2);

		residual.copyTo(m_TrainData->__data[i].__shapes_residual);		
	}
	std::cout << "update stage finished." << std::endl;

	std::cout << "Mean Square Root Error: " << m_Evaluate->compute_error(preshapes, gtshapes) << std::endl;

	// std::cout << "regression stage finished." << std::endl;
	return W_liblinear;
}
Example #8
File: PDM.cpp  Project: ezzaouia/OpenFace
//===========================================================================
// Calculate the PDM's Jacobian over all parameters (rigid and non-rigid), the additional input W represents trust for each of the landmarks and is part of Non-Uniform RLMS
void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6d& params_global, cv::Mat_<float> &Jacobian, const cv::Mat_<float> W, cv::Mat_<float> &Jacob_t_w)
{ 
	
	// number of vertices
	int n = this->NumberOfPoints();
		
	// number of non-rigid parameters
	int m = this->NumberOfModes();

	Jacobian.create(n * 2, 6 + m);
	
	float X,Y,Z;
	
	float s = (float) params_global[0];
  	
	cv::Mat_<double> shape_3D_d;
	cv::Mat_<double> p_local_d;
	params_local.convertTo(p_local_d, CV_64F);
	this->CalcShape3D(shape_3D_d, p_local_d);
	
	cv::Mat_<float> shape_3D;
	shape_3D_d.convertTo(shape_3D, CV_32F);

	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	float r11 = (float) currRot(0,0);
	float r12 = (float) currRot(0,1);
	float r13 = (float) currRot(0,2);
	float r21 = (float) currRot(1,0);
	float r22 = (float) currRot(1,1);
	float r23 = (float) currRot(1,2);
	float r31 = (float) currRot(2,0);
	float r32 = (float) currRot(2,1);
	float r33 = (float) currRot(2,2);

	cv::MatIterator_<float> Jx =  Jacobian.begin();
	cv::MatIterator_<float> Jy =  Jx + n * (6 + m);
	cv::MatConstIterator_<double> Vx =  this->princ_comp.begin();
	cv::MatConstIterator_<double> Vy =  Vx + n*m;
	cv::MatConstIterator_<double> Vz =  Vy + n*m;

	for(int i = 0; i < n; i++)
	{
    
		X = shape_3D.at<float>(i,0);
		Y = shape_3D.at<float>(i+n,0);
		Z = shape_3D.at<float>(i+n*2,0);    
    
		// The rigid jacobian from the axis angle rotation matrix approximation using small angle assumption (R * R')
		// where R' = [1, -wz, wy
		//             wz, 1, -wx
		//             -wy, wx, 1]
		// And this is derived using the small angle assumption on the axis angle rotation matrix parametrisation

		// scaling term
		*Jx++ = (X  * r11 + Y * r12 + Z * r13);
		*Jy++ = (X  * r21 + Y * r22 + Z * r23);
		
		// rotation terms
		*Jx++ = (s * (Y * r13 - Z * r12) );
		*Jy++ = (s * (Y * r23 - Z * r22) );
		*Jx++ = (-s * (X * r13 - Z * r11));
		*Jy++ = (-s * (X * r23 - Z * r21));
		*Jx++ = (s * (X * r12 - Y * r11) );
		*Jy++ = (s * (X * r22 - Y * r21) );
		
		// translation terms
		*Jx++ = 1.0f;
		*Jy++ = 0.0f;
		*Jx++ = 0.0f;
		*Jy++ = 1.0f;

		for(int j = 0; j < m; j++,++Vx,++Vy,++Vz)
		{
			// How much the change of the non-rigid parameters (when object is rotated) affect 2D motion
			*Jx++ = (float) ( s*(r11*(*Vx) + r12*(*Vy) + r13*(*Vz)) );
			*Jy++ = (float) ( s*(r21*(*Vx) + r22*(*Vy) + r23*(*Vz)) );
		}
	}	

	// Adding the weights here
	cv::Mat Jacob_w = Jacobian.clone();
	
	if(cv::trace(W)[0] != W.rows) 
	{
		Jx =  Jacobian.begin();
		Jy =  Jx + n*(6+m);

		cv::MatIterator_<float> Jx_w =  Jacob_w.begin<float>();
		cv::MatIterator_<float> Jy_w =  Jx_w + n*(6+m);

		// Iterate over all Jacobian values and multiply them by the weight in diagonal of W
		for(int i = 0; i < n; i++)
		{
			float w_x = W.at<float>(i, i);
			float w_y = W.at<float>(i+n, i+n);

			for(int j = 0; j < Jacobian.cols; ++j)
			{
				*Jx_w++ = *Jx++ * w_x;
				*Jy_w++ = *Jy++ * w_y;
			}
		}
	}
	Jacob_t_w = Jacob_w.t();

}
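Jacob_t_w is the weighted transpose J^T W that a Gauss-Newton style fitter consumes; the sketch below shows the typical normal-equation step it enables. This is illustrative only, under the assumption that the caller stacks the 2D residual (x then y) into a single column; the actual RLMS update elsewhere in OpenFace also adds a prior/regularisation term before solving.

// Sketch (not from PDM.cpp): solve (J^T W J) dp = J^T W r for the parameter step dp.
static cv::Mat_<float> GaussNewtonStep(const cv::Mat_<float>& Jacobian,
                                       const cv::Mat_<float>& Jacob_t_w,
                                       const cv::Mat_<float>& residual /* 2n x 1 */)
{
	cv::Mat_<float> H   = Jacob_t_w * Jacobian;    // (6+m) x (6+m) approximate Hessian
	cv::Mat_<float> rhs = Jacob_t_w * residual;    // (6+m) x 1
	cv::Mat_<float> dp;
	cv::solve(H, rhs, dp, cv::DECOMP_CHOLESKY);    // or cv::DECOMP_SVD for robustness
	return dp;                                     // update for rigid + non-rigid parameters
}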
Example #9
File: PDM.cpp  Project: ezzaouia/OpenFace
//===========================================================================
// Calculate the PDM's Jacobian over rigid parameters (rotation, translation and scaling), the additional input W represents trust for each of the landmarks and is part of Non-Uniform RLMS 
void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6d& params_global, cv::Mat_<float> &Jacob, const cv::Mat_<float> W, cv::Mat_<float> &Jacob_t_w)
{
  	
	// number of verts
	int n = this->NumberOfPoints();
  
	Jacob.create(n * 2, 6);

	float X,Y,Z;

	float s = (float)params_global[0];
  	
	cv::Mat_<double> shape_3D_d;
	cv::Mat_<double> p_local_d;
	p_local.convertTo(p_local_d, CV_64F);
	this->CalcShape3D(shape_3D_d, p_local_d);
	
	cv::Mat_<float> shape_3D;
	shape_3D_d.convertTo(shape_3D, CV_32F);

	 // Get the rotation matrix
	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	float r11 = (float) currRot(0,0);
	float r12 = (float) currRot(0,1);
	float r13 = (float) currRot(0,2);
	float r21 = (float) currRot(1,0);
	float r22 = (float) currRot(1,1);
	float r23 = (float) currRot(1,2);
	float r31 = (float) currRot(2,0);
	float r32 = (float) currRot(2,1);
	float r33 = (float) currRot(2,2);

	cv::MatIterator_<float> Jx = Jacob.begin();
	cv::MatIterator_<float> Jy = Jx + n * 6;

	for(int i = 0; i < n; i++)
	{
    
		X = shape_3D.at<float>(i,0);
		Y = shape_3D.at<float>(i+n,0);
		Z = shape_3D.at<float>(i+n*2,0);    
		
		// The rigid jacobian from the axis angle rotation matrix approximation using small angle assumption (R * R')
		// where R' = [1, -wz, wy
		//             wz, 1, -wx
		//             -wy, wx, 1]
		// And this is derived using the small angle assumption on the axis angle rotation matrix parametrisation

		// scaling term
		*Jx++ =  (X  * r11 + Y * r12 + Z * r13);
		*Jy++ =  (X  * r21 + Y * r22 + Z * r23);
		
		// rotation terms
		*Jx++ = (s * (Y * r13 - Z * r12) );
		*Jy++ = (s * (Y * r23 - Z * r22) );
		*Jx++ = (-s * (X * r13 - Z * r11));
		*Jy++ = (-s * (X * r23 - Z * r21));
		*Jx++ = (s * (X * r12 - Y * r11) );
		*Jy++ = (s * (X * r22 - Y * r21) );
		
		// translation terms
		*Jx++ = 1.0f;
		*Jy++ = 0.0f;
		*Jx++ = 0.0f;
		*Jy++ = 1.0f;

	}

	cv::Mat Jacob_w = cv::Mat::zeros(Jacob.rows, Jacob.cols, Jacob.type());
	
	Jx =  Jacob.begin();
	Jy =  Jx + n*6;

	cv::MatIterator_<float> Jx_w =  Jacob_w.begin<float>();
	cv::MatIterator_<float> Jy_w =  Jx_w + n*6;

	// Iterate over all Jacobian values and multiply them by the weight in diagonal of W
	for(int i = 0; i < n; i++)
	{
		float w_x = W.at<float>(i, i);
		float w_y = W.at<float>(i+n, i+n);

		for(int j = 0; j < Jacob.cols; ++j)
		{
			*Jx_w++ = *Jx++ * w_x;
			*Jy_w++ = *Jy++ * w_y;
		}		
	}

	Jacob_t_w = Jacob_w.t();
}