Example #1
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
	// Accumulate per-column mean and standard deviation over all rows
	mbl_stats_nd stats;
	for(unsigned int i = 0; i < feats.rows(); ++i)
	{
		vnl_vector<double> temp_row = feats.get_row(i);
		stats.obs(temp_row);
	}

	std_vec = stats.sd();
	mean_vec = stats.mean();

	// Z-score each column; the last column is the training column
	for(unsigned int i = 0; i < feats.columns(); ++i)
	{
		vnl_vector<double> temp_col = feats.get_column(i);
		if(std_vec(i) > 0)
		{
			for(unsigned int j = 0; j < temp_col.size(); ++j)
				temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
		}
		feats.set_column(i, temp_col);
	}

	return feats;
}
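A minimal usage sketch (hypothetical: `classifier` and the raw feature matrix are assumed to be prepared elsewhere; `std_vec` and `mean_vec` are the class members filled by the call):

// Hypothetical usage: z-score a raw feature matrix before training.
MCLR_SM classifier;                          // assumed to be constructible like this here
vnl_matrix<double> raw_feats(200, 10, 0.0);  // placeholder data, one sample per row
vnl_matrix<double> norm_feats = classifier.Normalize_Feature_Matrix(raw_feats);
// The per-column statistics are now cached in the classifier's std_vec / mean_vec members.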
Example #2
/// \brief		Unscented transform of process Sigma points
void UnscentedKalmanFilter::utf(vnl_matrix<double> X, vnl_vector<double> u,
	vnl_vector<double> &y, vnl_matrix<double> &Y, vnl_matrix<double> &P, vnl_matrix<double> &Y1)
{
	// determine number of sigma points
	unsigned int L = X.cols();
	// zero output matrices
	y.fill(0.0); Y.fill(0.0);

	// transform the sigma points and put them as columns in a matrix Y
	for( unsigned int k = 0; k < L; k++ )
	{
		vnl_vector<double> xk = X.get_column(k);
		vnl_vector<double> yk(N);
		f(xk,u,yk);
		Y.set_column(k,yk);
		// add each transformed point to the weighted mean
		y = y + Wm.get(0,k)*yk;
	}

	// create a matrix with each column being the weighted mean
	vnl_matrix<double> Ymean(N,L);
	for( unsigned int k = 0; k < L; k++ )
		Ymean.set_column(k,y);
	// set the matrix of difference vectors
	Y1 = Y-Ymean;
	// calculate the covariance matrix output
	vnl_matrix<double> WC(L,L,0.0);
	WC.set_diagonal(Wc.get_row(0));
	P = Y1*WC*Y1.transpose();
}
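A hedged caller sketch for the transform above. The sizes assume a state dimension of N with the usual 2N+1 sigma points, and `ukf` is an UnscentedKalmanFilter whose weights Wm, Wc and process model f are assumed to be initialized elsewhere; in a full predict step, process noise would typically be added to P afterwards.

// Hypothetical call: propagate sigma points through the process model.
const unsigned int state_dim = 4;                  // assumed state dimension (the filter's N)
const unsigned int n_sigma   = 2 * state_dim + 1;  // standard sigma-point count
vnl_matrix<double> X(state_dim, n_sigma, 0.0);     // sigma points, one per column
vnl_vector<double> u(2, 0.0);                      // control input (size is model-dependent)
vnl_vector<double> y(state_dim);                   // output: predicted mean
vnl_matrix<double> Y(state_dim, n_sigma);          // output: transformed sigma points
vnl_matrix<double> P(state_dim, state_dim);        // output: predicted covariance
vnl_matrix<double> Y1(state_dim, n_sigma);         // output: deviations from the mean
ukf.utf(X, u, y, Y, P, Y1);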
Example #3
vnl_matrix<double> Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
	// Accumulate per-column mean and standard deviation over all rows
	mbl_stats_nd stats;
	for(unsigned int i = 0; i < feats.rows(); ++i)
	{
		vnl_vector<double> temp_row = feats.get_row(i);
		stats.obs(temp_row);
	}

	vnl_vector<double> std_vec = stats.sd();
	vnl_vector<double> mean_vec = stats.mean();

	// Z-score each column except the last three
	for(unsigned int i = 0; i + 3 < feats.columns(); ++i)
	{
		vnl_vector<double> temp_col = feats.get_column(i);
		if(std_vec(i) > 0)
		{
			for(unsigned int j = 0; j < temp_col.size(); ++j)
				temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
		}
		feats.set_column(i, temp_col);
	}

	return feats;
}
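Unlike Example #1, this variant is a standalone function that keeps the statistics in local vectors and leaves the last three columns untouched, presumably because they hold labels or other metadata rather than features.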
Example #4
void normalize(vnl_matrix<double>& x,
    vnl_vector<double>& centroid, double& scale) {
  int n = x.rows();
  if (n == 0) return;
  int d = x.cols();
  centroid.set_size(d);

  // Per-column mean gives the centroid of the point set (one point per row)
  vnl_vector<double> col;
  for (int i = 0; i < d; ++i) {
    col = x.get_column(i);
    centroid(i) = col.mean();
  }
  // Translate every point so the centroid sits at the origin
  for (int i = 0; i < n; ++i) {
    x.set_row(i, x.get_row(i) - centroid);
  }
  // Scale so the root-mean-square distance from the origin becomes 1
  scale = x.frobenius_norm() / sqrt(double(n));
  x = x / scale;
}
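A hedged usage sketch, assuming the rows of `pts` hold 3-D points filled in elsewhere:

// Hypothetical usage: center a point set and scale it to unit RMS radius.
vnl_matrix<double> pts(100, 3, 0.0);   // one point per row, assumed to be filled elsewhere
vnl_vector<double> centroid;
double scale = 1.0;
normalize(pts, centroid, scale);
// A row can be mapped back to the original frame via: pts.get_row(i) * scale + centroid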
Example #5
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix_1(vnl_matrix<double> feats, vnl_vector<double> vector_1, vnl_vector<double> vector_2)
{
	// Apply previously computed per-column statistics instead of recomputing them
	std_vec = vector_1;
	mean_vec = vector_2;

	// Z-score each column; the last column is the training column
	for(unsigned int i = 0; i < feats.columns(); ++i)
	{
		vnl_vector<double> temp_col = feats.get_column(i);
		if(std_vec(i) > 0)
		{
			for(unsigned int j = 0; j < temp_col.size(); ++j)
				temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
		}
		feats.set_column(i, temp_col);
	}

	return feats;
}
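A hedged sketch of applying statistics computed on one matrix (for example by Normalize_Feature_Matrix in Example #1) to a second matrix; `train_std`, `train_mean`, and `test_feats` are hypothetical names for data prepared elsewhere:

// Hypothetical usage: normalize test data with the training-set statistics.
vnl_matrix<double> norm_test =
	classifier.Normalize_Feature_Matrix_1(test_feats, train_std, train_mean);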