/// \brief Create Sigma point to represent a normal distribution void UnscentedKalmanFilter::sigmas(vnl_vector<double> x, vnl_matrix<double> P, double c, vnl_matrix<double> &X) { // Copy matrix P to A vnl_matrix<double> A = P; // Compute the Cholesky decomposition of P vnl_cholesky chol(A,vnl_cholesky::verbose); A = chol.upper_triangle(); // Apply the weight c A = c*A.transpose(); // Create matrix Y with copies of reference point x vnl_matrix<double> Y(N,N); for( int i = 0; i<N; i++ ) Y.set_column(i,x); // Add and subtract A from Y vnl_matrix<double> YpA = Y + A; vnl_matrix<double> YmA = Y - A; // Set columns of X with Sigma points X.set_column(0,x); for( int i = 0; i<N; i++ ) { X.set_column(i+1,YpA.get_column(i)); X.set_column(i+1+N,YmA.get_column(i)); } }
/// \brief Z-score normalize every column of a feature matrix.
///
/// Per-column mean and standard deviation are computed over all rows and
/// stored in the members std_vec / mean_vec (so they can be reused later,
/// e.g. by Normalize_Feature_Matrix_1). Columns with zero spread are left
/// unchanged to avoid division by zero.
///
/// \param feats  feature matrix, one sample per row (copied; the
///               normalized copy is returned).
/// \return the normalized matrix.
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
  // Accumulate per-dimension statistics over every row.
  mbl_stats_nd stats;
  for (int row = 0; row < feats.rows(); ++row)
    stats.obs(feats.get_row(row));

  std_vec = stats.sd();
  mean_vec = stats.mean();

  // The last column is the training column
  // NOTE(review): the loop below normalizes ALL columns, including that
  // last one — confirm whether the training column should be excluded.
  for (int col = 0; col < feats.columns(); ++col)
  {
    vnl_vector<double> values = feats.get_column(col);
    if (std_vec(col) > 0)
    {
      for (int row = 0; row < values.size(); ++row)
        values[row] = (values[row] - mean_vec(col)) / std_vec(col);
    }
    feats.set_column(col, values);
  }
  return feats;
}
/// \brief Unscented transform of process Sigma points void UnscentedKalmanFilter::utf(vnl_matrix<double> X, vnl_vector<double> u, vnl_vector<double> &y, vnl_matrix<double> &Y, vnl_matrix<double> &P, vnl_matrix<double> &Y1) { // determine number of sigma points unsigned int L = X.cols(); // zero output matrices y.fill(0.0); Y.fill(0.0); // transform the sigma points and put them as columns in a matrix Y for( int k = 0; k < L; k++ ) { vnl_vector<double> xk = X.get_column(k); vnl_vector<double> yk(N); f(xk,u,yk); Y.set_column(k,yk); // add each transformed point to the weighted mean y = y + Wm.get(0,k)*yk; } // create a matrix with each column being the weighted mean vnl_matrix<double> Ymean(N,L); for( int k = 0; k < L; k++ ) Ymean.set_column(k,y); // set the matrix of difference vectors Y1 = Y-Ymean; // calculate the covariance matrix output vnl_matrix<double> WC(L,L,0.0); WC.set_diagonal(Wc.get_row(0)); P = Y1*WC*Y1.transpose(); }
/// \brief Z-score normalize the leading feature columns of a matrix.
///
/// Computes per-column mean and standard deviation over all rows, then
/// rescales each of the first (columns - 3) columns to zero mean and unit
/// variance. The trailing 3 columns are left untouched (presumably label /
/// bookkeeping columns — TODO confirm with callers). Columns with zero
/// spread are skipped to avoid division by zero.
///
/// \param feats  feature matrix, one sample per row (copied; the
///               normalized copy is returned).
/// \return the matrix with its leading columns normalized.
vnl_matrix<double> Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
  // Accumulate per-dimension statistics over every row.
  mbl_stats_nd stats;
  for (unsigned int i = 0; i < feats.rows(); ++i)
  {
    vnl_vector<double> temp_row = feats.get_row(i);
    stats.obs(temp_row);
  }
  vnl_vector<double> std_vec = stats.sd();
  vnl_vector<double> mean_vec = stats.mean();

  // BUG FIX: feats.columns() returns an unsigned type, so the original
  // bound "feats.columns()-3" wrapped to a huge value whenever the matrix
  // had fewer than 3 columns, promoting the int index to unsigned and
  // reading far out of bounds. Compute the bound in signed arithmetic so
  // small matrices simply normalize nothing.
  const int num_to_normalize = static_cast<int>(feats.columns()) - 3;
  for (int i = 0; i < num_to_normalize; ++i)
  {
    vnl_vector<double> temp_col = feats.get_column(i);
    if (std_vec(i) > 0)  // skip constant columns (zero variance)
    {
      for (unsigned int j = 0; j < temp_col.size(); ++j)
        temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
    }
    feats.set_column(i, temp_col);
  }
  return feats;
}
/// \brief Normalize feature columns using externally supplied statistics.
///
/// Unlike Normalize_Feature_Matrix, this variant does not compute statistics
/// from the data: it stores the supplied per-column standard deviations and
/// means into the members std_vec / mean_vec and applies them. Columns with
/// non-positive standard deviation are left unchanged.
///
/// \param feats     feature matrix, one sample per row (copied; the
///                  normalized copy is returned).
/// \param vector_1  per-column standard deviations.
/// \param vector_2  per-column means.
/// \return the normalized matrix.
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix_1(vnl_matrix<double> feats,
                                                       vnl_vector<double> vector_1,
                                                       vnl_vector<double> vector_2)
{
  std_vec = vector_1;   // stash supplied spreads in member state
  mean_vec = vector_2;  // stash supplied means in member state

  // The last column is the training column
  for (int col = 0; col < feats.columns(); ++col)
  {
    vnl_vector<double> values = feats.get_column(col);
    const double sd = std_vec(col);
    if (sd > 0)
    {
      const double mu = mean_vec(col);
      for (int row = 0; row < values.size(); ++row)
        values[row] = (values[row] - mu) / sd;
    }
    feats.set_column(col, values);
  }
  return feats;
}