// Normalize each feature column of 'feats' -- except the last 3 -- to zero
// mean and unit variance, using statistics accumulated over all rows.
// Columns whose standard deviation is zero are left unchanged.
// Returns the normalized copy of the input matrix (the argument is by value).
vnl_matrix<double> Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
  mbl_stats_nd stats;

  // Accumulate per-column mean / standard deviation over every row.
  for (unsigned int i = 0; i < feats.rows(); ++i)
  {
    vnl_vector<double> temp_row = feats.get_row(i);
    stats.obs(temp_row);
  }

  vnl_vector<double> std_vec = stats.sd();
  vnl_vector<double> mean_vec = stats.mean();

  // The last 3 columns are skipped -- presumably label/metadata columns;
  // TODO confirm against callers.
  // Fix: the original condition "i < feats.columns() - 3" underflows when the
  // matrix has fewer than 3 columns (columns() is unsigned), producing a
  // runaway loop. "i + 3 < ncols" is equivalent for ncols >= 3 and simply
  // skips the loop otherwise.
  const unsigned int ncols = feats.columns();
  for (unsigned int i = 0; i + 3 < ncols; ++i)
  {
    vnl_vector<double> temp_col = feats.get_column(i);
    if (std_vec(i) > 0)
    {
      for (unsigned int j = 0; j < temp_col.size(); ++j)
        temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
    }
    feats.set_column(i, temp_col);
  }
  return feats;
}
// Normalize every feature column of 'feats' to zero mean and unit variance.
// Side effect: the computed per-column statistics are stored in the members
// 'std_vec' and 'mean_vec' so the same scaling can be re-applied later (see
// Normalize_Feature_Matrix_1). Columns with zero standard deviation are left
// unchanged. Returns the normalized copy of the input matrix.
// Fix: loop indices changed from signed int to unsigned int to match the
// unsigned return types of rows()/columns()/size() (removes signed/unsigned
// comparison warnings; no behavioral change for valid sizes).
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
  mbl_stats_nd stats;

  // Accumulate per-column statistics over every row.
  for (unsigned int i = 0; i < feats.rows(); ++i)
  {
    vnl_vector<double> temp_row = feats.get_row(i);
    stats.obs(temp_row);
  }

  std_vec = stats.sd();
  mean_vec = stats.mean();

  //The last column is the training column
  for (unsigned int i = 0; i < feats.columns(); ++i)
  {
    vnl_vector<double> temp_col = feats.get_column(i);
    if (std_vec(i) > 0)
    {
      for (unsigned int j = 0; j < temp_col.size(); ++j)
        temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
    }
    feats.set_column(i, temp_col);
  }
  return feats;
}
//std::vector<double> Vectorize(const vnl_matrix<double> &M) vnl_vector<double> Vectorize(const vnl_matrix<double> &M) { //std::vector<double> V; vnl_vector<double> V(M.rows() * M.columns()); for (unsigned j = 0; j < M.rows(); j++) { for (unsigned i = 0; i < M.columns(); i++) { //V.push_back(M(i,j)); V[M.columns() * j + i] = M(i,j); } } return V; }
// Collect all eigenvectors of the symmetric matrix M, one per column index,
// in the order vnl_symmetric_eigensystem reports them.
std::vector<vnl_vector<double> > EigenVectors(const vnl_matrix<double> &M)
{
  vnl_symmetric_eigensystem<double> eigensystem(M);

  const unsigned int n = M.columns();
  std::vector<vnl_vector<double> > eigenvectors;
  eigenvectors.reserve(n);

  for (unsigned int col = 0; col < n; ++col)
    eigenvectors.push_back(eigensystem.get_eigenvector(col));

  return eigenvectors;
}
// Record the dimensions of 'data' in num_row / num_col and store a deep,
// element-wise copy of it in this->data_matrix.
void LocalGeometryRef::Initialize(vnl_matrix<double> data)
{
  this->num_row = data.rows();
  this->num_col = data.columns();

  // Size the member matrix first, then copy entry by entry.
  this->data_matrix.set_size(this->num_row, this->num_col);
  for (int r = 0; r < this->num_row; ++r)
    for (int c = 0; c < this->num_col; ++c)
      this->data_matrix(r, c) = data(r, c);
}
// Element-wise comparison of two matrices: true when both have the same shape
// and every pair of corresponding entries differs by at most 'eps'.
// On the first mismatch (or a shape mismatch) a diagnostic is printed to
// std::cout and false is returned.
bool CloseEnough(const vnl_matrix<double> &M1, const vnl_matrix<double> &M2, const double eps)
{
  // Shape guard: dimensions must agree before comparing entries.
  if ((M2.rows() != M1.rows()) || (M2.columns() != M1.columns()))
  {
    std::cout << "Dimensions do not match!" << std::endl;
    return false;
  }

  for (unsigned int r = 0; r < M1.rows(); r++)
  {
    for (unsigned int c = 0; c < M1.columns(); c++)
    {
      const double diff = fabs(M1(r,c) - M2(r,c));
      if (diff <= eps)
        continue;

      // First failing entry: report both values and their difference.
      std::cout << "Failed comparison: " << "M1: " << M1(r,c)
                << " M2: " << M2(r,c) << " diff: " << diff << std::endl;
      return false;
    }
  }
  return true;
}
void WriteMatrixImage(const vnl_matrix<double> &M, const std::string &Filename) { vil_image_view<vxl_byte> Image(M.rows(), M.columns(), 1, 1); //(ni, nj, n_planes, n_interleaved_planes) for (unsigned j = 0; j < Image.nj(); j++) { for (unsigned i = 0; i < Image.ni(); i++) { Image(i,j) = static_cast<vxl_byte>(255 * M(i,j)); } } vil_save(Image, Filename.c_str()); }
void WriteMatrixImageScaled(const vnl_matrix<double> &M, const std::string &Filename) { vil_image_view<vxl_byte> Image(M.rows(), M.columns(), 1, 1); //(ni, nj, n_planes, n_interleaved_planes) //double Max = Tools::VectorMax(Vectorize(M)); double Max = M.max_value(); for (unsigned j = 0; j < Image.nj(); j++) { for (unsigned i = 0; i < Image.ni(); i++) { Image(i,j) = static_cast<vxl_byte>(255 * M(i,j)/Max); //cout << "M: " << M(i,j) << endl; //cout << "Image: " << Image(i,j) << endl; } } vil_save(Image, Filename.c_str()); }
// Apply a precomputed normalization to 'feats': vector_1 supplies the
// per-column standard deviations and vector_2 the per-column means. Both are
// cached in the members std_vec / mean_vec, mirroring the statistics produced
// by MCLR_SM::Normalize_Feature_Matrix. Columns with zero standard deviation
// are left unchanged. Returns the normalized copy of the input matrix.
// Fix: loop indices changed from signed int to unsigned int to match the
// unsigned return types of columns()/size() (removes signed/unsigned
// comparison warnings; no behavioral change for valid sizes).
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix_1(vnl_matrix<double> feats, vnl_vector<double> vector_1, vnl_vector<double> vector_2)
{
  std_vec = vector_1;
  mean_vec = vector_2;

  //The last column is the training column
  for (unsigned int i = 0; i < feats.columns(); ++i)
  {
    vnl_vector<double> temp_col = feats.get_column(i);
    if (std_vec(i) > 0)
    {
      for (unsigned int j = 0; j < temp_col.size(); ++j)
        temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
    }
    feats.set_column(i, temp_col);
  }
  return feats;
}