Example #1
vnl_matrix<double> Normalize_Feature_Matrix(vnl_matrix<double> feats)
{
	mbl_stats_nd stats;

	for(int i = 0; i<feats.rows() ; ++i)
	{
		vnl_vector<double> temp_row = feats.get_row(i);
		stats.obs(temp_row);	
	}

	vnl_vector<double> std_vec = stats.sd();
	vnl_vector<double> mean_vec = stats.mean();
	
	for(int i = 0; i < static_cast<int>(feats.columns()) - 3; ++i)	// the last three columns are left unnormalized
	{
		vnl_vector<double> temp_col = feats.get_column(i);
		if(std_vec(i) > 0)
		{	
			for(int j =0; j<temp_col.size() ; ++j)
				temp_col[j] = (temp_col[j] - mean_vec(i))/std_vec(i) ;
		}
	
		feats.set_column(i,temp_col);
	}

	return feats;
}
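A minimal usage sketch for the example above; the driver below is hypothetical and assumes Normalize_Feature_Matrix (and its mbl_stats_nd dependency) is available in the build.

#include <vnl/vnl_matrix.h>

// Hypothetical driver for the example above.
void normalize_feature_matrix_demo()
{
    vnl_matrix<double> feats(4, 5);
    for (unsigned r = 0; r < feats.rows(); ++r)
        for (unsigned c = 0; c < feats.columns(); ++c)
            feats(r, c) = static_cast<double>(r * feats.columns() + c);

    vnl_matrix<double> normalized = Normalize_Feature_Matrix(feats);
    // All but the last three columns now have zero mean and unit variance;
    // the last three columns are returned unchanged.
}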
Example #2
bool rgrsn_ldp::local_dynamic_programming(const vnl_matrix<double> & probMap, int nNeighborBin,
                                          vcl_vector<int> & optimalBins)
{
    const int N    = probMap.rows();
    const int nBin = probMap.cols();
    
    // dynamic programming
    vnl_matrix<double> accumulatedProbMap = vnl_matrix<double>(N, nBin);
    accumulatedProbMap.fill(0.0);
    vnl_matrix<int> lookbackTable = vnl_matrix<int>(N, nBin);
    lookbackTable.fill(0);
    // copy first row
    for (int c = 0; c<probMap.cols(); c++) {
        accumulatedProbMap[0][c] = probMap[0][c];
        lookbackTable[0][c] = c;
    }
    
    for (int r = 1; r <N; r++) {
        for (int c = 0; c<probMap.cols(); c++) {
            // lookup all possible place in the window
            double max_val = -1;
            int max_index  = -1;
            for (int w = -nNeighborBin; w <= nNeighborBin; w++) {
                if (c + w <0 || c + w >= probMap.cols()) {
                    continue;
                }
                double val = probMap[r][c] + accumulatedProbMap[r-1][c+w];
                if (val > max_val) {
                    max_val = val;
                    max_index = c + w; // most probable path from the [r-1] row, in column c + w
                }
            }
            assert(max_index != -1);
            accumulatedProbMap[r][c] = max_val;
            lookbackTable[r][c]      = max_index;
        }
    }
    
    // lookback the table
    double max_prob    = -1.0;
    int max_prob_index = -1;
    for (int c = 0; c<accumulatedProbMap.cols(); c++) {
        if (accumulatedProbMap[N-1][c] > max_prob) {
            max_prob = accumulatedProbMap[N-1][c];
            max_prob_index = c;
        }
    }
    
    // back track
    optimalBins.push_back(max_prob_index);
    for (int r = N-1; r > 0; r--) {
        int bin = lookbackTable[r][optimalBins.back()];
        optimalBins.push_back(bin);
    }
    assert(optimalBins.size() == N);
    
    // Note: the path is stored from the last row back to the first; the reverse
    // below is left commented out in this variant (compare local_dynamic_programming_log).
  //  vcl_reverse(optimalBins.begin(), optimalBins.end());
    return true;
}
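A toy driver, for illustration only; it assumes the rgrsn_ldp class above is linked in, and uses made-up probabilities.

#include <vnl/vnl_matrix.h>
#include <vcl_vector.h>

// Hypothetical example: three time steps, four bins, neighborhood of one bin.
void local_dp_demo()
{
    vnl_matrix<double> probMap(3, 4, 0.0);
    probMap(0, 1) = 0.9;  // step 0 most likely in bin 1
    probMap(1, 2) = 0.8;  // step 1 most likely in bin 2
    probMap(2, 2) = 0.7;  // step 2 most likely in bin 2

    vcl_vector<int> bins;
    rgrsn_ldp::local_dynamic_programming(probMap, 1, bins);
    // bins holds one bin index per row, stored from the last row back to the
    // first, because the vcl_reverse call is commented out in this variant.
}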
Example #3
void denormalize(vnl_matrix<double>& x, const vnl_vector<double>& centroid, const double scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  for (int i = 0; i < n; ++i) {
    x.set_row(i, x.get_row(i) * scale + centroid);
  }
}
Example #4
	//std::vector<double> Vectorize(const vnl_matrix<double> &M)
	vnl_vector<double> Vectorize(const vnl_matrix<double> &M)
	{
		
		//std::vector<double> V;
		vnl_vector<double> V(M.rows() * M.columns());
		
		for (unsigned j = 0; j < M.rows(); j++)
		{
			for (unsigned i = 0; i < M.columns(); i++)
			{
				//V.push_back(M(i,j));
				V[M.columns() * j + i] = M(j,i);	// row j, column i (row-major flattening)
			}
		}
		
		return V;
	}
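	A small sketch of the expected flattening order, assuming the Vectorize function above is in scope; the 2x3 matrix and the check are hypothetical.

	#include <vnl/vnl_matrix.h>
	#include <vnl/vnl_vector.h>

	// Hypothetical check of the flattening order.
	void vectorize_demo()
	{
		vnl_matrix<double> M(2, 3);
		M(0,0) = 1; M(0,1) = 2; M(0,2) = 3;
		M(1,0) = 4; M(1,1) = 5; M(1,2) = 6;

		vnl_vector<double> V = Vectorize(M);
		// V is expected to be [1 2 3 4 5 6]: element (j,i) lands at index M.columns()*j + i.
	}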
Example #5
void compute_P(const vnl_matrix<double>& x,const vnl_matrix<double>& y, vnl_matrix<double>& P, double &E, double sigma, int outliers) {
  double k = -2*sigma*sigma;

  //P.set_size(m,n); P.fill(0);
  //vnl_vector<double> v_ij;

  vnl_vector<double> column_sum;
  int m = x.rows();
  int s = y.rows();
  int d = x.cols();
  column_sum.set_size(s);
  column_sum.fill(0);
  double outlier_term = outliers*pow((2*sigma*sigma*3.1415926),0.5*d);
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < s; ++j) {
      double r = 0;
      for (int t = 0; t < d; ++t) {
        r += (x(i,t) - y(j,t))*(x(i,t) - y(j,t));
      }
      P(i,j) = exp(r/k);
      column_sum[j]+=P(i,j);
    }
  }


  if (outliers!=0) {
    for (int i = 0; i < s; ++i)
      column_sum[i] += outlier_term;
  }
  if (column_sum.min_value()>(1e-12)) {
    E = 0;
    for (int i = 0; i < s; ++i) {
      for (int j = 0; j < m; ++j){
        P(j,i) = P(j,i)/column_sum[i];
      }
      E -= log(column_sum[i]);
    }
    //vcl_cerr << s;
    //vcl_cerr<<P.get_column(10);
  }
  else {
    P.empty(); // note: empty() only tests for emptiness; it does not clear P
  }
}
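A hedged usage sketch with two made-up 2-D point sets. Note that P must be sized to x.rows() x y.rows() by the caller, since the set_size call inside is commented out.

#include <vnl/vnl_matrix.h>

// Hypothetical driver for compute_P above.
void compute_P_demo()
{
    vnl_matrix<double> x(3, 2), y(2, 2);
    x.fill(0.0);  y.fill(1.0);
    x(1, 0) = 0.5;  x(2, 1) = 0.25;

    vnl_matrix<double> P(x.rows(), y.rows(), 0.0);
    double E = 0.0;
    compute_P(x, y, P, E, /*sigma=*/0.5, /*outliers=*/0);
    // P(i,j) is now the column-normalized Gaussian affinity between x row i and y row j.
}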
Example #6
void normalize_same(vnl_matrix<double>& x,
    vnl_vector<double>& centroid, double& scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  for (int i = 0; i < n; ++i) {
    x.set_row(i, (x.get_row(i) - centroid) / scale);
  }
}
Example #7
void vnl_flann::search(const vnl_matrix<double> & query_data,
                       vcl_vector<vcl_vector<int> > & indices,
                       vcl_vector<vcl_vector<double> > & dists, int knn) const
{
    const int dim = (int)query_data.cols();
    assert(dim == dim_);
    
    Matrix<double> query_data_wrap((double *)query_data.data_block(), (int)query_data.rows(), dim);
    index_.knnSearch(query_data_wrap, indices, dists, knn, flann::SearchParams(128));
}
Example #8
bool rgrsn_ldp::dynamic_programming(const vnl_matrix<double> & data, double v_min, double v_max,
                                    unsigned int nBin, int nJumpBin, unsigned int windowSize,
                                    vnl_vector<double> & optimalSignal)
{
    assert(v_min < v_max);
    // raw data to probability map
    const int N = data.rows();
    vnl_matrix<double> probMap(N, nBin, 0.0); // zero-initialize before accumulating counts
    double interval = (v_max - v_min)/nBin;
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(v_min, interval, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalize
    
    // save prob
    {
      //  vnl_matlab_filewrite awriter("prob.mat");
      //  awriter.write(probMap, "prob");
     //   printf("save to prob.mat.\n");
    }
  
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    for (int i = 0; i <= N - (int)windowSize; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(windowSize, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::local_dynamic_programming(localProbMap, nJumpBin, localOptimalBins);
        assert(localOptimalBins.size() == windowSize);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            numValues[j + i]     += 1;
            optimalValues[j + i] += bin_number_to_value(v_min, interval, localOptimalBins[j]);
        }
        
        // test
        if (0 && i == 0)
        {
            printf("test first window output\n");
            for (int j = 0; j<optimalValues.size() && j<windowSize; j++) {
                printf("%f ", optimalValues[j]);
            }
            printf("\n");
        }
    }
    //
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    optimalSignal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    return true;
}
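A hedged driver for the sliding-window smoother above; the sizes and data below are made up, and the rgrsn_ldp class is assumed to be available.

#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>

// Hypothetical call: 20 frames, 4 noisy measurements per frame in [0, 1),
// 10 bins, jumps of at most 1 bin, and a sliding window of 5 frames.
void dynamic_programming_demo()
{
    vnl_matrix<double> data(20, 4);
    for (unsigned r = 0; r < data.rows(); ++r)
        for (unsigned c = 0; c < data.cols(); ++c)
            data(r, c) = 0.5 + 0.01 * static_cast<double>(c);

    vnl_vector<double> smoothed;
    rgrsn_ldp::dynamic_programming(data, 0.0, 1.0, 10, 1, 5, smoothed);
    // smoothed[r] averages the optimal bin centers over all windows covering row r.
}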
Example #9
void LocalGeometryRef::Initialize(vnl_matrix<double> data)
{
	this->num_row = data.rows();
	this->num_col = data.columns();
	this->data_matrix.set_size(this->num_row, this->num_col);
	for(int row = 0; row < this->num_row; row++)
	{
		for(int col = 0; col < this->num_col; col++)
		{
			this->data_matrix(row, col) = data(row, col);
		}
	}
}
Example #10
/*
   Matlab code in cpd_G.m:
   k=-2*beta^2;
   [n, d]=size(x);  [m, d]=size(y);

   G=repmat(x,[1 1 m])-permute(repmat(y,[1 1 n]),[3 2 1]);
   G=squeeze(sum(G.^2,2));
   G=G/k;
   G=exp(G);
   */
void ComputeGaussianKernel(const vnl_matrix<double>& model,
    const vnl_matrix<double>& ctrl_pts,
    vnl_matrix<double>& G, vnl_matrix<double>& K,
    double lambda) {
  int m,n,d;
  m = model.rows();
  n = ctrl_pts.rows();
  d = ctrl_pts.cols();
  //assert(model.cols()==d);
  //assert(lambda>0);

  G.set_size(m,n);
  GaussianAffinityMatrix(model.data_block(), ctrl_pts.data_block(),
      m, n, d, lambda, G.data_block());

  if (model == ctrl_pts) {
    K = G;
  } else {
    K.set_size(n,n);
    GaussianAffinityMatrix(ctrl_pts.data_block(), ctrl_pts.data_block(),
        n, n, d, lambda, K.data_block());
  }
}
Example #11
void ComputeTPSKernelU(const vnl_matrix<double>& model,
    const vnl_matrix<double>& ctrl_pts,
    vnl_matrix<double>& U) {
  int m = model.rows();
  int n = ctrl_pts.rows();
  int d = ctrl_pts.cols();
  //assert(model.cols()==d==(2|3));
  //K.set_size(n, n);
  //K.fill(0);
  U.set_size(m, n);
  U.fill(0);
  double eps = 1e-006;

  vnl_vector<double> v_ij;
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      v_ij = model.get_row(i) - ctrl_pts.get_row(j);
      double r = v_ij.two_norm();
      U(i, j) = -r;
    }
  }
}
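A quick sanity sketch with hypothetical points: U(i,j) is minus the Euclidean distance between model point i and control point j.

#include <vnl/vnl_matrix.h>

// Hypothetical check of the TPS kernel above.
void tps_kernel_demo()
{
    vnl_matrix<double> model(2, 2), ctrl(1, 2);
    model(0,0) = 0; model(0,1) = 0;
    model(1,0) = 3; model(1,1) = 4;
    ctrl(0,0) = 0;  ctrl(0,1) = 0;

    vnl_matrix<double> U;
    ComputeTPSKernelU(model, ctrl, U);
    // U(0,0) == 0 and U(1,0) == -5 (minus the distance between (3,4) and the origin).
}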
Example #12
//Reshape the matrix : columns first ; Similar to MATLAB
vnl_matrix<double> MCLR_SM::Reshape_Matrix(vnl_matrix<double> mat, int r, int c)
{
	if(mat.rows()*mat.cols() != r*c)
	{
		cout<< "Number of elements in the matrix/vector should be equal to the total number of elements in the reshaped matrix";
		getchar();
		exit(1);
	}
	
	vnl_matrix<double> reshaped_matrix;
	reshaped_matrix.set_size(r,c);
	int count = 0;
	
	for(int j=0;j<c;++j)	
	{
		for(int i=0;i<r;++i)
		{
			reshaped_matrix(i,j) = mat(count % mat.rows(), count / mat.rows()); // column-first, as in MATLAB
			count++;
		}
	}
	return reshaped_matrix;
}
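A column-first reshape example, assuming an MCLR_SM instance is available; the 2x3 input below is hypothetical.

#include <vnl/vnl_matrix.h>

// Hypothetical column-first reshape of a 2x3 matrix into 3x2.
void reshape_demo(MCLR_SM & mclr)
{
    vnl_matrix<double> mat(2, 3);
    mat(0,0) = 1; mat(0,1) = 3; mat(0,2) = 5;
    mat(1,0) = 2; mat(1,1) = 4; mat(1,2) = 6;

    vnl_matrix<double> r = mclr.Reshape_Matrix(mat, 3, 2);
    // Elements are consumed down each column of mat, so r is
    // [1 4; 2 5; 3 6], matching MATLAB's reshape(mat, 3, 2).
}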
Example #13
	void WriteMatrixImage(const vnl_matrix<double> &M, const std::string &Filename)
	{
		vil_image_view<vxl_byte> Image(M.rows(), M.columns(), 1, 1); //(ni, nj, n_planes, n_interleaved_planes)
	
		for (unsigned j = 0; j < Image.nj(); j++)
		{
			for (unsigned i = 0; i < Image.ni(); i++)
			{
				Image(i,j) = static_cast<vxl_byte>(255 * M(i,j));
			}
		}
		
		vil_save(Image, Filename.c_str());
	}
Example #14
	bool CloseEnough(const vnl_matrix<double> &M1, const vnl_matrix<double> &M2, const double eps)
	{
		unsigned int NumRows = M1.rows();
		unsigned int NumCols = M1.columns();
		if((M2.rows() != NumRows) || (M2.columns() != NumCols))
		{
			std::cout << "Dimensions do not match!" << std::endl;
			return false;	
		}
		
		for(unsigned int r = 0; r < NumRows; r++)
		{
			for(unsigned int c = 0; c < NumCols; c++)
			{
				if(fabs(M1(r,c) - M2(r,c)) > eps)
				{
					std::cout << "Failed comparison: " << "M1: " << M1(r,c) << " M2: " << M2(r,c) << " diff: " << fabs(M1(r,c) - M2(r,c)) << std::endl;
					return false;
				}
			}
		}
		return true;	
	}
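	A hedged usage sketch of the comparison above; the matrices below are made up.

	#include <iostream>
	#include <vnl/vnl_matrix.h>

	// Hypothetical comparison of two nearly identical matrices.
	void close_enough_demo()
	{
		vnl_matrix<double> A(2, 2, 1.0);
		vnl_matrix<double> B(A);
		B(1, 1) += 1e-8;

		bool same = CloseEnough(A, B, 1e-6);   // true: the difference is within eps
		bool diff = CloseEnough(A, B, 1e-10);  // false: 1e-8 exceeds eps and the mismatch is printed
		std::cout << same << " " << diff << std::endl;
	}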
Example #15
bool rgrsn_ldp::local_viterbi(const vnl_matrix<double> & data,
                              double resolution,
                              const vnl_vector<double> & transition,
                              unsigned int window_size,
                              vnl_vector<double> & optimal_signal)
{
    assert(resolution > 0.0);
    assert(transition.size()%2 == 1);
    
    const double min_v = data.min_value();
    const double max_v = data.max_value();
    const int nBin = (max_v - min_v)/resolution;
    
    // raw data to probability map
    // quantization
    const int N = data.rows();
    vnl_matrix<double> probMap(N, nBin, 0.0); // zero-initialize before accumulating counts
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(min_v, resolution, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    
    for (int i = 0; i <= N - (int)window_size; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(window_size, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::viterbi(localProbMap, transition, localOptimalBins);
        assert(localOptimalBins.size() == window_size);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(min_v, resolution, localOptimalBins[j]);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    
    // average all optimal path as final result
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    optimal_signal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    return true;
}
Example #16
void itk::BiExpFitFunctor::operator()(vnl_matrix<double> & newSignal,const vnl_matrix<double> & SignalMatrix, const double & S0)
{

  vnl_vector<double> initalGuess(3);
  // initialize least squares function
  // SignalMatrix.cols() defines the number of shells points
  lestSquaresFunction model(SignalMatrix.cols());
  model.set_bvalues(m_BValueList);// set BValue Vector e.g.: [1000, 2000, 3000] <- shell b Values

  // initialize Levenberg Marquardt
  vnl_levenberg_marquardt minimizer(model);
  minimizer.set_max_function_evals(1000);   // Iterations
  minimizer.set_f_tolerance(1e-10);        // Function tolerance

  // for each Direction calculate LSF Coeffs ADC & AKC
  for(unsigned int i = 0 ; i < SignalMatrix.rows(); i++)
  {
    model.set_measurements(SignalMatrix.get_row(i));
    model.set_reference_measurement(S0);

    initalGuess.put(0, 0.f); // ADC_slow
    initalGuess.put(1, 0.009f); // ADC_fast
    initalGuess.put(2, 0.7f); // lambda

    // start Levenberg-Marquardt
    minimizer.minimize_without_gradient(initalGuess);

    const double & ADC_slow = initalGuess.get(0);
    const double & ADC_fast = initalGuess.get(1);
    const double & lambda = initalGuess(2);

    newSignal.put(i, 0, S0 * (lambda * std::exp(-m_TargetBvalue * ADC_slow) + (1-lambda)* std::exp(-m_TargetBvalue * ADC_fast)));
    newSignal.put(i, 1, minimizer.get_end_error()); // RMS Error

    //OUTPUT FOR EVALUATION
    /*std::cout << std::scientific << std::setprecision(5)
              << ADC_slow   << ","                        // lambda
              << ADC_fast   << ","                        // alpha
              << lambda     << ","                        // lambda
              << S0         << ","                        // S0 value
              << minimizer.get_end_error() << ",";      // End error
    for(unsigned int j = 0; j < SignalMatrix.get_row(i).size(); j++ ){
      std::cout << std::scientific << std::setprecision(5) << SignalMatrix.get_row(i)[j];    // S_n Values corresponding to shell 1 to shell n
      if(j != SignalMatrix.get_row(i).size()-1) std::cout << ",";
    }
    std::cout << std::endl;*/
  }

}
Example #17
int LoadMatrixFromTxt(const char* filename, vnl_matrix<double>& matrix) {
  std::ifstream infile(filename, std::ios_base::in);
  if (infile.is_open()) {
    if (matrix.read_ascii(infile)) {
      return matrix.rows();
    } else {
      std::cerr << "unable to parse input file " << filename
                << " as a matrix." << std::endl;
      return -1;
    }
  } else {
    std::cerr << "unable to open model file " << filename << std::endl;
    return -1;
  }
}
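A minimal usage sketch; the file name below is an assumption, not part of the original example.

#include <iostream>
#include <vnl/vnl_matrix.h>

// Hypothetical usage of LoadMatrixFromTxt above.
void load_matrix_demo()
{
    vnl_matrix<double> m;
    int rows = LoadMatrixFromTxt("points.txt", m);
    if (rows >= 0)
        std::cout << "loaded " << rows << " x " << m.cols() << " matrix" << std::endl;
    else
        std::cout << "load failed" << std::endl;
}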
Example #18
bool rgrsn_ldp::compact_transition_matrix(const vcl_vector<int> & fns,
                                          const vcl_vector<double> & values,
                                          vnl_matrix<double> & transition,
                                          const double resolution)
{
    assert(fns.size() == values.size());
    
    double min_v = *vcl_min_element(values.begin(), values.end());
    double max_v = *vcl_max_element(values.begin(), values.end());
    unsigned num_bin = (max_v - min_v)/resolution;
    
    unsigned max_bin_transition = 0;  // biggest transition between frames quantized in bin
    for (int i = 0; i + 1 < (int)fns.size(); i++) { // stop one short: fns[i+1] is read below
        if (fns[i] + 1 == fns[i+1]) {
            double cur_v  = values[i];
            double next_v = values[i+1];
            int cur_bin  = value_to_bin_number(min_v, resolution, cur_v, num_bin);
            int next_bin = value_to_bin_number(min_v, resolution, next_v, num_bin);
            int dif = abs(next_bin - cur_bin);
            if (dif > max_bin_transition) {
                max_bin_transition = dif;
            }
        }
    }
    transition = vnl_matrix<double>(max_bin_transition * 2 + 1, num_bin, 0.0);
    vnl_vector<double> column(num_bin, 0.0);
    for (int i = 0; i + 1 < (int)fns.size(); i++) {
        if (fns[i] + 1 == fns[i+1]) {
            double cur_v = values[i];
            double next_v = values[i+1];
            int cur_bin  = value_to_bin_number(min_v, resolution, cur_v, num_bin);
            int next_bin = value_to_bin_number(min_v, resolution, next_v, num_bin);
            int row = max_bin_transition + (next_bin - cur_bin);
            transition[row][cur_bin] += 1.0;
            column[cur_bin] += 1.0;
        }
    }
    
    // normalize each column (disabled here; transition keeps raw counts)
    /*
    for (int r = 0; r < transition.rows(); r++) {
        for (int c = 0; c < transition.cols(); c++) {
            transition[r][c] /= column[c];
        }
    }
    */
    
    return true;
}
Example #19
int vnl_matrix2vtkPolyData(vtkPolyData* A, vnl_matrix<double>& matrix) 
{
	int dim = matrix.cols();
	int n = matrix.rows();
	//int n = A->GetNumberOfPoints();
	//matrix.set_size(n, dim);
	double P[3] = {0.0, 0.0, 0.0}; // assumes dim <= 3
	for(int i = 0;i < n; i++)
	{
		for(int j = 0;j < dim; j++ )
		{
			P[j] = matrix(i,j);
		}
		A->GetPoints()->SetPoint(i,P);
	}
	return 1;
}
Example #20
vnl_matrix<double> FiniteDiffOdfMaximaExtractionFilter< PixelType, ShOrder, NrOdfDirections>
::CalcShBasis(vnl_matrix<double>& sphCoords)
{
    int M = sphCoords.rows();
    int j, m; double mag, plm;
    vnl_matrix<double> shBasis;
    shBasis.set_size(M, m_NumCoeffs);

    for (int p=0; p<M; p++)
    {
        j=0;
        for (int l=0; l<=ShOrder; l=l+2)
            for (m=-l; m<=l; m++)
            {
                switch (m_Toolkit)
                {
                case FSL:
                    plm = legendre_p<double>(l,abs(m),cos(sphCoords(p,0)));
                    mag = sqrt((double)(2*l+1)/(4.0*M_PI)*factorial<double>(l-abs(m))/factorial<double>(l+abs(m)))*plm;

                    if (m<0)
                        shBasis(p,j) = sqrt(2.0)*mag*cos(fabs((double)m)*sphCoords(p,1));
                    else if (m==0)
                        shBasis(p,j) = mag;
                    else
                        shBasis(p,j) = pow(-1.0, m)*sqrt(2.0)*mag*sin(m*sphCoords(p,1));
                    break;

                case MRTRIX:
                    plm = legendre_p<double>(l,abs(m),-cos(sphCoords(p,0)));
                    mag = sqrt((double)(2*l+1)/(4.0*M_PI)*factorial<double>(l-abs(m))/factorial<double>(l+abs(m)))*plm;

                    if (m>0)
                        shBasis(p,j) = mag*cos(m*sphCoords(p,1));
                    else if (m==0)
                        shBasis(p,j) = mag;
                    else
                        shBasis(p,j) = mag*sin(-m*sphCoords(p,1));
                    break;
                }

                j++;
            }
    }
    return shBasis;
}
Example #21
void normalize(vnl_matrix<double>& x,
    vnl_vector<double>& centroid, double& scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  centroid.set_size(d);

  vnl_vector<double> col;
  for (int i = 0; i < d; ++i) {
    col = x.get_column(i);
    centroid(i) = col.mean();
  }
  for (int i = 0; i < n; ++i) {
    x.set_row(i, x.get_row(i) - centroid);
  }
  scale = x.frobenius_norm() / sqrt(double(n));
  x = x / scale;
}
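A round-trip sketch combining this normalize with denormalize from Example #3; the point set is hypothetical.

#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>

// Hypothetical round trip: normalize a point set, then undo it.
void normalize_roundtrip_demo()
{
    vnl_matrix<double> x(4, 2);
    x(0,0) = 0; x(0,1) = 0;
    x(1,0) = 2; x(1,1) = 0;
    x(2,0) = 2; x(2,1) = 2;
    x(3,0) = 0; x(3,1) = 2;

    vnl_vector<double> centroid;
    double scale = 0.0;
    normalize(x, centroid, scale);    // x now has zero centroid and unit RMS norm
    denormalize(x, centroid, scale);  // restores the original coordinates
}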
Example #22
	void WriteMatrixImageScaled(const vnl_matrix<double> &M, const std::string &Filename)
	{
		vil_image_view<vxl_byte> Image(M.rows(), M.columns(), 1, 1); //(ni, nj, n_planes, n_interleaved_planes)
		
		//double Max = Tools::VectorMax(Vectorize(M));
		double Max = M.max_value();
		
		for (unsigned j = 0; j < Image.nj(); j++)
		{
			for (unsigned i = 0; i < Image.ni(); i++)
			{
				Image(i,j) = static_cast<vxl_byte>(255 * M(i,j)/Max);
				//cout << "M: " << M(i,j) << endl;
				//cout << "Image: " << Image(i,j) << endl;
			}
		}
		
		vil_save(Image, Filename.c_str());
	}
Example #23
vnl_vector<double> mitk::GeneralizedLinearModel::ExpMu(const vnl_matrix<double> &x)
{
  LogItLinking link;
  vnl_vector<double> mu(x.rows());
  int cols = m_B.size();
  for (unsigned int r = 0 ; r < mu.size(); ++r)
  {
    mu(r) = 0;
    for (int c = 0; c < cols; ++c)
    {
      if (!m_AddConstantColumn)
        mu(r) += x(r,c)*m_B(c);
      else if ( c == 0)
        mu(r) += m_B(c);
      else
        mu(r) += x(r,c-1)*m_B(c);
    }
    mu(r) = exp(-mu(r));
  }
  return mu;
}
Example #24
void mitk::GeneralizedLinearModel::EstimatePermutation(const vnl_matrix<double> &xData)
{
  v3p_netlib_integer rows = xData.rows();
  v3p_netlib_integer cols = xData.cols();

  if (m_AddConstantColumn)
    ++cols;

  v3p_netlib_doublereal *x = new v3p_netlib_doublereal[rows* cols];
  _UpdateXMatrix(xData, m_AddConstantColumn, x);
  v3p_netlib_doublereal *qraux = new v3p_netlib_doublereal[cols];
  v3p_netlib_integer *jpvt = new v3p_netlib_integer[cols];
  std::fill_n(jpvt,cols,0);
  v3p_netlib_doublereal *work = new v3p_netlib_doublereal[cols];
  std::fill_n(work,cols,0);
  v3p_netlib_integer job = 16;

  // Make a call to Lapack-DQRDC which does QR with permutation
  // Permutation is saved in JPVT.
  v3p_netlib_dqrdc_(x, &rows, &rows, &cols, qraux, jpvt, work, &job);

  double limit = std::abs(x[0]) * std::max(cols, rows) * std::numeric_limits<double>::epsilon();
  // Calculate the rank of the matrix
  int m_Rank = 0;
  for (int i = 0; i <cols; ++i)
  {
    m_Rank += (std::abs(x[i*rows + i]) > limit) ? 1 : 0;
  }
  // Create a permutation vector
  m_Permutation.set_size(m_Rank);
  for (int i = 0; i < m_Rank; ++i)
  {
    m_Permutation(i) = jpvt[i]-1;
  }

  delete[] x;
  delete[] qraux;
  delete[] jpvt;
  delete[] work;
}
Example #25
void pick_indices(const vnl_matrix<double>&dist,
    std::vector<int>&row_index, std::vector<int>&col_index,
    const double threshold) {
  int m = dist.rows();
  int n = dist.cols();
  vnl_vector<int> row_flag, col_flag;
  col_flag.set_size(n);  col_flag.fill(0);
  row_flag.set_size(m);  row_flag.fill(0); // one flag per row of dist
  for (int i = 0; i < m; ++i) {
    double min_dist = dist.get_row(i).min_value();
    if (min_dist < threshold) {
      for (int j = 0; j < n; ++j){
        if (dist(i,j) == min_dist && col_flag[j] == 0){
          row_index.push_back(i);
          row_flag[i] = 1;
          col_index.push_back(j);
          col_flag[j] = 1;
        }
      }
    }
  }
}
Example #26
// Fills the value of the xData-matrix into the x-matrix. Adds a constant
// column if required. Permutes the rows corresponding to the permutation vector.
static void _UpdatePermXMatrix(const vnl_matrix<double> &xData, bool addConstant, const vnl_vector<unsigned int> &permutation, vnl_matrix<double> &x)
{
  int rows = xData.rows();
  int cols = permutation.size();
  x.set_size(rows, cols);
  for (int r=0; r < rows; ++r)
  {
    for (int c=0; c<cols; ++c)
    {
      unsigned int newCol = permutation(c);
      if (!addConstant)
      {
        x(r, c) = xData(r,newCol);
      } else if (newCol == 0)
      {
        x(r, c) = 1.0;
      } else
      {
        x(r, c) = xData(r, newCol-1);
      }
    }
  }
}
Example #27
// Copy a vnl-matrix to a C array with column-wise (Fortran-order) storage.
// Adds a constant column if required.
static void _UpdateXMatrix(const vnl_matrix<double> &xData, bool addConstant, v3p_netlib_doublereal *x)
{
  v3p_netlib_integer rows = xData.rows();
  v3p_netlib_integer cols = xData.cols();
  if (addConstant)
    ++cols;

  for (int r=0; r < rows; ++r)
  {
    for (int c=0; c <cols; ++c)
    {
      if (!addConstant)
      {
        x[c*rows + r] = xData(r,c);
      } else if (c == 0)
      {
        x[c*rows + r] = 1.0;
      } else
      {
        x[c*rows + r] = xData(r, c-1);
      }
    }
  }
}
Example #28
bool rgrsn_ldp::dynamic_programming(const vnl_matrix<double> & data,
                                    double v_min, double v_max,
                                    unsigned int nBin,
                                    int nJumpBin,
                                    unsigned int windowSize,
                                    vnl_vector<double> & optimalSignal,
                                    vnl_vector<double> & signal_variance)
{
    assert(v_min < v_max);
    // raw data to probability map
    // quantization
    const int N = data.rows();
    vnl_matrix<double> probMap(N, nBin, 0.0); // zero-initialize before accumulating counts
    double interval = (v_max - v_min)/nBin;
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(v_min, interval, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    vcl_vector<vcl_vector<double> > all_values(N);
    for (int i = 0; i <= N - (int)windowSize; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(windowSize, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::local_dynamic_programming(localProbMap, nJumpBin, localOptimalBins);
        assert(localOptimalBins.size() == windowSize);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(v_min, interval, localOptimalBins[j]);
            assert(j + i < N);
            all_values[j + i].push_back(value);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    // mean value
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    
    // variance
    signal_variance = vnl_vector<double>(N, 0);
    for (int i = 0; i<optimalValues.size(); i++) {
        assert(all_values[i].size() > 0);
        if (all_values[i].size() == 1) {
            signal_variance[i] = 0.0001;
        }
        else
        {
            double dump_mean = 0.0;
            double sigma = 0.0;
            VnlPlus::mean_std(&all_values[i][0], (int)all_values[i].size(), dump_mean, sigma);
            signal_variance[i] = sigma + 0.0001; // avoid zero
        }
    }
    optimalSignal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    // save variance with the size of window size, for test purpose
    if(0)
    {
        vcl_vector<vnl_vector<double> > all_value_vecs;
        for (int i = 0; i<all_values.size(); i++) {
            if (all_values[i].size() == windowSize) {
                all_value_vecs.push_back(VnlPlus::vector_2_vec(all_values[i]));
            }
        }
        
        vcl_string save_file("ldp_all_prediction.mat");
        vnl_matlab_filewrite awriter(save_file.c_str());
        awriter.write(VnlPlus::vector_2_mat(all_value_vecs), "ldp_all_opt_path");
        printf("save to %s\n", save_file.c_str());
    }
    return true;
}
Example #29
bool rgrsn_ldp::local_dynamic_programming_log(const vnl_matrix<double> & probMap, int nNeighborBin,
                                              vcl_vector<int> & optimalBins)
{
    // find minimum path
    const int N    = probMap.rows();
    const int nBin = probMap.cols();
    const double epsilon = 0.01;
    
    vnl_matrix<double> negLogProbMap(N, nBin);
    for (int r = 0; r<N; r++) {
        for (int c = 0; c <nBin; c++) {
            negLogProbMap(r, c) = -log(probMap(r, c) + epsilon);
        }
    }
    
    // dynamic programming
    vnl_matrix<double> accumulatedMap = vnl_matrix<double>(N, nBin);
    accumulatedMap.fill(0.0);
    vnl_matrix<int> lookbackTable = vnl_matrix<int>(N, nBin);
    lookbackTable.fill(0);
    // copy first row
    for (int c = 0; c<negLogProbMap.cols(); c++) {
        accumulatedMap[0][c] = negLogProbMap[0][c];
    }
    
    for (int r = 1; r <N; r++) {
        for (int c = 0; c<negLogProbMap.cols(); c++) {
            // lookup all possible place in the window
            double min_val = INT_MAX;
            int index      = -1;
            for (int w = -nNeighborBin; w <= nNeighborBin; w++) {
                if (c + w <0 || c + w >= negLogProbMap.cols()) {
                    continue;
                }
                double val = negLogProbMap[r][c] + accumulatedMap[r-1][c+w];
                if (val < min_val) {
                    min_val = val;
                    index = c + w;
                }
            }
            assert(index != -1);
            accumulatedMap[r][c] = min_val;
            lookbackTable[r][c]  = index;
        }
    }
    
    // lookback the table
    double min_val = INT_MAX;
    int initIndex  = -1;
    for (int c = 0; c<accumulatedMap.cols(); c++) {
        if (accumulatedMap[N-1][c] < min_val) {
            min_val = accumulatedMap[N-1][c];
            initIndex = c;
        }
    }
    
    // back track
    optimalBins.push_back(initIndex);
    for (int r = N-1; r > 0; r--) {
        int bin = lookbackTable[r][optimalBins.back()];
        optimalBins.push_back(bin);
    }
    assert(optimalBins.size() == N);
    
    vcl_reverse(optimalBins.begin(), optimalBins.end());
    return true;
}
Example #30
bool rgrsn_ldp::viterbi(const vnl_matrix<double> & prob_map, const vnl_vector<double> & transition,
                        vcl_vector<int> & optimal_bins)
{
    
    const int N    = prob_map.rows();
    const int nBin = prob_map.cols();
    const int nNeighborBin = transition.size()/2;
    const double epsilon = 0.01;
    
    // dynamic programming
    vnl_matrix<double> log_accumulatedProbMap = vnl_matrix<double>(N, nBin);
    log_accumulatedProbMap.fill(0.0);
    vnl_matrix<int> lookbackTable = vnl_matrix<int>(N, nBin);
    lookbackTable.fill(0);
    // copy first row
    for (int c = 0; c<prob_map.cols(); c++) {
        log_accumulatedProbMap[0][c] = log(prob_map[0][c] + epsilon);
        lookbackTable[0][c] = c;
    }
    vnl_vector<double> log_transition = vnl_vector<double>(transition.size(), 0);
    
    for (int i = 0; i<transition.size(); i++) {
        log_transition[i] = log(transition[i] + epsilon);
    }
    
    for (int r = 1; r <N; r++) {
        for (int c = 0; c<prob_map.cols(); c++) {
            // lookup all possible place in the window
            double max_val = -vcl_numeric_limits<double>::max(); // lowest representable double
            int max_index  = -1;
            for (int w = -nNeighborBin; w <= nNeighborBin; w++) {
                if (c + w < 0 || c + w >= prob_map.cols()) {
                    continue;
                }
                assert(w + nNeighborBin >= 0 && w + nNeighborBin < transition.size());
                double val = log_accumulatedProbMap[r-1][c+w] + log_transition[w + nNeighborBin];
                if (val > max_val) {
                    max_val = val;
                    max_index = c + w; // most probable path from the [r-1] row, in column c + w
                }
            }
            assert(max_index != -1);
            log_accumulatedProbMap[r][c] = max_val + log(prob_map[r][c] + epsilon);
            lookbackTable[r][c]          = max_index;
        }
    }
    
    // lookback the table
    double max_prob    = -vcl_numeric_limits<double>::max();
    int max_prob_index = -1;
    for (int c = 0; c<log_accumulatedProbMap.cols(); c++) {
        if (log_accumulatedProbMap[N-1][c] > max_prob) {
            max_prob = log_accumulatedProbMap[N-1][c];
            max_prob_index = c;
        }
    }
    
    // back track
    optimal_bins.push_back(max_prob_index);
    for (int r = N-1; r > 0; r--) {
        int bin = lookbackTable[r][optimal_bins.back()];
        optimal_bins.push_back(bin);
    }
    assert(optimal_bins.size() == N);
    vcl_reverse(optimal_bins.begin(), optimal_bins.end());
    return true;
}
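A small sketch, assuming the rgrsn_ldp class above: the transition vector must have odd length, with entry k giving the (unnormalized) probability of moving k - nNeighborBin bins between consecutive rows. The probabilities below are made up.

#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>
#include <vcl_vector.h>

// Hypothetical driver for the Viterbi decoder above.
void viterbi_demo()
{
    vnl_matrix<double> prob_map(3, 5, 0.0);
    prob_map(0, 2) = 1.0;
    prob_map(1, 3) = 1.0;
    prob_map(2, 3) = 1.0;

    vnl_vector<double> transition(3);
    transition[0] = 0.25;  // move one bin down
    transition[1] = 0.50;  // stay in the same bin
    transition[2] = 0.25;  // move one bin up

    vcl_vector<int> bins;
    rgrsn_ldp::viterbi(prob_map, transition, bins);
    // bins is ordered from the first row to the last (the path is reversed before returning).
}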