Example #1
	std::vector<vnl_vector<double> > EigenVectors(const vnl_matrix<double> &M)
	{
		vnl_symmetric_eigensystem<double> Eigs(M);

		std::vector<vnl_vector<double> > EVecs;
		for(unsigned int i = 0; i < M.columns(); i++)
			EVecs.push_back(Eigs.get_eigenvector(i));

		return EVecs;
	}
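A minimal calling sketch, assuming the VXL headers are on the include path and EigenVectors above is in scope; the 2x2 symmetric matrix is purely illustrative:

	#include <vector>
	#include <iostream>
	#include <vnl/vnl_matrix.h>
	#include <vnl/vnl_vector.h>

	void PrintEigenVectorsDemo()
	{
		vnl_matrix<double> M(2, 2);
		M(0,0) = 2.0; M(0,1) = 1.0;   // small symmetric matrix
		M(1,0) = 1.0; M(1,1) = 2.0;

		std::vector<vnl_vector<double> > EVecs = EigenVectors(M);
		for (unsigned int i = 0; i < EVecs.size(); i++)
			std::cout << "eigenvector " << i << ": " << EVecs[i] << std::endl;
	}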
void FiniteDiffOdfMaximaExtractionFilter< PixelType, ShOrder, NrOdfDirections>
::CreateDirMatrix(const std::vector< DirectionType >& dir, vnl_matrix<double>& sphCoords)
{
  sphCoords.set_size(3, dir.size());
  for (unsigned int i=0; i<dir.size(); i++)
  {
    sphCoords(0, i) = dir[i](0);
    sphCoords(1, i) = dir[i](1);
    sphCoords(2, i) = dir[i](2);
  }
}
Example #3
vnl_vector<double> Rigid2DTransform::GetParametersFromMatrix(const vnl_matrix<double> &transformMatrix) const {
	vnl_vector<double> params(m_nbParams);

	vnl_matrix<double> rotmat = transformMatrix.extract(m_nbDimensions,m_nbDimensions,0,0);

	params(0) = transformMatrix(0,m_nbDimensions);
	params(1) = transformMatrix(1,m_nbDimensions);

	params(2) = psciob::GetAngleFrom2DRotationMatrix(rotmat);

	return params;
}
Example #4
vnl_matrix<double> MCLR_SM::Normalize_Feature_Matrix_1(vnl_matrix<double> feats, vnl_vector<double> vector_1, vnl_vector<double> vector_2)
{
	std_vec = vector_1;
	mean_vec = vector_2;

	// The last column is the training column
	for(unsigned int i = 0; i < feats.columns(); ++i)
	{
		vnl_vector<double> temp_col = feats.get_column(i);
		if(std_vec(i) > 0)
		{
			for(unsigned int j = 0; j < temp_col.size(); ++j)
				temp_col[j] = (temp_col[j] - mean_vec(i)) / std_vec(i);
		}

		feats.set_column(i, temp_col);
	}

	return feats;
}
Example #5
// f = f./repmat(sum(f,1),[classN,1]);
vnl_matrix<double> MCLR_SM::Normalize_F_Sum(vnl_matrix<double> f)
{
	// Matrix for normalization
	vnl_matrix<double> norm_matrix(f.rows(), f.cols());
	vnl_vector<double> norm_matrix_row;
	norm_matrix_row.set_size(f.cols());

	// repmat(sum(f,1),[classN,1]);
	for(unsigned int i = 0; i < f.cols(); ++i)
	{
		double sum = 0;
		for(int j = 0; j < no_of_classes; ++j)
		{
			sum = sum + f(j, i);
		}
		norm_matrix_row(i) = sum;
	}

	for(int i = 0; i < no_of_classes; ++i)
	{
		norm_matrix.set_row(i, norm_matrix_row);
	}

	// f = f./repmat(sum(f,1),[classN,1]);
	for(unsigned int i = 0; i < f.rows(); ++i)
	{
		for(unsigned int j = 0; j < f.cols(); ++j)
		{
			f(i, j) = f(i, j) / norm_matrix(i, j);
		}
	}

	return f;
}
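A small worked illustration (hypothetical values, assuming an MCLR_SM instance `mclr` with no_of_classes == 2): each column of f is divided by its sum over the classes, so every column of the result sums to 1.

	vnl_matrix<double> f(2, 2);
	f(0,0) = 1.0; f(0,1) = 3.0;
	f(1,0) = 3.0; f(1,1) = 1.0;

	vnl_matrix<double> g = mclr.Normalize_F_Sum(f);
	// g(0,0) = 0.25, g(1,0) = 0.75   (first column summed to 4)
	// g(0,1) = 0.75, g(1,1) = 0.25   (second column summed to 4)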
void mitk::GeneralizedLinearModel::EstimatePermutation(const vnl_matrix<double> &xData)
{
  v3p_netlib_integer rows = xData.rows();
  v3p_netlib_integer cols = xData.cols();

  if (m_AddConstantColumn)
    ++cols;

  v3p_netlib_doublereal *x = new v3p_netlib_doublereal[rows* cols];
  _UpdateXMatrix(xData, m_AddConstantColumn, x);
  v3p_netlib_doublereal *qraux = new v3p_netlib_doublereal[cols];
  v3p_netlib_integer *jpvt = new v3p_netlib_integer[cols];
  std::fill_n(jpvt,cols,0);
  v3p_netlib_doublereal *work = new v3p_netlib_doublereal[cols];
  std::fill_n(work,cols,0);
  v3p_netlib_integer job = 16;

  // Call the netlib routine DQRDC (LINPACK), which computes a QR decomposition
  // with column pivoting; the permutation is returned in JPVT.
  v3p_netlib_dqrdc_(x, &rows, &rows, &cols, qraux, jpvt, work, &job);

  double limit = std::abs(x[0]) * std::max(cols, rows) * std::numeric_limits<double>::epsilon();
  // Calculate the rank of the matrix
  int m_Rank = 0;
  for (int i = 0; i <cols; ++i)
  {
    m_Rank += (std::abs(x[i*rows + i]) > limit) ? 1 : 0;
  }
  // Create a permutation vector
  m_Permutation.set_size(m_Rank);
  for (int i = 0; i < m_Rank; ++i)
  {
    m_Permutation(i) = jpvt[i]-1;
  }

  delete[] x;
  delete[] qraux;
  delete[] jpvt;
  delete[] work;
}
Example #7
int vtkPolyData2vnl_matrix(vtkPolyData* A, vnl_matrix<double>& matrix) 
{
	int dim = 3;
	int n = A->GetNumberOfPoints();
	matrix.set_size(n, dim);
	for(int i = 0; i < n; i++)
	{
		double *P = A->GetPoint(i);
		for(int j = 0; j < dim; j++)
			matrix(i,j) = P[j];
	}
	return 1;
}
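A usage sketch, assuming the standard VTK point APIs; the two points are only illustrative:

	#include <vtkSmartPointer.h>
	#include <vtkPoints.h>
	#include <vtkPolyData.h>

	void PolyDataToMatrixDemo()
	{
		vtkSmartPointer<vtkPoints> pts = vtkSmartPointer<vtkPoints>::New();
		pts->InsertNextPoint(0.0, 0.0, 0.0);
		pts->InsertNextPoint(1.0, 2.0, 3.0);

		vtkSmartPointer<vtkPolyData> poly = vtkSmartPointer<vtkPolyData>::New();
		poly->SetPoints(pts);

		vnl_matrix<double> matrix;
		vtkPolyData2vnl_matrix(poly, matrix);   // matrix is now 2 x 3
	}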
Example #8
void pick_indices(const vnl_matrix<double>&dist,
    std::vector<int>&row_index, std::vector<int>&col_index,
    const double threshold) {
  int m = dist.rows();
  int n = dist.cols();
  vnl_vector<int> row_flag, col_flag;
  col_flag.set_size(n);  col_flag.fill(0);
  row_flag.set_size(m);  row_flag.fill(0);
  for (int i = 0; i < m; ++i) {
    double min_dist = dist.get_row(i).min_value();
    if (min_dist < threshold) {
      for (int j = 0; j < n; ++j){
        if (dist(i,j) == min_dist && col_flag[j] == 0){
          row_index.push_back(i);
          row_flag[i] = 1;
          col_index.push_back(j);
          col_flag[j] = 1;
        }
      }
    }
  }
}
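A minimal sketch of the intended use (hypothetical values): each row is matched to its closest column when that distance is below the threshold.

  vnl_matrix<double> dist(2, 2);
  dist(0,0) = 0.1;  dist(0,1) = 5.0;
  dist(1,0) = 4.0;  dist(1,1) = 0.2;

  std::vector<int> row_index, col_index;
  pick_indices(dist, row_index, col_index, 1.0);
  // row_index = {0, 1}, col_index = {0, 1}: rows 0 and 1 pair with columns 0 and 1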
Example #9
double GaussTransform(const vnl_matrix<double>& A,
    const vnl_matrix<double>& B, double scale,
    vnl_matrix<double>& gradient) {
  // assert A.cols() == B.cols()
  return GaussTransform(A.data_block(), B.data_block(),
      A.rows(), B.rows(), A.cols(), scale,
      gradient.data_block());
}
// Fills the values of the xData-matrix into the x-matrix. Adds a constant
// column if required. Permutes the columns according to the permutation vector.
static void _UpdatePermXMatrix(const vnl_matrix<double> &xData, bool addConstant, const vnl_vector<unsigned int> &permutation, vnl_matrix<double> &x)
{
  int rows = xData.rows();
  int cols = permutation.size();
  x.set_size(rows, cols);
  for (int r=0; r < rows; ++r)
  {
    for (int c=0; c<cols; ++c)
    {
      unsigned int newCol = permutation(c);
      if (!addConstant)
      {
        x(r, c) = xData(r,newCol);
      } else if (newCol == 0)
      {
        x(r, c) = 1.0;
      } else
      {
        x(r, c) = xData(r, newCol-1);
      }
    }
  }
}
Example #11
// Reshape the matrix, reading columns first (similar to MATLAB's reshape)
vnl_matrix<double> MCLR_SM::Reshape_Matrix(vnl_matrix<double>mat,int r,int c )
{
	if(mat.rows()*mat.cols() != r*c)
	{
		cout<< "Number of elements in the matrix/vector should be equal to the total number of elements in the reshaped matrix";
		getchar();
		exit(1);
	}
	
	vnl_matrix<double>reshaped_matrix;
	reshaped_matrix.set_size(r,c);
	int count = 0;
	
	for(int j = 0; j < c; ++j)
	{
		for(int i = 0; i < r; ++i)
		{
			reshaped_matrix(i,j) = mat(count % mat.rows(), count / mat.rows());
			count++;
		}
	}
	return reshaped_matrix;
}
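An illustrative call (hypothetical values, assuming an MCLR_SM instance `mclr`): reshaping a 2x3 matrix into 3x2 reads the input column by column, exactly like MATLAB's reshape.

	vnl_matrix<double> mat(2, 3);
	mat(0,0) = 1; mat(0,1) = 3; mat(0,2) = 5;
	mat(1,0) = 2; mat(1,1) = 4; mat(1,2) = 6;

	vnl_matrix<double> reshaped = mclr.Reshape_Matrix(mat, 3, 2);
	// reshaped = [1 4]
	//            [2 5]
	//            [3 6]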
Example #12
vnl_matrix<double> MCLR_SM::Get_F_Matrix(vnl_matrix<double> data_bias,vnl_matrix<double> w_temp)
{
	vnl_matrix<double> epow_matrix = w_temp.transpose()*data_bias;
	vnl_matrix<double> temp_f;
	temp_f.set_size(epow_matrix.rows(),epow_matrix.cols());
	for(unsigned int i = 0; i < epow_matrix.rows(); ++i)
	{
		for(unsigned int j = 0; j < epow_matrix.cols(); ++j)
		{
			temp_f(i,j) = exp(epow_matrix(i,j));
		}
	}
	return temp_f;
}
Example #13
	bool CloseEnough(const vnl_matrix<double> &M1, const vnl_matrix<double> &M2, const double eps)
	{
		unsigned int NumRows = M1.rows();
		unsigned int NumCols = M1.columns();
		if((M2.rows() != NumRows) || (M2.columns() != NumCols))
		{
			std::cout << "Dimensions do not match!" << std::endl;
			return false;	
		}
		
		for(unsigned int r = 0; r < NumRows; r++)
		{
			for(unsigned int c = 0; c < NumCols; c++)
			{
				if(fabs(M1(r,c) - M2(r,c)) > eps)
				{
					std::cout << "Failed comparison: " << "M1: " << M1(r,c) << " M2: " << M2(r,c) << " diff: " << fabs(M1(r,c) - M2(r,c)) << std::endl;
					return false;
				}
			}
		}
		return true;	
	}
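A quick sketch of how the comparison might be used (values are illustrative):

	vnl_matrix<double> A(2, 2, 1.0);          // all entries 1.0
	vnl_matrix<double> B(2, 2, 1.0 + 1e-8);   // differs by 1e-8 per entry
	bool ok = CloseEnough(A, B, 1e-6);        // true: every difference is below eps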
Example #14
	vnl_double_3x3 Get3x3SubMatrix(const vnl_matrix<double> &M)
	{
		vnl_double_3x3 R;
		
		for(unsigned r = 0; r < 3; r++)
		{
			for(unsigned c = 0; c < 3; c++)
			{
				R.put(r,c, M.get(r,c));	
			}
		}
		
		return R;
	}
// Copy a vnl-matrix into a plain C array in column-wise (FORTRAN-style) layout,
// as expected by the netlib routines. Adds a constant column if required.
static void _UpdateXMatrix(const vnl_matrix<double> &xData, bool addConstant, v3p_netlib_doublereal *x)
{
  v3p_netlib_integer rows = xData.rows();
  v3p_netlib_integer cols = xData.cols();
  if (addConstant)
    ++cols;

  for (int r=0; r < rows; ++r)
  {
    for (int c=0; c <cols; ++c)
    {
      if (!addConstant)
      {
        x[c*rows + r] = xData(r,c);
      } else if (c == 0)
      {
        x[c*rows + r] = 1.0;
      } else
      {
        x[c*rows + r] = xData(r, c-1);
      }
    }
  }
}
Example #16
// ------------------------------------------------------------------------
void computeTransform(const ImageType::Pointer &image, vnl_matrix<double> &transform)
{
	vnl_matrix<double> dirMat = image->GetDirection().GetVnlMatrix();
	transform.set_size(4,4);

	for(unsigned int i = 0; i < 3; i++)
	{
		for(unsigned int j = 0; j < 3; j++)
		{
			transform(i,j) = dirMat(i,j);
		}
	}
	// NOTE: only the upper-left 3x3 (rotation) block of the 4x4 matrix is filled here.
}
Example #17
vnl_vector<double> Rigid3DTransform::GetParametersFromMatrix(const vnl_matrix<double> &transformMatrix) const {
	vnl_vector<double> params(m_nbParams);

	params(0) = transformMatrix(0,m_nbDimensions);
	params(1) = transformMatrix(1,m_nbDimensions);
	params(2) = transformMatrix(2,m_nbDimensions);

	vnl_vector<double> tmpvect = GetEulerAnglesFrom3DRotationMatrix(transformMatrix.extract(3,3,0,0));

	params(3) = tmpvect(0);
	params(4) = tmpvect(1);
	params(5) = tmpvect(2);

	return params;
}
vnl_matrix<double> FiniteDiffOdfMaximaExtractionFilter< PixelType, ShOrder, NrOdfDirections>
::CalcShBasis(vnl_matrix<double>& sphCoords)
{
    int M = sphCoords.rows();
    int j, m; double mag, plm;
    vnl_matrix<double> shBasis;
    shBasis.set_size(M, m_NumCoeffs);

    for (int p=0; p<M; p++)
    {
        j=0;
        for (int l=0; l<=ShOrder; l=l+2)
            for (m=-l; m<=l; m++)
            {
                switch (m_Toolkit)
                {
                case FSL:
                    plm = legendre_p<double>(l,abs(m),cos(sphCoords(p,0)));
                    mag = sqrt((double)(2*l+1)/(4.0*M_PI)*factorial<double>(l-abs(m))/factorial<double>(l+abs(m)))*plm;

                    if (m<0)
                        shBasis(p,j) = sqrt(2.0)*mag*cos(fabs((double)m)*sphCoords(p,1));
                    else if (m==0)
                        shBasis(p,j) = mag;
                    else
                        shBasis(p,j) = pow(-1.0, m)*sqrt(2.0)*mag*sin(m*sphCoords(p,1));
                    break;

                case MRTRIX:
                    plm = legendre_p<double>(l,abs(m),-cos(sphCoords(p,0)));
                    mag = sqrt((double)(2*l+1)/(4.0*M_PI)*factorial<double>(l-abs(m))/factorial<double>(l+abs(m)))*plm;

                    if (m>0)
                        shBasis(p,j) = mag*cos(m*sphCoords(p,1));
                    else if (m==0)
                        shBasis(p,j) = mag;
                    else
                        shBasis(p,j) = mag*sin(-m*sphCoords(p,1));
                    break;
                }

                j++;
            }
    }
    return shBasis;
}
void FiniteDiffOdfMaximaExtractionFilter< PixelType, ShOrder, NrOdfDirections>
::Cart2Sph(const std::vector< DirectionType >& dir, vnl_matrix<double>& sphCoords)
{
    sphCoords.set_size(dir.size(), 2);

    for (unsigned int i=0; i<dir.size(); i++)
    {
        double mag = dir[i].magnitude();

        if( mag<0.0001 )
        {
            sphCoords(i,0) = M_PI/2; // theta
            sphCoords(i,1) = M_PI/2; // phi
        }
        else
        {
            sphCoords(i,0) = acos(dir[i](2)/mag); // theta
            sphCoords(i,1) = atan2(dir[i](1), dir[i](0)); // phi
        }
    }
}
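For reference, a standalone sketch of the same convention (not the filter member itself): theta is the polar angle measured from the z-axis and phi the azimuth in the x-y plane; the direction used here is only illustrative.

    #include <cmath>

    void Cart2SphDemo()
    {
        double x = 1.0, y = 0.0, z = 0.0;          // illustrative unit direction
        double mag = std::sqrt(x*x + y*y + z*z);
        double theta = std::acos(z / mag);         // = pi/2 for a vector in the x-y plane
        double phi   = std::atan2(y, x);           // = 0 along the +x axis
        (void)theta; (void)phi;
    }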
vnl_vector<double> mitk::GeneralizedLinearModel::ExpMu(const vnl_matrix<double> &x)
{
  LogItLinking link;
  vnl_vector<double> mu(x.rows());
  int cols = m_B.size();
  for (unsigned int r = 0 ; r < mu.size(); ++r)
  {
    mu(r) = 0;
    for (int c = 0; c < cols; ++c)
    {
      if (!m_AddConstantColumn)
        mu(r) += x(r,c)*m_B(c);
      else if ( c == 0)
        mu(r) += m_B(c);
      else
        mu(r) += x(r,c-1)*m_B(c);
    }
    mu(r) = exp(-mu(r));
  }
  return mu;
}
Example #21
void f(const vnl_matrix<double>& model,
    const vnl_matrix<double>& scene, double threshold,
    vnl_matrix<double>& extracted_model,
    vnl_matrix<double>& extracted_scene) {
  vnl_matrix<double> dist;
  vnl_matrix<int> pairs;
  ComputeSquaredDistanceMatrix(model, scene, dist);
  pick_indices(dist, pairs, threshold*threshold);
  std::cout << "distance threshold : " << threshold << std::endl;
  int j, n = pairs.cols();
  int d = model.cols();
  extracted_model.set_size(n,d);
  extracted_scene.set_size(n,d);
  std::cout << "# of matched point pairs : " << n << std::endl;
  for (j=0; j<n; ++j) {
    extracted_model.set_row(j,model.get_row(pairs(0,j)));
  }
  for (j=0; j<n; ++j) {
    extracted_scene.set_row(j,scene.get_row(pairs(1,j)));
  }
}
Example #22
void normalize(vnl_matrix<double>& x,
    vnl_vector<double>& centroid, double& scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  centroid.set_size(d);

  vnl_vector<double> col;
  for (int i = 0; i < d; ++i) {
    col = x.get_column(i);
    centroid(i) = col.mean();
  }
  for (int i = 0; i < n; ++i) {
    x.set_row(i, x.get_row(i) - centroid);
  }
  scale = x.frobenius_norm() / sqrt(double(n));
  x = x / scale;
}
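A usage sketch (hypothetical point set): the rows are centered on their centroid and scaled so the RMS radius is 1; centroid and scale can be kept to undo the normalization later.

  vnl_matrix<double> pts(3, 2);
  pts(0,0) = 0.0; pts(0,1) = 0.0;
  pts(1,0) = 2.0; pts(1,1) = 0.0;
  pts(2,0) = 0.0; pts(2,1) = 2.0;

  vnl_vector<double> centroid;
  double scale = 0.0;
  normalize(pts, centroid, scale);   // pts is modified in place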
Example #23
template <typename T>
void ExtractMatchingPairs(
    const vnl_matrix<T>& model,
    const vnl_matrix<T>& scene,
    const T& threshold,
    vnl_matrix<T>& extracted_model,
    vnl_matrix<T>& extracted_scene) {
  vnl_matrix<T> dist;
  vnl_matrix<int> pairs;
  ComputeSquaredDistanceMatrix<T>(model, scene, dist);
  PickIndices<T>(dist, pairs, threshold*threshold);
  std::cout << "distance threshold : " << threshold << std::endl;
  int n = pairs.cols();
  int d = model.cols();
  extracted_model.set_size(n, d);
  extracted_scene.set_size(n, d);
  std::cout << "# of matched point pairs : " << n << std::endl;
  for (int j = 0; j < n; ++j) {
    extracted_model.set_row(j,model.get_row(pairs(0, j)));
  }
  for (int j = 0; j < n; ++j) {
    extracted_scene.set_row(j,scene.get_row(pairs(1, j)));
  }
}
Example #24
void ComputeTPSKernelU(const vnl_matrix<double>& model,
    const vnl_matrix<double>& ctrl_pts,
    vnl_matrix<double>& U) {
  int m = model.rows();
  int n = ctrl_pts.rows();
  int d = ctrl_pts.cols();
  //assert(model.cols() == d && (d == 2 || d == 3));
  //K.set_size(n, n);
  //K.fill(0);
  U.set_size(m, n);
  U.fill(0);
  double eps = 1e-006;

  vnl_vector<double> v_ij;
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      v_ij = model.get_row(i) - ctrl_pts.get_row(j);
      double r = v_ij.two_norm();
      U(i, j) = -r;
    }
  }
}
Example #25
bool rgrsn_ldp::dynamic_programming(const vnl_matrix<double> & data,
                                    double v_min, double v_max,
                                    unsigned int nBin,
                                    int nJumpBin,
                                    unsigned int windowSize,
                                    vnl_vector<double> & optimalSignal,
                                    vnl_vector<double> & signal_variance)
{
    assert(v_min < v_max);
    // raw data to probability map
    // quantilization
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);   // vnl_matrix does not zero-initialize; counts are accumulated below
    double interval = (v_max - v_min)/nBin;
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(v_min, interval, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    vcl_vector<vcl_vector<double> > all_values(N);
    for (int i = 0; i<=N - windowSize; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(windowSize, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::local_dynamic_programming(localProbMap, nJumpBin, localOptimalBins);
        assert(localOptimalBins.size() == windowSize);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(v_min, interval, localOptimalBins[j]);
            assert(j + i < N);
            all_values[j + i].push_back(value);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    // mean value
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    
    // variance
    signal_variance = vnl_vector<double>(N, 0);
    for (int i = 0; i<optimalValues.size(); i++) {
        assert(all_values[i].size() > 0);
        if (all_values[i].size() == 1) {
            signal_variance[i] = 0.0001;
        }
        else
        {
            double dump_mean = 0.0;
            double sigma = 0.0;
            VnlPlus::mean_std(&all_values[i][0], (int)all_values[i].size(), dump_mean, sigma);
            signal_variance[i] = sigma + 0.0001; // avoid zero
        }
    }
    optimalSignal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    // save variance with the size of window size, for test purpose
    if(0)
    {
        vcl_vector<vnl_vector<double> > all_value_vecs;
        for (int i = 0; i<all_values.size(); i++) {
            if (all_values[i].size() == windowSize) {
                all_value_vecs.push_back(VnlPlus::vector_2_vec(all_values[i]));
            }
        }
        
        vcl_string save_file("ldp_all_prediction.mat");
        vnl_matlab_filewrite awriter(save_file.c_str());
        awriter.write(VnlPlus::vector_2_mat(all_value_vecs), "ldp_all_opt_path");
        printf("save to %s\n", save_file.c_str());
    }
    return true;
}
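A hypothetical driver for the routine above (the data here is only a stand-in; value_to_bin_number and local_dynamic_programming are assumed to be the helpers used internally):

    vnl_matrix<double> data(100, 3);       // 100 frames, 3 noisy samples per frame (illustrative)
    data.fill(0.5);
    vnl_vector<double> smoothed, variance;
    rgrsn_ldp::dynamic_programming(data, 0.0, 1.0,
                                   /*nBin=*/50, /*nJumpBin=*/2,
                                   /*windowSize=*/20, smoothed, variance);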
Example #26
bool rgrsn_ldp::local_dynamic_programming_log(const vnl_matrix<double> & probMap, int nNeighborBin,
                                              vcl_vector<int> & optimalBins)
{
    // find minimum path
    const int N    = probMap.rows();
    const int nBin = probMap.cols();
    const double epsilon = 0.01;
    
    vnl_matrix<double> negLogProbMap(N, nBin);
    for (int r = 0; r<N; r++) {
        for (int c = 0; c <nBin; c++) {
            negLogProbMap(r, c) = -log(probMap(r, c) + epsilon);
        }
    }
    
    // dynamic programming
    vnl_matrix<double> accumulatedMap = vnl_matrix<double>(N, nBin);
    accumulatedMap.fill(0.0);
    vnl_matrix<int> lookbackTable = vnl_matrix<int>(N, nBin);
    lookbackTable.fill(0);
    // copy first row
    for (int c = 0; c<negLogProbMap.cols(); c++) {
        accumulatedMap[0][c] = negLogProbMap[0][c];
    }
    
    for (int r = 1; r <N; r++) {
        for (int c = 0; c<negLogProbMap.cols(); c++) {
            // lookup all possible place in the window
            double min_val = INT_MAX;
            int index      = -1;
            for (int w = -nNeighborBin; w <= nNeighborBin; w++) {
                if (c + w <0 || c + w >= negLogProbMap.cols()) {
                    continue;
                }
                double val = negLogProbMap[r][c] + accumulatedMap[r-1][c+w];
                if (val < min_val) {
                    min_val = val;
                    index = c + w;
                }
            }
            assert(index != -1);
            accumulatedMap[r][c] = min_val;
            lookbackTable[r][c]  = index;
        }
    }
    
    // lookback the table
    double min_val = INT_MAX;
    int initIndex  = -1;
    for (int c = 0; c<accumulatedMap.cols(); c++) {
        if (accumulatedMap[N-1][c] < min_val) {
            min_val = accumulatedMap[N-1][c];
            initIndex = c;
        }
    }
    
    // back track
    optimalBins.push_back(initIndex);
    for (int r = N-1; r > 0; r--) {
        int bin = lookbackTable[r][optimalBins.back()];
        optimalBins.push_back(bin);
    }
    assert(optimalBins.size() == N);
    
    vcl_reverse(optimalBins.begin(), optimalBins.end());
    return true;
}
Example #27
bool rgrsn_ldp::viterbi(const vnl_matrix<double> & prob_map, const vnl_vector<double> & transition,
                        vcl_vector<int> & optimal_bins)
{
    
    const int N    = prob_map.rows();
    const int nBin = prob_map.cols();
    const int nNeighborBin = transition.size()/2;
    const double epsilon = 0.01;
    
    // dynamic programming
    vnl_matrix<double> log_accumulatedProbMap = vnl_matrix<double>(N, nBin);
    log_accumulatedProbMap.fill(0.0);
    vnl_matrix<int> lookbackTable = vnl_matrix<int>(N, nBin);
    lookbackTable.fill(0);
    // copy first row
    for (int c = 0; c<prob_map.cols(); c++) {
        log_accumulatedProbMap[0][c] = log(prob_map[0][c] + epsilon);
        lookbackTable[0][c] = c;
    }
    vnl_vector<double> log_transition = vnl_vector<double>(transition.size(), 0);
    
    for (int i = 0; i<transition.size(); i++) {
        log_transition[i] = log(transition[i] + epsilon);
    }
    
    for (int r = 1; r <N; r++) {
        for (int c = 0; c<prob_map.cols(); c++) {
            // lookup all possible place in the window
            double max_val = vcl_numeric_limits<int>::min();
            int max_index  = -1;
            for (int w = -nNeighborBin; w <= nNeighborBin; w++) {
                if (c + w < 0 || c + w >= prob_map.cols()) {
                    continue;
                }
                assert(w + nNeighborBin >= 0 && w + nNeighborBin < transition.size());
                double val = log_accumulatedProbMap[r-1][c+w] + log_transition[w + nNeighborBin];
                if (val > max_val) {
                    max_val = val;
                    max_index = c + w; // most probable path from the [r-1] row, in column c + w
                }
            }
            assert(max_index != -1);
            log_accumulatedProbMap[r][c] = max_val + log(prob_map[r][c] + epsilon);
            lookbackTable[r][c]          = max_index;
        }
    }
    
    // lookback the table
    double max_prob    = vcl_numeric_limits<int>::min();
    int max_prob_index = -1;
    for (int c = 0; c<log_accumulatedProbMap.cols(); c++) {
        if (log_accumulatedProbMap[N-1][c] > max_prob) {
            max_prob = log_accumulatedProbMap[N-1][c];
            max_prob_index = c;
        }
    }
    
    // back track
    optimal_bins.push_back(max_prob_index);
    for (int r = N-1; r > 0; r--) {
        int bin = lookbackTable[r][optimal_bins.back()];
        optimal_bins.push_back(bin);
    }
    assert(optimal_bins.size() == N);
    vcl_reverse(optimal_bins.begin(), optimal_bins.end());
    return true;
}
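A minimal sketch of building the symmetric, odd-length transition vector this routine expects (the weights are illustrative; prob_map is assumed to be an N x nBin probability matrix produced elsewhere):

    vnl_vector<double> transition(5);      // jumps of -2..+2 bins; index w + nNeighborBin = jump w
    transition[0] = 0.05;
    transition[1] = 0.20;
    transition[2] = 0.50;                  // stay in the same bin
    transition[3] = 0.20;
    transition[4] = 0.05;

    vcl_vector<int> optimal_bins;
    rgrsn_ldp::viterbi(prob_map, transition, optimal_bins);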
Example #28
bool rgrsn_ldp::local_viterbi(const vnl_matrix<double> & data,
                              double resolution,
                              const vnl_vector<double> & transition,
                              unsigned int window_size,
                              vnl_vector<double> & optimal_signal,
                              vnl_vector<double> & signal_variance)
{
    assert(resolution > 0.0);
    assert(transition.size()%2 == 1);
    
    const double min_v = data.min_value();
    const double max_v = data.max_value();
    const int nBin = (max_v - min_v)/resolution;
    
    // raw data to probability map
    // quantilization
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);   // vnl_matrix does not zero-initialize; counts are accumulated below
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(min_v, resolution, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    vcl_vector<vcl_vector<double> > all_values(N);   // for calculate variance
    for (int i = 0; i<=N - window_size; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(window_size, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::viterbi(localProbMap, transition, localOptimalBins);
        assert(localOptimalBins.size() == window_size);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(min_v, resolution, localOptimalBins[j]);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
            all_values[j + i].push_back(value);
        }
    }
    
    // average the overlapping local estimates to get the final signal
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    optimal_signal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    if(1)
    {
        vcl_vector<vnl_vector<double> > all_value_vecs;
        for (int i = 0; i<all_values.size(); i++) {
            if (all_values[i].size() == window_size) {
                all_value_vecs.push_back(VnlPlus::vector_2_vec(all_values[i]));
            }
        }
        
        vcl_string save_file("lv_all_prediction.mat");
        vnl_matlab_filewrite awriter(save_file.c_str());
        awriter.write(VnlPlus::vector_2_mat(all_value_vecs), "lv_all_opt_path");
        printf("save to %s\n", save_file.c_str());
    }

    
    return true;
}
Example #29
bool rgrsn_ldp::local_viterbi_overlapping_ratio(const vnl_matrix<double> & data,
                                                double resolution,
                                                const vnl_vector<double> & transition,
                                                unsigned int window_size,
                                                const double overlapping_ratio,
                                                vnl_vector<double> & optimal_signal)
{
    assert(resolution > 0.0);
    assert(transition.size()%2 == 1);
    assert(overlapping_ratio >=0 && overlapping_ratio < 1.0);
    
    const double min_v = data.min_value();
    const double max_v = data.max_value();
    const int nBin = (max_v - min_v)/resolution;
    const int moving_step = window_size * (1.0 - overlapping_ratio);  // window move forward step
    assert(moving_step >= 1);
    
    // raw data to probability map
    // quantilization
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);   // vnl_matrix does not zero-initialize; counts are accumulated below
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(min_v, resolution, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    
    int last_index = 0;
    for (int i = 0; i <= N - window_size; i += moving_step) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(window_size, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::viterbi(localProbMap, transition, localOptimalBins);
        assert(localOptimalBins.size() == window_size);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(min_v, resolution, localOptimalBins[j]);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
        last_index = i;
    }
    
    // with fully overlapping for last several numbers
    for (int i = last_index; i <= N - window_size; i++) {
        vnl_matrix<double> localProbMap = probMap.extract(window_size, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::viterbi(localProbMap, transition, localOptimalBins);
        assert(localOptimalBins.size() == window_size);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(min_v, resolution, localOptimalBins[j]);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    // average all optimal path as final result
    for (int i = 0; i<optimalValues.size(); i++) {
        assert(numValues[i] != 0);
        optimalValues[i] /= numValues[i];
    }
    optimal_signal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    return true;
}
Example #30
bool rgrsn_ldp::dynamic_programming_median(const vnl_matrix<double> & data,
                                           double v_min, double v_max,
                                           unsigned int nBin,
                                           int nJumpBin,
                                           unsigned int windowSize,
                                           vnl_vector<double> & optimalSignal,
                                           vnl_vector<double> & medianSignal)
{
    assert(v_min < v_max);
    // raw data to probability map
    // quantilization
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);   // vnl_matrix does not zero-initialize; counts are accumulated below
    double interval = (v_max - v_min)/nBin;
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(v_min, interval, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);         // multiple values from local dynamic programming
    vcl_vector<vcl_vector<double> > all_values(N);
    for (int i = 0; i<=N - windowSize; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(windowSize, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::local_dynamic_programming(localProbMap, nJumpBin, localOptimalBins);
        assert(localOptimalBins.size() == windowSize);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(v_min, interval, localOptimalBins[j]);
            assert(j + i < N);
            all_values[j + i].push_back(value);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    // mean value
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    
    // median value at each position
    medianSignal = vnl_vector<double>(N, 0);
    for (int i = 0; i<optimalValues.size(); i++) {
        assert(all_values[i].size() > 0);
        if (all_values[i].size() == 1) {
            medianSignal[i] = all_values[i][0];
        }
        else
        {
            size_t n = all_values[i].size() / 2;
            vcl_nth_element(all_values[i].begin(), all_values[i].begin() + n, all_values[i].end());
            medianSignal[i] = all_values[i][n];
        }
    }
    optimalSignal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    return true;
}