Example #1
//f  = f./repmat(sum(f,1),[classN,1]);
vnl_matrix<double> MCLR_SM::Normalize_F_Sum(vnl_matrix<double> f)
{
	// Matrix for normalization
	vnl_matrix<double> norm_matrix(f.rows(), f.cols());
	vnl_vector<double> norm_matrix_row;
	norm_matrix_row.set_size(f.cols());

	// repmat(sum(f,1),[classN,1]);
	for(int i = 0; i < f.cols(); ++i)
	{
		double sum = 0;
		for(int j = 0; j < no_of_classes; ++j)
		{
			sum = sum + f(j,i);
		}
		norm_matrix_row(i) = sum;
	}

	for(int i = 0; i < no_of_classes; ++i)
	{
		norm_matrix.set_row(i, norm_matrix_row);
	}

	// f = f./repmat(sum(f,1),[classN,1]);
	for(int i = 0; i < f.rows(); ++i)
	{
		for(int j = 0; j < f.cols(); ++j)
		{
			f(i,j) = f(i,j) / norm_matrix(i,j);
		}
	}

	return f;
}
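For reference, the same column normalization can be written without materializing the repmat-style norm_matrix. The sketch below is a hypothetical free function (the name normalize_columns_by_sum is not part of MCLR_SM), and it sums over all rows of f rather than the class member no_of_classes:

#include <vnl/vnl_matrix.h>

// Hypothetical helper: divide each column of f by its column sum, i.e. the
// MATLAB expression f = f./repmat(sum(f,1),[classN,1]) without the repmat.
// Columns that sum to zero are left unchanged.
vnl_matrix<double> normalize_columns_by_sum(vnl_matrix<double> f)
{
	for (unsigned j = 0; j < f.cols(); ++j)
	{
		double sum = 0.0;
		for (unsigned i = 0; i < f.rows(); ++i)
			sum += f(i, j);
		if (sum != 0.0)
			for (unsigned i = 0; i < f.rows(); ++i)
				f(i, j) /= sum;
	}
	return f;
}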
Example #2
void pick_indices(const vnl_matrix<double>&dist,
    vnl_matrix<int>&pairs, const double threshold) {
  int m = dist.rows();
  int n = dist.cols();
  vnl_vector<int> row_flag, col_flag;
  col_flag.set_size(n);  col_flag.fill(0);
  row_flag.set_size(m);  row_flag.fill(0);  // one flag per row of dist
  std::vector<int> row_index,col_index;
  for (int i = 0; i < m; ++i) {
    double min_dist = dist.get_row(i).min_value();
    if (min_dist < threshold) {
      for (int j = 0; j < n; ++j) {
        if (dist(i,j)==min_dist && col_flag[j] == 0){
          row_index.push_back(i);
          row_flag[i] = 1;
          col_index.push_back(j);
          col_flag[j] = 1;
        }
      }
    }
  }
  pairs.set_size(2, row_index.size());
  for (int i = 0; i<pairs.cols(); ++i){
    pairs(0,i) = row_index[i];
    pairs(1,i) = col_index[i];
  }
}
Example #3
bool rgrsn_ldp::local_dynamic_programming(const vnl_matrix<double> & probMap, int nNeighborBin,
                                          vcl_vector<int> & optimalBins)
{
    const int N    = probMap.rows();
    const int nBin = probMap.cols();
    
    // dynamic programming
    vnl_matrix<double> accumulatedProbMap = vnl_matrix<double>(N, nBin);
    accumulatedProbMap.fill(0.0);
    vnl_matrix<int> lookbackTable = vnl_matrix<int>(N, nBin);
    lookbackTable.fill(0);
    // copy first row
    for (int c = 0; c<probMap.cols(); c++) {
        accumulatedProbMap[0][c] = probMap[0][c];
        lookbackTable[0][c] = c;
    }
    
    for (int r = 1; r <N; r++) {
        for (int c = 0; c<probMap.cols(); c++) {
            // lookup all possible place in the window
            double max_val = -1;
            int max_index  = -1;
            for (int w = -nNeighborBin; w <= nNeighborBin; w++) {
                if (c + w <0 || c + w >= probMap.cols()) {
                    continue;
                }
                double val = probMap[r][c] + accumulatedProbMap[r-1][c+w];
                if (val > max_val) {
                    max_val = val;
                    max_index = c + w; // most probable path from the [r-1] row, in column c + w
                }
            }
            assert(max_index != -1);
            accumulatedProbMap[r][c] = max_val;
            lookbackTable[r][c]      = max_index;
        }
    }
    
    // lookback the table
    double max_prob    = -1.0;
    int max_prob_index = -1;
    for (int c = 0; c<accumulatedProbMap.cols(); c++) {
        if (accumulatedProbMap[N-1][c] > max_prob) {
            max_prob = accumulatedProbMap[N-1][c];
            max_prob_index = c;
        }
    }
    
    // back track
    optimalBins.push_back(max_prob_index);
    for (int r = N-1; r > 0; r--) {
        int bin = lookbackTable[r][optimalBins.back()];
        optimalBins.push_back(bin);
    }
    assert(optimalBins.size() == N);
    
    // restore forward (frame) order: back tracking filled optimalBins from the last row to the first
    vcl_reverse(optimalBins.begin(), optimalBins.end());
    return true;
}
Example #4
bool rgrsn_ldp::dynamic_programming(const vnl_matrix<double> & data, double v_min, double v_max,
                                    unsigned int nBin, int nJumpBin, unsigned int windowSize,
                                    vnl_vector<double> & optimalSignal)
{
    assert(v_min < v_max);
    // raw data to probability map
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);  // vnl_matrix(r, c) leaves the data uninitialized
    double interval = (v_max - v_min)/nBin;
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(v_min, interval, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalize
    
    // save prob
    {
      //  vnl_matlab_filewrite awriter("prob.mat");
      //  awriter.write(probMap, "prob");
     //   printf("save to prob.mat.\n");
    }
  
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    for (int i = 0; i<=N - windowSize; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(windowSize, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::local_dynamic_programming(localProbMap, nJumpBin, localOptimalBins);
        assert(localOptimalBins.size() == windowSize);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            numValues[j + i]     += 1;
            optimalValues[j + i] += bin_number_to_value(v_min, interval, localOptimalBins[j]);
        }
        
        // test
        if (0 && i == 0)
        {
            printf("test first window output\n");
            for (int j = 0; j<optimalValues.size() && j<windowSize; j++) {
                printf("%f ", optimalValues[j]);
            }
            printf("\n");
        }
    }
    //
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    optimalSignal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    return true;
}
Example #5
/*template <typename T1, typename T2>
itk::Image<T1,2>::Pointer vnlmat_to_itkimage(vnl_matrix<T2> mat,float scale = 1.0)
{
	typedef typename itk::Image<T1,2> ImageType;
	ImageType::SizeType size;
	ImageType::IndexType index;
	ImageType::RegionType region;
	index.Fill(0);
	size[0] = mat.cols();
	size[1] = mat.rows();
	region.SetSize(size);
	region.SetIndex(index);
	ImageType::Pointer im = ImageType::New();
	im->SetRegions(region);
	im->Allocate();
	for(int xco = 0; xco<size[1]; xco++)
	{
		for(int yco = 0; yco < size[0]; yco++)
		{
			index[1] = xco;
			index[0] = yco;
			im->SetPixel(index,mat[xco][yco]*scale);
		}
	}
	return im;

}
*/

vnl_matrix<float> cpx_to_abs(vnl_matrix< std::complex< float> > mat)
{
	vnl_matrix<float> out(mat.rows(),mat.cols());

	for(int x= 0; x< mat.rows(); x++)
	{
		for(int y = 0; y < mat.cols(); y++)
		{
			out[x][y] = abs(mat[x][y]);
		}
	}
	return out;
}
Example #6
bool rgrsn_ldp::local_viterbi(const vnl_matrix<double> & data,
                              double resolution,
                              const vnl_vector<double> & transition,
                              unsigned int window_size,
                              vnl_vector<double> & optimal_signal)
{
    assert(resolution > 0.0);
    assert(transition.size()%2 == 1);
    
    const double min_v = data.min_value();
    const double max_v = data.max_value();
    const int nBin = (max_v - min_v)/resolution;
    
    // raw data to probability map
    // quantilization
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);  // vnl_matrix(r, c) leaves the data uninitialized
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(min_v, resolution, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    
    for (int i = 0; i <= N - window_size; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(window_size, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::viterbi(localProbMap, transition, localOptimalBins);
        assert(localOptimalBins.size() == window_size);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(min_v, resolution, localOptimalBins[j]);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    
    // average all optimal path as final result
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    optimal_signal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    return true;
}
Example #7
/// \brief		Unscented transform of process Sigma points
void UnscentedKalmanFilter::utf(vnl_matrix<double> X, vnl_vector<double> u,
	vnl_vector<double> &y, vnl_matrix<double> &Y, vnl_matrix<double> &P, vnl_matrix<double> &Y1)
{
	// determine number of sigma points
	unsigned int L = X.cols();
	// zero output matrices
	y.fill(0.0); Y.fill(0.0);

	// transform the sigma points and put them as columns in a matrix Y
	for( int k = 0; k < L; k++ )
	{
		vnl_vector<double> xk = X.get_column(k);
		vnl_vector<double> yk(N);
		f(xk,u,yk);
		Y.set_column(k,yk);
		// add each transformed point to the weighted mean
		y = y + Wm.get(0,k)*yk;
	}

	// create a matrix with each column being the weighted mean
	vnl_matrix<double> Ymean(N,L);
	for( int k = 0; k < L; k++ )
		Ymean.set_column(k,y);
	// set the matrix of difference vectors
	Y1 = Y-Ymean;
	// calculate the covariance matrix output
	vnl_matrix<double> WC(L,L,0.0);
	WC.set_diagonal(Wc.get_row(0));
	P = Y1*WC*Y1.transpose();
}
Example #8
bool rgrsn_ldp::transition_matrix(const vcl_vector<int> & fns,
                                  const vcl_vector<double> & values,
                                  vnl_matrix<double> & transition,
                                  const double resolution)
{
    assert(fns.size() == values.size());
    
    double min_v = *vcl_min_element(values.begin(), values.end());
    double max_v = *vcl_max_element(values.begin(), values.end());
    
    unsigned num_bin = (max_v - min_v)/resolution;
    transition = vnl_matrix<double>(num_bin, num_bin, 0.0);
    vnl_vector<double> column(num_bin, 0.0);
    for (int i = 0; i<fns.size()-1; i++) {
        if (fns[i] + 1 == fns[i+1]) {
            double cur_v = values[i];
            double next_v = values[i+1];
            unsigned cur_bin = value_to_bin_number(min_v, resolution, cur_v, num_bin);
            unsigned next_bin = value_to_bin_number(min_v, resolution, next_v, num_bin);
            transition[next_bin][cur_bin] += 1.0;
            column[cur_bin] += 1.0;
        }
    }
    
    // normalize each column; skip empty columns to avoid division by zero
    for (unsigned r = 0; r < transition.rows(); r++) {
        for (unsigned c = 0; c < transition.cols(); c++) {
            if (column[c] > 0.0) {
                transition[r][c] /= column[c];
            }
        }
    }
    return true;
}
Example #9
vnl_vector<double> MCLR_SM::Column_Order_Matrix(vnl_matrix<double> mat)
{
	vnl_vector<double> mat_column_ordered;
	mat_column_ordered.set_size(mat.rows()*mat.cols());
	int count = 0;

	for(int j = 0; j < mat.cols(); ++j)
	{
		for(int i = 0; i < mat.rows(); ++i)
		{
			mat_column_ordered(count) = mat(i,j);
			count++;
		}
	}
	return mat_column_ordered;
}
Example #10
/** Applies an affine transform specified by the homogeneous transformation matrix to a vtkPolyData */
void psciob::AffineTransformVTKPolyData( vtkPolyData* inputPolyData, vnl_matrix<double> transformMatrix, vtkPolyData* outputPolyData) {
	if ( transformMatrix.rows() != transformMatrix.cols() ) throw DeformableModelException("transform matrix must be square");
	unsigned int d = transformMatrix.rows()-1;
	vtkSmartPointer<vtkMatrix4x4> matrix = vtkSmartPointer<vtkMatrix4x4>::New();
	vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
	vtkSmartPointer<vtkTransformPolyDataFilter> transformFilter = vtkSmartPointer<vtkTransformPolyDataFilter>::New();

	switch(d) {
	case 2:
		matrix->SetElement(0,0, transformMatrix(0,0)); matrix->SetElement(0,1, transformMatrix(0,1)); matrix->SetElement(0,2, 0); matrix->SetElement(0,3, transformMatrix(0,2)); 
		matrix->SetElement(1,0, transformMatrix(1,0)); matrix->SetElement(1,1, transformMatrix(1,1)); matrix->SetElement(1,2, 0); matrix->SetElement(1,3, transformMatrix(1,2)); 
		matrix->SetElement(2,0,         0           ); matrix->SetElement(2,1,         0           ); matrix->SetElement(2,2, 1); matrix->SetElement(2,3,         0           ); 
		matrix->SetElement(3,0,         0           ); matrix->SetElement(3,1,         0           ); matrix->SetElement(3,2, 0); matrix->SetElement(3,3,         1           ); 
		break;
	case 3:
		for (unsigned i=0 ; i<4 ; i++) {
			for (unsigned j=0 ; j<4 ; j++) { 
				matrix->SetElement(i,j, transformMatrix(i,j)); 
			} 
		}
		break;
	default: throw DeformableModelException("only 2D or 3D transforms are allowed -> 3*3 or 4*4 matrices");
	}

	transform->SetMatrix( matrix );
	transform->Update();

	transformFilter->SetInput( inputPolyData );
	transformFilter->SetTransform( transform );
	transformFilter->Update();

	outputPolyData->ShallowCopy( transformFilter->GetOutput() );
}
Example #11
void denormalize(vnl_matrix<double>& x, const vnl_vector<double>& centroid, const double scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  for (int i = 0; i < n; ++i) {
    x.set_row(i, x.get_row(i) * scale + centroid);
  }
}
Example #12
vnl_matrix<double> MCLR_SM::Add_Bias(vnl_matrix<double> data)
{
	vnl_matrix<double> data_with_bias; // Data to be modified

	vnl_vector<double> temp_vector;
	temp_vector.set_size(data.cols());
	temp_vector.fill(1);
	
	data_with_bias.set_size(no_of_features+1,data.cols());
	data_with_bias.set_row(0,temp_vector);
	for(int i =0; i<no_of_features ; ++i)
	{
		data_with_bias.set_row(i+1,data.get_row(i));	
	}
		
	return data_with_bias;
}
Example #13
double GaussTransform(const vnl_matrix<double>& A,
    const vnl_matrix<double>& B, double scale,
    vnl_matrix<double>& gradient) {
  // assert A.cols() == B.cols()
  return GaussTransform(A.data_block(), B.data_block(),
      A.rows(), B.rows(), A.cols(), scale,
      gradient.data_block());
}
Example #14
void normalize_same(vnl_matrix<double>& x,
    vnl_vector<double>& centroid, double& scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  for (int i = 0; i < n; ++i) {
    x.set_row(i, (x.get_row(i) - centroid) / scale);
  }
}
Example #15
int select_points(const vnl_matrix<double>&pts,
    const std::vector<int>&index, vnl_matrix<double>& selected) {
  int n = index.size();
  int d = pts.cols();
  selected.set_size(n,d);
  for (int i = 0; i < n; ++i) {
    selected.update(pts.extract(1, d, index[i]), i);
  }
  return n;
}
Example #16
void vnl_flann::search(const vnl_matrix<double> & query_data,
                       vcl_vector<vcl_vector<int> > & indices,
                       vcl_vector<vcl_vector<double> > & dists, int knn) const
{
    const int dim = (int)query_data.cols();
    assert(dim == dim_);
    
    Matrix<double> query_data_wrap((double *)query_data.data_block(), (int)query_data.rows(), dim);
    index_.knnSearch(query_data_wrap, indices, dists, knn, flann::SearchParams(128));
}
Example #17
// todo: add one more version when the model is same as ctrl_pts
// reference:  Landmark-based Image Analysis, Karl Rohr, p195
void ComputeTPSKernel(const vnl_matrix<double>& model,
    const vnl_matrix<double>& ctrl_pts,
    vnl_matrix<double>& U, vnl_matrix<double>& K) {
  int m = model.rows();
  int n = ctrl_pts.rows();
  int d = ctrl_pts.cols();
  //assert(model.cols() == d && (d == 2 || d == 3));
  K.set_size(n, n);
  K.fill(0);
  U.set_size(m, n);
  U.fill(0);
  double eps = 1e-006;

  vnl_vector<double> v_ij;
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      v_ij = model.get_row(i) - ctrl_pts.get_row(j);
      if (d == 2) {
        double r = v_ij.squared_magnitude();
        if (r > eps) {
          U(i, j) = r * log(r) / 2;
        }
      } else if (d == 3) {
        double r = v_ij.two_norm();
        U(i, j) = -r;
      }
    }
  }
  for (int i = 0; i < n; ++i) {
    for (int j = i + 1; j < n; ++j) {
      v_ij = ctrl_pts.get_row(i) - ctrl_pts.get_row(j);
      if (d == 2) {
        double r = v_ij.squared_magnitude();
        if (r > eps) {
          K(i, j) = r * log(r) / 2;
        }
      }
      else if (d == 3) {
        double r = v_ij.two_norm();
        K(i, j) = -r;
      }
    }
  }
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < i; ++j) {
      K(i, j) = K(j, i);
    }
  }

}
Example #18
void itk::BiExpFitFunctor::operator()(vnl_matrix<double> & newSignal,const vnl_matrix<double> & SignalMatrix, const double & S0)
{

  vnl_vector<double> initalGuess(3);
  // initialize Least Squares Function
  // SignalMatrix.cols() defines the number of shells points
  lestSquaresFunction model(SignalMatrix.cols());
  model.set_bvalues(m_BValueList);// set BValue Vector e.g.: [1000, 2000, 3000] <- shell b Values

  // initialize Levenberg Marquardt
  vnl_levenberg_marquardt minimizer(model);
  minimizer.set_max_function_evals(1000);   // Iterations
  minimizer.set_f_tolerance(1e-10);        // Function tolerance

  // for each Direction calculate LSF Coeffs ADC & AKC
  for(unsigned int i = 0 ; i < SignalMatrix.rows(); i++)
  {
    model.set_measurements(SignalMatrix.get_row(i));
    model.set_reference_measurement(S0);

    initalGuess.put(0, 0.f); // ADC_slow
    initalGuess.put(1, 0.009f); // ADC_fast
    initalGuess.put(2, 0.7f); // lambda

    // start Levenberg-Marquardt
    minimizer.minimize_without_gradient(initalGuess);

    const double & ADC_slow = initalGuess.get(0);
    const double & ADC_fast = initalGuess.get(1);
    const double & lambda = initalGuess(2);

    newSignal.put(i, 0, S0 * (lambda * std::exp(-m_TargetBvalue * ADC_slow) + (1-lambda)* std::exp(-m_TargetBvalue * ADC_fast)));
    newSignal.put(i, 1, minimizer.get_end_error()); // RMS Error

    //OUTPUT FOR EVALUATION
    /*std::cout << std::scientific << std::setprecision(5)
              << ADC_slow   << ","                        // lambda
              << ADC_fast   << ","                        // alpha
              << lambda     << ","                        // lambda
              << S0         << ","                        // S0 value
              << minimizer.get_end_error() << ",";      // End error
    for(unsigned int j = 0; j < SignalMatrix.get_row(i).size(); j++ ){
      std::cout << std::scientific << std::setprecision(5) << SignalMatrix.get_row(i)[j];    // S_n Values corresponding to shell 1 to shell n
      if(j != SignalMatrix.get_row(i).size()-1) std::cout << ",";
    }
    std::cout << std::endl;*/
  }

}
Example #19
bool rgrsn_ldp::compact_transition_matrix(const vcl_vector<int> & fns,
                                          const vcl_vector<double> & values,
                                          vnl_matrix<double> & transition,
                                          const double resolution)
{
    assert(fns.size() == values.size());
    
    double min_v = *vcl_min_element(values.begin(), values.end());
    double max_v = *vcl_max_element(values.begin(), values.end());
    unsigned num_bin = (max_v - min_v)/resolution;
    
    unsigned max_bin_transition = 0;  // biggest transition between frames quantized in bin
    for (int i = 0; i<fns.size()-1; i++) {  // stop one short: the body reads fns[i+1]
        if (fns[i] + 1 == fns[i+1]) {
            double cur_v  = values[i];
            double next_v = values[i+1];
            int cur_bin  = value_to_bin_number(min_v, resolution, cur_v, num_bin);
            int next_bin = value_to_bin_number(min_v, resolution, next_v, num_bin);
            int dif = abs(next_bin - cur_bin);
            if (dif > max_bin_transition) {
                max_bin_transition = dif;
            }
        }
    }
    transition = vnl_matrix<double>(max_bin_transition * 2 + 1, num_bin, 0.0);
    vnl_vector<double> column(num_bin, 0.0);
    for (int i = 0; i<fns.size()-1; i++) {
        if (fns[i] + 1 == fns[i+1]) {
            double cur_v = values[i];
            double next_v = values[i+1];
            int cur_bin  = value_to_bin_number(min_v, resolution, cur_v, num_bin);
            int next_bin = value_to_bin_number(min_v, resolution, next_v, num_bin);
            int row = max_bin_transition + (next_bin - cur_bin);
            transition[row][cur_bin] += 1.0;
            column[cur_bin] += 1.0;
        }
    }
    
    // normalize each column
    for (int r = 0; r < transition.rows(); r++) {
        for (int c = 0; c < transition.cols(); c++) {
    //        transition[r][c] /= column[c];
        }
    }
    
    return true;
}
Example #20
int vnl_matrix2vtkPolyData(vtkPolyData* A, vnl_matrix<double>& matrix) 
{
	int dim = matrix.cols();
	int n = matrix.rows();
	//int n = A->GetNumberOfPoints();
	//matrix.set_size(n, dim);
	double P[3];
	for(int i = 0;i < n; i++)
	{
		for(int j = 0;j < dim; j++ )
		{
			P[j] = matrix(i,j);
		}
		A->GetPoints()->SetPoint(i,P);
	}
	return 1;
}
Example #21
void compute_P(const vnl_matrix<double>& x,const vnl_matrix<double>& y, vnl_matrix<double>& P, double &E, double sigma, int outliers) {
  double k = -2*sigma*sigma;

  int m = x.rows();
  int s = y.rows();
  int d = x.cols();
  P.set_size(m, s);      // every entry of P is overwritten in the loop below
  vnl_vector<double> column_sum(s, 0.0);
  double outlier_term = outliers*pow((2*sigma*sigma*3.1415926),0.5*d);
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < s; ++j) {
      double r = 0;
      for (int t = 0; t < d; ++t) {
        r += (x(i,t) - y(j,t))*(x(i,t) - y(j,t));
      }
      P(i,j) = exp(r/k);
      column_sum[j]+=P(i,j);
    }
  }


  if (outliers!=0) {
    for (int i = 0; i < s; ++i)
      column_sum[i] += outlier_term;
  }
  if (column_sum.min_value()>(1e-12)) {
    E = 0;
    for (int i = 0; i < s; ++i) {
      for (int j = 0; j < m; ++j){
        P(j,i) = P(j,i)/column_sum[i];
      }
      E -= log(column_sum[i]);
    }
    //vcl_cerr< < s;
    //vcl_cerr<<P.get_column(10);
  }
  else {
    P.empty();
  }
}
Example #22
void normalize(vnl_matrix<double>& x,
    vnl_vector<double>& centroid, double& scale) {
  int n = x.rows();
  if (n==0) return;
  int d = x.cols();
  centroid.set_size(d);

  vnl_vector<double> col;
  for (int i = 0; i < d; ++i) {
    col = x.get_column(i);
    centroid(i) = col.mean();
  }
  for (int i = 0; i < n; ++i) {
    x.set_row(i, x.get_row(i) - centroid);
  }
  scale = x.frobenius_norm() / sqrt(double(n));
  x = x / scale;
}
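A minimal round-trip sketch, assuming this normalize and the denormalize from Example #11 are visible in the same translation unit; normalize centers the points and scales them to unit RMS distance from the origin, and denormalize undoes it with the returned centroid and scale:

#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>

// Round-trip sketch: normalize() (above) centers the points and rescales them so
// the RMS distance to the origin is 1; denormalize() (Example #11) restores them.
void normalize_roundtrip_example()
{
  double pts[] = { 0.0, 0.0,
                   2.0, 0.0,
                   2.0, 2.0,
                   0.0, 2.0 };
  vnl_matrix<double> x(pts, 4, 2);   // four 2-D points, one per row

  vnl_vector<double> centroid;
  double scale = 0.0;
  normalize(x, centroid, scale);     // x becomes zero-mean with unit RMS norm
  denormalize(x, centroid, scale);   // x is back to the original coordinates
}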
Example #23
void f(const vnl_matrix<double>& model,
    const vnl_matrix<double>& scene, double threshold,
    vnl_matrix<double>& extracted_model,
    vnl_matrix<double>& extracted_scene) {
  vnl_matrix<double> dist;
  vnl_matrix<int> pairs;
  ComputeSquaredDistanceMatrix(model, scene, dist);
  pick_indices(dist, pairs, threshold*threshold);
  std::cout << "distance threshold : " << threshold << std::endl;
  int j, n = pairs.cols();
  int d = model.cols();
  extracted_model.set_size(n,d);
  extracted_scene.set_size(n,d);
  std::cout << "# of matched point pairs : " << n << std::endl;
  for (j=0; j<n; ++j) {
    extracted_model.set_row(j,model.get_row(pairs(0,j)));
  }
  for (j=0; j<n; ++j) {
    extracted_scene.set_row(j,scene.get_row(pairs(1,j)));
  }
}
Example #24
void mitk::GeneralizedLinearModel::EstimatePermutation(const vnl_matrix<double> &xData)
{
  v3p_netlib_integer rows = xData.rows();
  v3p_netlib_integer cols = xData.cols();

  if (m_AddConstantColumn)
    ++cols;

  v3p_netlib_doublereal *x = new v3p_netlib_doublereal[rows* cols];
  _UpdateXMatrix(xData, m_AddConstantColumn, x);
  v3p_netlib_doublereal *qraux = new v3p_netlib_doublereal[cols];
  v3p_netlib_integer *jpvt = new v3p_netlib_integer[cols];
  std::fill_n(jpvt,cols,0);
  v3p_netlib_doublereal *work = new v3p_netlib_doublereal[cols];
  std::fill_n(work,cols,0);
  v3p_netlib_integer job = 16;

  // Make a call to Lapack-DQRDC which does QR with permutation
  // Permutation is saved in JPVT.
  v3p_netlib_dqrdc_(x, &rows, &rows, &cols, qraux, jpvt, work, &job);

  double limit = std::abs(x[0]) * std::max(cols, rows) * std::numeric_limits<double>::epsilon();
  // Calculate the rank of the matrix
  int m_Rank = 0;
  for (int i = 0; i <cols; ++i)
  {
    m_Rank += (std::abs(x[i*rows + i]) > limit) ? 1 : 0;
  }
  // Create a permutation vector
  m_Permutation.set_size(m_Rank);
  for (int i = 0; i < m_Rank; ++i)
  {
    m_Permutation(i) = jpvt[i]-1;
  }

  delete[] x;
  delete[] qraux;
  delete[] jpvt;
  delete[] work;
}
Example #25
template <typename T>
void ExtractMatchingPairs(
    const vnl_matrix<T>& model,
    const vnl_matrix<T>& scene,
    const T& threshold,
    vnl_matrix<T>& extracted_model,
    vnl_matrix<T>& extracted_scene) {
  vnl_matrix<T> dist;
  vnl_matrix<int> pairs;
  ComputeSquaredDistanceMatrix<T>(model, scene, dist);
  PickIndices<T>(dist, pairs, threshold*threshold);
  std::cout << "distance threshold : " << threshold << std::endl;
  int n = pairs.cols();
  int d = model.cols();
  extracted_model.set_size(n, d);
  extracted_scene.set_size(n, d);
  std::cout << "# of matched point pairs : " << n << std::endl;
  for (int j = 0; j < n; ++j) {
    extracted_model.set_row(j,model.get_row(pairs(0, j)));
  }
  for (int j = 0; j < n; ++j) {
    extracted_scene.set_row(j,scene.get_row(pairs(1, j)));
  }
}
Example #26
void ComputeTPSKernelU(const vnl_matrix<double>& model,
    const vnl_matrix<double>& ctrl_pts,
    vnl_matrix<double>& U) {
  int m = model.rows();
  int n = ctrl_pts.rows();
  int d = ctrl_pts.cols();
  //assert(model.cols() == d && (d == 2 || d == 3));
  //K.set_size(n, n);
  //K.fill(0);
  U.set_size(m, n);
  U.fill(0);
  double eps = 1e-006;

  vnl_vector<double> v_ij;
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) 
	{
      v_ij = model.get_row(i) - ctrl_pts.get_row(j);
      double r = v_ij.two_norm();
      U(i, j) = -r;
    }
  }
}
Example #27
/*
   Matlab code in cpd_G.m:
   k=-2*beta^2;
   [n, d]=size(x);  [m, d]=size(y);

   G=repmat(x,[1 1 m])-permute(repmat(y,[1 1 n]),[3 2 1]);
   G=squeeze(sum(G.^2,2));
   G=G/k;
   G=exp(G);
   */
void ComputeGaussianKernel(const vnl_matrix<double>& model,
    const vnl_matrix<double>& ctrl_pts,
    vnl_matrix<double>& G, vnl_matrix<double>& K,
    double lambda) {
  int m,n,d;
  m = model.rows();
  n = ctrl_pts.rows();
  d = ctrl_pts.cols();
  //assert(model.cols()==d);
  //assert(lambda>0);

  G.set_size(m,n);
  GaussianAffinityMatrix(model.data_block(), ctrl_pts.data_block(),
      m, n, d, lambda, G.data_block());

  if (model == ctrl_pts) {
    K = G;
  } else {
    K.set_size(n,n);
    GaussianAffinityMatrix(ctrl_pts.data_block(), ctrl_pts.data_block(),
        n, n, d, lambda, K.data_block());
  }
}
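For clarity, the kernel the Matlab snippet above describes is G(i,j) = exp(-||x_i - y_j||^2 / (2*beta^2)). The reference sketch below spells that formula out with plain loops; it is only an illustration and assumes the lambda parameter passed to GaussianAffinityMatrix plays the role of beta, since that routine's implementation is not shown here:

#include <cmath>
#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>

// Reference sketch of the kernel from the Matlab comment:
// G(i,j) = exp(-||model_i - ctrl_j||^2 / (2*beta^2)).
// Illustration only; the example above delegates the actual work to GaussianAffinityMatrix().
void gaussian_kernel_reference(const vnl_matrix<double>& model,
                               const vnl_matrix<double>& ctrl_pts,
                               double beta,
                               vnl_matrix<double>& G)
{
    const double k = -2.0 * beta * beta;
    G.set_size(model.rows(), ctrl_pts.rows());
    for (unsigned i = 0; i < model.rows(); ++i) {
        for (unsigned j = 0; j < ctrl_pts.rows(); ++j) {
            vnl_vector<double> diff = model.get_row(i) - ctrl_pts.get_row(j);
            G(i, j) = std::exp(diff.squared_magnitude() / k);
        }
    }
}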
Example #28
//Reshape the matrix : columns first ; Similar to MATLAB
vnl_matrix<double> MCLR_SM::Reshape_Matrix(vnl_matrix<double>mat,int r,int c )
{
	if(mat.rows()*mat.cols() != r*c)
	{
		cout<< "Number of elements in the matrix/vector should be equal to the total number of elements in the reshaped matrix";
		getchar();
		exit(1);
	}
	
	vnl_matrix<double>reshaped_matrix;
	reshaped_matrix.set_size(r,c);
	int count = 0;
	
	for(int j=0;j<c;++j)	
	{
		for(int i=0;i<r;++i)
		{
			reshaped_matrix(i,j) = mat(count%mat.rows(),floor(static_cast<double>(count/mat.rows())));
			count++;
		}
	}
	return reshaped_matrix;
}
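A small usage sketch of the column-first ordering; mclr is a hypothetical MCLR_SM instance (this assumes the MCLR_SM header is included), and the expected result is annotated in comments:

#include <vnl/vnl_matrix.h>

// Usage sketch for the column-first reshape above (MATLAB-style reshape):
//   [ 1 3 5 ]              [ 1 4 ]
//   [ 2 4 6 ]  --(3x2)-->  [ 2 5 ]
//                          [ 3 6 ]
void reshape_matrix_example(MCLR_SM& mclr)
{
	double vals[] = { 1, 3, 5,
	                  2, 4, 6 };
	vnl_matrix<double> m(vals, 2, 3);                      // filled row by row
	vnl_matrix<double> r = mclr.Reshape_Matrix(m, 3, 2);   // read column by column
	// r(0,0)=1  r(0,1)=4
	// r(1,0)=2  r(1,1)=5
	// r(2,0)=3  r(2,1)=6
}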
Example #29
// Copy a vnl-matrix into a c-array in column-major (Fortran) order, as expected
// by the LINPACK routine dqrdc_. Adds a constant column if required.
static void _UpdateXMatrix(const vnl_matrix<double> &xData, bool addConstant, v3p_netlib_doublereal *x)
{
  v3p_netlib_integer rows = xData.rows();
  v3p_netlib_integer cols = xData.cols();
  if (addConstant)
    ++cols;

  for (int r=0; r < rows; ++r)
  {
    for (int c=0; c <cols; ++c)
    {
      if (!addConstant)
      {
        x[c*rows + r] = xData(r,c);
      } else if (c == 0)
      {
        x[c*rows + r] = 1.0;
      } else
      {
        x[c*rows + r] = xData(r, c-1);
      }
    }
  }
}
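The loop above stores element (r,c) at x[c*rows + r], i.e. each column is contiguous in memory, which is the Fortran-style layout the LINPACK routine dqrdc_ (used in EstimatePermutation above) expects. A self-contained sketch of that indexing, with hypothetical names:

#include <iostream>

// Hypothetical illustration of the x[c*rows + r] layout used by _UpdateXMatrix:
// a 2x3 matrix A = [1 2 3; 4 5 6] is flattened column by column.
void column_major_layout_example()
{
    const int rows = 2, cols = 3;
    double A[rows][cols] = { { 1, 2, 3 },
                             { 4, 5, 6 } };
    double x[rows * cols];
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            x[c * rows + r] = A[r][c];
    // x is now { 1, 4, 2, 5, 3, 6 }: column 0, then column 1, then column 2.
    for (int i = 0; i < rows * cols; ++i)
        std::cout << x[i] << ' ';
    std::cout << std::endl;
}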
Example #30
bool rgrsn_ldp::dynamic_programming(const vnl_matrix<double> & data,
                                    double v_min, double v_max,
                                    unsigned int nBin,
                                    int nJumpBin,
                                    unsigned int windowSize,
                                    vnl_vector<double> & optimalSignal,
                                    vnl_vector<double> & signal_variance)
{
    assert(v_min < v_max);
    // raw data to probability map
    // quantilization
    const int N = data.rows();
    vnl_matrix<double> probMap = vnl_matrix<double>(N, nBin);
    probMap.fill(0.0);  // vnl_matrix(r, c) leaves the data uninitialized
    double interval = (v_max - v_min)/nBin;
    for (int r = 0; r<N; r++) {
        for (int c = 0; c<data.cols(); c++) {
            int num = value_to_bin_number(v_min, interval, data[r][c], nBin);
            probMap[r][num] += 1.0;
        }
    }
    probMap /= data.cols(); // normalization
    
    
    vcl_vector<double> optimalValues(N, 0);
    vcl_vector<int> numValues(N, 0);      // multiple values from local dynamic programming
    vcl_vector<vcl_vector<double> > all_values(N);
    for (int i = 0; i<=N - windowSize; i++) {
        // get a local probMap;
        vnl_matrix<double> localProbMap = probMap.extract(windowSize, probMap.cols(), i, 0);
        vcl_vector<int> localOptimalBins;
        rgrsn_ldp::local_dynamic_programming(localProbMap, nJumpBin, localOptimalBins);
        assert(localOptimalBins.size() == windowSize);
        for (int j = 0; j < localOptimalBins.size(); j++) {
            double value = bin_number_to_value(v_min, interval, localOptimalBins[j]);
            assert(j + i < N);
            all_values[j + i].push_back(value);
            numValues[j + i]     += 1;
            optimalValues[j + i] += value;
        }
    }
    // mean value
    for (int i = 0; i<optimalValues.size(); i++) {
        optimalValues[i] /= numValues[i];
    }
    
    // variance
    signal_variance = vnl_vector<double>(N, 0);
    for (int i = 0; i<optimalValues.size(); i++) {
        assert(all_values[i].size() > 0);
        if (all_values[i].size() == 1) {
            signal_variance[i] = 0.0001;
        }
        else
        {
            double dump_mean = 0.0;
            double sigma = 0.0;
            VnlPlus::mean_std(&all_values[i][0], (int)all_values[i].size(), dump_mean, sigma);
            signal_variance[i] = sigma + 0.0001; // avoid zero
        }
    }
    optimalSignal = vnl_vector<double>(&optimalValues[0], (int)optimalValues.size());
    
    // save variance with the size of window size, for test purpose
    if(0)
    {
        vcl_vector<vnl_vector<double> > all_value_vecs;
        for (int i = 0; i<all_values.size(); i++) {
            if (all_values[i].size() == windowSize) {
                all_value_vecs.push_back(VnlPlus::vector_2_vec(all_values[i]));
            }
        }
        
        vcl_string save_file("ldp_all_prediction.mat");
        vnl_matlab_filewrite awriter(save_file.c_str());
        awriter.write(VnlPlus::vector_2_mat(all_value_vecs), "ldp_all_opt_path");
        printf("save to %s\n", save_file.c_str());
    }
    return true;
}