Example #1
File: PCA.cpp Project: EQ94/Shark
//! Returns a model mapping encoded data from the
//! m-dimensional PCA coordinate system back to the
//! n-dimensional original coordinate system.
void PCA::decoder(LinearModel<>& model, std::size_t m) {
	if(!m) m = std::min(m_n,m_l);
	if( m == m_n && !m_whitening){
		model.setStructure(m_eigenvectors, m_mean);
		return; // the full, unwhitened eigenvector basis can be used directly
	}
	RealMatrix A = columns(m_eigenvectors, 0, m);
	if(m_whitening){
		for(std::size_t i=0; i<A.size2(); i++) {
			//take care of numerical difficulties for very small eigenvalues.
			if(m_eigenvalues(i)/m_eigenvalues(0) < 1.e-15){
				column(A,i).clear();		
			}
			else{
				column(A, i) = column(A, i) * std::sqrt(m_eigenvalues(i));
			}
		}
	}

	model.setStructure(A, m_mean);
}
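
A minimal usage sketch for the decoder above, assuming the Shark 3 PCA trainer API (the constructor performs the eigen-decomposition, encoder/decoder fill a LinearModel<>); the function and variable names are illustrative:

#include <shark/Data/Dataset.h>
#include <shark/Algorithms/Trainers/PCA.h>
using namespace shark;

void reconstructExample(UnlabeledData<RealVector> const& inputs, std::size_t m){
	PCA pca(inputs);                              // eigen-decomposition of the data covariance
	LinearModel<> enc, dec;
	pca.encoder(enc, m);                          // n-dimensional -> m-dimensional projection
	pca.decoder(dec, m);                          // m-dimensional -> n-dimensional back-projection (function above)
	Data<RealVector> codes = enc(inputs);         // encode the data
	Data<RealVector> reconstructed = dec(codes);  // approximate reconstruction in the original space
}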
Example #2
File: SigmoidFit.cpp Project: EQ94/Shark
// optimize the sigmoid using rprop on the negative log-likelihood
void SigmoidFitRpropNLL::train(SigmoidModel& model, LabeledData<RealVector, unsigned int> const& dataset)
{
	// fit a 1-d linear model (slope plus optional offset) whose cross-entropy
	// on the dataset equals the negative log-likelihood of the sigmoid
	LinearModel<> trainModel;
	trainModel.setStructure(1, 1, model.hasOffset());
	CrossEntropy loss;
	ErrorFunction modeling_error(dataset, &trainModel, &loss);
	IRpropPlus rprop;
	rprop.init(modeling_error);
	for (unsigned int i=0; i<m_iterations; i++) {
		rprop.step( modeling_error );
	}
	// copy the optimized slope and offset into the sigmoid's parameter encoding
	RealVector solution(2, 0.0);
	solution(0) = rprop.solution().point(0);
	if(model.slopeIsExpEncoded()){
		// in this encoding the model stores the logarithm of the slope
		solution(0) = std::log(solution(0));
	}
	if(model.hasOffset())
		solution(1) = -rprop.solution().point(1); // the sigmoid uses the opposite sign convention for the offset
	model.setParameterVector(solution);
}
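
A hedged usage sketch: SigmoidFitRpropNLL fits a SigmoidModel that maps a real-valued classifier score to a class probability (Platt-style scaling). The constructor argument as the Rprop iteration count and the include paths are assumptions:

#include <shark/Data/Dataset.h>
#include <shark/Models/SigmoidModel.h>
#include <shark/Algorithms/Trainers/SigmoidFit.h>
using namespace shark;

void calibrateScores(LabeledData<RealVector, unsigned int> const& scores){
	SigmoidModel sigmoid;            // maps a 1-d classifier score to a probability in (0,1)
	SigmoidFitRpropNLL fitter(100);  // assumed: number of Rprop iterations (m_iterations above)
	fitter.train(sigmoid, scores);   // optimizes slope and offset on the negative log-likelihood
}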
Example #3
void LinearRegression::train(LinearModel<>& model, LabeledData<RealVector, RealVector> const& dataset){
	std::size_t inputDim = inputDimension(dataset);
	std::size_t outputDim = labelDimension(dataset);
	std::size_t numInputs = dataset.numberOfElements();
	std::size_t numBatches = dataset.numberOfBatches();

	//Let P be the matrix of points with n rows and X = (P|1); the column of ones represents the bias weight.
	//Let A = X^T X + lambda * I
	//Then we have (for lambda = 0)
	//A = ( P^T P  P^T 1 )
	//    ( 1^T P  1^T 1 )
	RealMatrix matA(inputDim+1,inputDim+1,0.0);
	blas::Blocking<RealMatrix> Ablocks(matA,inputDim,inputDim);
	//compute A and the label matrix batchwise
	typedef LabeledData<RealVector, RealVector>::const_batch_reference BatchRef;
	for (std::size_t b=0; b != numBatches; b++){
		BatchRef batch = dataset.batch(b);
		symm_prod(trans(batch.input),Ablocks.upperLeft(),false);
		noalias(column(Ablocks.upperRight(),0))+=sum_rows(batch.input);
	}
	row(Ablocks.lowerLeft(),0) = column(Ablocks.upperRight(),0);
	matA(inputDim,inputDim) = numInputs;
	//X^T X += lambda * I (only the non-bias block is regularized)
	diag(Ablocks.upperLeft())+= blas::repeat(m_regularization,inputDim);
	
	
	//we also need to compute X^T L = (P^T L, 1^T L) where L is the matrix of labels
	RealMatrix XTL(inputDim + 1,outputDim,0.0);
	for (std::size_t b=0; b != numBatches; b++){
		BatchRef batch = dataset.batch(b);
		RealSubMatrix PTL = subrange(XTL,0,inputDim,0,outputDim);
		axpy_prod(trans(batch.input),batch.label,PTL,false);
		noalias(row(XTL,inputDim))+=sum_rows(batch.label);
	}	
	
	//we solve the system A Beta = X^T L
	//usually this is done via the Moore-Penrose inverse:
	//Beta = A^-1 X^T L
	//but it is faster and numerically more stable to solve it as a symmetric system,
	//and we can solve in place
	RealMatrix&  beta = XTL;
	blas::solveSymmSemiDefiniteSystemInPlace<blas::SolveAXB>(matA,beta);
	
	RealMatrix matrix = subrange(trans(beta), 0, outputDim, 0, inputDim);
	RealVector offset = row(beta,inputDim);
	
	// write parameters into the model
	model.setStructure(matrix, offset);
}
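
A short usage sketch for the trainer above, assuming the standard Shark pattern of constructing the trainer with the ridge parameter and calling train; SquaredLoss is used only to report the training error:

#include <shark/Data/Dataset.h>
#include <shark/Algorithms/Trainers/LinearRegression.h>
#include <shark/ObjectiveFunctions/Loss/SquaredLoss.h>
using namespace shark;

void fitLinearModel(LabeledData<RealVector, RealVector> const& data){
	LinearRegression trainer(0.1);  // ridge regularization lambda = 0.1
	LinearModel<> model;
	trainer.train(model, data);     // fills the weight matrix and offset computed above
	SquaredLoss<> loss;
	double err = loss(data.labels(), model(data.inputs())); // training error of the fit
}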
Example #4
File: PCA.cpp Project: EQ94/Shark
//! Returns a model mapping the original data to the
//! m-dimensional PCA coordinate system.
void PCA::encoder(LinearModel<>& model, std::size_t m) {
	if(!m) m = std::min(m_n,m_l);
	
	RealMatrix A = trans(columns(m_eigenvectors, 0, m) );
	RealVector offset = -prod(A, m_mean);
	if(m_whitening){
		for(std::size_t i=0; i<A.size1(); i++) {
			//take care of numerical difficulties for very small eigenvalues.
			if(m_eigenvalues(i)/m_eigenvalues(0) < 1.e-15){
				row(A,i).clear();
				offset(i) = 0;			
			}
			else{
				row(A, i) /= std::sqrt(m_eigenvalues(i));
				offset(i) /= std::sqrt(m_eigenvalues(i));
			}
		}
	}
	model.setStructure(A, offset);
}
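
A whitening sketch for the encoder above; the two-argument constructor that enables whitening (m_whitening) is an assumption:

#include <shark/Data/Dataset.h>
#include <shark/Algorithms/Trainers/PCA.h>
using namespace shark;

void whitenedEncoding(UnlabeledData<RealVector> const& inputs, std::size_t m){
	PCA pca(inputs, true);                    // assumed: second argument turns on whitening
	LinearModel<> enc;
	pca.encoder(enc, m);                      // rows scaled by 1/sqrt(eigenvalue), as in the loop above
	Data<RealVector> whitened = enc(inputs);  // encoded components with (approximately) unit variance
}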