Example #1
MatrixXd compute_empirical_T3(SparseMatrix<double> whitened_data, VectorXd y_mean){
	int nx = (int)whitened_data.cols();
	//printf("nx = %d\n", nx);
	double shift12 = (alpha0 + 1.0)*(alpha0 + 2.0) / (2.0*(double)nx);
	double shift01 = -alpha0 *(alpha0 + 1.0) / (2.0*(double)nx);
	double shift00 = alpha0*alpha0;

	MatrixXd emp_T3_whitened = MatrixXd::Zero(KHID, KHID * KHID);
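	// emp_T3_whitened holds the mode-1 unfolding of the empirical third-order moment tensor:
	// slice i of the KHID x KHID x KHID tensor occupies columns [i*KHID, (i+1)*KHID).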
	for(int n=0; n<nx; n++){
		VectorXd y = whitened_data.col(n);
		
		MatrixXd temp0, temp1, temp2, temp3, temp4;
		temp0.noalias() = shift12 * (y * y.transpose());
		temp1.noalias() = shift01 * (y * y.transpose());
		temp2.noalias() = shift01 * (y * y_mean.transpose());
		temp3.noalias() = shift01 * (y_mean * y.transpose());
		temp4.noalias() = shift00 * (y_mean * y_mean.transpose());
		for(int i=0; i < KHID; i++){
			emp_T3_whitened.block(0,i*KHID, KHID, KHID).noalias() += temp0*y(i);
			emp_T3_whitened.block(0,i*KHID, KHID, KHID).noalias() += temp1*y_mean(i);
			emp_T3_whitened.block(0,i*KHID, KHID, KHID).noalias() += temp2*y(i);
			emp_T3_whitened.block(0,i*KHID, KHID, KHID).noalias() += temp3*y(i);
			emp_T3_whitened.block(0,i*KHID, KHID, KHID).noalias() += temp4*y_mean(i);			
		}
	}
	return emp_T3_whitened;
}
Example #2
void DMultivariateGaussian::body(const VectorXd &x )
{
	// See http://en.wikipedia.org/wiki/Multivariate_normal_distribution
	
	double n=x.size();
	double value=0.0;
	if (isStandard)
	{
		double factor1 = 1.0/pow(2.0*M_PI,n/2.0);
		double factor2 = 1.0/( sqrt(std::abs(covar.determinant()) ) );

		VectorXd dx = x-mean;
		RowVectorXd dxt =  dx.transpose();
	
		VectorXd x1 = invCovar*dx;
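		// exponent is the Mahalanobis form -(x - mean)' * Sigma^{-1} * (x - mean) / 2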

		double arg = -0.5* (x1.dot(dxt));
		value = factor1*factor2*exp(arg);
	}
	else
	{
		double factor1 = 1.0/pow(2.0*M_PI,n/2.0);
		double sigma=1.0;
		double factor2 = 1.0/sigma;
	
		VectorXd dx = x-mean;
		RowVectorXd dxt =  dx.transpose();
	
		VectorXd x1 = invCovar*dx;

		double arg = -0.5* (x1.dot(dxt));
		value = factor1*factor2*exp(arg);	
	}
	result = value;
}
Example #3
// [[Rcpp::depends(RcppEigen)]]
// [[Rcpp::export]]
MatrixXd gibbsSamplerC(int nMC,
                       VectorXd y,
                       MatrixXd X,
                       double a = 1.1,
                       double b = 1.1,
                       double kappa = 0.1) {

    int n = X.rows();
    int p = X.cols();
    
    VectorXd beta = VectorXd::Zero(p);
    MatrixXd Res(nMC, p+1);
    
    // take Cholesky decomposition of M matrix
    MatrixXd M = ( kappa * MatrixXd::Identity(p, p) ) + ( X.transpose() * X );
    LLT<MatrixXd> lltOfM(M);
    MatrixXd L = lltOfM.matrixL();
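    // M / sigmasq is the posterior precision of beta, so beta | sigmasq, y ~ N(M^{-1} X'y, sigmasq * M^{-1})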
    
    // to be used in draws of sigmasq estimates
    double A = a + 0.5*(n+p);
    
    // to be used in draws of beta estimates
    VectorXd beta_means = M.inverse() * (X.transpose() * y);
    
    // run Markov Chain
    for ( int i=0; i<nMC; ++i ) {
        
        // UPDATE SIGMASQ ESTIMATE
        
        // update scale parameter
        VectorXd mat = y - (X * beta);
        double beta_val = (beta.transpose() * beta).sum();
        double mat_val = (mat.transpose() * mat).sum();
        double B = ( b + 0.5 * kappa * beta_val ) + ( 0.5 * mat_val );
        
        // new sigmasq inverse gamma draw
        double gamma_draw = rgamma(1, A, 1/B)[0];
        double new_sigmasq = 1.0/gamma_draw;
        
        
        // UPDATE BETA ESTIMATES
        
        // draw beta | sigmasq, y ~ N(beta_means, new_sigmasq * M^{-1});
        // with M = L L^T, beta = beta_means + sqrt(new_sigmasq) * L^{-T} z for z ~ N(0, I)
        // has exactly this covariance
        VectorXd z(p);
        for ( int c=0; c<p; ++c ) {
            z(c) = rnorm(1, 0, 1)[0];
        }
        beta = beta_means + sqrt(new_sigmasq) * lltOfM.matrixU().solve(z);
        
        
        // STORE RESULTS
        
        Res(i, p) = new_sigmasq;
        Res.row(i).leftCols(p) = beta;
        
    }
    
    return Res;
    
}
Example #4
double cRBLayer::getEnergy(VectorXd& vNodes){
    // RBM free energy: F(v) = -v.vb - sum_j log(1 + exp((W'v + hb)_j))
    VectorXd wx_b = vNodes.transpose()*W;   // hidden-unit pre-activations (length h)
    wx_b += hb;
    double vxb = vNodes.dot(vb);
    return -(wx_b.array().exp() + 1.0).log().sum() - vxb;
}
Example #5
void HmcSampler::_getNextQuadraticHitTime(const VectorXd & a, const VectorXd & b, double & hit_time, int & cn , const bool first_bounce ){
        hit_time=0;
  
        double mint;
        if (first_bounce) {mint=0;}
        else {mint=min_t;}
    
    for (int i=0; i != (int)quadraticConstraints.size(); i++ ){
        
        QuadraticConstraint qc = quadraticConstraints[i];
        double q1= - ((a.transpose())*(qc.A))*a;
        q1 = q1 + ((b.transpose())*(qc.A))*b;
        double q2= (qc.B).dot(b);
        double q3= qc.C + a.transpose()*(qc.A)*a;
        double q4= 2*b.transpose()*(qc.A)*a;
        double q5= (qc.B).dot(a);

        double r4 = q1*q1 + q4*q4;
        double r3 = 2*q1*q2 + 2*q4*q5;
        double r2 = q2*q2 + 2*q1*q3 + q5*q5 -q4*q4;
        double r1 = 2*q2*q3 - 2*q4*q5;
        double r0=  q3*q3 - q5*q5;

        double roots[]={0,0,0,0};
        double aa = r3/r4;
        double bb = r2/r4;
        double cc = r1/r4;
        double dd = r0/r4;

        //Solve quartics of the form x^4 + aa x^3 + bb x^2 + cc x + dd ==0
        int sols = quarticSolve(aa, bb, cc, dd, roots[0], roots[1],  roots[2],  roots[3]);
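        // each root r is a candidate hit time via t = acos(r); squaring sin(t) to obtain the quartic
        // introduces spurious roots, which the sign check on l1/l2 below filters out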
        for (int j=0; j<sols; j++){
            double r = roots[j];
            if (fabs(r) <= 1){
                double l1 = q1*r*r + q2*r + q3;
                double l2 = -sqrt(1-r*r)*(q4*r + q5); 
                if (l1/l2 > 0){
                    double t = acos(r);
                    if (   t> mint      && (hit_time == 0 || t < hit_time)){
                       hit_time=t;
                       cn=i;                                          
                    }                    
                }
            }            
        }                
    }
}
Example #6
void SOGP::reduceBasisVectorSet(unsigned int index) {

    unsigned int end = this->current_size-1;
    VectorXd zero_vector = VectorXd::Zero(this->current_size);
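    // move the last basis vector's data into slot 'index', zero out the last row/column,
    // then apply the rank-one deletion updates to alpha, C and Q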

    VectorXd alpha_star = this->alpha.row(index);
    VectorXd last_item = this->alpha.row(end);
    alpha.block(index,0,1,this->output_dim) = last_item.transpose();
    alpha.block(end,0,1, this->output_dim) = VectorXd::Zero(this->output_dim).transpose();
    double cstar = this->C(index, index);
    VectorXd Cstar = this->C.col(index);
    Cstar(index) = Cstar(end);
    Cstar.conservativeResize(end);
    VectorXd Crep = C.col(end);
    Crep(index) = Crep(end);
    C.block(index, 0, 1, this->current_size) = Crep.transpose();
    C.block(0, index, this->current_size, 1) = Crep;
    C.block(end, 0, 1, this->current_size) = zero_vector.transpose();
    C.block(0, end, this->current_size,1) = zero_vector;

    double qstar = this->Q(index, index);
    VectorXd Qstar = this->Q.col(index);
    Qstar(index) = Qstar(end);
    Qstar.conservativeResize(end);
    VectorXd Qrep = Q.col(end);
    Qrep(index) = Qrep(end);
    Q.block(index, 0, 1, this->current_size) = Qrep.transpose();
    Q.block(0, index, this->current_size, 1) = Qrep;
    Q.block(end, 0, 1, this->current_size) = zero_vector.transpose();
    Q.block(0, end, this->current_size,1) = zero_vector;

    VectorXd qc = (Qstar + Cstar)/(qstar + cstar);
    for (unsigned int i=0; i<this->output_dim; i++) {
        VectorXd diffAlpha = alpha.block(0,i,end,1) - alpha_star(i)*qc;
        alpha.block(0,i,end,1) = diffAlpha;
    }

    MatrixXd oldC = C.block(0,0, end, end);
    C.block(0,0, end,end) = oldC + (Qstar*Qstar.transpose())/qstar -
            ((Qstar + Cstar)*((Qstar + Cstar).transpose()))/(qstar+cstar);

    MatrixXd oldQ = Q.block(0,0,end,end);
    Q.block(0,0, end, end) = oldQ - (Qstar*Qstar.transpose())/qstar;

    this->basis_vectors[index] = this->basis_vectors[end];
    this->basis_vectors.pop_back();

    this->current_size = end;
}
Example #7
void SOGP::predict(const VectorXd &state, VectorXd &prediction,
                   VectorXd &prediction_variance) {
    //check if we have initialised the system
    if (!this->initialized) {
        throw OTLException("SOGP not yet initialised");
    }

    double kstar = kernel->eval(state,state);

    //check whether the model has been trained yet
    if (this->current_size == 0) {
        prediction = VectorXd::Zero(this->output_dim);
        prediction_variance = VectorXd::Ones(this->output_dim)*
                (kstar + this->noise);
        return;
    }

    VectorXd k;
    kernel->eval(state, this->basis_vectors, k);
    //std::cout << "K: \n" << k << std::endl;
    //std::cout << "alpha: \n" << this->alpha.block(0,0,this->current_size, this->output_dim) << std::endl;

    prediction = k.transpose() *this->alpha.block(0,0,this->current_size, this->output_dim);
    prediction_variance = VectorXd::Ones(this->output_dim)*
            (k.dot(this->C.block(0,0, this->current_size, this->current_size)*k)
             + kstar + this->noise);

    return;
}
Example #8
void parameters::Likelihood(const VectorXd & eff){
	 VectorXd loc;
	 loc.resize(m_proba.rows());
	 loc=(m_proba.rowwise().sum().array().log());
	 m_loglikelihood=eff.transpose()*loc;
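	 // BIC-penalised log-likelihood: loglik - 0.5 * (number of free parameters) * log(total weight of observations)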
	 m_bic=m_loglikelihood - 0.5*m_nbparam*log(eff.sum());
}
Example #9
MatrixXd WishartUnit(int m, int df)
{
    MatrixXd c(m,m);
    c.setZero();
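    // Bartlett decomposition: c is upper triangular with c(i,i) = sqrt(chi^2_{df-i})
    // and N(0,1) entries above the diagonal, so c' * c ~ Wishart(I_m, df)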

    for ( int i = 0; i < m; i++ ) {
        std::gamma_distribution<> gam(0.5*(df - i));
        c(i,i) = sqrt(2.0 * gam(rng));
        VectorXd r = nrandn(m-i-1);
        c.block(i,i+1,1,m-i-1) = r.transpose();
    }

    MatrixXd ret = c.transpose() * c;

#ifdef TEST_MVNORMAL
    cout << "WISHART UNIT {\n" << endl;
    cout << "  m:\n" << m << endl;
    cout << "  df:\n" << df << endl;
    cout << "  ret;\n" << ret << endl;
    cout << "  c:\n" << c << endl;
    cout << "}\n" << ret << endl;
#endif

    return ret;
}
Example #10
void CLogistic::TrainOnevsAll(const Matrix<double, Dynamic, Dynamic, RowMajor>& X, const VectorXd& y_class, int num_labels, double lambda)
{
	/*
		trains multiple logistic regression classifiers and returns all
		the classifiers in a matrix classifier, where the i-th row of classifier 
		corresponds to the classifier for label i
	*/

	int m = X.rows();
	int n = X.cols();
	classifier = Matrix<double, Dynamic, Dynamic, RowMajor>::Zero(num_labels, n);

	// Iterate through all the classification classes
	for(int class_ndx = 0; class_ndx < num_labels; class_ndx++)
	{
		VectorXd theta  = VectorXd::Zero(n);

		// classify one vs. all
		VectorXd c = VectorXd::Zero(y_class.rows());
		for (int point_ndx = 0; point_ndx < y_class.rows(); point_ndx++)
			c(point_ndx) = (y_class(point_ndx) == class_ndx ? 1.0 : 0.0);

		GradientDescent(X, c, theta, lambda);

		// store the result inside classifier
		classifier.row(class_ndx) = theta.transpose();
	}
}
Example #11
double HmcSampler::_verifyConstraints(const VectorXd & b){
  // r tracks the smallest constraint value over all constraints; a negative r means a violated constraint
  double r = 0;
  int idx = 0;
  bool first = true;

  for (int i=0; i != (int)quadraticConstraints.size(); i++ ){
    QuadraticConstraint qc = quadraticConstraints[i];
    double check = ((b.transpose())*(qc.A))*b + (qc.B).dot(b) + qc.C;
    if (first || check < r) {
      r = check;
      first = false;
    }
  }

  for (int i=0; i != (int)linearConstraints.size(); i++ ){
    LinearConstraint lc = linearConstraints[i];
    double check = (lc.f).dot(b) + lc.g;
    if (first || check < r) {
      r = check;
      idx = i;
      first = false;
    }
  }

  // if (r < 0) {
  //   LinearConstraint lc = linearConstraints[idx];
  //   double fpart = (lc.f).dot(b);
  //   double gpart = lc.g;
  //   printf("The %ith constraint is negative. fb: %g, g: %g\n", idx, fpart, gpart);
  // }

  return r;
}
Example #12
ArrayXXd squaredDistsToVectors(const MatrixXd& X, const MatrixXd& V) {
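	// pairwise squared distances via ||x - v||^2 = ||x||^2 + ||v||^2 - 2 * x.v, computed for all pairs at once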
	auto prods = -2. * (X * V.transpose());
	MatrixXd dists = prods;
	VectorXd rowSquaredNorms = X.rowwise().squaredNorm();
	VectorXd colSquaredNorms = V.rowwise().squaredNorm();
	RowVectorXd colSquaredNormsAsRow = colSquaredNorms.transpose();
	dists.colwise() += rowSquaredNorms;
	dists.rowwise() += colSquaredNormsAsRow;

	// // does the above compute the distances properly? -> yes
	// ArrayXd trueDists = ArrayXd(X.rows(), V.rows());
	// for (int i = 0; i < X.rows(); i++) {
	// 	for (int j = 0; j < V.rows(); j++) {
	// 		VectorXd diff = X.row(i) - V.row(j);
	// 		trueDists(i, j) = diff.squaredNorm();

	// 		auto gap = fabs(trueDists(i, j) - dists(i, j));
	// 		if (gap > .001) {
	// 			printf("WE'RE COMPUTING THE DISTANCES WRONG!!!");
	// 		}
	// 		assert(gap < .001);
	// 	}
	// }

	return dists.array();
}
Example #13
void insertRow(MatrixXd &mat, const VectorXd &row)
{
  assert(mat.cols() == row.size());
  int rows = mat.rows();
  int cols = mat.cols();
  mat.conservativeResize(rows+1, cols);
  mat.row(rows) = row.transpose();
}
Example #14
double evalwa(const VectorXd & a0, const VectorXd & ainf, const vector<MatrixXd> & wa, const vector<int> & s) {
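	// value of the linear (weighted-automaton) representation on the word s: a0' * W_{s_1} * ... * W_{s_k} * ainf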
	RowVectorXd a = a0.transpose();
	for (vector<int>::const_iterator it = s.begin(); it != s.end(); it++) {
		a *= wa[*it];
	}
	return a*ainf;
}
Example #15
int main(int argc, const char * argv[])
{

    // insert code here...
    int m = 2;
    int n = 4;
    MatrixXd A(m,n);
    VectorXd c(n);
    VectorXd b(m);
    VectorXd index(m);
    /*
    A << 4, 3, 1, 0, 0,
    2, 3, 0, 1, 0,
    4, 2, 0, 0, 1;
    c << 9,12,0,0,0;
    b << 180, 150, 160;
    
    index<<2,3,4;
    */
    A << -1, 2, 1, 0,
          1, 0, 0, 1;
    c << -1, 4, 0, 0;
    b << 30, 30;
    Simplex sim(A,b,c);
    VectorXd sol = sim.SolveLP(1);
    index = sim.getIdx();
    double J = sim.getZ();
    cout<<"the solution is:\n"<<index.transpose()<<endl<<sol.transpose()<<endl;
    cout<<"the optimal value is:  "<<J<<endl;
    return 0;
}
Example #16
void FunctionApproximatorGPR::train(const MatrixXd& inputs, const MatrixXd& targets)
{
  if (isTrained())  
  {
    cerr << "WARNING: You may not call FunctionApproximatorGPR::train more than once. Doing nothing." << endl;
    cerr << "   (if you really want to retrain, call reTrain function instead)" << endl;
    return;
  }
  
  assert(inputs.rows() == targets.rows());
  assert(inputs.cols()==getExpectedInputDim());

  const MetaParametersGPR* meta_parameters_gpr = 
    dynamic_cast<const MetaParametersGPR*>(getMetaParameters());
  
  double max_covar = meta_parameters_gpr->maximum_covariance();
  VectorXd sigmas = meta_parameters_gpr->sigmas();
  
  
  // Compute the gram matrix
  // In a gram matrix, every input point is itself a center
  MatrixXd centers = inputs;
  // Replicate sigmas, because they are the same for each data point/center
  MatrixXd widths = sigmas.transpose().colwise().replicate(centers.rows()); 

  MatrixXd gram(inputs.rows(),inputs.rows());
  bool normalize_activations = false;
  bool asymmetric_kernels = false;
  BasisFunction::Gaussian::activations(centers,widths,inputs,gram,normalize_activations,asymmetric_kernels);
  
  gram *= max_covar;

  setModelParameters(new ModelParametersGPR(inputs,targets,gram,max_covar,sigmas));
  
}
Example #17
void CMT::STM::initialize(const MatrixXd& input, const MatrixXd& output) {
	if(input.rows() != dimIn() || output.rows() != dimOut())
		throw Exception("Data has wrong dimensionality.");
	if(input.cols() != output.cols())
		throw Exception("The number of inputs and outputs should be the same.");

	Array<bool, 1, Dynamic> spikes = output.array() > 0.5;
	int numSpikes = spikes.sum();

	if(numSpikes > dimInNonlinear() && dimInNonlinear() > 0) {
		mSharpness = 1.;
		MatrixXd inputNonlinear = input.topRows(dimInNonlinear());
		MatrixXd inputs1(inputNonlinear.rows(), numSpikes);
		MatrixXd inputs0(inputNonlinear.rows(), spikes.size() - numSpikes);

		// separate data into spike-triggered and non-spike-triggered stimuli
		for(int i = 0, i0 = 0, i1 = 0; i < spikes.size(); ++i)
			if(spikes[i])
				inputs1.col(i1++) = inputNonlinear.col(i);
			else
				inputs0.col(i0++) = inputNonlinear.col(i);

		// spike-triggered/non-spike-triggered mean and precision
		VectorXd m1 = inputs1.rowwise().mean();
		VectorXd m0 = inputs0.rowwise().mean();

		MatrixXd S1 = covariance(inputs1).inverse();
		MatrixXd S0 = covariance(inputs0).inverse();

		// parameters of a quadratic model
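		// (quadratic log-odds of spiking under Gaussian class-conditional assumptions, x'Kx + w'x + a,
		//  with the offset a also split across the mNumComponents components)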
		MatrixXd K = (S0 - S1) / 2.;
		VectorXd w = S1 * m1 - S0 * m0;
		double p = static_cast<float>(numSpikes) / output.cols();
		double a = 0.5 * (m0.transpose() * S0 * m0)(0, 0) - 0.5 * (m1.transpose() * S1 * m1)(0, 0)
			+ 0.5 * logDetPD(S1) - 0.5 * logDetPD(S0) + log(p) - log(1. - p)
			- log(mNumComponents);

		// decompose matrix into eigenvectors
		SelfAdjointEigenSolver<MatrixXd> eigenSolver(K);
		VectorXd eigVals = eigenSolver.eigenvalues();
		MatrixXd eigVecs = eigenSolver.eigenvectors();
		VectorXi indices = argSort(eigVals.array().abs());

		// use most informative eigenvectors as features
		for(int i = 0; i < mNumFeatures && i < indices.size(); ++i) {
			int j = indices[i];
			mWeights.col(i).setConstant(eigVals[j]);
			mFeatures.col(i) = eigVecs.col(j);
		}

		mWeights = mWeights.array() * (0.5 + 0.5 * ArrayXXd::Random(mNumComponents, mNumFeatures).abs());
		mPredictors.rowwise() = w.transpose();
		mPredictors += sampleNormal(mNumComponents, mDimInNonlinear).matrix() * log(mNumComponents) / 10.;
		mBiases.setConstant(a);
		mBiases += VectorXd::Random(mNumComponents) * log(mNumComponents) / 100.;
	}

	if(dimInLinear() > 0)
		mLinearPredictor = input.bottomRows(dimInLinear()) * output.transpose() / numSpikes;
}
Example #18
int main()
{
  int n = 10;
  MatrixXd mat(n,2);
  for (int i=0; i<n; i++){
    mat(i,0) = ((double)random())/RAND_MAX;
    mat(i,1) = ((double)random())/RAND_MAX;
  }

  MatrixXd centered = mat.rowwise() - mat.colwise().mean();
  MatrixXd cov = centered.adjoint()*centered;

  SelfAdjointEigenSolver<MatrixXd> eig(cov);

  MatrixXd ev = eig.eigenvectors();

  // express every point in the eigenvector basis (the columns of ev are the principal axes)
  MatrixXd rotated = ev.transpose()*mat.transpose();
  VectorXd maxCoeff = rotated.rowwise().maxCoeff();
  VectorXd minCoeff = rotated.rowwise().minCoeff();
  VectorXd center = ev*(maxCoeff+minCoeff)/2;
  std::cout << "center:" << center.transpose() << std::endl;
  std::cout << "rotation:" << ev << std::endl;
  std::cout << "extent:"
	    << (maxCoeff-minCoeff).transpose() << std::endl;

  return 0;
}
Example #19
 void printProblem()
 {
     printName();
     std::cout << std::endl << "A_nodes_: " << std::endl << MatrixXi::Identity(A_nodes_.rows(), A_nodes_.rows()) * A_nodes_  << std::endl << std::endl;
     std::cout << "A_: " << std::endl << MatrixXd::Identity(A_.rows(), A_.rows()) * A_  << std::endl << std::endl;
     std::cout << "b_: " << std::endl << b_.transpose() << std::endl << std::endl;
 }
Example #20
void CGppe::Approx_CGppe_Laplace(const VectorXd & theta_x, const VectorXd& theta_t, const double& sigma, const MatrixXd& t, const MatrixXd &x, const TypePair & all_pairs,
                               const VectorXd & idx_global, const VectorXd& idx_global_1, const VectorXd& idx_global_2,
                               const VectorXd& ind_t, const VectorXd& ind_x, int M, int N)
{
    //Parameters function initialization
    double eps = 1E-6, psi_new, psi_old;
    M = all_pairs.rows();
    int n = M * N;
    f = VectorXd::Zero(n);
    VectorXd fvis = VectorXd::Zero(idx_global.rows());
    VectorXd deriv;
    double loglike = 0;

    covfunc_t->SetTheta(theta_t);
    covfunc_x->SetTheta(theta_x);

    MatrixXd Kt = covfunc_t->ComputeGrandMatrix(t);
    Kx = covfunc_x->ComputeGrandMatrix(x);

    MatrixXd K = GetMat(Kt, ind_t, ind_t).array() * GetMat(Kx, ind_x, ind_x).array();

    loglike = log_likelihood( sigma, all_pairs, idx_global_1, idx_global_2, M, N);
    Kinv = K.inverse();
    psi_new = loglike - 0.5 * fvis.transpose() * Kinv * fvis;
    psi_old = -std::numeric_limits<double>::infinity();
    // Newton iterations for the mode of the Laplace approximation:
    // solve (W + K^{-1}) f_new = dloglik(f_old) + W * f_old until psi stops increasing
    while ((psi_new - psi_old) > eps)
    {
        psi_old = psi_new;
        deriv = deriv_log_likelihood_CGppe_fast( sigma, all_pairs, idx_global_1, idx_global_2, M, N);
        W = -deriv2_log_likelihood_CGppe_fast(sigma, all_pairs, idx_global_1, idx_global_2, M, N);
        W = GetMat(W, idx_global, idx_global);
        llt.compute(W + Kinv);
        L = llt.matrixL(); //no need to extract the triangular matrix here
        fvis = llt.solve(GetVec(deriv, idx_global) + W * fvis);
        for (int w = 0;w < idx_global.rows();w++)
        {
            f(idx_global(w)) = fvis(w);
        }
        loglike = log_likelihood( sigma, all_pairs, idx_global_1, idx_global_2, M, N);
        psi_new = loglike - 0.5 * fvis.transpose() * Kinv * fvis;
    }
}
Example #21
double drwnGaussian::evaluateSingle(const vector<double>& x) const
{
    DRWN_ASSERT(x.size() == (unsigned)_n);
    guaranteeInvSigma();

    VectorXd z = Eigen::Map<const VectorXd>(&x[0], _n) - _mu;
    return -0.5 * (z.transpose() * (*_invSigma) * z)(0) + _logZ;
}
Example #22
 void updateHessian() const
 {
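   // Assuming getValue(0) returns f(x) = (prod_i x_i)^(1/n) with n = x.getSize(), this fills H with its Hessian:
   // H_ij = f / (n^2 x_i x_j) for i != j and H_ii = f (1 - n) / (n^2 x_i^2)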
   MatrixXd& H = *IFunction<PARTIAL_XX>::_val[0];
   VectorXd q = static_cast<VectorXd>(x).array().inverse();
   H = (x.getSize()*q.array()*q.array()).matrix().asDiagonal();
   H -= q*q.transpose();    //Can be optimized by taking into account the symmetry of H and qq'
   H *= -getValue(0)/(x.getSize()*x.getSize());
 }
Example #23
double drwnGaussian::evaluateSingle(const VectorXd& x) const
{
    DRWN_ASSERT(x.rows() == _n);
    guaranteeInvSigma();

    VectorXd z = x - _mu;
    return -0.5 * (z.transpose() * (*_invSigma) * z)(0) + _logZ;
}
Example #24
MatrixXd drwnNNGraphLSparseLearner::computeSubGradient()
{
    DRWN_FCN_TIC;
    MatrixXd G = MatrixXd::Zero(_Lt.rows(), _Lt.cols());

    // gradient of \lambda \sum_u \alpha(y_u) \sum_{v \in {\cal N}_u^{+}} d_M(u,v)
    drwnNNGraphNodeIndex u(0, 0);
#if 0
    for (u.imgIndx = 0; u.imgIndx < _posGraph.numImages(); u.imgIndx++) {
        for (u.segId = 0; u.segId < _posGraph[u.imgIndx].numNodes(); u.segId++) {
            const drwnNNGraphEdgeList& e = _posGraph[u].edges;
            for (drwnNNGraphEdgeList::const_iterator kt = e.begin(); kt != e.end(); ++kt) {
                const drwnNNGraphNodeIndex v(kt->targetNode);
                const VectorXd delta((_graph[u].features - _graph[v].features).cast<double>());
                G += delta * delta.transpose();
            }
        }
    }
    G *= _lambda;
#else
    G = _lambda * _X;
#endif

    // subgradient of \sum_u \alpha(y_u) [\max_{v,w} d_M(u,v) - d_M(u,w) + 1]_{\geq 0}
#ifndef USE_THREADING
    DRWN_TODO;
#else
    // prepare thread data
    const unsigned nJobs = std::min((unsigned)_posGraph.numImages(),
        std::max((unsigned)1, drwnThreadPool::MAX_THREADS));
    vector<set<unsigned> > imgIndxes(nJobs);
    for (unsigned imgIndx = 0; imgIndx < _posGraph.numImages(); imgIndx++) {
        imgIndxes[imgIndx % nJobs].insert(imgIndx);
    }

    // start threads
    drwnThreadPool threadPool(nJobs);
    threadPool.start();
    vector<drwnNNGraphSparseSubGradientThread *> jobs(nJobs);
    for (unsigned i = 0; i < nJobs; i++) {
        jobs[i] = new drwnNNGraphSparseSubGradientThread(this, &_G, &_updateCache, imgIndxes[i]);
        threadPool.addJob(jobs[i]);
    }
    threadPool.finish();

    for (unsigned i = 0; i < jobs.size(); i++) {
        delete jobs[i];
    }
    jobs.clear();
#endif

    G += _G;

    DRWN_FCN_TOC;
    //return 2.0 * _Lt * G;
    return (2.0 * _Lt * G).triangularView<Eigen::Upper>();
}
Example #25
// evaluate (log-likelihood)
void drwnGaussian::evaluate(const MatrixXd& x, VectorXd& p) const
{
    DRWN_ASSERT((x.cols() == _n) && (x.rows() == p.rows()));

    guaranteeInvSigma();
    for (int i = 0; i < x.rows(); i++) {
        VectorXd z = x.row(i).transpose() - _mu;
        p(i) = -0.5 * (z.transpose() * (*_invSigma) * z)(0) + _logZ;
    }
}
Example #26
void Atimesx(const Matrix & d, const Matrix & a, const Matrix & x, Matrix & Ax)
{
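    // y = A*x where A = diag(d) plus the vector a filling the last row and last column (off-diagonal entries only)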
    int n=d.size();
    Ax = (d.array()*x.array()).matrix();
    Ax.head(n-1) += x(n-1)*a.head(n-1);
    Ax(n-1) += a.head(n-1).dot(x.head(n-1));
}
Example #27
void testPCA()
{
    VectorXd mu = VectorXd::Zero(5);
    MatrixXd sigma(5, 5);
    sigma << 0.99100, 0.91660, 1.04247, 0.75531, 0.49842,
        0.91660, 2.00318, 1.81101, 0.51745, 1.00774,
        1.04247, 1.81101, 2.09615, 0.55339, 1.36639,
        0.75531, 0.51745, 0.55339, 0.67500, 0.15996,
        0.49842, 1.00774, 1.36639, 0.15996, 1.16316;

    drwnPCA pca(drwnSuffStats(1.0, mu, sigma));

    pca.dump();

    drwnXMLDoc xml;
    drwnXMLNode *node = drwnAddXMLChildNode(xml, "test", NULL, false);
    pca.save(*node);

    drwnXMLNode* child = node->first_node("translation");
    VectorXd translation;
    drwnXMLUtils::deserialize(*child, translation);
    cout << translation.transpose() << endl;

    child = node->first_node("projection");
    MatrixXd projection;
    drwnXMLUtils::deserialize(*child, projection);
    cout << projection << endl;

    sigma = projection * sigma * projection.transpose();
    drwnPCA pca2(drwnSuffStats(1.0, mu, sigma));

    xml.clear();
    node = drwnAddXMLChildNode(xml, "test", NULL, false);
    pca2.save(*node);

    child = node->first_node("translation");
    drwnXMLUtils::deserialize(*child, translation);
    cout << translation.transpose() << endl;

    child = node->first_node("projection");
    drwnXMLUtils::deserialize(*child, projection);
    cout << projection << endl;
}
Example #28
double ObjectiveMLSSparse::eval(const VectorXd& x) const
{
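  // assuming logsig(z) = log(1/(1+exp(-z))), this is the mean negative log-likelihood
  // -(1/m) * sum_i log sigmoid(-(x' * a_i) - b_i) over the m columns a_i of A_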
  VectorXd vals = -x.transpose() * (*A_);
  double obj = 0.0;
  for(int i = 0; i < vals.rows(); ++i)
    obj -= logsig(vals(i) - (*b_)(i));
  
  obj /= (double)A_->cols();
  return obj;
}
Example #29
void drwnGaussian::evaluate(const vector<vector<double> >& x, vector<double>& p) const
{
    DRWN_ASSERT(x.size() == p.size());

    guaranteeInvSigma();
    for (unsigned i = 0; i < x.size(); i++) {
        DRWN_ASSERT(x[i].size() == (unsigned)_n);
        VectorXd z = Eigen::Map<const VectorXd>(&x[i][0], _n) - _mu;
        p[i] = -0.5 * (z.transpose() * (*_invSigma) * z)(0) + _logZ;
    }
}
Example #30
// Implements Equation S12 in the Supplementary 
MatrixXd expected_dissimilarities(const MatrixXd &J, const MatrixXd &M, const VectorXd &W) {
  int n = J.rows();
  int o = J.cols();
  VectorXd JW = J*W.head(o);
  VectorXd u_n = VectorXd::Ones(n);
  MatrixXd Delta = J*resistance_distance(M,o)*J.transpose();
  Delta.noalias() += 0.5*JW*u_n.transpose();
  Delta.noalias() += 0.5*u_n*JW.transpose();
  Delta.diagonal() -= JW;
  return (Delta);
}