Example #1
  void eval(double t, Map<MatrixXd> &y) {
    if (t < m_breaks(0)) t = m_breaks(0);
    if (t > m_breaks(m_breaks.rows() - 1)) t = m_breaks(m_breaks.rows() - 1);

    Map<VectorXd> r(y.data(), m_d1 * m_d2);  // reshape

    // use cached index if possible
    int idx;
    if (m_cached_idx >= 0 &&
        m_cached_idx + 1 < m_breaks.rows()  // cached index with a valid right neighbor?
        && t < m_breaks[m_cached_idx + 1]   // still in same interval?
        && ((m_cached_idx == 0) ||
            (t >= m_breaks[m_cached_idx]))) {  // left up to -infinity
      idx = m_cached_idx;
    } else {
      // search for the correct interval
      idx = static_cast<int>(m_breaks.rows()) - 2;
      // todo: binary search
      for (int b = static_cast<int>(m_breaks.rows()) - 2; b >= 1;
           b--) {  // ignore first and last break
        if (t < m_breaks(b)) idx = b - 1;
      }
      m_cached_idx = idx;
    }

    int base = idx * m_d1 * m_d2;
    double local = t - m_breaks[idx];
    r = m_coefs.block(base, 0, m_d1 * m_d2, 1);
    for (int i = 2; i <= m_order; i++) {
      r = local * r + m_coefs.block(base, i - 1, m_d1 * m_d2, 1);
    }
  }
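A minimal sketch of the binary search flagged by the `todo` above, assuming `m_breaks` is sorted in ascending order (the helper name `find_interval` is illustrative, not part of the original class):

// Illustrative replacement for the linear scan; requires <algorithm>.
// Searches breaks(1) .. breaks(rows-2), matching the loop above.
int find_interval(const Eigen::VectorXd &breaks, double t) {
  const double *first = breaks.data() + 1;                 // ignore first break
  const double *last = breaks.data() + breaks.rows() - 1;  // ignore last break
  // first break strictly greater than t, i.e. the same predicate as t < breaks(b)
  const double *it = std::upper_bound(first, last, t);
  return static_cast<int>(it - breaks.data()) - 1;         // interval index
}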
Example #2
void Reservoir::linear_activation(VectorXd &inputs, VectorXd &results) {
    unsigned int num_elems = (inputs.rows() > inputs.cols()) ? inputs.rows() : inputs.cols();
    results.resize(num_elems, 1);
    for (unsigned int i=0; i<num_elems; i++) {
        results(i) = inputs(i);
    }
}
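A note on Example #2: Eigen assignment resizes the destination automatically, so the body could arguably be collapsed to a single copy; a minimal sketch:

void Reservoir::linear_activation(VectorXd &inputs, VectorXd &results) {
    results = inputs;  // Eigen resizes and copies in one step
}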
Example #3
void mexFunction(int nlhs, mxArray *plhs[],
    int nrhs, const mxArray *prhs[]) {
	// Note: the raw pointer from mxGetPr(prhs[0]) is unused; the object
	// handle is recovered directly from the mxArray below.
    
    VectorXd prediction;
    VectorXd prediction_variance;
    
    ObjectHandle<OESGP>* handle = 
            ObjectHandle<OESGP>::from_mex_handle(prhs[0]);
	handle->get_object().predict(prediction, prediction_variance);
    
    
    if (nlhs >= 1) {
        plhs[0] = mxCreateDoubleMatrix(prediction.rows(), 1, mxREAL);
        double *output = mxGetPr(plhs[0]);
        for (unsigned int i=0; i<prediction.rows(); i++) {
            output[i] = prediction[i];
        }
    } 
    
    if (nlhs >= 2) {
        plhs[1] = mxCreateDoubleMatrix(prediction_variance.rows(), 1, mxREAL);
        double *output = mxGetPr(plhs[1]);
        for (unsigned int i=0; i<prediction_variance.rows(); i++) {
            output[i] = prediction_variance[i];
        }        
    } 
}
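Example #3 dereferences prhs[0] without checking nrhs; a defensive sketch that could precede the handle lookup (the error text is illustrative):

    if (nrhs < 1) {
        mexErrMsgTxt("Expected at least one input argument (the OESGP handle).");
    }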
Example #4
MatrixXd Spectrogram::make_spectrogram(VectorXd signal, qint32 window_size = 0)
{
    if(window_size == 0)
        window_size = signal.rows()/4;

    Eigen::FFT<double> fft;
    MatrixXd tf_matrix = MatrixXd::Zero(signal.rows()/2, signal.rows());

    for(qint32 translate = 0; translate < signal.rows(); translate++)
    {
        VectorXd envelope = gauss_window(signal.rows(), window_size, translate);

        VectorXd windowed_sig = VectorXd::Zero(signal.rows());
        VectorXcd fft_win_sig = VectorXcd::Zero(signal.rows());

        VectorXd real_coeffs = VectorXd::Zero(signal.rows()/2);

        for(qint32 sample = 0; sample < signal.rows(); sample++)
            windowed_sig[sample] = signal[sample] * envelope[sample];

        fft.fwd(fft_win_sig, windowed_sig);

        for(qint32 i= 0; i<signal.rows()/2; i++)
        {
            qreal value = pow(abs(fft_win_sig[i]), 2.0);
            real_coeffs[i] = value;
        }

        tf_matrix.col(translate) = real_coeffs;
    }
    return tf_matrix;
}
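A small note on Example #4: for std::complex, std::norm returns the squared magnitude directly, so the inner loop could avoid the square root taken by abs; a sketch:

        for(qint32 i = 0; i < signal.rows()/2; i++)
            real_coeffs[i] = std::norm(fft_win_sig[i]);  // |z|^2 without the sqrt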
Example #5
void CLogistic::TrainOnevsAll(const Matrix<double, Dynamic, Dynamic, RowMajor>& X, const VectorXd& y_class, int num_labels, double lambda)
{
	/*
		trains multiple logistic regression classifiers and returns all
		the classifiers in a matrix classifier, where the i-th row of classifier 
		corresponds to the classifier for label i
	*/

	int m = X.rows();
	int n = X.cols();
	classifier = Matrix<double, Dynamic, Dynamic, RowMajor>::Zero(num_labels, n);

	// Iterate through all the classification classes
	for(int class_ndx = 0; class_ndx < num_labels; class_ndx++)
	{
		VectorXd theta  = VectorXd::Zero(n);

		// classify one vs. all
		VectorXd c = VectorXd::Zero(y_class.rows());
		for (int point_ndx = 0; point_ndx < y_class.rows(); point_ndx++)
			c(point_ndx) = (y_class(point_ndx) == class_ndx ? 1.0 : 0.0);

		GradientDescent(X, c, theta, lambda);

		// store the result inside classifier
		classifier.row(class_ndx) = theta.transpose();
	}
}
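The labeling loop in Example #5 could also be written as a single vectorized expression; a sketch, assuming y_class holds numeric class labels:

		// Equivalent one-vs-all labeling in one Eigen expression (illustrative):
		VectorXd c = (y_class.array() == double(class_ndx)).cast<double>().matrix();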
Example #6
MatrixXd HessianMEL::eval(const VectorXd& x) const
{
  MatrixXd hess = MatrixXd::Zero(x.rows(), x.rows());
  for(int i = 0; i < a_.cols(); ++i)
    hess += a_.col(i) * a_.col(i).transpose() * exp(x.dot(a_.col(i)) + b_(i));
  
  hess /= (double)a_.cols();
  return hess;
}
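For reference, Example #6 matches the analytic Hessian of f(x) = (1/m) * sum_i exp(a_i^T x + b_i), namely Hess f(x) = (1/m) * sum_i a_i a_i^T exp(a_i^T x + b_i), where the a_i are the columns of a_ and m = a_.cols().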
Example #7
// This function solves a linear system using a Cholesky
// decomposition. This implementation seems faster and more robust
// than scipy's spsolve.
VectorXd
solve_cholesky(const SparseMatrix &h,
               const VectorXd &b)
{
  assert(b.rows() == h.rows());

  Eigen::SimplicialLLT<SparseMatrix> solver(h);
  VectorXd r = solver.solve(b);
  assert(r.rows() == h.cols());

  return r;
}
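The asserts in Example #7 catch shape mismatches but not numerical failure; a hedged sketch of a variant that also checks the factorization status (the error message is illustrative; requires <stdexcept>):

  Eigen::SimplicialLLT<SparseMatrix> solver(h);
  if (solver.info() != Eigen::Success)
    throw std::runtime_error("Cholesky factorization failed; is h positive definite?");
  VectorXd r = solver.solve(b);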
Example #8
void IKoptions::setqdf(const VectorXd &lb, const VectorXd &ub) {
  if (lb.rows() != this->nq || ub.rows() != this->nq) {
    cerr << "qdf_lb and qdf_ub must be nq x 1 column vector" << endl;
  }
  for (int i = 0; i < this->nq; i++) {
    if (lb(i) > ub(i)) {
      cerr << "qdf_lb must be no larger than qdf_ub" << endl;
    }
  }
  this->qdf_lb = lb;
  this->qdf_ub = ub;
}
Example #9
 void MultivariateFNormalSufficientSparse::set_FM(const VectorXd& FM)
 {
   if (FM.rows() != FM_.rows() || FM.cols() != FM_.cols() || FM != FM_){
       if (FM.rows() != M_) {
           IMP_THROW("size mismatch for FM: got "
                   <<FM.rows() << " instead of " << M_, ModelException);
           }
       FM_=FM;
       IMP_LOG(TERSE, "MVNsparse:   set FM to new vector" << std::endl);
       compute_epsilon();
   }
 }
Example #10
        void ContactDynamics::updateTauStar() {
            int startRow = 0;
            for (int i = 0; i < getNumSkels(); i++) {
                if (mSkels[i]->getImmobileState())
                    continue;

                VectorXd tau = mSkels[i]->getExternalForces() + mSkels[i]->getInternalForces();
                VectorXd tauStar = (mSkels[i]->getMassMatrix() * mSkels[i]->getQDotVector()) - (mDt * (mSkels[i]->getCombinedVector() - tau));
                mTauStar.segment(startRow, tauStar.rows()) = tauStar;
                startRow += tauStar.rows();
            }
        }
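Note that Example #11 below is a near-identical variant of this function: for a column vector, mTauStar.segment(startRow, n) and mTauStar.block(startRow, 0, n, 1) address the same coefficients, so the two differ only in accessor names.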
Example #11
        void ConstraintDynamics::updateTauStar() {
            int startRow = 0;
            for (size_t i = 0; i < mSkels.size(); i++) {
                if (mSkels[i]->getImmobileState())
                    continue;

                VectorXd tau = mSkels[i]->getExternalForces() + mSkels[i]->getInternalForces();
                VectorXd tauStar = (mSkels[i]->getMassMatrix() * mSkels[i]->getPoseVelocity()) - (mDt * (mSkels[i]->getCombinedVector() - tau));
                mTauStar.block(startRow, 0, tauStar.rows(), 1) = tauStar;
                startRow += tauStar.rows();
            }
        }
Example #12
mfloat_t CVarianceDecomposition::getLMLgradGPkronSum() 
{
	mfloat_t out = 0;

	VectorXd grad = this->gp->LMLgrad()["covarc1"];
	for (muint_t i=0; i<(muint_t)grad.rows(); i++)	out += std::pow(grad(i),2);
	grad = this->gp->LMLgrad()["covarc2"];
	for (muint_t i=0; i<(muint_t)grad.rows(); i++)	out += std::pow(grad(i),2);
	// Square Root
	out = std::sqrt(out);

	return out;
}
Example #13
 void MultivariateFNormalSufficientSparse::set_Fbar(const VectorXd& Fbar)
 {
   if (Fbar.rows() != Fbar_.rows() || Fbar.cols() != Fbar_.cols()
           || Fbar != Fbar_){
       if (Fbar.rows() != M_) {
           IMP_THROW("size mismatch for Fbar: got "
                   << Fbar.rows() << " instead of " << M_, ModelException);
           }
       Fbar_=Fbar;
       IMP_LOG(TERSE, "MVNsparse:   set Fbar to new vector" << std::endl);
       compute_epsilon();
   }
 }
Example #14
 void MultivariateFNormalSufficient::set_FM(const VectorXd& FM)
 {
   if (FM.rows() != FM_.rows() || FM.cols() != FM_.cols() || FM != FM_){
       CHECK(FM.rows() == M_,
           "size mismatch for FM: got "
           <<FM.rows() << " instead of " << M_);
       FM_=FM;
       LOG( "MVN:   set FM to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_Peps_ = false;
   }
   flag_FM_ = true;
 }
Example #15
 void MultivariateFNormalSufficient::set_FM(const VectorXd& FM)
 {
   if (FM.rows() != FM_.rows() || FM.cols() != FM_.cols() || FM != FM_){
       if (FM.rows() != M_) {
           IMP_THROW("size mismatch for FM: got "
                   <<FM.rows() << " instead of " << M_, ModelException);
           }
       FM_=FM;
       IMP_LOG(TERSE, "MVN:   set FM to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_Peps_ = false;
   }
   flag_FM_ = true;
 }
Example #16
void Spectral::generate_kernel_matrix(){

	// Fill kernel matrix
	K.resize(X.rows(),X.rows());
	for(unsigned int i = 0; i < X.rows(); i++){
		for(unsigned int j = i; j < X.rows(); j++){
			K(i,j) = K(j,i) = kernel(X.row(i),X.row(j));
			//if(i == 0) cout << K(i,j) << " ";
		}	
	}

	// Normalise kernel matrix	
	VectorXd d = K.rowwise().sum();
	for(unsigned int i = 0; i < d.rows(); i++){
		d(i) = 1.0/sqrt(d(i));
	}
	auto F = d.asDiagonal();
	MatrixXd l = (K * F);
	for(unsigned int i = 0; i < l.rows(); i++){
		for(unsigned int j = 0; j < l.cols(); j++){
			l(i,j) = l(i,j) * d(i);
		}
	}		
	K = l;

}
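The trailing loops in Example #16 implement the symmetric normalization K <- D^{-1/2} K D^{-1/2}; a more compact Eigen sketch (written through a temporary to sidestep any aliasing concern):

	VectorXd d = K.rowwise().sum().cwiseSqrt().cwiseInverse();  // 1/sqrt(row sums)
	MatrixXd Kn = d.asDiagonal() * K * d.asDiagonal();
	K = Kn;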
Example #17
drwnSuffStats::drwnSuffStats(double count, VectorXd& sum, MatrixXd& sum2) :
    _pairStats(DRWN_PSS_FULL), _count(count), _sum(sum), _sum2(sum2)
{
    DRWN_ASSERT(_sum2.rows() == _sum2.cols());
    DRWN_ASSERT(_sum2.rows() == _sum.rows());
    _n = sum.rows();
}
Example #18
 void MultivariateFNormalSufficient::set_Fbar(const VectorXd& Fbar)
 {
   if (Fbar.rows() != Fbar_.rows() || Fbar.cols() != Fbar_.cols()
           || Fbar != Fbar_){
       CHECK(Fbar.rows() == M_,
           "size mismatch for Fbar: got "
                   << Fbar.rows() << " instead of " << M_);
       Fbar_=Fbar;
       LOG( "MVN:   set Fbar to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_W_ = false;
       flag_PW_ = false;
       flag_Peps_ = false;
   }
   flag_Fbar_ = true;
 }
Example #19
void UKF::UpdateState(const VectorXd &z, const VectorXd &z_pred, const MatrixXd &S, const MatrixXd &Zsig) {

  int n_z = z_pred.rows();

  // calculate cross correlation
  MatrixXd Tc = MatrixXd(n_x_, n_z);
  Tc.fill(0.0);
  for (int i = 0; i < 2 * n_aug_ + 1; i++) {
    VectorXd z_diff = Zsig.col(i) - z_pred;
    while (z_diff(1) >  M_PI) z_diff(1) -= 2. * M_PI;
    while (z_diff(1) < -M_PI) z_diff(1) += 2. * M_PI;
    VectorXd x_diff = Xsig_pred_.col(i) - x_;
    while (x_diff(3) >  M_PI) x_diff(3) -= 2. * M_PI;
    while (x_diff(3) < -M_PI) x_diff(3) += 2. * M_PI;
    Tc = Tc + weights_(i) * x_diff * z_diff.transpose();
  }

  MatrixXd K = Tc * S.inverse();  // Kalman gain K
  VectorXd z_diff = z - z_pred;

  while (z_diff(1) >  M_PI) z_diff(1) -= 2. * M_PI;
  while (z_diff(1) < -M_PI) z_diff(1) += 2. * M_PI;

  //update
  x_ = x_ + K * z_diff;
  P_ = P_ - K * S * K.transpose();
}
Example #20
MultivariateFNormalSufficientSparse::MultivariateFNormalSufficientSparse(
        const VectorXd& Fbar, double JF, const VectorXd& FM, int Nobs,
        const SparseMatrix<double>& W, const SparseMatrix<double>& Sigma,
        cholmod_common *c)
        : Object("Multivariate Normal distribution %1%")
{
        c_ = c;
        W_=nullptr;
        Sigma_=nullptr;
        P_=nullptr;
        PW_=nullptr;
        epsilon_=nullptr;
        L_=nullptr;
        N_=Nobs;
        M_=Fbar.rows();
        IMP_LOG(TERSE, "MVNsparse: sufficient statistics init with N=" << N_
                << " and M=" << M_ << std::endl);
        IMP_USAGE_CHECK( N_ > 0,
            "please provide at least one observation per dimension");
        IMP_USAGE_CHECK( M_ > 0,
            "please provide at least one variable");
        FM_=FM;
        set_Fbar(Fbar); //also computes epsilon
        set_W(W);
        set_JF(JF);
        set_Sigma(Sigma);
}
Example #21
 double Model::score(const VectorXd& psi) const
 {
   ROS_ASSERT(psi.rows() == eweights_.rows() + nweights_.rows());
   double val = psi.head(eweights_.rows()).dot(eweights_);
   val += psi.tail(nweights_.rows()).dot(nweights_);
   return val;
 }
Example #22
void CGppe::Approx_CGppe_Laplace(const VectorXd & theta_x, const VectorXd& theta_t, const double& sigma, const MatrixXd& t, const MatrixXd &x, const TypePair & all_pairs,
                               const VectorXd & idx_global, const VectorXd& idx_global_1, const VectorXd& idx_global_2,
                               const VectorXd& ind_t, const VectorXd& ind_x, int M, int N)
{
    //Parameters function initialization
    double eps = 1E-6, psi_new, psi_old;
    M = all_pairs.rows();
    int n = M * N;
    f = VectorXd::Zero(n);
    VectorXd fvis = VectorXd::Zero(idx_global.rows());
    VectorXd deriv;
    double loglike = 0;

    covfunc_t->SetTheta(theta_t);
    covfunc_x->SetTheta(theta_x);

    MatrixXd Kt = covfunc_t->ComputeGrandMatrix(t);
    Kx = covfunc_x->ComputeGrandMatrix(x);

    MatrixXd K = GetMat(Kt, ind_t, ind_t).array() * GetMat(Kx, ind_x, ind_x).array();

    loglike = log_likelihood( sigma, all_pairs, idx_global_1, idx_global_2, M, N);
    Kinv = K.inverse();
    psi_new = loglike - 0.5 * fvis.transpose() * Kinv * fvis;
    psi_old = -std::numeric_limits<double>::infinity();  // ensure the loop runs at least once (requires <limits>)
    while ((psi_new - psi_old) > eps)
    {
        psi_old = psi_new;
        deriv = deriv_log_likelihood_CGppe_fast( sigma, all_pairs, idx_global_1, idx_global_2, M, N);
        W = -deriv2_log_likelihood_CGppe_fast(sigma, all_pairs, idx_global_1, idx_global_2, M, N);
        W = GetMat(W, idx_global, idx_global);
        llt.compute(W + Kinv);
        L = llt.matrixL(); //no need to extract the triangular matrix here
        fvis = llt.solve(GetVec(deriv, idx_global) + W * fvis);
        for (int w = 0;w < idx_global.rows();w++)
        {
            f(idx_global(w)) = fvis(w);
        }
        loglike = log_likelihood( sigma, all_pairs, idx_global_1, idx_global_2, M, N);
        psi_new = loglike - 0.5 * fvis.transpose() * Kinv * fvis;
    }
}
Example #23
mfloat_t CVarianceDecomposition::getLMLgradGPbase() 
{
	if (!this->is_init)
		throw CLimixException("CVarianceDecomposition: the term is not initialised!");
	mfloat_t out = 0;
	// Squared Norm of LMLgrad["covar"]
	VectorXd grad = this->gp->LMLgrad()["covar"];
	VectorXd filter = this->gp->getParamMask()["covar"];
	for (muint_t i=0; i<(muint_t)grad.rows(); i++)
		if (filter(i)==1)	out +=std::pow(grad(i),2);
	// Squared Norm of LMLgrad["dataTerm"]
	grad = this->gp->LMLgrad()["dataTerm"];
	for (muint_t i=0; i<(muint_t)grad.rows(); i++)	out +=std::pow(grad(i),2);
	// Square Root
	out = std::sqrt(out);
	return out;
}
Example #24
Matrix2Xd normalizePoints( const Matrix2Xd& points, const VectorXd& indices,
    Ref<Matrix3d> T ) {
  Matrix2Xd pointsOut( 2, indices.rows() );

  // Calculate center
  Vector2d center(0,0);
  for ( int j = 0; j < indices.rows(); j++ ) {
    int i = static_cast<int>(indices(j));  // indices stores doubles
    center += points.col(i);
    pointsOut.col(j) = points.col(i);
  }
  center /= indices.rows();

  // Calculate mean distance from center
  double meanDistanceFromCenter = 0;
  for ( int i = 0; i < pointsOut.cols(); i++ ) {
    pointsOut.col(i) -= center;
    meanDistanceFromCenter += pointsOut.col(i).norm();
  }
  meanDistanceFromCenter /= pointsOut.cols();

  // Calculate scale
  double scale;
  if ( meanDistanceFromCenter > 0 ) {
    scale = sqrt(2) / meanDistanceFromCenter;
  } else {
    scale = 1;
  }

  // Compute matrix to translate and scale points
  T << scale, 0, -scale*center(0),
       0, scale, -scale*center(1),
       0,     0,                1;

  // scale points
  if ( points.cols() > 2 ) {
    pointsOut = T.block<2,2>(0,0) * points;
    for( int i = 0; i < pointsOut.cols(); i++ ) {
      pointsOut.col(i) += T.block<2,1>(0,2);
    }
  } else {
    pointsOut *= scale;
  }

  return pointsOut;
}
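A hedged usage sketch for Example #24 (the index values and point data are illustrative): the returned matrix T maps a raw point into the normalized frame via homogeneous coordinates.

Matrix3d T;
VectorXd idx(3);
idx << 0, 1, 2;                                   // illustrative index set
Matrix2Xd normalized = normalizePoints(points, idx, T);
// T reproduces the normalization of any selected point:
Vector2d check = (T * points.col(0).homogeneous()).hnormalized();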
Example #25
double drwnGaussian::evaluateSingle(const VectorXd& x) const
{
    DRWN_ASSERT(x.rows() == _n);
    guaranteeInvSigma();

    VectorXd z = x - _mu;
    return -0.5 * (z.transpose() * (*_invSigma) * z)(0) + _logZ;
}
Example #26
 void MultivariateFNormalSufficient::set_Fbar(const VectorXd& Fbar)
 {
   if (Fbar.rows() != Fbar_.rows() || Fbar.cols() != Fbar_.cols()
           || Fbar != Fbar_){
       if (Fbar.rows() != M_) {
           IMP_THROW("size mismatch for Fbar: got "
                   << Fbar.rows() << " instead of " << M_, ModelException);
           }
       Fbar_=Fbar;
       IMP_LOG(TERSE, "MVN:   set Fbar to new vector" << std::endl);
       flag_epsilon_ = false;
       flag_W_ = false;
       flag_PW_ = false;
       flag_Peps_ = false;
   }
   flag_Fbar_ = true;
 }
Example #27
void Window::setState(const VectorXd &State) {
    if (State.rows() != this->total_window_size) {
        throw OTLException("Sorry, the feature you want to set is not the right length");
    }
    //set the State
    this->window = State;
    return;
}
Example #28
	void setup(VectorXd const & xvals, VectorXd const & yvals)
	{
		// x,y data must be of the same size
		assert(xvals.rows()==yvals.rows());
		// There must be at least two data points
		assert(xvals.rows()>1);

		// Store x,y data points
		m_x = xvals;
		m_y = yvals;
		m_N = xvals.rows();

		// Check data points are sorted in x
		for(Index i=1;i<m_N;i++) {assert(m_x(i) > m_x(i-1));}

		m_ready = true;
	}
Example #29
MatrixXd HessianMLSSparse::eval(const VectorXd& x) const
{
  MatrixXd hess = MatrixXd::Zero(x.rows(), x.rows());

  VectorXd vals = x.transpose() * (*A_);
  for(int i = 0; i < A_->cols(); ++i) {
    double mult = sigmoid(vals(i) + b_->coeffRef(i)) * sigmoid(-vals(i) - b_->coeffRef(i));
    for(Eigen::SparseMatrix<double, Eigen::ColMajor>::InnerIterator it1(*A_, i); it1; ++it1) {
      for(Eigen::SparseMatrix<double, Eigen::ColMajor>::InnerIterator it2(*A_, i); it2; ++it2) {  
        hess(it1.row(), it2.row()) += it1.value() * it2.value() * mult;
      }
    }
  }
    
  hess /= (double)A_->cols();
  return hess;
}
Example #30
double standard_deviation(const VectorXd& xx)
{
    const double mean = xx.mean();
    double accum = 0;
    for (int kk=0, kk_max=xx.rows(); kk<kk_max; kk++)
        accum += (xx(kk)-mean)*(xx(kk)-mean);
    return sqrt(accum/(xx.size()-1));  // sample standard deviation
}
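An equivalent Eigen one-liner for Example #30 (a sketch; assumes xx.size() > 1 and <cmath>):

double sd = std::sqrt((xx.array() - xx.mean()).square().sum() / (xx.size() - 1));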