ConvexObjectivePtr CostFromFunc::convex(const vector<double>& xin) {
  VectorXd x = getVec(xin, vars_);

  ConvexObjectivePtr out(new ConvexObjective());
  QuadExpr quad;
  if (!full_hessian_) {
    double val;
    VectorXd grad,hess;
    calcGradAndDiagHess(*f_, x, epsilon_, val, grad, hess);
    hess = hess.cwiseMax(VectorXd::Zero(hess.size()));
    //QuadExpr& quad = out->quad_;
    quad.affexpr.constant = val - grad.dot(x) + .5*x.dot(hess.cwiseProduct(x));
    quad.affexpr.vars = vars_;
    quad.affexpr.coeffs = toDblVec(grad - hess.cwiseProduct(x));
    quad.vars1 = vars_;
    quad.vars2 = vars_;
    quad.coeffs = toDblVec(hess*.5);
  }
  else {
    double val;
    VectorXd grad;
    MatrixXd hess;
    calcGradHess(f_, x, epsilon_, val, grad, hess);

    MatrixXd pos_hess = MatrixXd::Zero(x.size(), x.size());
    Eigen::SelfAdjointEigenSolver<MatrixXd> es(hess);
    VectorXd eigvals = es.eigenvalues();
    MatrixXd eigvecs = es.eigenvectors();
    for (size_t i=0, end = x.size(); i != end; ++i) { //tricky --- eigen size() is signed
      if (eigvals(i) > 0) pos_hess += eigvals(i) * eigvecs.col(i) * eigvecs.col(i).transpose();
    }

    //QuadExpr& quad = out->quad_;
    quad.affexpr.constant = val - grad.dot(x) + .5*x.dot(pos_hess * x);
    quad.affexpr.vars = vars_;
    quad.affexpr.coeffs = toDblVec(grad - pos_hess * x);

    int nquadterms = (x.size() * (x.size()-1))/2;
    quad.coeffs.reserve(nquadterms);
    quad.vars1.reserve(nquadterms);
    quad.vars2.reserve(nquadterms);
    for (size_t i=0, end = x.size(); i != end; ++i) { //tricky --- eigen size() is signed
      quad.vars1.push_back(vars_[i]);
      quad.vars2.push_back(vars_[i]);
      quad.coeffs.push_back(pos_hess(i,i)/2);
      for (size_t j=i+1; j != end; ++j) {  //tricky --- eigen size() is signed
        quad.vars1.push_back(vars_[i]);
        quad.vars2.push_back(vars_[j]);
        quad.coeffs.push_back(pos_hess(i,j));
      }
    }
  }
  out->addQuadExpr(quad);

  return out;
}
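The full-Hessian branch above convexifies the cost by clamping negative eigenvalues of the numerical Hessian to zero. A minimal standalone sketch of that projection onto the PSD cone (the helper name projectToPSD is ours, not part of the library):

#include <Eigen/Dense>

// Clamp negative eigenvalues to zero and rebuild the matrix,
// mirroring the eigen-decomposition loop in convex() above.
Eigen::MatrixXd projectToPSD(const Eigen::MatrixXd& H) {
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(H);
  Eigen::VectorXd eigvals = es.eigenvalues().cwiseMax(0.0);
  return es.eigenvectors() * eigvals.asDiagonal() * es.eigenvectors().transpose();
}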
Example #2
bool ConjugateGradientType::improve_energy(bool verbose) {
  iter++;
  //printf("I am running ConjugateGradient::improve_energy\n");
  const double E0 = energy();
  if (E0 != E0) {
    // There is no point continuing, since we're starting with a NaN!
    // So we may as well quit here.
    if (verbose) {
      printf("The initial energy is a NaN, so I'm quitting early from ConjugateGradientType::improve_energy.\n");
      f.print_summary("has nan:", E0);
      fflush(stdout);
    }
    return false;
  }
  double gdotd;
  {
    const VectorXd g = -grad();
    // Let's immediately free the cached gradient stored internally!
    invalidate_cache();

    // Note: my notation vaguely follows that of
    // [wikipedia](http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method).
    // I use the Polak-Ribiere method, with automatic direction reset.
    // Note that we could save some memory by using Fletcher-Reeves, and
    // it seems worth implementing that as an option for
    // memory-constrained problems (then we wouldn't need to store oldgrad).
    double beta = g.dot(g - oldgrad)/oldgradsqr;
    oldgrad = g;
    if (beta < 0 || beta != beta || oldgradsqr == 0) beta = 0;
    oldgradsqr = oldgrad.dot(oldgrad);
    direction = g + beta*direction;
    gdotd = oldgrad.dot(direction);
    if (gdotd < 0) {
      direction = oldgrad; // If our direction is uphill, reset to gradient.
      if (verbose) printf("reset to gradient...\n");
      gdotd = oldgrad.dot(direction);
    }
  }

  Minimizer lm = linmin(f, gd, kT, x, direction, -gdotd, &step);
  for (int i=0; i<100 && lm.improve_energy(verbose); i++) {
    if (verbose) lm.print_info("\t");
  }
  if (verbose) {
    //lm->print_info();
    print_info();
    printf("grad*dir/oldgrad*dir = %g\n", grad().dot(direction)/gdotd);
  }
  return (energy() < E0);
}
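The comment above suggests Fletcher-Reeves as a memory-saving alternative to Polak-Ribiere, since it needs only the previous squared gradient norm rather than the stored old gradient. A hedged sketch of that update with the same safeguards (a free function, not part of the class):

#include <Eigen/Dense>
using Eigen::VectorXd;

// Fletcher-Reeves: beta = |g_new|^2 / |g_old|^2.  Here g is the negative
// gradient (as in improve_energy above); reset to steepest descent on NaN
// or when the new direction is not a descent direction.
void fletcherReevesUpdate(const VectorXd& g, VectorXd& direction, double& oldgradsqr) {
  const double gsqr = g.dot(g);
  double beta = (oldgradsqr > 0) ? gsqr / oldgradsqr : 0.0;
  if (beta != beta) beta = 0.0;             // NaN guard
  direction = g + beta * direction;
  if (g.dot(direction) < 0) direction = g;  // uphill: reset to gradient
  oldgradsqr = gsqr;
}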
Example #3
void SOGP::predict(const VectorXd &state, VectorXd &prediction,
                   VectorXd &prediction_variance) {
    //check if we have initialised the system
    if (!this->initialized) {
        throw OTLException("SOGP not yet initialised");
    }

    double kstar = kernel->eval(state,state);

    //check if we have not been trained yet
    if (this->current_size == 0) {
        prediction = VectorXd::Zero(this->output_dim);
        prediction_variance = VectorXd::Ones(this->output_dim)*
                (kstar + this->noise);
        return;
    }

    VectorXd k;
    kernel->eval(state, this->basis_vectors, k);
    //std::cout << "K: \n" << k << std::endl;
    //std::cout << "alpha: \n" << this->alpha.block(0,0,this->current_size, this->output_dim) << std::endl;

    prediction = k.transpose() *this->alpha.block(0,0,this->current_size, this->output_dim);
    prediction_variance = VectorXd::Ones(this->output_dim)*
            (k.dot(this->C.block(0,0, this->current_size, this->current_size)*k)
             + kstar + this->noise);

    return;
}
Example #4
double cosangle(const VectorXd& a, const VectorXd& b) {
    if (a.norm() < 1e-6 || b.norm() < 1e-6) {
        return 1;
    } else {
        return a.dot(b) / (a.norm() * b.norm());
    }
}
Example #5
VectorXd Optimizer::compute_dog_leg(double alpha, const VectorXd& h_sd,
    const VectorXd& h_gn, double delta, double& gain_ratio_denominator) {
  if (h_gn.norm() <= delta) {
    gain_ratio_denominator = current_SSE_at_linpoint;
    return h_gn;
  }

  double h_sd_norm = h_sd.norm();

  if ((alpha * h_sd_norm) >= delta) {
    gain_ratio_denominator = delta * (2 * alpha * h_sd_norm - delta)
        / (2 * alpha);
    return (delta / h_sd_norm) * h_sd;
  } else {
    // complicated case: calculate intersection of trust region with
    // line between Gauss-Newton and steepest descent solutions
    VectorXd a = alpha * h_sd;
    VectorXd b = h_gn;
    double c = a.dot(b - a);
    double b_a_norm2 = (b - a).squaredNorm();
    double a_norm2 = a.squaredNorm();
    double delta2 = delta * delta;
    double sqrt_term = sqrt(c * c + b_a_norm2 * (delta2 - a_norm2));
    double beta;
    if (c <= 0) {
      beta = (-c + sqrt_term) / b_a_norm2;
    } else {
      beta = (delta2 - a_norm2) / (c + sqrt_term);
    }

    gain_ratio_denominator = .5 * alpha * (1 - beta) * (1 - beta) * h_sd_norm
        * h_sd_norm + beta * (2 - beta) * current_SSE_at_linpoint;
    return (alpha * h_sd + beta * (h_gn - alpha * h_sd));
  }
}
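The final branch above computes the beta at which the segment from the scaled steepest-descent step a to the Gauss-Newton step b crosses the trust-region boundary, i.e. ||a + beta*(b - a)|| = delta. A small self-contained check of that algebra (not part of the class above):

#include <Eigen/Dense>
#include <cmath>
#include <cstdio>
using Eigen::VectorXd;

int main() {
  VectorXd a(2), b(2);
  a << 0.3, 0.1;                 // alpha * h_sd, inside the trust region
  b << 2.0, 1.0;                 // h_gn, outside the trust region
  const double delta = 1.0;

  const double c = a.dot(b - a);
  const double b_a_norm2 = (b - a).squaredNorm();
  const double a_norm2 = a.squaredNorm();
  const double sqrt_term = std::sqrt(c * c + b_a_norm2 * (delta * delta - a_norm2));
  const double beta = (c <= 0) ? (-c + sqrt_term) / b_a_norm2
                               : (delta * delta - a_norm2) / (c + sqrt_term);

  VectorXd h_dl = a + beta * (b - a);
  std::printf("||h_dl|| = %g (should equal delta = %g)\n", h_dl.norm(), delta);
  return 0;
}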
  bool NewtonMinimizerGradHessian::zoom(const double& wolfe1, const double& wolfe2, double& lo, double& hi, VectorXd& try_grad, VectorXd& direction, double& grad0_dir, double& val0, double& val_lo, VectorXd& init_params, VectorXd& try_params, unsigned int max_iter, double& result)
  {
    double tryval = val0;
    double alpha = tryval;
    double temp1 = tryval;
    double temp2 = tryval;
    double temp3 = tryval;
    bool bounds = true;
    
    unsigned int counter = 1;
    while(true)
    {
      alpha = lo + hi;
      alpha*=0.5;
      try_params = direction;
      try_params *= alpha;
      try_params += init_params;
      bounds = function->calcValGrad(try_params, tryval, try_grad);
      for(unsigned int i=0;i<fixparameter.size();++i)
      {
        if(fixparameter[i] != 0)
        {
          try_grad[i] = 0.;
        }
      }
      temp1 = wolfe1*alpha;
      temp1 *= grad0_dir;
      temp1 += val0;
      
      if( ( tryval > temp1 ) || ( tryval >= val_lo ) || (bounds == false) )
      {
//         if( (fabs((tryval - val_lo)/(tryval)) < 1.0e-4) && (tryval < val_lo) ){result = alpha;return true;}
        if( (fabs((tryval - val_lo)/(tryval)) < 1.0e-4) ){result = alpha;return true;}
        hi = alpha;
      }
      else
      {
        temp1 = try_grad.dot(direction);
        temp2 = -wolfe2*grad0_dir;
        temp3 = fabs(temp1);
        
        if( temp3 <= fabs(temp2) )
        {
          result = alpha;
          return bounds;
        }
        temp3 = hi - lo;
        temp1 *= temp3;
        if( temp1 >= 0.)
        {
          hi = lo;
        }
        lo = alpha;
        val_lo = tryval;
      }
      counter++;
      if(counter > max_iter){return false;}
    }
  }
Example #7
AffExpr affFromValGrad(double y, const VectorXd& x, const VectorXd& dydx, const VarVector& vars) {
    AffExpr aff;
    aff.constant = y - dydx.dot(x);
    aff.coeffs = toDblVec(dydx);
    aff.vars = vars;
    aff = cleanupAff(aff);
    return aff;
}
Example #8
double ObjectiveMLS::eval(const VectorXd& x) const
{ 
  double obj = 0.0;
  for(int i = 0; i < a_.cols(); ++i)
    obj -= logsig(-x.dot(a_.col(i)) - b_(i));
  
  obj /= (double)a_.cols();
  return obj;
}
Example #9
MatrixXd HessianMEL::eval(const VectorXd& x) const
{
  MatrixXd hess = MatrixXd::Zero(x.rows(), x.rows());
  for(int i = 0; i < a_.cols(); ++i)
    hess += a_.col(i) * a_.col(i).transpose() * exp(x.dot(a_.col(i)) + b_(i));
  
  hess /= (double)a_.cols();
  return hess;
}
Example #10
VectorXd GradientMEL::eval(const VectorXd& x) const
{
  VectorXd grad = VectorXd::Zero(x.rows());
  for(int i = 0; i < a_.cols(); ++i)
    grad += a_.col(i) * exp(x.dot(a_.col(i)) + b_(i));
  
  grad /= (double)a_.cols();
  return grad;
}
Example #11
double ObjectiveMEL::eval(const VectorXd& x) const
{
  double obj = 0.0;
  for(int i = 0; i < a_.cols(); ++i)
    obj += exp(x.dot(a_.col(i)) + b_(i));
  
  obj /= (double)a_.cols();
  return obj;
}
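HessianMEL, GradientMEL and ObjectiveMEL above are meant to be consistent value/gradient/Hessian evaluations of the same function; a central-difference check against the objective is a quick way to catch mismatches. A generic hedged sketch (the std::function interface is ours):

#include <Eigen/Dense>
#include <functional>
using Eigen::VectorXd;

// Central-difference gradient of obj at x; compare it to an analytic gradient
// (e.g. GradientMEL::eval) with (numerical - analytic).norm().
VectorXd numericalGradient(const std::function<double(const VectorXd&)>& obj,
                           const VectorXd& x, double eps = 1e-6) {
  VectorXd g(x.size());
  VectorXd xp = x, xm = x;
  for (int i = 0; i < x.size(); ++i) {
    xp(i) += eps;
    xm(i) -= eps;
    g(i) = (obj(xp) - obj(xm)) / (2.0 * eps);
    xp(i) = x(i);
    xm(i) = x(i);
  }
  return g;
}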
Example #12
VectorXd GradientMLSNoCopy::eval(const VectorXd& x) const
{
  VectorXd grad = VectorXd::Zero(x.rows());
  for(int i = 0; i < A_->cols(); ++i)
    grad += A_->col(i) * sigmoid(x.dot(A_->col(i)) + b_->coeff(i));
  
  grad /= (double)A_->cols();
  return grad;
}
Example #13
double Triangle<ConcreteShape>::integrateField(const VectorXd &field) {

  double val = 0;
  Matrix<double,2,2> inverse_Jacobian;
  double detJ;
  std::tie(inverse_Jacobian,detJ) = ConcreteShape::inverseJacobian(mVtxCrd);
  val = detJ*field.dot(mIntegrationWeights);
  return val;

}
Example #14
double Spectral::kernel(const VectorXd& a, const VectorXd& b){

	switch(kernel_type){
	    case 2  :
	    	return(pow(a.dot(b)+constant,order));
	    default : 
	    	return(exp(-gamma*((a-b).squaredNorm())));
	}

}
Example #15
void denseFisherMetric::bounceP(const VectorXd& normal)
{
    
    mAuxVector = mGL.solve(normal);
    
    double C = -2.0 * mP.dot(mAuxVector);
    C /= normal.dot(mAuxVector);
    
    mP += C * normal;
    
}
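bounceP above implements the reflection p <- p - 2 (p'G^-1 n / n'G^-1 n) n used when a trajectory hits a constraint; it leaves the kinetic term p'G^-1 p unchanged. A small standalone check of that invariant with an explicit SPD metric (the names and the LDLT solve are ours):

#include <Eigen/Dense>
#include <cstdio>
using Eigen::MatrixXd;
using Eigen::VectorXd;

int main() {
  MatrixXd A = MatrixXd::Random(3, 3);
  MatrixXd G = A * A.transpose() + 3.0 * MatrixXd::Identity(3, 3); // SPD metric
  VectorXd p = VectorXd::Random(3), n = VectorXd::Random(3);

  VectorXd aux = G.ldlt().solve(n);                 // G^{-1} n
  double C = -2.0 * p.dot(aux) / n.dot(aux);
  VectorXd p_new = p + C * n;

  VectorXd Ginv_p  = G.ldlt().solve(p);
  VectorXd Ginv_pn = G.ldlt().solve(p_new);
  std::printf("p^T G^-1 p before: %g  after: %g\n", p.dot(Ginv_p), p_new.dot(Ginv_pn));
  return 0;
}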
Example #16
 void PositionConstraint::fillObjGrad(std::vector<double>& dG) {
     VectorXd dP = evalCon();
     for(int dofIndex = 0; dofIndex < mNode->getNumDependentDofs(); dofIndex++) {
         int i = mNode->getDependentDof(dofIndex);            
         const Var* v = mVariables[i];
         double w = v->mWeight;
         VectorXd J = xformHom(mNode->getDerivWorldTransform(dofIndex), mOffset);
         J /= w;
         dG[i] += 2 * dP.dot(J);
     }
 }
double dEda(const VectorXd &XY,
            const VectorXd &s0,
            const vector<spring> &springlist,
            const vector<vector<int>> &springpairs,
            double kappa,
            const double g11,
            const double g12,
            const double g22)
{  
    double out;
    out=s0.dot(HarmonicGradient(springlist,XY,g11,g12,g22)+BendingGrad(springpairs,springlist,XY,kappa,g11,g12,g22));
    return out;
}
NaiveBayesClassifier::NaiveBayesClassifier(const vector<VectorXd>& x,
                                           const vector<int>& y) :
k(0), d(0), p(), mu(), var() {
    // n is the number of points
    unsigned n = x.size();
    assert(n > 0);
    assert(y.size() == n);

    // d is the dimensionality
    d = x[0].size();
    for (const VectorXd& v : x)
        assert(v.size() == d);

    // number of classes
    k = *(std::max_element(y.cbegin(), y.cend())) + 1;

    for (int i = 0; i < k; ++i) {
        // find all points in class i
        vector<VectorXd> xi;
        for (unsigned j = 0; j < n; ++j)
            if (y[j] == i)
                xi.push_back(x[j]);

        // ni is the number of points in class i
        int ni = xi.size();
        assert(ni > 0);

        // prior probability
        p.push_back((double)ni / (double)n);

        // class mean
        VectorXd m = VectorXd::Zero(d);
        for (const VectorXd& v : xi)
            m += v;
        m /= ni;
        mu.push_back(m);

        // centered data matrix
        MatrixXd z(d, ni);
        for (int j = 0; j < ni; ++j)
            z.col(j) = xi[j] - m;

        // class-specific attribute variances
        VectorXd variance(d);
        for (int j = 0; j < d; ++j) {
            VectorXd zj = z.row(j);
            variance(j) = (1.0/ni) * zj.dot(zj);
        }
        var.push_back(variance);
    }
}
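The constructor above only estimates the priors p, class means mu and per-dimension variances var; prediction is not shown. A minimal hedged sketch of Gaussian naive-Bayes classification using those quantities (a free function with our own names, assuming nonzero variances):

#include <Eigen/Dense>
#include <vector>
#include <cmath>
#include <limits>
using Eigen::VectorXd;
using std::vector;

// Return argmax_i [ log p_i + sum_j log N(v_j; mu_i(j), var_i(j)) ].
int predictNaiveBayes(const VectorXd& v, const vector<double>& p,
                      const vector<VectorXd>& mu, const vector<VectorXd>& var) {
  const double two_pi = 6.283185307179586;
  int best = 0;
  double best_score = -std::numeric_limits<double>::infinity();
  for (size_t i = 0; i < p.size(); ++i) {
    double score = std::log(p[i]);
    for (int j = 0; j < v.size(); ++j) {
      const double diff = v(j) - mu[i](j);
      score += -0.5 * std::log(two_pi * var[i](j)) - 0.5 * diff * diff / var[i](j);
    }
    if (score > best_score) { best_score = score; best = (int)i; }
  }
  return best;
}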
double BacktrackingLineSearch::step_size(std::function<double (const VectorXd&)> f,
                                         const VectorXd &dfx,
                                         const VectorXd &x,
                                         const VectorXd &direction) const {
  auto m = direction.dot(dfx);
  auto t = -_c * m;
  auto fx = f(x);
  auto step = _alpha;
  for (unsigned int i = 0; i < _niter; i++) {
    VectorXd new_x = x + direction*step;
    if ((fx - f(new_x)) > step * t) {
      break;
    }
    step = step * _tau;
  }
  return step;
}
Example #20
void doConjStep(VectorXd &XY,
                VectorXd &s0,
                VectorXd &gradE,
                const vector<spring> &springlist,
                const vector<vector<int>> &springpairs,
		int bendingOn,
                double kappa,
                int conjsteps,
                double g11,
                double g12,
                double g22)
{
    double a1=0.0;
    double a2=1.0;
    double betan;
    VectorXd gradEn(gradE.size());
    VectorXd sn(s0.size());
    functor network(XY,s0,springlist,springpairs,kappa,g11,g12,g22);
    if(doBracketfind(a1,a2,network))
    {
        // double an=doFalsePosition(network,a1,a2);
        //double an=Ridder(network,a1,a2);
        double an=Brent(network,a1,a2,1e-12);
        //Update the positions.
        XY=XY+an*s0;
        if(bendingOn==0){
            gradEn=HarmonicGradient(springlist,XY,g11,g12,g22);
        } else{
            gradEn=HarmonicGradient(springlist,XY,g11,g12,g22)+BendingGrad(springpairs,springlist,XY,kappa,g11,g12,g22);
        }
        betan=(gradEn-gradE).dot(gradEn)/(gradE.dot(gradE));
    } else{
        //Did not find bracket, reset CG-method
        betan=0.0;
        if(bendingOn==0){
            gradE=HarmonicGradient(springlist,XY,g11,g12,g22);
        } else{
            gradE=HarmonicGradient(springlist,XY,g11,g12,g22)+BendingGrad(springpairs,springlist,XY,kappa,g11,g12,g22);
        }
        s0=-gradE;
        //cout<<"Bracket failed, Reset CG"<<endl;
        return;
    }
    if(conjsteps%5 == 0) betan=0.0;
    if(betan<0.0) betan=0.0; //max(betan,0)
    if(fabs(gradEn.dot(gradE))>.5*gradE.dot(gradE)) betan=0.0;
    if(-2*gradE.dot(gradE)>gradE.dot(s0) && gradE.dot(s0) >-.2*gradE.dot(gradE)) betan=0.0;
    //cout<<"\r"<<"** Beta "<<betan<<flush;
    sn=-gradEn+betan*s0;
    gradE=gradEn;
    s0=sn;
}
Example #21
double line_search(fcn_Rn_to_R f, VectorXd x0, VectorXd v0, VectorXd Dfx0 ) {
	// Backtracking line search: given a function (f), a starting point (x0),
	// a search direction (v0), and the gradient at x0 (Dfx0), shrink alpha
	// until f(x0 + alpha*v0) satisfies a sufficient-decrease (Armijo) condition.
	//
	double alpha = 1;
	double beta =  0.2;
	double tau = 0.5;

	double fval = f(x0);
	double stepsize = beta * Dfx0.dot(v0);

	while ( f(x0 + alpha*v0) > fval + alpha*stepsize ) {
		alpha = tau*alpha;
		//cout << "Step size : " << alpha << endl;
	}


	return alpha;
}
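A hedged usage example for line_search above on a simple quadratic, assuming fcn_Rn_to_R is (or is convertible from) a callable taking a VectorXd and returning double:

#include <Eigen/Dense>
#include <cstdio>
using Eigen::VectorXd;

int main() {
  auto f = [](VectorXd x) { return 0.5 * x.squaredNorm(); }; // f(x) = 1/2 |x|^2
  VectorXd x0(2); x0 << 3.0, -4.0;
  VectorXd grad = x0;            // gradient of f at x0
  VectorXd v0 = -grad;           // steepest-descent direction
  double a = line_search(f, x0, v0, grad);
  std::printf("step %g, f before %g, f after %g\n", a, f(x0), f(x0 + a * v0));
  return 0;
}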
// [[Rcpp::export()]]
VectorXd sample_delta_c_Eigen(
    VectorXd delta,
    VectorXd tauh,
    Map<MatrixXd> Lambda_prec,
    double delta_1_shape,
    double delta_1_rate,
    double delta_2_shape,
    double delta_2_rate,
    Map<MatrixXd> randg_draws,  // all done with rate = 1;
    Map<MatrixXd> Lambda2
) {
  int times = randg_draws.rows();
  int k = tauh.size();

  MatrixXd scores_mat = Lambda_prec.cwiseProduct(Lambda2);
  VectorXd scores = scores_mat.colwise().sum();

  double rate,delta_old;
  for(int i = 0; i < times; i++){
    delta_old = delta(0);
    rate = delta_1_rate + 0.5 * (1/delta(0)) * tauh.dot(scores);
    delta(0) = randg_draws(i,0) / rate;
    // tauh = cumprod(delta);
    tauh *= delta(0)/delta_old;   // replaces re-calculating cumprod

    for(int h = 1; h < k-1; h++) {
      delta_old = delta(h);
      rate = delta_2_rate + 0.5*(1/delta(h))*tauh.tail(k-h).dot(scores.tail(k-h));
      delta(h) = randg_draws(i,h) / rate;
      // tauh = cumprod(delta);
      tauh.tail(k-h) *= delta(h)/delta_old; // replaces re-calculating cumprod
      // Rcout << (tauh - cumprod(delta)).sum() << std::endl;
    }
  }
  return(delta);
}
Example #23
/*
 *   frame_offset: used to place HxBj in right place in H
 */
bool MSCKF::getResidualH(VectorXd& ri, MatrixXd& Hi, Vector3d feature_pose, MatrixXd measure, MatrixXd pose_mtx, int frame_offset)
{
    int num_frame = (int)pose_mtx.cols();
    int errorStateLength = (int)fullErrorCovariance.rows();
    
    ri = VectorXd::Zero(2 * num_frame);
    Hi = MatrixXd::Zero(2 * num_frame, errorStateLength);    // Hi cols == error state length
    
    MatrixXd HxBj, Hc;
    MatrixXd Mij, tmp39;
    
    MatrixXd Hfi;
    MatrixXd Hf; // use double precision to improve numerical accuracy
    Hfi = MatrixXd::Zero(2 * num_frame, 3);
    
    for(int j = 0; j < num_frame; j++)
    {
        cout << "frame is " << frame_offset+j << endl;
        Matrix3d R_gb = quaternion_to_R(pose_mtx.block<4, 1>(0, j));

        //double xx = pts[i * 3 + 0] - position(0);
        //double yy = pts[i * 3 + 1] - position(1);
        //double zz = pts[i * 3 + 2] - position(2);
        //Vector3d local_point = Ric.inverse() * (quat.inverse() * Vector3d(xx, yy, zz) - Tic);
        Vector3d feature_in_c = R_cb * R_gb.transpose() * (feature_pose - pose_mtx.block<3, 1>(4, j)) + fullNominalState.segment(16, 3);
        //Vector2d projPtr = projectPoint(feature_pose, R_gb, pose_mtx.block<3, 1>(4, j), fullNominalState.segment(16, 3));
        
    //zij = cam.h(R_cb * R_gb.transpose() * (feature_pose - p_gb) + p_cb);
        Vector2d projPtr = projectCamPoint(feature_in_c);
        //cout << "projPtr projectPoint" << projPtr.transpose() << endl;
        /*
        if (feature_in_c(2) < 1e-4)
        {
          projPtr = measure.col(j);
          return false;
        }
        else
        {
          projPtr = projectCamPoint(feature_in_c);
        }*/
        //if (projPtr(0)<0 || projPtr(1)>800 || projPtr(1)<0||projPtr(1)>800)
        //  return false;
       // cout << "projPtr" << projPtr.transpose() << endl;

        cout << "measure is " << measure.col(j).transpose() << endl;
        cout << "feature in c is " << feature_in_c.transpose() << endl;
        cout << "estimat is " << projPtr.transpose() << endl;
        ri.segment(j * 2, 2) = measure.col(j) - projPtr;
        // cout << "ri piece is" << ri.segment(j * 2, 2)  <<endl;
        if (ri.segment(j * 2, 2).norm() > 2.0)
        {
            return false;
        }

        Mij = cam.Jh(feature_in_c) * R_cb * R_gb.transpose();
        tmp39 = MatrixXd::Zero(3, 9);

        tmp39.block<3, 3>(0, 0) = skew_mtx(feature_pose - pose_mtx.block<3, 1>(4, j));
        tmp39.block<3, 3>(0, 3) = -Matrix3d::Identity();
        
        HxBj = Mij * tmp39;                           // 2x9
        Hc = cam.Jh(feature_in_c);   // 2x3
        // cout << "Mij is " << endl << Mij << endl;
        // cout << "tmp39 is " << endl << tmp39 << endl;
        // cout << "HxBj is " << endl << HxBj << endl;
        // cout << "Hc is " << endl << Hc << endl;
        
        Hi.block<2, 9>(j * 2, ERROR_STATE_SIZE + 3 + ERROR_POSE_STATE_SIZE * (frame_offset + j)) = HxBj;
        Hi.block<2, 3>(j * 2, ERROR_STATE_SIZE) = Hc;
        
        Hf = cam.Jh(feature_in_c) * R_cb * R_gb.transpose();
        Hfi.block<2, 3>(j * 2, 0) = Hf.cast<double>();
    }
    // now carry out feature error marginalization
    JacobiSVD<MatrixXd> svd(Hfi.transpose(), ComputeFullV);
    MatrixXd left_null = svd.matrixV().cast<double>().rightCols(2 * num_frame - 3).transpose();
    
//    MatrixXd S = svd.singularValues().asDiagonal();
//    MatrixXd U = svd.matrixU();
//    MatrixXd V = svd.matrixV();
//    MatrixXd D = Hfi.transpose()-U*S*V.transpose();
//    std::cout << "\n" << D.norm() << "  " << sqrt((D.adjoint()*D).trace()) << "\n";
    
    // printf("left null size (%d, %d)\n", left_null.rows(), left_null.cols());
    // printf("ri size (%d, %d)\n", ri.rows(), ri.cols());
    // printf("Hi size (%d, %d)\n", Hi.rows(), Hi.cols());
    // cout << "before left null" << endl;
    // cout << "------------------" << endl;
    // cout << ri << endl;
    // cout << Hi << endl;

    ri = left_null * ri;
    Hi = left_null * Hi;

    // cout << "one measure, one H" << endl;
    // cout << "------------------" << endl;
    // cout << ri << endl;
    // cout << Hi << endl;
    // printf("ri size (%d, %d)\n", ri.rows(), ri.cols());
    // printf("Hi size (%d, %d)\n", Hi.rows(), Hi.cols());

    double gamma;
    MatrixXd tmpK = Hi * fullErrorCovariance * Hi.transpose();
    MatrixXd tmpKinv = tmpK.colPivHouseholderQr().solve(MatrixXd::Identity(tmpK.rows(), tmpK.cols()));

    int k = ri.size();
    gamma = fabs(ri.dot(tmpKinv * ri));
    cout << "gamma " << gamma << endl;

    // cout << "ha? " << fabs(ri.dot(Hi * Hi.transpose() * ri)) << endl;
    if (k > 30) k = 30;
    if (gamma*gamma > chi_test[k])
        return false;
    else
        return true;
}
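The SVD step near the end builds an orthonormal basis for the left null space of Hfi, so left_null * Hfi is (numerically) zero and the projected residual no longer depends on the feature-position error. A small standalone check of that construction (dimensions chosen arbitrarily):

#include <Eigen/Dense>
#include <cstdio>
using namespace Eigen;

int main() {
  const int rows = 8;                           // 2 * num_frame
  MatrixXd Hfi = MatrixXd::Random(rows, 3);

  // Columns of V corresponding to zero singular values of Hfi^T span the
  // left null space of Hfi.
  JacobiSVD<MatrixXd> svd(Hfi.transpose(), ComputeFullV);
  MatrixXd left_null = svd.matrixV().rightCols(rows - 3).transpose();

  std::printf("||left_null * Hfi|| = %g (should be ~0)\n", (left_null * Hfi).norm());
  return 0;
}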
Example #24
// full reorthogonalization
double                  EigenTriangle::solve(int maxIter)
{
    int n = graph -> VertexCount();
    srand(time(0));
    // check if rows == cols()
    if (maxIter > n)
        maxIter = n;
    vector<VectorXd> v;
    v.push_back(VectorXd::Random(n));
    v[0] = v[0] / v[0].norm();
    VectorXd alpha(n);
    VectorXd beta(n + 1);
    beta[0] = 0;
    VectorXd w;
    int last = 0;
    printf("%d\n", maxIter);
    for (int i = 0; i < maxIter; i ++)
    {
        if (i * 100 / maxIter > last + 10 || i == maxIter - 1)
        {
            last = (i + 1) * 100 / maxIter;
            std::cout << "\r" << "[EigenTriangle] Processing " << last << "% ..." << std::flush;
        }
        w = adjMatrix * v[i];
        alpha[i] = w.dot(v[i]);
        if (i == maxIter - 1)
            break;
        w -= alpha[i] * v[i];
        if (i > 0)
            w -= beta[i] * v[i - 1];
        beta[i + 1] = w.norm();
        v.push_back(w / beta[i + 1]);
        for (int j = 0; j <= i; j ++)
        {
            v[i + 1] = v[i + 1] - v[i + 1].dot(v[j]) * v[j];
        }
    }
    printf("\n");
    int k = maxIter;
    MatrixXd T = MatrixXd::Zero(k, k);
    for (int i = 0; i < k; i ++)
    {
        T(i, i) = alpha[i];
        if (i < k - 1)
            T(i, i + 1) = beta[i + 1];
        if (i > 0)
            T(i, i - 1) = beta[i];
    }
    //std::cout << T << std::endl;

    Eigen::EigenSolver<MatrixXd> eigenSolver;
    eigenSolver.compute(T, /* computeEigenvectors = */ false);
    Eigen::VectorXcd eigens = eigenSolver.eigenvalues();
    double res = 0;
    for (int i = 0; i < eigens.size(); i ++)
    {
        std::complex<double> tmp = eigens[i];
        res += pow(tmp.real(), 3);
        printf("%.5lf\n", tmp.real());
    }
    res /= 6;
    return res;
}
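The estimate above uses the identity #triangles = (1/6) * sum_i lambda_i^3 = tr(A^3)/6 for a symmetric 0/1 adjacency matrix A. A small dense sanity check of that identity on K4, which contains exactly 4 triangles:

#include <Eigen/Dense>
#include <cstdio>
using Eigen::MatrixXd;

int main() {
  // Adjacency matrix of the complete graph K4 (all off-diagonal entries 1).
  MatrixXd A = MatrixXd::Ones(4, 4) - MatrixXd::Identity(4, 4);
  double triangles = (A * A * A).trace() / 6.0;
  std::printf("triangles in K4 = %g (expected 4)\n", triangles);
  return 0;
}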
 bool NewtonMinimizerGradHessian::findSaddlePoint(const VectorXd& start_point, VectorXd& min_point, double tol, unsigned int max_iter, double abs_tol)
 {
   try
   {
     if(!function){throw (char*)("minimize called but function has not been set");}
   }
   catch(char* str)
   {
     cout<<"Exception from NewtonMinimizerGradHessian: "<<str<<endl;
     throw;
     return false;
   }
   
   unsigned int npars = function->nPars();
   
   try
   {
     if(!(npars>0)){throw (char*)("function to be minimized has zero dimensions");}
     if(start_point.rows()!=(int)npars){throw (char*)("input to minimizer not of dimension required by function to be minimized");}
   }
   catch(char* str)
   {
     cout<<"Exception from NewtonMinimizerGradHessian: "<<str<<endl;
     throw;
     return false;
   }
   
   //parameters used for the Wolfe conditions
   double c1 = 1.0e-6;
   double c2 = 1.0e-1;
   
   VectorXd working_points[2];
   working_points[0] = VectorXd::Zero(npars);
   working_points[1] = VectorXd::Zero(npars);
   
   VectorXd* current_point = &(working_points[0]);
   VectorXd* try_point = &(working_points[1]);
   
   (*current_point) = start_point;
   
   double value=0.;
   double prev_value=0.;
   
   double dir_der=0.;
   double scale = 1.;
   double scale_temp = 1.;
   double grad0_dir = 0.;
   
   bool good_value = true;
   bool bounds = true;
   
   unsigned int search_iter = 64;
   
   VectorXd grad[2];
   grad[0] = VectorXd::Zero(npars);
   grad[1] = VectorXd::Zero(npars);
   VectorXd* current_grad = &(grad[0]);
   VectorXd* try_grad = &(grad[1]);
   
   VectorXd newgrad = VectorXd::Zero(npars);
   
   MatrixXd hessian = MatrixXd::Zero(npars, npars);
   
   VectorXd move = VectorXd::Zero(npars);
   VectorXd unit_move = VectorXd::Zero(npars);
   
   FunctionGradHessian* orig_func = function;
   SquareGradient gradsquared(orig_func);
   function = &gradsquared;
   
   //try a Newton iteration
   orig_func->calcValGradHessian((*current_point), value, (*current_grad), hessian);
   move = -hessian.fullPivLu().solve(*current_grad);
   gradsquared.calcValGrad((*current_point), value, newgrad);
   good_value=true;
   for(unsigned int i=0;i<npars;++i){if(!(move(i) == move(i))){good_value=false;break;}}
   if(good_value == false){move = -newgrad;}
   dir_der = newgrad.dot(move);
   if(dir_der>0.){move = -move;}
   gradsquared.rescaleMove((*current_point), move);
   grad0_dir = newgrad.dot(move);
   scale_temp = 1.;
   //find scale, such that move*scale satisfies the strong Wolfe conditions
   bounds = lineSearch(scale_temp, c1, c2, (*try_grad), move, grad0_dir, value, (*current_point), (*try_point), tol, abs_tol, search_iter, scale);
   if(bounds == false){min_point = start_point; function=orig_func;return false;}
   move *= scale;
   (*try_point) = ((*current_point) + move);
   orig_func->calcValGradHessian((*try_point), prev_value, (*try_grad), hessian);
   gradsquared.calcValGrad((*try_point), prev_value, newgrad);
   swap(current_point, try_point);
   swap(current_grad, try_grad);
   swap(value, prev_value);
   
   unsigned long int count = 1;
   bool converged=false;
   while(converged==false)
   {
     if((fabs((prev_value - value)/prev_value)<tol || fabs(prev_value - value)<abs_tol)){converged=true;break;}
     prev_value = value;
     //try a Newton iteration
     move = -hessian.fullPivLu().solve(*current_grad);
     gradsquared.calcValGrad((*current_point), value, newgrad);
     
     good_value=true;
     for(unsigned int i=0;i<npars;++i){if(!(move(i) == move(i))){good_value=false;break;}}
     scale_temp = 1.;
     scale_temp = fabs(value/newgrad.dot(move));
     if(good_value == false){move = -newgrad;scale_temp = fabs(value/move.dot(move));}
     dir_der = newgrad.dot(move);
     if(dir_der>0.){move = -move;}
     gradsquared.rescaleMove((*current_point), move);
     grad0_dir = newgrad.dot(move);
     //find scale, such that move*scale satisfies the strong Wolfe conditions
     bounds = lineSearch(scale_temp, c1, c2, (*try_grad), move, grad0_dir, value, (*current_point), (*try_point), tol, abs_tol, search_iter, scale);
     if(bounds == false){min_point = (*current_point); function=orig_func;return false;}
     move *= scale;
     (*try_point) = ((*current_point) + move);
     orig_func->calcValGradHessian((*try_point), value, (*try_grad), hessian);
     gradsquared.calcValGrad((*try_point), value, newgrad);
     swap(current_point, try_point);
     swap(current_grad, try_grad);
     count++;
     if(count > max_iter){break;}
   }
   orig_func->computeCovariance(value, hessian);
   min_point = (*current_point);
   function=orig_func;return converged;
 }
 bool NewtonMinimizerGradHessian::lineSearch(double& alpha, const double& wolfe1, const double& wolfe2, VectorXd& try_grad, VectorXd& direction, double& grad0_dir, double& val0, VectorXd& init_params, VectorXd& try_params, const double& precision, const double& accuracy, unsigned int max_iter, double& result)
 {
   double tryval = val0;
   double prev_val = tryval;
   double prev_alpha = tryval;
   double lo = tryval;
   double hi = tryval;
   double temp1 = tryval;
   double temp2 = tryval;
   double temp3 = tryval;
   unsigned int i = 1;
   bool bounds = true;
   
   prev_alpha = 0.;
   while(true)
   {
     try_params = direction;
     try_params *= alpha;
     try_params += init_params;
     
     bounds = function->calcValGrad(try_params, tryval, try_grad);
     for(unsigned int j=0;j<fixparameter.size();++j)
     {
       if(fixparameter[j] != 0)
       {
         try_grad[j] = 0.;
       }
     }
     if(bounds == false)
     {
       while(true)
       {
         alpha *= 0.5;
         try_params = direction;
         try_params *= alpha;
         try_params += init_params;
         
         bounds = function->calcValGrad(try_params, tryval, try_grad);
         for(unsigned int j=0;j<fixparameter.size();++j)
         {
           if(fixparameter[j] != 0)
           {
             try_grad[j] = 0.;
           }
         }
         if(bounds==true)
         {
           if(tryval < val0)
           {
             result=alpha;
             return true;
           }
         }
         if(i>max_iter){return false;}
         i++;
       }
       
       
       
       alpha = 0.5*(prev_alpha + alpha);
       i++;
       if(i > max_iter){return false;}
       continue;
     }
     
     temp1 = wolfe1*alpha;
     temp1 *= grad0_dir;
     temp1 += val0;
     if( ( tryval > temp1 ) || ( ( tryval > prev_val ) && (i>1) ))
     {
       lo = prev_alpha;
       hi = alpha;
       return zoom(wolfe1, wolfe2, lo, hi, try_grad, direction, grad0_dir, val0, prev_val, init_params, try_params, max_iter, result);
     }
     temp1 = try_grad.dot(direction);
     temp2 = -wolfe2*grad0_dir;
     temp3 = fabs(temp1);
     
     if( temp3 <= fabs(temp2) )
     {
       result = alpha;
       return bounds;
     }
     if( temp1 >= 0. )
     {
       lo = alpha;
       hi = prev_alpha;
       return zoom(wolfe1, wolfe2, lo, hi, try_grad, direction, grad0_dir, val0, tryval, init_params, try_params, max_iter, result);
     }
     prev_val = tryval;
     prev_alpha = alpha;
     alpha *= 2.;
     i++;
     if(i > max_iter){return false;}
   }
 }
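Both lineSearch and zoom above test the strong Wolfe conditions through temp1/temp2/temp3. For reference, a hedged restatement of the two predicates as free functions (c1 and c2 play the roles of wolfe1 and wolfe2):

#include <cmath>

// Sufficient decrease (Armijo): f(x + a*d) <= f(x) + c1 * a * (grad0 . d)
inline bool sufficientDecrease(double f_try, double f0, double grad0_dir,
                               double a, double c1) {
  return f_try <= f0 + c1 * a * grad0_dir;
}

// Strong curvature: |grad(x + a*d) . d| <= c2 * |grad0 . d|
inline bool strongCurvature(double gtry_dir, double grad0_dir, double c2) {
  return std::fabs(gtry_dir) <= c2 * std::fabs(grad0_dir);
}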
Example #27
double drwnLPSolver::solve()
{
    DRWN_FCN_TIC;
    const int n = _A.cols();
    const int m = _A.rows();

    // find feasible starting point
    if (_x.rows() == 0) {
        _x = VectorXd::Zero(n);
        for (int i = 0; i < n; i++) {
            if (isUnbounded(i)) continue;

            if (_ub[i] == DRWN_DBL_MAX) {
                _x[i] = _lb[i] + 1.0;
            } else if (_lb[i] == -DRWN_DBL_MAX) {
                _x[i] = _ub[i] - 1.0;
            } else {
                _x[i] = 0.5 * (_lb[i] + _ub[i]);
            }
        }
    }

    // initialize kkt system variables
    MatrixXd F = MatrixXd::Zero(n + m, n + m);
    VectorXd g = VectorXd::Zero(n + m);

    F.block(n, 0, m, n) = _A;
    F.block(0, n, n, m) = _A.transpose();

    // initialize dual variables (if needed)
    VectorXd nu = VectorXd::Zero(m);

    // iterate on interior point method
    double t = t0;
    while (1) {
        // determine feasibility
        const bool bFeasible = ((_b - _A * _x).squaredNorm() < eps);
        if (!bFeasible) {
            DRWN_LOG_VERBOSE("...finding feasible point");
        }

        // centering step and update
        for (unsigned iter = 0; iter < maxiters; iter++) {

            //! \todo solve with blockwise elimination
            // construct KKT system, Fx = g
            // | H A^T | | dx | = |  - g   |
            // | A  0  | | w  |   | b - Ax |

            for (int i = 0; i < n; i++) {
                F(i, i) = 0.0;
                if (_ub[i] != DRWN_DBL_MAX) {
                    F(i, i) += 1.0 / ((_ub[i] - _x[i]) * (_ub[i] - _x[i]));
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    F(i, i) += 1.0 / ((_x[i] - _lb[i]) * (_x[i] - _lb[i]));
                }
            }

            for (int i = 0; i < n; i++) {
                g[i] = - t * _c[i];
                if (_ub[i] != DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _ub[i]);
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _lb[i]);
                }
            }
            g.tail(m) = _b - _A * _x;

            // check terminating condition
            const double r_primal = g.tail(m).squaredNorm();
            const double r_dual = (_A.transpose() * nu - g.head(n)).squaredNorm();
            //if (!bFeasible && (r_primal + r_dual < drwnLPSolver::eps)) break;
            if (!bFeasible && (r_primal < drwnLPSolver::eps)) break;

            // solve KKT system
            const VectorXd dxnu = F.fullPivLu().solve(g);

            const double lambda_sqr = g.head(n).dot(dxnu.head(n));
            if (bFeasible && (0.5 * lambda_sqr < 1.0e-6)) break;

            if (bFeasible) {
                // feasible line search
                const double f_prev = t * _c.dot(_x) + barrierFunction(_x);
                const double delta_f = alpha * g.dot(dxnu.head(n));

                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const double f = t * _c.dot(nx) + barrierFunction(nx);
                        if (f - f_prev < step * delta_f) {
                            _x = nx;
                            break;
                        }
                    }
                    step *= beta;
                }
            } else {
                // infeasible start line search
                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const VectorXd nnu = (1.0 - step) * nu + step * dxnu.tail(m);
                        const double r = (_A.transpose() * nnu - g.head(n)).squaredNorm() +
                            (_b - _A * nx).squaredNorm();
                        if (r <= (1.0 - alpha * step) * (r_primal + r_dual)) {
                            _x = nx;
                            nu = nnu;
                            break;
                        }
                    }
                    step *= beta;
                }
            }
        }

        // check if feasible point was found
        if (!bFeasible && ((_b - _A * _x).squaredNorm() > eps)) {
            DRWN_LOG_WARNING("...could not find a feasible point (residual norm is " <<
                (_b - _A * _x).norm() << ")");
            DRWN_FCN_TOC;
            return DRWN_DBL_MAX;
        }

        DRWN_LOG_VERBOSE("...objective is " << _c.dot(_x));

        // check stopping criteria
        if (m < eps * t) break;

        // update barrier function multiplier
        t *= mu;
    }

    // compute true objective and return
    DRWN_FCN_TOC;
    return _c.dot(_x);
}
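barrierFunction above is presumably the standard log barrier for the box constraints l <= x <= u. A minimal sketch of that barrier, using std::numeric_limits<double>::max() in place of DRWN_DBL_MAX, shown only to make the centering objective t*c'x + phi(x) concrete:

#include <Eigen/Dense>
#include <cmath>
#include <limits>
using Eigen::VectorXd;

// phi(x) = -sum_i log(u_i - x_i) - sum_i log(x_i - l_i), skipping unbounded sides.
double boxLogBarrier(const VectorXd& x, const VectorXd& lb, const VectorXd& ub) {
  const double INF = std::numeric_limits<double>::max();
  double phi = 0.0;
  for (int i = 0; i < x.size(); ++i) {
    if (ub[i] != INF)  phi -= std::log(ub[i] - x[i]);
    if (lb[i] != -INF) phi -= std::log(x[i] - lb[i]);
  }
  return phi;
}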
Example #28
double drwnSparseLPSolver::solve()
{
    DRWN_FCN_TIC;
    const int n = _A.cols();
    const int m = _A.rows();

    // find feasible starting point
    if (_x.rows() == 0) {
        _x = VectorXd::Zero(n);
        for (int i = 0; i < n; i++) {
            if (isUnbounded(i)) continue;

            if (_ub[i] == DRWN_DBL_MAX) {
                _x[i] = _lb[i] + 1.0;
            } else if (_lb[i] == -DRWN_DBL_MAX) {
                _x[i] = _ub[i] - 1.0;
            } else {
                _x[i] = 0.5 * (_lb[i] + _ub[i]);
            }
        }
    }

    // initialize kkt system variables
    SparseMatrix<double> F(n + m, n + m);
    VectorXd g = VectorXd::Zero(n + m);

    vector<Eigen::Triplet<double> > entries;
    entries.reserve(n + 2 * _A.nonZeros());
    for (int i = 0; i < n; i++) {
        entries.push_back(Eigen::Triplet<double>(i, i, 1.0));
    }

    for (int k = 0; k < _A.outerSize(); k++) {
        for (SparseMatrix<double>::InnerIterator it(_A, k); it; ++it) {
            entries.push_back(Eigen::Triplet<double>(n + it.row(), it.col(), it.value()));
            entries.push_back(Eigen::Triplet<double>(it.col(), n + it.row(), it.value()));
        }
    }

    F.setFromTriplets(entries.begin(), entries.end());

    // initialize dual variables (if needed)
    VectorXd nu = VectorXd::Zero(m);

    // iterate on interior point method
    double t = drwnLPSolver::t0;
    while (1) {
        // determine feasibility
        const bool bFeasible = ((_b - _A * _x).squaredNorm() < drwnLPSolver::eps);
        if (!bFeasible) {
            DRWN_LOG_VERBOSE("...finding feasible point");
        }

        // centering step and update
        for (unsigned iter = 0; iter < drwnLPSolver::maxiters; iter++) {

            //! \todo solve with blockwise elimination
            // construct KKT system, Fx = g
            // | H A^T | | dx | = |  - g   |
            // | A  0  | | w  |   | b - Ax |

            for (int i = 0; i < n; i++) {
                F.coeffRef(i, i) = 0.0;
                if (_ub[i] != DRWN_DBL_MAX) {
                    F.coeffRef(i, i) += 1.0 / ((_ub[i] - _x[i]) * (_ub[i] - _x[i]));
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    F.coeffRef(i, i) += 1.0 / ((_x[i] - _lb[i]) * (_x[i] - _lb[i]));
                }
            }

            for (int i = 0; i < n; i++) {
                g[i] = - t * _c[i];
                if (_ub[i] != DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _ub[i]);
                }
                if (_lb[i] != -DRWN_DBL_MAX) {
                    g[i] += 1.0 / (_x[i] - _lb[i]);
                }
            }
            g.tail(m) = _b - _A * _x;

            // check terminating condition
            const double r_primal = g.tail(m).squaredNorm();
            const double r_dual = (_A.transpose() * nu - g.head(n)).squaredNorm();
            //if (!bFeasible && (r_primal + r_dual < drwnLPSolver::eps)) break;
            if (!bFeasible && (r_primal < drwnLPSolver::eps)) break;

            // solve KKT system
            SimplicialLDLT<SparseMatrix<double> > chol(F);
            const VectorXd dxnu = chol.solve(g);

            const double lambda_sqr = g.head(n).dot(dxnu.head(n));
            if (bFeasible && (0.5 * lambda_sqr < 1.0e-6)) break;

            if (bFeasible) {
                // feasible line search
                const double f_prev = t * _c.dot(_x) + barrierFunction(_x);
                const double delta_f = drwnLPSolver::alpha * g.dot(dxnu.head(n));

                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const double f = t * _c.dot(nx) + barrierFunction(nx);
                        if (f - f_prev < step * delta_f) {
                            _x = nx;
                            break;
                        }
                    }
                    step *= drwnLPSolver::beta;
                }
            } else {
                // infeasible start line search
                DRWN_LOG_DEBUG("...iteration " << iter << ", primal residual " << r_primal
                    << ", dual residual " << r_dual);

                double step = 1.0;
                while (1) {
                    const VectorXd nx = _x + step * dxnu.head(n);
                    if (isWithinBounds(nx)) {
                        const VectorXd nnu = (1.0 - step) * nu + step * dxnu.tail(m);
                        const double r = (_A.transpose() * nnu - g.head(n)).squaredNorm() +
                            (_b - _A * nx).squaredNorm();
                        if (r <= (1.0 - drwnLPSolver::alpha * step) * (r_primal + r_dual)) {
                            _x = nx;
                            nu = nnu;
                            break;
                        }
                    }
                    step *= drwnLPSolver::beta;
                }
            }
        }

        // check if feasible point was found
        if (!bFeasible && ((_b - _A * _x).squaredNorm() > drwnLPSolver::eps)) {
            DRWN_LOG_WARNING("...could not find a feasible point (residual norm is " <<
                (_b - _A * _x).norm() << ")");
            DRWN_FCN_TOC;
            return DRWN_DBL_MAX;
        }

        DRWN_LOG_VERBOSE("...objective is " << _c.dot(_x));

        // check stopping criteria
        if (m < drwnLPSolver::eps * t) break;

        // update barrier function multiplier
        t *= drwnLPSolver::mu;
    }

    // compute true objective and return
    DRWN_FCN_TOC;
    return _c.dot(_x);
}
Example #29
inline double Prob_Cijs(VectorXd& beta , VectorXd& aij){

    double dotprod = beta.dot(aij);
    double prob = 1/(1+exp(dotprod));
    return(prob);
}
Example #30
RcppExport SEXP nniv(SEXP arg1, SEXP arg2, SEXP arg3) {
  // 3 arguments
  // arg1 for parameters
  // arg2 for data
  // arg3 for Gibbs

  // data
  List list2(arg2); 

  const MatrixXd X=as< Map<MatrixXd> >(list2["X"]),
    Z=as< Map<MatrixXd> >(list2["Z"]);

  const VectorXd t=as< Map<VectorXd> >(list2["t"]),
    y=as< Map<VectorXd> >(list2["y"]);

  const int N=X.rows(), p=X.cols(), q=Z.cols(), r=p+q, s=p+r;

  // parameters
  List list1(arg1), 
    beta_info=list1["beta"], 
    Tprec_info=list1["Tprec"], 
    mu_info=list1["mu"], 
    theta_info=list1["theta"];

  // prior parameters
  List beta_prior=beta_info["prior"],
    Tprec_prior=Tprec_info["prior"],
    mu_prior=mu_info["prior"],
    theta_prior=theta_info["prior"];

  const double Tprec_prior_nu=as<double>(Tprec_prior["nu"]);
  const Matrix2d Tprec_prior_Psi=as< Map<MatrixXd> >(Tprec_prior["Psi"]);

  const double beta_prior_mean=as<double>(beta_prior["mean"]);
  const double beta_prior_prec=as<double>(beta_prior["prec"]);

  const Vector2d mu_prior_mean=as< Map<VectorXd> >(mu_prior["mean"]);
  const Matrix2d mu_prior_prec=as< Map<MatrixXd> >(mu_prior["prec"]);

  const VectorXd theta_prior_mean=as< Map<VectorXd> >(theta_prior["mean"]);
  const MatrixXd theta_prior_prec=as< Map<MatrixXd> >(theta_prior["prec"]);

  // initialize parameters
  double beta=as<double>(beta_info["init"]); 

  Matrix2d Tprec=as< Map<MatrixXd> >(Tprec_info["init"]);
  Vector2d mu   =as< Map<VectorXd> >(mu_info["init"]);
  VectorXd theta=as< Map<VectorXd> >(theta_info["init"]);

  // Gibbs
  List list3(arg3); //, save=list3["save"];

  const int burnin=as<int>(list3["burnin"]), M=as<int>(list3["M"]), 
    thin=as<int>(list3["thin"]), m=7+s;

  MatrixXd GS(M, m);

  // prior parameter intermediate values
  double beta_prior_prod=beta_prior_prec * beta_prior_mean;

  VectorXd theta_prior_prod=theta_prior_prec * theta_prior_mean;

  Vector2d mu_prior_prod=mu_prior_prec*mu_prior_mean;

  // parameter intermediate values
  Matrix2d Sigma, B_inverse;

  Sigma=Tprec.inverse();
  B_inverse.setIdentity();

  VectorXd gamma=theta.segment(0, p), 
    delta=theta.segment(p, q), 
    eta  =theta.segment(r, p);

  /*
    MatrixXd Theta(2, r);

    Theta.row(0)=theta.segment(0, r);
    Theta.bottomLeftCorner(1, q)=RowVectorXd::Zero(q);
    Theta.bottomRightCorner(1, p)=eta.transpose();
  */

  Vector2d eps, eps_sum;

  MatrixXd D(N, 2), theta_cond_var_root(s, s), W(2, s);

  W.setZero();

  MatrixXd theta_cond_prec(s, s);

  VectorXd theta_cond_prod(s), w(r);

  Matrix2d mu_cond_prec, mu_cond_var_root, E;

  Vector2d u, R, mu_cond_prod, mu_u;

  double beta_scale, beta_prec, beta_prod, beta_cond_var, beta_cond_mean;

  int h=0, i, l; 


  // Gibbs loop
  //for(int l=-burnin; l<=(M-1)*thin; ++l) {

  l=-burnin;

  do{
    // sample beta
    D.col(0).setConstant(-mu[0]);
    D.col(1).setConstant(-mu[1]);

    D.col(0) += (t - X*gamma - Z*delta);
    D.col(1) += (y - X*eta);

    beta_scale=1./(Sigma(0, 0)*t.dot(t));

    beta_prec=1./(beta_scale*Sigma.determinant());

    beta_prod=beta_prec*beta_scale
      *((Sigma(0, 0)*D.col(1)-Sigma(0, 1)*D.col(0)).array()*t.array()).sum();

    beta_cond_var=1./(beta_prec+beta_prior_prec);

    beta_cond_mean=beta_cond_var*(beta_prod+beta_prior_prod);

    beta=rnorm1d(beta_cond_mean, sqrt(beta_cond_var));

    B_inverse(1, 0)=-beta;

    // sample theta
    theta_cond_prec=theta_prior_prec;
    theta_cond_prod=theta_prior_prod;

    for(i=0; i<N; ++i) {
      /*
	W.topLeftCorner(1, p)=X.row(i);
	W.block(0, p, 1, q)=Z.row(i);
	W.bottomRightCorner(1, p)=X.row(i);
      */

      W.block(0, 0, 1, p)=X.row(i);
      W.block(0, p, 1, q)=Z.row(i);
      W.block(1, r, 1, p)=X.row(i);

      theta_cond_prec += (W.transpose() * Tprec * W);

      u[0]=t[i];
      u[1]=y[i];

      R=B_inverse*u-mu;

      theta_cond_prod += (W.transpose() * Tprec * R);
    }

    theta_cond_var_root=inv_root_chol(theta_cond_prec);
    // theta_cond_var_root=inv_root_svd(theta_cond_prec); // for validation only

    theta=theta_cond_var_root*(rnormXd(s)+theta_cond_var_root.transpose()*theta_cond_prod);

    gamma=theta.segment(0, p); 
    delta=theta.segment(p, q); 
    eta  =theta.segment(r, p);

    /*
      Theta.topRows(1)=theta.segment(0, r).transpose();
      Theta.bottomRightCorner(1, p)=eta.transpose();
    */

    // sample mu
    eps_sum.setZero();
    //W.setZero();

    E.setZero();

    for(i=0; i<N; ++i) {
      /*
	W.topLeftCorner(1, p)=X.row(i);
	W.block(0, p, 1, q)=Z.row(i);
	W.bottomRightCorner(1, p)=X.row(i);
      */

      W.block(0, 0, 1, p)=X.row(i);
      W.block(0, p, 1, q)=Z.row(i);
      W.block(1, r, 1, p)=X.row(i);

      /*
	w.segment(0, q)=Z.row(i);
	w.segment(q, p)=X.row(i);
      */

      u[0]=t[i];
      u[1]=y[i];

      //eps += B_inverse*u - Theta*w;
      eps = B_inverse*u - W*theta;
      eps_sum += eps;
      eps -= mu;
      E += eps*eps.transpose();
    }

    mu_cond_prod=Tprec*eps_sum+mu_prior_prod;

    mu_cond_prec=(N*Tprec+mu_prior_prec);

    mu_cond_var_root=inv_root_chol(mu_cond_prec);
    // mu_cond_var_root=inv_root_svd(mu_cond_prec); // for validation only

    mu=mu_cond_var_root*(rnormXd(2)+mu_cond_var_root.transpose()*mu_cond_prod);

    // sample Tprec
    Tprec = rwishart((E+Tprec_prior_Psi).inverse(), N+Tprec_prior_nu);
    Sigma = Tprec.inverse();

    if(l>=0 && l%thin == 0) {
      h = (l/thin);

      GS.block(h, 0, 1, s)=theta.transpose();

      GS(h, s)=beta;
      GS(h, s+1)=mu[0];
      GS(h, s+2)=mu[1];
      GS(h, s+3)=Tprec(0, 0);
      GS(h, s+4)=Tprec(0, 1);
      GS(h, s+5)=Tprec(0, 1);
      GS(h, s+6)=Tprec(1, 1);
    }

    l++;

  } while (l<=(M-1)*thin && beta==beta); 

  if(beta != beta) GS.conservativeResize(h+1, m);

  return wrap(GS);
}