// Cosine of the angle between two vectors.
// Degenerate (near-zero) vectors are treated as parallel.
double cosangle(const VectorXd& a, const VectorXd& b) {
    double na = a.norm(), nb = b.norm();
    if (na < 1e-6 || nb < 1e-6) {
        return 1;
    } else {
        return a.dot(b) / (na * nb);
    }
}
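A minimal usage sketch (hypothetical values, assuming Eigen's VectorXd and the definition above):

#include <Eigen/Dense>
#include <iostream>
using Eigen::VectorXd;

int main() {
    VectorXd a(3), b(3);
    a << 1, 0, 0;
    b << 0, 1, 0;
    std::cout << cosangle(a, b) << std::endl;  // prints 0: orthogonal vectors
}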
Example n. 2
VectorXd Optimizer::compute_dog_leg(double alpha, const VectorXd& h_sd,
    const VectorXd& h_gn, double delta, double& gain_ratio_denominator) {
  if (h_gn.norm() <= delta) {
    gain_ratio_denominator = current_SSE_at_linpoint;
    return h_gn;
  }

  double h_sd_norm = h_sd.norm();

  if ((alpha * h_sd_norm) >= delta) {
    gain_ratio_denominator = delta * (2 * alpha * h_sd_norm - delta)
        / (2 * alpha);
    return (delta / h_sd_norm) * h_sd;
  } else {
    // complicated case: calculate intersection of trust region with
    // line between Gauss-Newton and steepest descent solutions
    VectorXd a = alpha * h_sd;
    VectorXd b = h_gn;
    double c = a.dot(b - a);
    double b_a_norm2 = (b - a).squaredNorm();
    double a_norm2 = a.squaredNorm();
    double delta2 = delta * delta;
    double sqrt_term = sqrt(c * c + b_a_norm2 * (delta2 - a_norm2));
    double beta;
    // Pick the numerically stable root of the quadratic
    // (avoids cancellation between c and sqrt_term)
    if (c <= 0) {
      beta = (-c + sqrt_term) / b_a_norm2;
    } else {
      beta = (delta2 - a_norm2) / (c + sqrt_term);
    }

    gain_ratio_denominator = .5 * alpha * (1 - beta) * (1 - beta) * h_sd_norm
        * h_sd_norm + beta * (2 - beta) * current_SSE_at_linpoint;
    return (alpha * h_sd + beta * (h_gn - alpha * h_sd));
  }
}
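For reference, the boundary step above solves ||a + beta*(b - a)|| = delta for beta in [0, 1]. A standalone check with hypothetical values (not part of the Optimizer class):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>
using Eigen::VectorXd;

int main() {
    VectorXd a(2), b(2);
    a << 0.5, 0.0;   // plays the role of alpha * h_sd (inside the region)
    b << 2.0, 1.0;   // plays the role of h_gn (outside the region)
    double delta = 1.0;
    double c = a.dot(b - a);
    double b_a_norm2 = (b - a).squaredNorm();
    double sqrt_term = std::sqrt(c * c + b_a_norm2 * (delta * delta - a.squaredNorm()));
    double beta = (c <= 0) ? (-c + sqrt_term) / b_a_norm2
                           : (delta * delta - a.squaredNorm()) / (c + sqrt_term);
    std::cout << (a + beta * (b - a)).norm() << std::endl;  // prints ~1, i.e. delta
}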
Example n. 3
double probutils::eigpower (const MatrixXd& A, VectorXd& eigvec)
{
  // Check if A is square
  if (A.rows() != A.cols())
    throw invalid_argument("Matrix A must be square!");

  // Check if A is a scalar
  if (A.rows() == 1)
  {
    eigvec.setOnes(1);
    return A(0,0);
  }

  // Initialise working vectors
  VectorXd v = VectorXd::LinSpaced(A.rows(), -1, 1);
  VectorXd oeigvec(A.rows());

  // Initialise eigenvalue and eigenvectors etc
  double eigval = v.norm();
  double vdist = numeric_limits<double>::infinity();
  eigvec = v/eigval;

  // Loop until eigenvector converges or we reach max iterations
  for (int i=0; (vdist>EIGCONTHRESH) && (i<MAXITER); ++i)
  {
    oeigvec = eigvec;
    v.noalias() = A * oeigvec;
    eigval = v.norm();
    eigvec = v/eigval;
    vdist = (eigvec - oeigvec).norm();
  }

  return eigval;
}
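Hypothetical usage, finding the dominant eigenpair of a small matrix (EIGCONTHRESH and MAXITER are assumed to be defined by the probutils module):

Eigen::MatrixXd A(2, 2);
A << 4, 1,
     2, 3;                                  // eigenvalues are 5 and 2
Eigen::VectorXd v;
double lambda = probutils::eigpower(A, v);  // lambda ~= 5, v ~= (1,1)/sqrt(2)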
Example n. 4
    List fastLm(Rcpp::NumericMatrix Xs, Rcpp::NumericVector ys, int type) {
        const Map<MatrixXd>  X(as<Map<MatrixXd> >(Xs));
        const Map<VectorXd>  y(as<Map<VectorXd> >(ys));
        Index                n = X.rows();
        if ((Index)y.size() != n) throw invalid_argument("size mismatch");

        // Select and apply the least squares method
        lm                 ans(do_lm(X, y, type));

        // Copy coefficients and install names, if any
        NumericVector     coef(wrap(ans.coef()));

        List          dimnames(NumericMatrix(Xs).attr("dimnames"));
        if (dimnames.size() > 1) {
            RObject   colnames = dimnames[1];
            if (!(colnames).isNULL())
                coef.attr("names") = clone(CharacterVector(colnames));
        }
	    
        VectorXd         resid = y - ans.fitted();
        int               rank = ans.rank();
        int                 df = (rank == ::NA_INTEGER) ? n - X.cols() : n - rank;
        double               s = resid.norm() / std::sqrt(double(df));
        // Create the standard errors
        VectorXd            se = s * ans.se();

        return List::create(_["coefficients"]  = coef,
                            _["se"]            = se,
                            _["rank"]          = rank,
                            _["df.residual"]   = df,
                            _["residuals"]     = resid,
                            _["s"]             = s,
                            _["fitted.values"] = ans.fitted());
    }
VectorXd MotionModel::V2Q(VectorXd V)
{
	// Converts from rotation vector to quaternion representation
	// Javier Civera book P130, eq. A.15
	VectorXd Q(4);
	double norm = V.norm();

	if (norm < eps)
		Q << 1, 0, 0, 0; // zero rotation: identity quaternion
	else
	{
		// Quaternion: cos(theta/2) + sin(theta/2)*ux*I + sin(theta/2)*uy*J + sin(theta/2)*uz*K
		VectorXd V_norm = V / norm; // unit rotation axis
		Q << cos(norm / 2), sin(norm / 2) * V_norm;
	}
	return Q; // Q is a 4x1 vector
}
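A sanity check with hypothetical values (motionModel is an assumed MotionModel instance; M_PI comes from <cmath>): a rotation of pi about the z-axis should give the quaternion (0, 0, 0, 1).

Eigen::VectorXd v(3);
v << 0, 0, M_PI;                          // rotation of pi about the z-axis
Eigen::VectorXd q = motionModel.V2Q(v);   // q ~= (cos(pi/2), 0, 0, sin(pi/2)) = (0, 0, 0, 1)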
Example n. 6
void SolveAmpere()
{
    // First allocate the matrix and vectors for the finite difference scheme, source, and solution
    MatrixXd AmMatrix = MatrixXd::Zero(M * N, M * N);
    VectorXd Source = VectorXd::Zero(M * N);
    VectorXd Soln = VectorXd::Zero(M * N);

    // The finite difference scheme is applied
    createAmpereMatrix(AmMatrix, Source);

    // Solve the linear system for the new potential values
    Soln = AmMatrix.colPivHouseholderQr().solve(Source);

    // Test that the solution actually solves the system (relative L2 norm)
    double L2error = (AmMatrix * Soln - Source).norm() / Source.norm();

    //cout << (AmMatrix) << endl;
    //cout << Source << endl;
    //cout << (AmMatrix*Soln - Source) << endl;

    cout << "Ampere Matrix L2 error = " << L2error << endl;

    // Copy the solution to the newState vector
    for (int i = 0; i < M; i++)
        for (int j = 0; j < N; j++)
            newState[Apot][i][j] = Soln(Sidx(i, j));

    // Apot has been updated; we don't need to renormalize
}
Example n. 7
 virtual double getEnergy(const VectorXd &q) const
 {
     VectorXd gradq;
     SparseMatrix<double> dgdq, hessq;
     int derivs = ElasticEnergy::DR_DQ;
     double energyB, energyS;
     m_.elasticEnergy(q_, q, energyB, energyS, gradq, hessq, dgdq, derivs);
     // Note: returns the norm of the energy gradient, not the energy value
     return gradq.norm();
 }
Example n. 8
 virtual double getEnergyAndDerivatives(const VectorXd &q, VectorXd &grad, SparseMatrix<double> &hess) const
 {
     SparseMatrix<double> dgdq, hessq;
     VectorXd gradq;
     double energyB, energyS;
     int derivs = ElasticEnergy::DR_DQ | ElasticEnergy::DR_DGDQ;
     m_.elasticEnergy(q_, q, energyB, energyS, gradq, hessq, dgdq, derivs);
     // Chain rule: with J = dgdq, grad = J * gradq; hess = J * J' is a
     // Gauss-Newton-style approximation of the Hessian.
     grad = dgdq*gradq;
     hess = dgdq*dgdq.transpose();
     return gradq.norm();
 }
Example n. 9
// [[Rcpp::export]]
double R2pred(MatrixXd Xt, VectorXd yt, MatrixXd Xv, VectorXd yv) {
    const VectorXd coefHat(betaHat(Xt, yt));
    const VectorXd fitted(Xv * coefHat);
    const VectorXd resid(yv - fitted);
    const VectorXd residNull(dev(yv));
    const double SSerr(resid.squaredNorm());
    const double SStot(residNull.squaredNorm());
    // Rcpp::Rcout << SSerr << "\n\n" << SStot << std::endl;
    const double out(1 - (SSerr/SStot));
    return out;
}
Example n. 10
// [[Rcpp::export]]
double R2(MatrixXd X, VectorXd y) {
    // const double n(X.rows());  // FIXME:  not needed?
    const VectorXd coefHat(betaHat(X, y));
    const VectorXd fitted(X * coefHat);
    const VectorXd resid(y - fitted);
    const VectorXd residNull(dev(y));
    const double SSerr(resid.squaredNorm());
    const double SStot(residNull.squaredNorm());
    // Rcpp::Rcout << SSerr << "\n\n" << SStot << std::endl;
    const double out(1 - (SSerr/SStot));
    return out;
}
Example n. 11
VectorXd LowRankRepresentation::solve_l2(VectorXd& w, double lambda)
{
    // Proximal operator of lambda * ||.||_2 (block soft-thresholding):
    // shrink w towards zero by lambda, or return zero if ||w|| <= lambda.
    double nw = w.norm();
    VectorXd x;

    if (nw > lambda)
        x = (nw - lambda) * w / nw;
    else
        x = VectorXd::Zero(w.size());
    return x;
}
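A quick check with hypothetical values (lrr is an assumed LowRankRepresentation instance):

Eigen::VectorXd w(2);
w << 3, 4;                                 // ||w|| = 5
Eigen::VectorXd x = lrr.solve_l2(w, 1.0);  // scales w by (5 - 1)/5 = 0.8
// x = (2.4, 3.2), with ||x|| = 4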
Example n. 12
void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr){

  // ptr packs two CasADi functions: the objective and its gradient
  CasADi::SXFunction &fun = ((CasADi::SXFunction*)ptr)[0];
  CasADi::SXFunction &grad_fun = ((CasADi::SXFunction*)ptr)[1];

  VectorXd eig_x(x.length());
  for (int i = 0; i < eig_x.size(); ++i)
    eig_x[i] = x[i];
  
  static VectorXd f,g;
  CASADI::evaluate(fun, eig_x, f);
  CASADI::evaluate(grad_fun, eig_x, g);
  assert_eq(f.size(),1);
  assert_eq(g.size(), grad.length());
  // Self-comparison asserts fail only if the value is NaN
  assert_eq(g.norm(), g.norm());
  assert_eq(f[0], f[0]);

  func = f[0];
  for (int i = 0; i < g.size(); ++i)
    grad[i] = g[i];

  DEBUG_LOG("f = "<<func);
}
// Performs one step towards the target; the target can be
// a 3D vector (X,Y,Z) or a 6D vector (X,Y,Z,R,P,Y)
VectorXd JointMover::OneStepTowardsXYZRPY( VectorXd _q, VectorXd _targetXYZRPY) {
  assert(_targetXYZRPY.size() >= 3);
  assert(_q.size() > 3);
  bool both = (_targetXYZRPY.size() == 6);
  // GetXYZRPY also updates the config to _q, so the Jacobian uses an updated value
  VectorXd delta = _targetXYZRPY - GetXYZRPY(_q, both); 
  // If the target also specifies roll, pitch, and yaw, compute the full Jacobian; otherwise just the translational Jacobian
  VectorXd dConfig = GetPseudoInvJac(both)*delta;
  
  // Scale factor so the step is never larger than mConfigStep
  double alpha = min((mConfigStep/dConfig.norm()), 1.0);
  dConfig = alpha*dConfig;
  return _q + dConfig;
}
Example n. 14
/* Computes the minimum of a function f:: R^n -> R using the BFGS method */
VectorXd minimize_bfgs(fcn_Rn_to_R f, VectorXd x0, fcn_Rn_to_Rn Df ) {

	cout << "Begin BFGS minimization";

	// Initial checks

	VectorXd p, s, x, Dfx , Dfx_next, y;

	// Initial guess of Hessian
	int N = x0.size();
	MatrixXd B = x0.norm() * MatrixXd::Identity(N, N);
	MatrixXd term3;
	double alpha = 1;

	x = x0;  // Should this be done in place?
	Dfx = Df(x0);
	int MAX_ITER = 5000;
	int iter = 0;

	double tol = 1e-5;


	while( (Dfx.norm() > tol) && (iter < MAX_ITER)) {
		cout << ".";
		p = -B.ldlt().solve(Dfx);
		p.normalize();

		alpha = line_search(f, x, p, Dfx);
		s = alpha*p;
		x += s;

		Dfx_next = Df(x);
		y = Dfx_next - Dfx;

		// BFGS update: B += y*y'/(y'*s) - (B*s*s'*B)/(s'*B*s)
		term3 = B*s*s.transpose()*B/(s.transpose()*B*s);
		B +=  (y*y.transpose())/(y.transpose()*s) - term3;

		Dfx = Dfx_next;
		
		iter++;
	}
	cout << endl;

	if (iter >= MAX_ITER){
		cout << "WARNING: Reached max iterations before converging" << endl;
	}

	return x;
}
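A minimal driver sketch with hypothetical values, assuming fcn_Rn_to_R and fcn_Rn_to_Rn accept plain functions of the forms double(VectorXd) and VectorXd(VectorXd), and that line_search behaves as a standard backtracking search:

static const Eigen::Vector2d target(3, -1);
double f(VectorXd x)    { return (x - target).squaredNorm(); }  // minimum at target
VectorXd Df(VectorXd x) { return 2 * (x - target); }

int main() {
    VectorXd x0 = VectorXd::Ones(2);           // nonzero, so the initial B = ||x0|| * I is invertible
    VectorXd xmin = minimize_bfgs(f, x0, Df);  // converges to (3, -1)
}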
// Performs Jacobian-based steps towards the specified target; the target can be
// a 3D vector (X,Y,Z) or a 6D vector (X,Y,Z,R,P,Y)
bool JointMover::GoToXYZRPY( VectorXd _qStart, VectorXd _targetXYZRPY, VectorXd &_qResult, std::list<Eigen::VectorXd> &path) {
  _qResult = _qStart;
  bool both = (_targetXYZRPY.size() == 6);
  // GetXYZRPY also updates the config to _qResult, so the Jacobian uses an updated value
  VectorXd delta = _targetXYZRPY - GetXYZRPY(_qResult, both); 
  
  int iter = 0;
  while( delta.norm() > mWorkspaceThresh && iter < mMaxIter ) {
    _qResult = OneStepTowardsXYZRPY(_qResult, _targetXYZRPY);
    path.push_back(_qResult);
    delta = (_targetXYZRPY - GetXYZRPY(_qResult, both) );
    //PRINT(delta.norm());
    iter++;
  }
  return iter < mMaxIter;
}
Example n. 16
// [[Rcpp::export]]
List myFastLm(MapMatd X, MapMatd y){
  const int n(X.rows()), p(X.cols());
  const LLT<MatrixXd> llt(AtA(X));
  const VectorXd betahat(llt.solve(X.adjoint() * y));
  const VectorXd fitted(X * betahat);
  const VectorXd resid(y - fitted);
  const int df(n - p);
  const double s(resid.norm() / std::sqrt(double(df)));
  const MatrixXd cov(llt.matrixL().solve(MatrixXd::Identity(p, p)));
  const MatrixXd cov2(MatrixXd(p,p).setZero().selfadjointView<Lower>().rankUpdate(cov.adjoint()) );

  return List::create(
    Named("coefficients") = betahat,
    Named("fitted.values") = fitted,
    Named("residuals") = resid,
    Named("s") = s,
    Named("df.residual") = df,
    Named("rank") = p,
    Named("cov") = cov2*s*s); 
}
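myFastLm relies on a helper AtA that forms X'X; a definition consistent with this call (an assumption, since AtA is not shown in this listing):

// Assumed helper: forms the symmetric product A'A from the lower triangle.
inline MatrixXd AtA(const MapMatd& A) {
    int n(A.cols());
    return MatrixXd(n, n).setZero().selfadjointView<Lower>()
                         .rankUpdate(A.adjoint());
}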
Example n. 17
    extern "C" SEXP fastLm(SEXP Xs, SEXP ys, SEXP type) {
	try {
	    const Map<MatrixXd>  X(as<Map<MatrixXd> >(Xs));
	    const Map<VectorXd>  y(as<Map<VectorXd> >(ys));
	    Index                n = X.rows();
	    if ((Index)y.size() != n) throw invalid_argument("size mismatch");

				// Select and apply the least squares method
	    lm                 ans(do_lm(X, y, ::Rf_asInteger(type)));

				// Copy coefficients and install names, if any
	    NumericVector     coef(wrap(ans.coef()));
	    List          dimnames(NumericMatrix(Xs).attr("dimnames"));
	    if (dimnames.size() > 1) {
		RObject   colnames = dimnames[1];
		if (!(colnames).isNULL())
		    coef.attr("names") = clone(CharacterVector(colnames));
	    }
	    
	    VectorXd         resid = y - ans.fitted();
	    int               rank = ans.rank();
	    int                 df = (rank == ::NA_INTEGER) ? n - X.cols() : n - rank;
	    double               s = resid.norm() / std::sqrt(double(df));
				// Create the standard errors
	    VectorXd            se = s * ans.se();

	    return List::create(_["coefficients"]  = coef,
				_["se"]            = se,
				_["rank"]          = rank,
				_["df.residual"]   = df,
				_["residuals"]     = resid,
				_["s"]             = s,
				_["fitted.values"] = ans.fitted());

	} catch( std::exception &ex ) {
	    forward_exception_to_r( ex );
	} catch(...) { 
	    ::Rf_error( "c++ exception (unknown reason)" ); 
	}
	return R_NilValue; // -Wall
    }
Example n. 18
bool Quickmin::step(double maxMove)
{
    VectorXd force = -objf->getGradient();
    if (parameters->optQMSteepestDecent) {
        velocity.setZero();
    }
    else {
        // Quickmin: zero the velocity when it opposes the force,
        // otherwise keep only its component along the force direction.
        if (velocity.dot(force) < 0) {
            velocity.setZero();
        }
        else {
            VectorXd f_unit = force/force.norm();
            velocity = velocity.dot(f_unit) * f_unit;
        }
    }
    
    velocity += force * dt;
    VectorXd dr = helper_functions::maxAtomMotionAppliedV(velocity * dt, parameters->optMaxMove);
    objf->setPositions(objf->getPositions() + dr);  
    iteration++;
    return objf->isConverged();
}
Example n. 19
void linear_reg::estimate(regdata& rdatain, int verbose, double tol_chol,
                          int model, int interaction, int ngpreds,
                          masked_matrix& invvarmatrixin, int robust,
                          int nullmodel)
{
    // The interaction parameter goes here
    // The model should come here
    regdata rdata = rdatain.get_unmasked_data();
    if (invvarmatrixin.length_of_mask != 0)
    {
        invvarmatrixin.update_mask(rdatain.masked_data);
        //  invvarmatrixin.masked_data->print();
    }
    if (verbose)
    {
        cout << rdata.is_interaction_excluded
                << " <- rdata.is_interaction_excluded\n";
        // std::cout << "invvarmatrix:\n";
        // invvarmatrixin.masked_data->print();
        std::cout << "rdata.X:\n";
        rdata.X.print();
    }
    mematrix<double> X = apply_model(rdata.X, model, interaction, ngpreds,
                                     rdata.is_interaction_excluded, false,
                                     nullmodel);
    if (verbose)
    {
        std::cout << "X:\n";
        X.print();
        std::cout << "Y:\n";
        rdata.Y.print();
    }
    int length_beta = X.ncol;
    beta.reinit(length_beta, 1);
    sebeta.reinit(length_beta, 1);
    //Han Chen
    if (length_beta > 1)
    {
        if (model == 0 && interaction != 0 && ngpreds == 2 && length_beta > 2)
        {
            covariance.reinit(length_beta - 2, 1);
        }
        else
        {
            covariance.reinit(length_beta - 1, 1);
        }
    }

    //Oct 26, 2009
    mematrix<double> tX = transpose(X);
    if (invvarmatrixin.length_of_mask != 0)
    {
        tX = tX * invvarmatrixin.masked_data;
        //!check if quicker
        //tX = productXbySymM(tX,invvarmatrix);
        // = invvarmatrix*X;
        // std::cout<<"new tX.nrow="<<X.nrow<<" tX.ncol="<<X.ncol<<"\n";
    }

    mematrix<double> tXX = tX * X;
    double N = X.nrow;

#if EIGEN_COMMENTEDOUT
    MatrixXd Xeigen    = X.data;
    MatrixXd tXeigen   = Xeigen.transpose();
    MatrixXd tXXeigen  = tXeigen * Xeigen;
    VectorXd Yeigen    = rdata.Y.data;
    VectorXd tXYeigen  = tXeigen * Yeigen;
    // Solve X^T * X * beta = X^T * Y for beta:
    VectorXd betaeigen = tXXeigen.fullPivLu().solve(tXYeigen);
    beta.data = betaeigen;

    if (verbose)
    {
        std::cout << setprecision(9) << "Xeigen:\n"  << Xeigen  << endl;
        std::cout << setprecision(9) << "tX:\n"  << tXeigen  << endl;
        std::cout << setprecision(9) << "tXX:\n" << tXXeigen << endl;
        std::cout << setprecision(9) << "tXY:\n" << tXYeigen << endl;
        std::cout << setprecision(9) << "beta:\n"<< betaeigen << endl;
        printf("----\n");
        printf("beta[0] = %e\n", betaeigen.data()[0]);
        printf("----\n");
//        (beta).print();
        double relative_error = (tXXeigen * betaeigen - tXYeigen).norm() / tXYeigen.norm(); // norm() is L2 norm
        cout << "The relative error is:\n" << relative_error << endl;

    }

    // This one is needed later on in this function
    mematrix<double> tXX_i = invert(tXX);
#else
    //
    // use cholesky to invert
    //
    mematrix<double> tXX_i = tXX;
    cholesky2_mm(tXX_i, tol_chol);
    chinv2_mm(tXX_i);
    // before was
    // mematrix<double> tXX_i = invert(tXX);

    mematrix<double> tXY = tX * (rdata.Y);
    beta = tXX_i * tXY;

    if (verbose)
    {
        std::cout << "tX:\n";
        tX.print();
        std::cout << "tXX:\n";
        tXX.print();
        std::cout << "chole tXX:\n";
        tXX_i.print();
        std::cout << "tXX-1:\n";
        tXX_i.print();
        std::cout << "tXY:\n";
        tXY.print();
        std::cout << "beta:\n";
        (beta).print();
    }
#endif
    // now compute residual variance
    sigma2 = 0.;
    mematrix<double> ttX = transpose(tX);
    mematrix<double> sigma2_matrix = rdata.Y;
    mematrix<double> sigma2_matrix1 = ttX * beta;
    //        std::cout << "sigma2_matrix\n";
    //        sigma2_matrix.print();
    //
    //        std::cout << "sigma2_matrix1\n";
    //        sigma2_matrix1.print();
    sigma2_matrix = sigma2_matrix - sigma2_matrix1;
    //        std::cout << "sigma2_matrix\n";
    //        sigma2_matrix.print();
    double val;
    //  std::cout << "sigma2_matrix.nrow=" << sigma2_matrix.nrow
    //            << "sigma2_matrix.ncol" << sigma2_matrix.ncol
    //            <<"\n";

    for (int i = 0; i < sigma2_matrix.nrow; i++)
    {
        val = sigma2_matrix.get(i, 0);
        //            std::cout << "val = " << val << "\n";
        sigma2 += val * val;
        //            std::cout << "sigma2+= " << sigma2 << "\n";
    }

    double sigma2_internal = sigma2 / (N - static_cast<double>(length_beta));
    // now compute residual variance
    //      sigma2 = 0.;
    //      for (int i =0;i<(rdata.Y).nrow;i++)
    //          sigma2 += ((rdata.Y).get(i,0))*((rdata.Y).get(i,0));
    //      for (int i=0;i<length_beta;i++)
    //          sigma2 -= 2. * (beta.get(i,0)) * tXY.get(i,0);
    //      for (int i=0;i<(length_beta);i++)
    //      for (int j=0;j<(length_beta);j++)
    //          sigma2 += (beta.get(i,0)) * (beta.get(j,0)) * tXX.get(i,j);
    //  std::cout<<"sigma2="<<sigma2<<"\n";
    //  std::cout<<"sigma2_internal="<<sigma2_internal<<"\n";
    //      replaced for ML
    //      sigma2_internal = sigma2/(N - double(length_beta) - 1);
    //        std::cout << "sigma2/=N = "<< sigma2 << "\n";
    sigma2 /= N;
    //  std::cout<<"N="<<N<<", length_beta="<<length_beta<<"\n";
    if (verbose)
    {
        std::cout << "sigma2 = " << sigma2 << "\n";
    }
    /*
     loglik = 0.;
     double ss=0;
     for (int i=0;i<rdata.nids;i++) {
     double resid = rdata.Y[i] - beta.get(0,0); // intercept
     for (int j=1;j<beta.nrow;j++) resid -= beta.get(j,0)*X.get(i,j);
     // residuals[i] = resid;
     ss += resid*resid;
     }
     sigma2 = ss/N;
     */
    //cout << "estimate " << rdata.nids << "\n";
    //(rdata.X).print();
    //for (int i=0;i<rdata.nids;i++) cout << rdata.masked_data[i] << " ";
    //cout << endl;
    loglik = 0.;
    double halfrecsig2 = .5 / sigma2;
    for (int i = 0; i < rdata.nids; i++)
    {
        double resid = rdata.Y[i] - beta.get(0, 0); // intercept
        for (int j = 1; j < beta.nrow; j++)
            resid -= beta.get(j, 0) * X.get(i, j);
        residuals[i] = resid;
        loglik -= halfrecsig2 * resid * resid;
    }
    loglik -= static_cast<double>(rdata.nids) * log(sqrt(sigma2));
    // cout << "estimate " << rdata.nids << "\n";
    //
    // Ugly fix to the fact that if we do mmscore, sigma2 is already
    //  in the matrix...
    //      YSA, 2009.07.20
    //
    //cout << "estimate 0\n";
    if (invvarmatrixin.length_of_mask != 0)
        sigma2_internal = 1.0;

    mematrix<double> robust_sigma2(X.ncol, X.ncol);
    if (robust)
    {
        mematrix<double> XbyR = X;
        for (int i = 0; i < X.nrow; i++)
            for (int j = 0; j < X.ncol; j++)
            {
                double tmpval = XbyR.get(i, j) * residuals[i];
                XbyR.put(tmpval, i, j);
            }
        XbyR = transpose(XbyR) * XbyR;
        robust_sigma2 = tXX_i * XbyR;
        robust_sigma2 = robust_sigma2 * tXX_i;
    }
    //cout << "estimate 0\n";
    for (int i = 0; i < (length_beta); i++)
    {
        if (robust)
        {
            // cout << "estimate :robust\n";
            double value = sqrt(robust_sigma2.get(i, i));
            sebeta.put(value, i, 0);
            //Han Chen
            if (i > 0)
            {
                if (model == 0 && interaction != 0 && ngpreds == 2
                        && length_beta > 2)
                {
                    if (i > 1)
                    {
                        double covval = robust_sigma2.get(i, i - 2);
                        covariance.put(covval, i - 2, 0);
                    }
                }
                else
                {
                    double covval = robust_sigma2.get(i, i - 1);
                    covariance.put(covval, i - 1, 0);
                }
            }
            //Oct 26, 2009
        }
        else
        {
            // cout << "estimate :non-robust\n";
            double value = sqrt(sigma2_internal * tXX_i.get(i, i));
            sebeta.put(value, i, 0);
            //Han Chen
            if (i > 0)
            {
                if (model == 0 && interaction != 0 && ngpreds == 2
                        && length_beta > 2)
                {
                    if (i > 1)
                    {
                        double covval = sigma2_internal * tXX_i.get(i, i - 2);
                        covariance.put(covval, i - 2, 0);
                    }
                }
                else
                {
                    double covval = sigma2_internal * tXX_i.get(i, i - 1);
                    covariance.put(covval, i - 1, 0);
                }
            }
            //Oct 26, 2009
        }
    }
    //cout << "estimate E\n";
    if (verbose)
    {
        std::cout << "sebeta (" << sebeta.nrow << "):\n";
        sebeta.print();
    }
}
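For reference, a compact Eigen-only sketch of the same unweighted estimate (a hypothetical standalone helper, without mematrix or the robust/interaction handling above):

#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::VectorXd;

// beta = (X'X)^-1 X'y; sigma2 = ||y - X*beta||^2 / (n - p);
// se(beta_i) = sqrt(sigma2 * [(X'X)^-1]_ii)
void ols(const MatrixXd& X, const VectorXd& y, VectorXd& beta, VectorXd& sebeta) {
    const MatrixXd XtX = X.transpose() * X;
    const MatrixXd XtX_i =
        XtX.ldlt().solve(MatrixXd::Identity(X.cols(), X.cols()));
    beta = XtX_i * (X.transpose() * y);
    const double sigma2 =
        (y - X * beta).squaredNorm() / double(X.rows() - X.cols());
    sebeta = (sigma2 * XtX_i.diagonal()).cwiseSqrt();
}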
Example n. 20
double QPSolver::GetValue(const VectorXd& sol)
{
    VectorXd result = mLhs * sol + mRhs;
    return result.squaredNorm();  // ||Lhs*sol + Rhs||^2
}
Example n. 21
// Lanczos iteration with full reorthogonalization.
// EigenTriangle estimates the triangle count as (1/6) * sum_i lambda_i^3,
// where lambda_i are the eigenvalues of the graph's adjacency matrix.
double                  EigenTriangle::solve(int maxIter)
{
    int n = graph -> VertexCount();
    srand(time(0));
    // The Krylov subspace dimension cannot exceed the number of vertices
    if (maxIter > n)
        maxIter = n;
    vector<VectorXd> v;
    v.push_back(VectorXd::Random(n));
    v[0] = v[0] / v[0].norm();
    VectorXd alpha(n);
    VectorXd beta(n + 1);
    beta[0] = 0;
    VectorXd w;
    int last = 0;
    printf("%d\n", maxIter);
    for (int i = 0; i < maxIter; i ++)
    {
        if (i * 100 / maxIter > last + 10 || i == maxIter - 1)
        {
            last = (i + 1) * 100 / maxIter;
            std::cout << "\r" << "[EigenTriangle] Processing " << last << "% ..." << std::flush;
        }
        w = adjMatrix * v[i];
        alpha[i] = w.dot(v[i]);
        if (i == maxIter - 1)
            break;
        w -= alpha[i] * v[i];
        if (i > 0)
            w -= beta[i] * v[i - 1];
        beta[i + 1] = w.norm();
        v.push_back(w / beta[i + 1]);
        for (int j = 0; j <= i; j ++)
        {
            v[i + 1] = v[i + 1] - v[i + 1].dot(v[j]) * v[j];
        }
    }
    printf("\n");
    int k = maxIter;
    MatrixXd T = MatrixXd::Zero(k, k);
    for (int i = 0; i < k; i ++)
    {
        T(i, i) = alpha[i];
        if (i < k - 1)
            T(i, i + 1) = beta[i + 1];
        if (i > 0)
            T(i, i - 1) = beta[i];
    }
    //std::cout << T << std::endl;

    Eigen::EigenSolver<MatrixXd> eigenSolver;
    eigenSolver.compute(T, /* computeEigenvectors = */ false);
    Eigen::VectorXcd eigens = eigenSolver.eigenvalues();
    double res = 0;
    for (int i = 0; i < eigens.size(); i ++)
    {
        std::complex<double> tmp = eigens[i];
        res += pow(tmp.real(), 3);
        printf("%.5lf\n", tmp.real());
    }
    res /= 6;
    return res;
}
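A sanity check of the lambda^3 / 6 identity on the triangle graph K3 (a hypothetical standalone snippet using a dense solver):

#include <Eigen/Eigenvalues>
#include <iostream>
using Eigen::MatrixXd;

int main() {
    MatrixXd A(3, 3);              // adjacency matrix of K3
    A << 0, 1, 1,
         1, 0, 1,
         1, 1, 0;                  // eigenvalues: 2, -1, -1
    Eigen::SelfAdjointEigenSolver<MatrixXd> es(A);
    double res = es.eigenvalues().array().cube().sum() / 6.0;
    std::cout << res << std::endl; // prints 1: K3 contains one triangle
}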
Example n. 22
void SlamSystem::BA()
{
    R.resize(numOfState);
    T.resize(numOfState);
    vel.resize(numOfState);
    for (int i = 0; i < numOfState ; i++)
    {
        int k = head + i;
        if (k >= slidingWindowSize){
          k -= slidingWindowSize;
        }
        R[k] = slidingWindow[k]->R_bk_2_b0;
        T[k] = slidingWindow[k]->T_bk_2_b0;
        vel[k] = slidingWindow[k]->v_bk;
    }

    int sizeofH = 9 * numOfState;
    MatrixXd HTH(sizeofH, sizeofH);
    VectorXd HTb(sizeofH);
    MatrixXd tmpHTH;
    VectorXd tmpHTb;

    MatrixXd H_k1_2_k(9, 18);
    MatrixXd H_k1_2_k_T;
    VectorXd residualIMU(9);

    MatrixXd H_i_2_j(9, 18);
    MatrixXd H_i_2_j_T;
    VectorXd residualCamera(9);
    MatrixXd tmpP_inv(9, 9);

    for (int iterNum = 0; iterNum < maxIterationBA; iterNum++)
    {
      HTH.setZero();
      HTb.setZero();

      //1. prior constraints
      int m_sz = margin.size;
      VectorXd dx = VectorXd::Zero(STATE_SZ(numOfState - 1));

      if (m_sz != (numOfState - 1)){
        assert(false && "prior matrix goes wrong!!!");
      }

      for (int i = numOfState - 2; i >= 0; i--)
      {
        int k = head + i;
        if (k >= slidingWindowSize){
          k -= slidingWindowSize;
        }
        //dp, dv, dq
        dx.segment(STATE_SZ(i), 3) = T[k] - slidingWindow[k]->T_bk_2_b0;
        dx.segment(STATE_SZ(i) + 3, 3) = vel[k] - slidingWindow[k]->v_bk;
        dx.segment(STATE_SZ(i) + 6, 3) = Quaterniond(slidingWindow[k]->R_bk_2_b0.transpose() * R[k]).vec() * 2.0;
      }
      HTH.block(0, 0, STATE_SZ(m_sz), STATE_SZ(m_sz)) += margin.Ap.block(0, 0, STATE_SZ(m_sz), STATE_SZ(m_sz));
      HTb.segment(0, STATE_SZ(m_sz)) -= margin.Ap.block(0, 0, STATE_SZ(m_sz), STATE_SZ(m_sz))*dx;
      HTb.segment(0, STATE_SZ(m_sz)) -= margin.bp.segment(0, STATE_SZ(m_sz));

      //2. imu constraints
      for (int i = numOfState-2; i >= 0; i-- )
      {
        int k = head + i;
        if (k >= slidingWindowSize){
          k -= slidingWindowSize;
        }
        if ( slidingWindow[k]->imuLinkFlag == false){
          continue;
        }
        int k1 = k + 1;
        if (k1 >= slidingWindowSize){
          k1 -= slidingWindowSize;
        }
        residualIMU = math.ResidualImu(T[k], vel[k], R[k],
                                   T[k1], vel[k1], R[k1],
                                   gravity_b0, slidingWindow[k]->timeIntegral,
                                   slidingWindow[k]->alpha_c_k, slidingWindow[k]->beta_c_k, slidingWindow[k]->R_k1_k);
        H_k1_2_k = math.JacobianImu(T[k], vel[k], R[k],
                                   T[k1], vel[k1], R[k1],
                                   gravity_b0, slidingWindow[k]->timeIntegral);
        H_k1_2_k_T = H_k1_2_k.transpose();
        H_k1_2_k_T *= slidingWindow[k]->P_k.inverse();
        HTH.block(i * 9, i * 9, 18, 18) += H_k1_2_k_T  *  H_k1_2_k;
        HTb.segment(i * 9, 18) -= H_k1_2_k_T * residualIMU;
      }

      //3. camera constraints
      for (int i = 0; i < numOfState; i++)
      {
        int currentStateID = head + i;
        if (currentStateID >= slidingWindowSize){
          currentStateID -= slidingWindowSize;
        }
        if (slidingWindow[currentStateID]->keyFrameFlag == false){
          continue;
        }

        list<int>::iterator iter = slidingWindow[currentStateID]->cameraLinkList.begin();
        for (; iter != slidingWindow[currentStateID]->cameraLinkList.end(); iter++ )
        {
          int linkID = *iter;

          H_i_2_j = math.JacobianDenseTracking(T[currentStateID], R[currentStateID], T[linkID], R[linkID] ) ;
          residualCamera = math.ResidualDenseTracking(T[currentStateID], R[currentStateID], T[linkID], R[linkID],
                                                      slidingWindow[currentStateID]->cameraLink[linkID].T_bi_2_bj,
                                                      slidingWindow[currentStateID]->cameraLink[linkID].R_bi_2_bj ) ;
//          residualCamera.segment(0, 3) = R[linkID].transpose()*(T[currentStateID] - T[linkID]) - slidingWindow[currentStateID]->cameraLink[linkID].T_bi_2_bj;
//          residualCamera.segment(3, 3).setZero();
//          residualCamera.segment(6, 3) = 2.0 * (Quaterniond(slidingWindow[currentStateID]->cameraLink[linkID].R_bi_2_bj.transpose()) * Quaterniond(R[linkID].transpose()*R[currentStateID])).vec();

          tmpP_inv.setZero();
          tmpP_inv.block(0, 0, 3, 3) = slidingWindow[currentStateID]->cameraLink[linkID].P_inv.block(0, 0, 3, 3) ;
          tmpP_inv.block(0, 6, 3, 3) = slidingWindow[currentStateID]->cameraLink[linkID].P_inv.block(0, 3, 3, 3) ;
          tmpP_inv.block(6, 0, 3, 3) = slidingWindow[currentStateID]->cameraLink[linkID].P_inv.block(3, 0, 3, 3) ;
          tmpP_inv.block(6, 6, 3, 3) = slidingWindow[currentStateID]->cameraLink[linkID].P_inv.block(3, 3, 3, 3) ;
          double r_v = residualCamera.segment(0, 3).norm() ;
          if ( r_v > huber_r_v ){
            tmpP_inv *= huber_r_v/r_v ;
          }
          double r_w = residualCamera.segment(6, 3).norm() ;
          if ( r_w > huber_r_w ){
            tmpP_inv *= huber_r_w/r_w ;
          }

          H_i_2_j_T = H_i_2_j.transpose();
          H_i_2_j_T *= tmpP_inv;

          tmpHTH = H_i_2_j_T  *  H_i_2_j;
          tmpHTb = H_i_2_j_T * residualCamera;

          int currentStateIDIndex = currentStateID - head;
          if ( currentStateIDIndex < 0){
            currentStateIDIndex += slidingWindowSize;
          }
          int linkIDIndex = linkID - head  ;
          if (linkIDIndex < 0){
            linkIDIndex += slidingWindowSize;
          }

          HTH.block(currentStateIDIndex * 9, currentStateIDIndex * 9, 9, 9) += tmpHTH.block(0, 0, 9, 9);
          HTH.block(currentStateIDIndex * 9, linkIDIndex * 9, 9, 9) += tmpHTH.block(0, 9, 9, 9);
          HTH.block(linkIDIndex * 9, currentStateIDIndex * 9, 9, 9) += tmpHTH.block(9, 0, 9, 9);
          HTH.block(linkIDIndex * 9, linkIDIndex * 9, 9, 9) += tmpHTH.block(9, 9, 9, 9);

          HTb.segment(currentStateIDIndex * 9, 9) -= tmpHTb.segment(0, 9);
          HTb.segment(linkIDIndex * 9, 9) -= tmpHTb.segment(9, 9);
        }
      }
//      printf("[numList in BA]=%d\n", numList ) ;

      //solve the BA
      //cout << "HTH\n" << HTH << endl;

      LLT<MatrixXd> lltOfHTH = HTH.llt();
      ComputationInfo info = lltOfHTH.info();
      if (info == Success)
      {
        VectorXd dx = lltOfHTH.solve(HTb);

 //       printf("[BA] %d %f\n",iterNum, dx.norm() ) ;

 //       cout << iterNum << endl ;
 //       cout << dx.transpose() << endl ;

        //cout << "iteration " << iterNum << "\n" << dx << endl;
#ifdef DEBUG_INFO
        geometry_msgs::Vector3 to_pub ;
        to_pub.x = dx.norm() ;
        printf("%d %f\n",iterNum, to_pub.x ) ;
        pub_BA.publish( to_pub ) ;
#endif

        VectorXd errorUpdate(9);
        for (int i = 0; i < numOfState; i++)
        {
          int k = head + i;
          if (k >= slidingWindowSize){
            k -= slidingWindowSize;
          }
          errorUpdate = dx.segment(i * 9, 9);
          T[k] += errorUpdate.segment(0, 3);
          vel[k] += errorUpdate.segment(3, 3);

          Quaterniond q(R[k]);
          Quaterniond dq;
          dq.x() = errorUpdate(6) * 0.5;
          dq.y() = errorUpdate(7) * 0.5;
          dq.z() = errorUpdate(8) * 0.5;
          dq.w() = sqrt(1 - SQ(dq.x()) - SQ(dq.y()) - SQ(dq.z()));  // unit-quaternion constraint
          R[k] = (q * dq).normalized().toRotationMatrix();
        }
        //cout << T[head].transpose() << endl;
      }
      else
      {
        ROS_WARN("LLT error!!!");
        iterNum = maxIterationBA;
        //cout << HTH << endl;
        //FullPivLU<MatrixXd> luHTH(HTH);
        //printf("rank = %d\n", luHTH.rank() ) ;
        //HTH.rank() ;
      }
    }

    // Include correction for information vector
    int m_sz = margin.size;
    VectorXd r0 = VectorXd::Zero(STATE_SZ(numOfState - 1));
    for (int i = numOfState - 2; i >= 0; i--)
    {
      int k = head + i;
      if (k >= slidingWindowSize){
        k -= slidingWindowSize;
      }
      //dp, dv, dq
      r0.segment(STATE_SZ(i), 3) = T[k] - slidingWindow[k]->T_bk_2_b0;
      r0.segment(STATE_SZ(i) + 3, 3) = vel[k] - slidingWindow[k]->v_bk;
      r0.segment(STATE_SZ(i) + 6, 3) = Quaterniond(slidingWindow[k]->R_bk_2_b0.transpose() * R[k]).vec() * 2.0;
    }
    margin.bp.segment(0, STATE_SZ(m_sz)) += margin.Ap.block(0, 0, STATE_SZ(m_sz), STATE_SZ(m_sz))*r0;

    for (int i = 0; i < numOfState ; i++)
    {
        int k = head + i;
        if (k >= slidingWindowSize){
          k -= slidingWindowSize;
        }
        slidingWindow[k]->R_bk_2_b0 = R[k];
        slidingWindow[k]->T_bk_2_b0 = T[k];
        slidingWindow[k]->v_bk = vel[k];
    }
}
Example n. 23
void Optimizer::levenberg_marquardt(const Properties& prop,
    int* num_iterations) {
  int num_iter = 0;
  double lambda = prop.lm_lambda0;
  // Using linpoint as current estimate below.
  function_system.estimate_to_linpoint();

  // Get the current Jacobian at the linearization point.
  SparseSystem jacobian = function_system.jacobian();

  // Get the error residual vector at the current linearization point.
  VectorXd r = function_system.weighted_errors(LINPOINT);

  // Record the absolute sum-of-squares error (i.e., objective function value) here.
  double error = r.squaredNorm();

#ifdef USE_PDL_STOPPING_CRITERIA
  // Compute the gradient direction vector at the current linearization point
  VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
#endif

  double error_diff, error_new;

  // Solve the damped system (J'J + lambda*diag(J'J)) for the step
  VectorXd delta = compute_gauss_newton_step(jacobian, &function_system._R,
      lambda);

  while (
  // We ALWAYS use these stopping criteria
  ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
      && (delta.norm() > prop.epsilon2)

#ifdef USE_PDL_STOPPING_CRITERIA
      && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
      && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)
#else
      && (error > prop.epsilon_abs)
#endif
  )  // end while conditional
  {
    num_iter++;

    // remember the last accepted linearization point
    function_system.linpoint_to_estimate();
    // Apply the delta vector DIRECTLY TO THE LINEARIZATION POINT!
    function_system.self_exmap(delta);
    error_new = function_system.weighted_errors(LINPOINT).squaredNorm();
    error_diff = error - error_new;
    // feedback
    if (!prop.quiet) {
      cout << "LM Iteration " << num_iter << ": (lambda=" << lambda << ") ";
      if (error_diff > 0.) {
        cout << "residual: " << error_new << endl;
      } else {
        cout << "rejected" << endl;
      }
    }
    // decide if acceptable
    if (error_diff > 0.) {

#ifndef USE_PDL_STOPPING_CRITERIA
      if (error_diff < prop.epsilon_rel * error) {
        break;
      }
#endif

      // Update lambda
      lambda /= prop.lm_lambda_factor;

      // Record the error at the newly-accepted estimate.
      error = error_new;

      // Relinearize around the newly-accepted estimate.
      jacobian = function_system.jacobian();

#ifdef USE_PDL_STOPPING_CRITERIA
      r = function_system.weighted_errors(LINPOINT);
      g = mul_SparseMatrixTrans_Vector(jacobian, r);
#endif
    } else {
      // reject new estimate
      lambda *= prop.lm_lambda_factor;
      // restore previous estimate
      function_system.estimate_to_linpoint();
    }

    // Compute the step for the next iteration.
    delta = compute_gauss_newton_step(jacobian, &function_system._R, lambda);

  } // end while

  if (num_iterations != NULL) {
    *num_iterations = num_iter;
  }
  // Copy current estimate contained in linpoint.
  function_system.linpoint_to_estimate();
}
Example n. 24
void Optimizer::gauss_newton(const Properties& prop, int* num_iterations) {
  // Batch optimization
  int num_iter = 0;

  // Set the new linearization point to be the current estimate.
  function_system.estimate_to_linpoint();
  // Compute Jacobian about current estimate.
  SparseSystem jacobian = function_system.jacobian();

  // Get the current error residual vector
  VectorXd r = function_system.weighted_errors(LINPOINT);

#ifdef USE_PDL_STOPPING_CRITERIA
  // Compute the current gradient direction vector
  VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
#else
  double error = r.squaredNorm();
  double error_new;
  // We haven't computed a step yet, so this initialization ensures
  // that we never skip the while loop by failing the change-in-error check.
  double error_diff = prop.epsilon_rel * error + 1;
#endif

  // Compute Gauss-Newton step h_{gn} to get to the next estimated optimizing point.
  VectorXd delta = compute_gauss_newton_step(jacobian);

  while (
  // We ALWAYS use these criteria
  ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
      && (delta.norm() > prop.epsilon2)

#ifdef USE_PDL_STOPPING_CRITERIA
      && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
      && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)

#else // Custom stopping criteria for GN
      && (error > prop.epsilon_abs)
      && (fabs(error_diff) > prop.epsilon_rel * error)
#endif

  ) // end while conditional
  {
    num_iter++;

    // Apply the Gauss-Newton step h_{gn} to...
    function_system.apply_exmap(delta);
    // ...set the new linearization point to be the current estimate.
    function_system.estimate_to_linpoint();
    // Relinearize about the new current estimate.
    jacobian = function_system.jacobian();
    // Compute the error residual vector at the new estimate.
    r = function_system.weighted_errors(LINPOINT);

#ifdef USE_PDL_STOPPING_CRITERIA
    g = mul_SparseMatrixTrans_Vector(jacobian, r);
#else
    // Update the error difference in errors between the previous and
    // current estimates.
    error_new = r.squaredNorm();
    error_diff = error - error_new;
    error = error_new;  // Record the absolute error at the current estimate
#endif

    // Compute Gauss-Newton step h_{gn} to get to the next estimated
    // optimizing point.
    delta = compute_gauss_newton_step(jacobian);
    if (!prop.quiet) {
      cout << "Iteration " << num_iter << ": residual ";

#ifdef USE_PDL_STOPPING_CRITERIA
      cout << r.squaredNorm();
#else
      cout << error;
#endif
      cout << endl;
    }
  }  //end while

  if (num_iterations != NULL) {
    *num_iterations = num_iter;
  }
  _cholesky->get_R(function_system._R);
}
//TBD: J -> adfda, e should take data as input too.
//PatchFit::lm(MatrixXd (*J)(VectorXd) ,VectorXd (*e)(VectorXd), VectorXd a_Init)
VectorXd
PatchFit::lm(VectorXd a_Init)
{
  // Options
  bool normalize (false);
  double l_Init = 0.001;
  double nu = 10;

  VectorXd a = a_Init;

  double l = l_Init;

  int i=0; //iteration
  //bool cc = false; // whether the last iteration committed a change to a

  double r; //residual
  
  //calculate initial error and residual
  if (normalize)
  {
    a = a/a.norm();
    //cc = true;
  }

  VectorXd ee;
  ee = PatchFit::parab(cloud_vec,a);
  r = ee.norm();
  //double r_Init = r;

  double dr = 0.0;
  MatrixXd jj;
  
  double lastr;

  // define matrices
  MatrixXd jtj, jtj_diag, aa;
  VectorXd b, da, lasta, lastee;
  while (1)
  {
    // terminating due to 0 residual at iteration i
    if (fabs(r) < EPS)
      break;

    // terminating due to minad at iteration i
    //TBD

    // terminating due to minrd at iteration i
    if ((i>0) && (this->minrd_>0.0) && (dr<0.0) && (fabs(dr)<this->minrd_*lastr))
      break;

    // terminating due to minar at iteration i
    //TBD

    // terminating due to minrr at iteration i
    //TBD
   
    // terminating due to minada at iteration i
    //TBD

    // terminating due to minrda at iteration i
    //TBD

    // terminating due to max iterations
    if (i==this->maxi_)
      break;

    i = i+1;

    // update Jacobian if necessary
    if ((i==1)||(dr<0.0))
      jj = PatchFit::parab_dfda(a);

    // calculate da
    jtj = jj.adjoint()*jj;
    jtj_diag = jtj.diagonal().asDiagonal();
    aa = jtj + (l*jtj_diag);
    b = -jj.adjoint()*ee;

    // solve LLS system
    da = lls(aa,b);
    
    // update a (will back out the update if residual increased)
    lasta = a;
    a = a+da;

    if (normalize)
      a = a/a.norm();
    
    // update error and residual
    lastr = r;
    lastee = ee;
    ee = PatchFit::parab(cloud_vec,a);
    r = ee.norm();
    dr = r-lastr;

    if (dr<0) // converging
    {
      l = l/nu;
      //cc = true;
    }
    else // diverging
    {
      l = l*nu;
      a = lasta;
      ee = lastee;
      r = lastr;
      //cc = false;
    }
  }

  if (normalize && (i==0))
    a = a/a.norm();

  return (a);
}
Example n. 26
bool QuadraticLineMinimizerType::improve_energy(bool verbose) {
  //if (verbose) printf("\t\tI am running QuadraticLineMinimizerType::improve_energy with verbose==%d\n", verbose);
  //fflush(stdout);
  // FIXME: The following probably double-computes the energy!
  const double E0 = energy();
  if (verbose) {
    printf("\t\tQuad: E0 = %25.15g", E0);
    fflush(stdout);
  }
  if (isnan(E0)) {
    // There is no point continuing, since we're starting with a NaN!
    // So we may as well quit here.
    if (verbose) {
      printf(" which is a NaN, so I'm quitting early.\n");
      fflush(stdout);
    }
    return false;
  }
  if (isnan(slope)) {
    // The slope here is a NaN, so there is no point continuing!
    // So we may as well quit here.
    if (verbose) {
      printf(", but the slope is a NaN, so I'm quitting early.\n");
      fflush(stdout);
    }
    return false;
  }
  if (slope == 0) {
    // When the slope is precisely zero, there's no point continuing,
    // as the gradient doesn't have any information about how to
    // improve things.  Or possibly we're at the minimum to numerical
    // precision.
    if (verbose) {
      printf(" which means we've arrived at the minimum!\n");
      fflush(stdout);
    }
    return false;
  }

  if (slope*(*step) > 0) {
    if (verbose) printf("Swapping sign of step with slope %g...\n", slope);
    *step *= -1;
  }
  // Here we're going to keep halving step1 until it's small enough
  // that the energy drops a tad.  This hopefully will give us a
  // reasonable starting guess.
  double step1 = *step;
  *x += step1*direction; // Step away a bit...
  double Etried = -137;
  do {
    step1 *= 0.5;
    *x -= step1*direction; // and then step back a bit less...
    invalidate_cache();
    if (energy() == Etried) {
      if (verbose) {
        printf("\tThis is silly in QuadraticLineMinimizerType::improve_energy: %g (%g vs %g)\n",
               step1, energy(), Etried);
        Grid foo(gd, *x);
        f.run_finite_difference_test("In QuadraticLineMinimizerType", kT, foo, &direction);
        fflush(stdout);
      }
      break;
    }
    Etried = energy();
  } while (energy() > E0 || isnan(energy()));
  
  const double E1 = energy();

  // Do a parabolic extrapolation with E0, E1, and gd to get curvature
  // and new step
  const double curvature = 2.0*(E1-E0-step1*slope)/(step1*step1);
  double step2 = -slope/curvature;
  if (verbose) {
    printf("   E1 = %25.15g\n", E1);
    printf("\t\tQuad: slope = %14.7g  curvature = %14.7g\n", slope, curvature);
    fflush(stdout);
  }
 
  if (curvature <= 0.0) {
    if (verbose) {
      printf("\t\tCurvature has wrong sign... %g\n", curvature);
      fflush(stdout);
    }
    if (better(E0, E1)) {
      // It doesn't look to be working, so let's panic!
      invalidate_cache();
      *x -= step1*direction;
      if (verbose) printf("\t\tQuadratic linmin not working properly!!!\n");
      //assert(!"Quadratic linmin not working well!!!\n");
      return false;
    } else {
      // It looks like we aren't moving far enough, so let's try
      // progressively doubling how far we're going.
      double Ebest = E0;
      while (energy() <= Ebest) {
        Ebest = energy();
        *x += step1*direction;
        invalidate_cache();
        step1 *= 2;
      }
      step1 *= 0.5;
      *x -= step1*direction;
      *step = step1;
    }
  } else if (E1 == E0) {
    if (verbose) {
      printf("\t\tNo change in energy... step1 is %g, direction has mag %g pos is %g\n",
             step1, direction.norm(), x->norm());
      fflush(stdout);
    }
    do {
      invalidate_cache();
      *x -= step1*direction;
      step1 *= 2;
      *x += step1*direction;
      // printf("From step1 of %10g I get delta E of %g\n", step1, energy()-E0);
    } while (energy() == E0);
    if (energy() > E0) {
      *x -= step1*direction;
      invalidate_cache();
      step1 = 0;
      printf("\t\tQuad: failed to find any improvement!  :(\n");
    } else if (isnan(energy())) {
      printf("\t\tQuad: found NaN with smallest possible step!!!\n");
      *x -= step1*direction;
      invalidate_cache();
      step1 = 0;
      //printf("\t\tQuad: put energy back to %g...\n", energy());
    }
  } else {
    *step = step2; // output the stepsize for later reuse.
    invalidate_cache();
    *x += (step2-step1)*direction; // and move to the expected minimum.
    if (verbose) {
      printf("\t\tQuad: step1 = %14.7g  step2 = %14.7g\n", step1, step2);
      printf("\t\tQuad: E2 = %25.15g\n", energy());
      fflush(stdout);
    }

    // Check that the energy did indeed drop!  FIXME: this may do
    // extra energy calculations, since it's not necessarily shared
    // with the driver routine!
    if (better(E1, energy()) && better(E1, E0)) {
      // The first try was better, so let's go with that one!
      if (verbose) printf("\t\tGoing back to the first try...\n");
      invalidate_cache();
      *x -= (step2-step1)*direction;
    } else if (energy() > E0) {
      const double E2 = energy();
      invalidate_cache();
      *x -= step2*direction;
      if (verbose) {
        printf("\t\tQuadratic linmin not working well!!! (E2 = %14.7g, E0 = %14.7g)\n", E2, energy());
        fflush(stdout);
      }
      return false;
      //assert(!"Quadratic linmin not working well!!!\n");
    }
  }
  // We always return false, because it's never recommended to call
  // improve_energy again with this algorithm.
  return false;
}
Example n. 27
// Rescale v in place so that its norm never exceeds clamp.
void clampMag(VectorXd& v, double clamp)
{
    double n = v.norm();
    if(n > clamp)
        v *= clamp/n;
}