Example #1
	void PGSSolver::solve(const MatrixX& _A, const VectorX& _B, VectorX& _x, int _iterationCount) const
	{
		int size = _B.getSize();

		//initialize x
		_x.setSize(size);
		_x.set(0);

		//make the required number of iterations
		for (int iteration = 0; iteration < _iterationCount; ++iteration)
		{
			//loop through each element of x
			for (int i = 0; i < size; ++i)
			{
				//compute the off-diagonal sum of a(i, j) * x(j), j != i
				float ax = 0;
				for (int j = 0; j < size; ++j)
					if (j != i)
						ax += _A(i, j) * _x(j);

				//compute x(i) = (b(i) - ax) / a(i, i)
				float res = (_B(i) - ax) / _A(i, i);
				_x(i, res);
			}
		}
	}
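For comparison, here is a minimal self-contained Gauss-Seidel sweep written against plain std::vector; the row-major matrix layout, the function name and all parameter names are illustrative only and are not the project's MatrixX/VectorX API.

#include <vector>
#include <cstddef>

// Reference Gauss-Seidel sweep over a dense row-major matrix A (n*n entries)
// and right-hand side b. Illustrative sketch, not the project's solver.
void gaussSeidelReference(const std::vector<double>& A, const std::vector<double>& b,
                          std::vector<double>& x, int iterationCount)
{
	const std::size_t n = b.size();
	x.assign(n, 0.0);

	for (int iteration = 0; iteration < iterationCount; ++iteration)
	{
		for (std::size_t i = 0; i < n; ++i)
		{
			// off-diagonal sum, using already-updated entries of x
			double ax = 0.0;
			for (std::size_t j = 0; j < n; ++j)
				if (j != i)
					ax += A[i * n + j] * x[j];

			x[i] = (b[i] - ax) / A[i * n + i];
		}
	}
}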
Example #2
void AttachmentConstraint::PBDProject(VectorX& x, const SparseMatrix& inv_mass, unsigned int ns)
{
	// LOOK
	ScalarType k_prime = 1 - std::pow(1-*(m_p_pbd_stiffness), 1.0/ns);
	
	EigenVector3 p = x.block_vector(m_p0);
	EigenVector3 dp = m_fixd_point-p;

	x.block_vector(m_p0) += k_prime * dp;
}
template <typename PointSource, typename PointTarget, typename MatScalar> inline void
pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
    const pcl::PointCloud<PointSource> &cloud_src,
    const std::vector<int> &indices_src,
    const pcl::PointCloud<PointTarget> &cloud_tgt,
    const std::vector<int> &indices_tgt,
    Matrix4 &transformation_matrix) const
{
  if (indices_src.size () != indices_tgt.size ())
  {
    PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] Number or points in source (%lu) differs than target (%lu)!\n", indices_src.size (), indices_tgt.size ());
    return;
  }

  if (indices_src.size () < 4)     // need at least 4 samples
  {
    PCL_ERROR ("[pcl::IterativeClosestPointNonLinear::estimateRigidTransformationLM] ");
    PCL_ERROR ("Need at least 4 points to estimate a transform! Source and target have %lu points!",
               indices_src.size ());
    return;
  }

  int n_unknowns = warp_point_->getDimension ();  // get dimension of unknown space
  VectorX x (n_unknowns);
  x.setConstant (n_unknowns, 0);

  // Set temporary pointers
  tmp_src_ = &cloud_src;
  tmp_tgt_ = &cloud_tgt;
  tmp_idx_src_ = &indices_src;
  tmp_idx_tgt_ = &indices_tgt;

  OptimizationFunctorWithIndices functor (static_cast<int> (indices_src.size ()), this);
  Eigen::NumericalDiff<OptimizationFunctorWithIndices> num_diff (functor);
  //Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices> > lm (num_diff);
  Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctorWithIndices>, MatScalar> lm (num_diff);
  int info = lm.minimize (x);

  // Compute the norm of the residuals
  PCL_DEBUG ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] LM solver finished with exit code %i, having a residual norm of %g. \n", info, lm.fvec.norm ());
  PCL_DEBUG ("Final solution: [%f", x[0]);
  for (int i = 1; i < n_unknowns; ++i) 
    PCL_DEBUG (" %f", x[i]);
  PCL_DEBUG ("]\n");

  // Return the correct transformation
  warp_point_->setParam (x);
  transformation_matrix = warp_point_->getTransform ();

  tmp_src_ = NULL;
  tmp_tgt_ = NULL;
  tmp_idx_src_ = tmp_idx_tgt_ = NULL;
}
bool BCCoreSiconos::callSolver(MatrixX& Mlcp, VectorX& b, VectorX& solution, VectorX& contactIndexToMu, ofstream& os)
{
#ifdef BUILD_BCPLUGIN_WITH_SICONOS
  int NC3 = Mlcp.rows();
  if(NC3<=0) return true;
  int NC = NC3/3;
  int CFS_DEBUG = 0;
  int CFS_DEBUG_VERBOSE = 0;
  if(CFS_DEBUG)
  {
    if(NC3%3 != 0           ){ os << "   warning-1 " << std::endl;return false;}
    if(       b.rows()!= NC3){ os << "   warning-2 " << std::endl;return false;}
    if(solution.rows()!= NC3){ os << "   warning-3 " << std::endl;return false;}
  } 
  for(int ia=0; ia<NC; ia++)
    for(int i=0; i<3; i++)
      prob->q[3*ia+i] = b(((i==0)?(ia):(2*ia+i+NC-1)));
  for(int ia=0; ia<NC; ia++)
    prob->mu[ia] = contactIndexToMu[ia];
  prob->numberOfContacts = NC;

  if( USE_FULL_MATRIX )
  {
    prob->M->storageType = 0;
    prob->M->size0       = NC3;
    prob->M->size1       = NC3;
    double* ptmp = prob->M->matrix0 ;
    for(int ia=0; ia<NC; ia++) for(int i=0; i<3; i++)
    {
      for(int ja=0; ja<NC; ja++) for(int j=0; j<3; j++)
      {
        ptmp[NC3*(3*ia+i)+(3*ja+j)] = Mlcp(((i==0)?(ia):(2*ia+i+NC-1)), ((j==0)?(ja):(2*ja+j+NC-1)));
      }
    }
  }
  else
  {
    prob->M->storageType = 1;
    prob->M->size0       = NC3;
    prob->M->size1       = NC3;
    sparsify_A( prob->M->matrix1 , Mlcp , NC , &os);
  }
  
  fc3d_driver(prob,reaction,velocity,solops, numops);
  
  double* prea = reaction ;
  for(int ia=0; ia<NC; ia++)
    for(int i=0; i<3; i++)
      solution(((i==0)?(ia):(2*ia+i+NC-1))) = prea[3*ia+i];
  if(CFS_DEBUG_VERBOSE)
  {
    os << "=---------------------------------="<< std::endl; 
    os << "| res_error =" << solops->dparam[1] <<  std::endl;
    os << "=---------------------------------="<< std::endl; 
  }
#endif
  return true;
}
template <typename PointSource, typename PointTarget, typename MatScalar> void
pcl::registration::TransformationEstimationLM<PointSource, PointTarget, MatScalar>::estimateRigidTransformation (
    const pcl::PointCloud<PointSource> &cloud_src,
    const pcl::PointCloud<PointTarget> &cloud_tgt,
    Matrix4 &transformation_matrix) const
{

  // <cloud_src,cloud_src> is the source dataset
  if (cloud_src.points.size () != cloud_tgt.points.size ())
  {
    PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] ");
    PCL_ERROR ("Number or points in source (%lu) differs than target (%lu)!\n", 
               cloud_src.points.size (), cloud_tgt.points.size ());
    return;
  }
  if (cloud_src.points.size () < 4)     // need at least 4 samples
  {
    PCL_ERROR ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation] ");
    PCL_ERROR ("Need at least 4 points to estimate a transform! Source and target have %lu points!\n", 
               cloud_src.points.size ());
    return;
  }

  int n_unknowns = warp_point_->getDimension ();
  VectorX x (n_unknowns);
  x.setZero ();
  
  // Set temporary pointers
  tmp_src_ = &cloud_src;
  tmp_tgt_ = &cloud_tgt;

  OptimizationFunctor functor (static_cast<int> (cloud_src.points.size ()), this);
  Eigen::NumericalDiff<OptimizationFunctor> num_diff (functor);
  //Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, double> lm (num_diff);
  Eigen::LevenbergMarquardt<Eigen::NumericalDiff<OptimizationFunctor>, MatScalar> lm (num_diff);
  int info = lm.minimize (x);

  // Compute the norm of the residuals
  PCL_DEBUG ("[pcl::registration::TransformationEstimationLM::estimateRigidTransformation]");
  PCL_DEBUG ("LM solver finished with exit code %i, having a residual norm of %g. \n", info, lm.fvec.norm ());
  PCL_DEBUG ("Final solution: [%f", x[0]);
  for (int i = 1; i < n_unknowns; ++i) 
    PCL_DEBUG (" %f", x[i]);
  PCL_DEBUG ("]\n");

  // Return the correct transformation
  warp_point_->setParam (x);
  transformation_matrix = warp_point_->getTransform ();

  tmp_src_ = NULL;
  tmp_tgt_ = NULL;
}
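A hypothetical call site for the overload above, using pcl::PointXYZ clouds; the point values, variable names and the main() wrapper are made up for illustration, and the estimator is left at its default (6-D rigid warp, float scalar).

#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/registration/transformation_estimation_lm.h>

// Build two small corresponding clouds (the target is the source translated by
// (1, 2, 0)) and ask the LM estimator for the rigid transform between them.
int main()
{
  pcl::PointCloud<pcl::PointXYZ> src, tgt;
  src.push_back(pcl::PointXYZ(0.0f, 0.0f, 0.0f));
  src.push_back(pcl::PointXYZ(1.0f, 0.0f, 0.0f));
  src.push_back(pcl::PointXYZ(0.0f, 1.0f, 0.0f));
  src.push_back(pcl::PointXYZ(0.0f, 0.0f, 1.0f));
  for (const pcl::PointXYZ& p : src.points)
    tgt.push_back(pcl::PointXYZ(p.x + 1.0f, p.y + 2.0f, p.z));

  pcl::registration::TransformationEstimationLM<pcl::PointXYZ, pcl::PointXYZ> est;
  Eigen::Matrix4f transform;
  est.estimateRigidTransformation(src, tgt, transform);
  return 0;
}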
Example #6
// spring gradient: k*(current_length-rest_length)*current_direction;
void SpringConstraint::EvaluateGradient(const VectorX& x, VectorX& gradient)
{
	// TODO
	//EigenVector3 g_i = (*(m_p_stiffness))*(x.block_vector(m_p0) - m_fixd_point);
    //gradient.block_vector(m_p0) += g_i;

	EigenVector3 p1 = x.block_vector(m_p1);
	EigenVector3 p2 = x.block_vector(m_p2);
	float currentLength = (p1 - p2).norm();
	float force = (*this->m_p_stiffness) * (currentLength - this->m_rest_length);
	EigenVector3 n1 = p1 - p2, n2 = p2 - p1;
	n1.normalize();
	n2.normalize();
	gradient.block_vector(m_p1) += n1 * force;
	gradient.block_vector(m_p2) += n2 * force;
}
void VelocityTrackingObjective<Scalar>::setVelRefInWorldFrame(const VectorX& velRefInWorldFrame)
{
  assert(velRefInWorldFrame.size()==model_.getNbSamples());
  assert(velRefInWorldFrame==velRefInWorldFrame); // self-comparison fails only if the vector contains NaNs

  velRefInWorldFrame_ = velRefInWorldFrame;
}
Example #8
GMMExpectationMaximization::Real GMMExpectationMaximization::gauss(const VectorX & mean,
  const MatrixX & cov,const VectorX & pt) const
{
  Real det = cov.determinant();
  uint dim = mean.size();
  // check that the covariance matrix is invertible
  if (std::abs(det) < std::pow(m_epsilon,dim) * 0.1)
    return 0.0; // the gaussian has approximately zero width: the probability of any point falling into it is approximately 0.
 
  // else, compute pdf
  MatrixX inverse_cov = cov.inverse();
  VectorX dist = pt - mean;
  Real exp = - (dist.dot(inverse_cov * dist)) / 2.0;
  Real den = std::sqrt(std::pow(2.0 * M_PI,dim) * std::abs(det));
  return std::exp(exp) / den;
}
Example #9
void TrajectoryWalkgen<Scalar>::setState(const VectorX& state)
{
  assert(state==state);
  assert(state.size()==3);

  noDynModel_.setState(state);
}
Example #10
void Simulation::computeForces(const VectorX& x, VectorX& force)
{
    VectorX gradient;

    gradient.resize(m_mesh->m_system_dimension);
    gradient.setZero();

    // springs
    for (std::vector<Constraint*>::iterator it = m_constraints.begin(); it != m_constraints.end(); ++it)
    {
        (*it)->EvaluateGradient(x, gradient);
    }

    // external forces
    gradient -= m_external_force;

    force = -gradient;
}
void MotionConstraint<Scalar>::computeFunction(const VectorX& x0, VectorX& func,
                                               Scalar velLimit, Scalar accLimit)
{
  int N = model_.getNbSamples();

  const LinearDynamic<Scalar>& dynBaseVel = model_.getVelLinearDynamic();
  const LinearDynamic<Scalar>& dynBaseAcc = model_.getAccLinearDynamic();

  tmp_.fill(velLimit);
  tmp2_.fill(accLimit);

  func.noalias() = -getGradient()*x0;
  func.segment(0, N).noalias() += tmp_;
  func.segment(0, N).noalias() -= dynBaseVel.S * model_.getState();
  func.segment(N, N).noalias() += tmp2_;
  func.segment(N, N).noalias() -= dynBaseAcc.S * model_.getState();

}
Example #12
void SpringConstraint::PBDProject(VectorX& x, const SparseMatrix& inv_mass, unsigned int ns)
{
	// TODO
	// change project order
	ScalarType k_prime = 1 - std::pow(1-*(m_p_pbd_stiffness), 1.0/ns);

	float rest_length = m_rest_length;
	EigenVector3 p1 = x.block_vector(m_p1);
	EigenVector3 p2 = x.block_vector(m_p2);

	float current_length = (p1 - p2).norm();
	EigenVector3 current_direction = (p1 - p2) / current_length;
	EigenVector3 dp = (current_length - rest_length) * current_direction;

	ScalarType w1 = inv_mass.coeff(m_p1, m_p1);
	ScalarType w2 = inv_mass.coeff(m_p2, m_p2);

	x.block_vector(m_p1) -= k_prime * dp * w1 / (w1 + w2);
	x.block_vector(m_p2) += k_prime * dp * w2 / (w1 + w2);
}
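The same projection written as a stand-alone function with plain Eigen types may make the inverse-mass weighting and the iteration-count-independent stiffness easier to see; the function name, the free-function form and the double-precision types are illustrative assumptions, not the project's API.

#include <Eigen/Dense>
#include <cmath>

// Stand-alone PBD distance-constraint projection (same scheme as above).
// w1, w2 are inverse masses; stiffness is the user stiffness in [0, 1].
void projectDistanceConstraint(Eigen::Vector3d& p1, Eigen::Vector3d& p2,
                               double w1, double w2,
                               double restLength,
                               double stiffness, unsigned int solverIterations)
{
    // Iteration-count-independent stiffness: k' = 1 - (1 - k)^(1/ns)
    const double kPrime = 1.0 - std::pow(1.0 - stiffness, 1.0 / solverIterations);

    const Eigen::Vector3d delta = p1 - p2;
    const double currentLength = delta.norm();
    if (currentLength < 1e-12 || w1 + w2 <= 0.0)
        return; // degenerate configuration: coincident points or both particles fixed

    const Eigen::Vector3d correction =
        (currentLength - restLength) * (delta / currentLength);

    p1 -= kPrime * (w1 / (w1 + w2)) * correction;
    p2 += kPrime * (w2 / (w1 + w2)) * correction;
}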
const MatrixX& Jacobian::GetNullspace()
{
	if(computeNullSpace_)
	{
		computeNullSpace_ = false;
		/*jacobianInverseNoDls_ = jacobian_;
		PseudoInverse(jacobianInverseNoDls_); // tmp while figuring out how to chose lambda*/
		//ComputeSVD();
		MatrixX id = MatrixX::Identity(jacobian_.cols(), jacobian_.cols());
		ComputeSVD();
		//Eigen::JacobiSVD<MatrixX> svd(jacobian_, Eigen::ComputeThinU | Eigen::ComputeThinV);
		MatrixX res = MatrixX::Zero(id.rows(), id.cols());
		for(int i =0; i < svd_.matrixV().cols(); ++ i)
		{
			VectorX v = svd_.matrixV().col(i);
			res += v * v.transpose();
		}
		Identitymin_ = id - res;
		//Identitymin_ = id - (jacobianInverseNoDls_* jacobian_);
	}
	return Identitymin_;
}
bool Solver_LP_abstract::readLpFromFile(const std::string& filename,
                                        VectorX &c, VectorX &lb, VectorX &ub,
                                        MatrixXX &A, VectorX &Alb, VectorX &Aub)
{
  std::ifstream in(filename.c_str(), std::ios::in | std::ios::binary);
  if (!in.is_open())
    return false;
  typename MatrixXX::Index n=0, m=0;
  in.read((char*) (&n),sizeof(typename MatrixXX::Index));
  in.read((char*) (&m),sizeof(typename MatrixXX::Index));
  c.resize(n);
  lb.resize(n);
  ub.resize(n);
  A.resize(m,n);
  Alb.resize(m);
  Aub.resize(m);
  in.read( (char *) c.data() , n*sizeof(typename MatrixXX::Scalar) );
  in.read( (char *) lb.data() , n*sizeof(typename MatrixXX::Scalar) );
  in.read( (char *) ub.data() , n*sizeof(typename MatrixXX::Scalar) );
  in.read( (char *) A.data() , m*n*sizeof(typename MatrixXX::Scalar) );
  in.read( (char *) Alb.data() , m*sizeof(typename MatrixXX::Scalar) );
  in.read( (char *) Aub.data() , m*sizeof(typename MatrixXX::Scalar) );
  in.close();
  return true;
}
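To document the raw binary layout readLpFromFile expects (n, m, then c, lb, ub, A, Alb, Aub as contiguous scalars), here is a hypothetical writer counterpart; plain Eigen::MatrixXd/VectorXd and Eigen::Index stand in for the project's MatrixXX/VectorX aliases, so the two functions only interoperate when those types match in scalar, index size and storage order.

#include <fstream>
#include <string>
#include <Eigen/Dense>

// Hypothetical counterpart to readLpFromFile: writes an LP in the same raw
// binary layout. Illustrative sketch, not part of Solver_LP_abstract.
bool writeLpToFile(const std::string& filename,
                   const Eigen::VectorXd& c, const Eigen::VectorXd& lb, const Eigen::VectorXd& ub,
                   const Eigen::MatrixXd& A, const Eigen::VectorXd& Alb, const Eigen::VectorXd& Aub)
{
  std::ofstream out(filename.c_str(), std::ios::out | std::ios::binary);
  if (!out.is_open())
    return false;
  Eigen::Index n = c.size(), m = A.rows();
  out.write((const char*) &n, sizeof(Eigen::Index));
  out.write((const char*) &m, sizeof(Eigen::Index));
  out.write((const char*) c.data(),   n * sizeof(double));
  out.write((const char*) lb.data(),  n * sizeof(double));
  out.write((const char*) ub.data(),  n * sizeof(double));
  out.write((const char*) A.data(),   m * n * sizeof(double));
  out.write((const char*) Alb.data(), m * sizeof(double));
  out.write((const char*) Aub.data(), m * sizeof(double));
  return out.good();
}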
TYPED_TEST(TestSecondOrderMultinomialLogisticRegression, MinimizerOverfitSmall) {
    MatrixX<TypeParam> X(2, 10);
    VectorXi y(10);

    X << 0.6097662 ,  0.53395565,  0.9499446 ,  0.67289898,  0.94173948,
         0.56675891,  0.80363783,  0.85303565,  0.15903886,  0.99518533,
         0.41655682,  0.29256121,  0.36103228,  0.29899503,  0.4957268 ,
         -0.04277318, -0.28038614, -0.12334621, -0.17497722,  0.1492248;
    y << 0, 0, 0, 0, 0, 1, 1, 1, 1, 1;
    std::vector<MatrixX<TypeParam> > X_var;
    for (int i=0; i<10; i++) {
        //X_var.push_back(MatrixX<TypeParam>::Random(2, 2).array().abs() * 0.01);
        //X_var.push_back(MatrixX<TypeParam>::Zero(2, 2));
        VectorX<TypeParam> a = VectorX<TypeParam>::Random(2).array() * 0.01;
        X_var.push_back(
            a * a.transpose()
        );
    }

    SecondOrderLogisticRegressionApproximation<TypeParam> mlr(X, X_var, y, 0);
    MatrixX<TypeParam> eta = MatrixX<TypeParam>::Zero(2, 3);

    GradientDescent<SecondOrderLogisticRegressionApproximation<TypeParam>, MatrixX<TypeParam>> minimizer(
        std::make_shared<
            ArmijoLineSearch<
                SecondOrderLogisticRegressionApproximation<TypeParam>,
                MatrixX<TypeParam>
            >
        >(),
        [](TypeParam value, TypeParam gradNorm, size_t iterations) {
            return iterations < 5000;
        }
    );
    minimizer.minimize(mlr, eta);

    EXPECT_GT(0.1, mlr.value(eta));
}
  virtual void SetUp()
  {
    using namespace MPCWalkgen;

    int nbSamples = 3;
    Real samplingPeriod = 1.0;
    bool autoCompute = true;
    VectorX variable;
    variable.setZero(2*nbSamples);
    Real feedbackPeriod = 0.5;

    vectorOfVector2 p(3);
    p[0] = Vector2(1.0, 1.0);
    p[1] = Vector2(-1.0, 1.0);
    p[2] = Vector2(1.0, -1.0);

    HumanoidFeetSupervisor<Real> feetSupervisor(nbSamples,
                                                samplingPeriod);
    feetSupervisor.setLeftFootCopConvexPolygon(ConvexPolygon<Real>(p));
    feetSupervisor.setRightFootCopConvexPolygon(ConvexPolygon<Real>(p));


    feetSupervisor.updateTimeline(variable, feedbackPeriod);

    LIPModel<Real> lip(nbSamples, samplingPeriod, autoCompute);
    lip.setFeedbackPeriod(feedbackPeriod);

    HumanoidCopConstraint<Real> copCtr(lip, feetSupervisor);

    VectorX x0 = VectorX::Zero(6);

    function_ = copCtr.getFunction(x0);
    gradient_ = copCtr.getGradient(x0.rows());
    supBounds_ = copCtr.getSupBounds(x0);
    infBounds_ = copCtr.getInfBounds(x0);
  }
  void HumanoidFootConstraint<Scalar>::computeBoundsVectors(const VectorX& x0)
  {
    int M = feetSupervisor_.getNbPreviewedSteps();

    assert(x0.rows()==2*M);

    supBound_.setConstant(2*M, Constant<Scalar>::MAXIMUM_BOUND_VALUE);
    infBound_.setConstant(2*M, -Constant<Scalar>::MAXIMUM_BOUND_VALUE);

    for(int i = 0; i<M; ++i)
    {
      const ConvexPolygon<Scalar>& cp = feetSupervisor_.getKinematicConvexPolygon(i);

      supBound_(i) = cp.getXSupBound();
      supBound_(i + M) = cp.getYSupBound();
      infBound_(i) = cp.getXInfBound();
      infBound_(i + M) = cp.getYInfBound();

      // This if-else statement adds the required terms to vectors supBound_ and infBound_
      // so that constraints are expressed in world frame
      if(i==0)
      {
        supBound_(i) += feetSupervisor_.getSupportFootStateX()(0);
        supBound_(i + M) += feetSupervisor_.getSupportFootStateY()(0);
        infBound_(i) += feetSupervisor_.getSupportFootStateX()(0);
        infBound_(i + M) += feetSupervisor_.getSupportFootStateY()(0);
      }
      else
      {
        supBound_(i) += x0(i - 1);
        supBound_(i + M) += x0(M + i - 1);
        infBound_(i) += x0(i - 1);
        infBound_(i + M) += x0(M + i - 1);
      }
    }

    // As we optimize a variation dX_ of the QP variable X_ (such that X_ = x0 + dX_),
    // the bound constraints need to be translated by x0 as well.
    supBound_ -= x0;
    infBound_ -= x0;
  }
Example #18
lbfgs::status lbfgs::cpu_lbfgs(float *h_x)
{
	const size_t NX = m_costFunction.getNumberOfUnknowns();

	floatdouble *d_x = new floatdouble[NX];

	for (size_t idx = 0; idx < NX; ++idx)
		d_x[idx] = h_x[idx];

	VectorX xk = VectorX::Map(d_x, NX); // x_k,     current solution
	VectorX gk(NX);                     // g_k,     gradient at x_k
	VectorX gkm1(NX);                   // g_{k-1}, gradient at x_{k-1}
	VectorX z(NX);                      // z,       search direction
	floatdouble fk;                     // f_k,     value at x_k
	floatdouble fkm1;                   // f_{k-1}, value at x_{k-1}
	floatdouble H0 = 1.0f;              // H_0,     initial inverse Hessian (diagonal, same value for all elements)

	// treat arrays as ring buffers!
	VectorX s[HISTORY_SIZE];            // s,       history of solution updates
	VectorX y[HISTORY_SIZE];            // y,       history of gradient updates
	floatdouble alpha[HISTORY_SIZE];    // alpha,   history of alphas (needed for z updates)
	floatdouble rho  [HISTORY_SIZE];    // rho,     history of rhos   (needed for z updates)

	for (size_t i = 0; i < HISTORY_SIZE; ++i)
	{
		s[i] = VectorX(NX);
		y[i] = VectorX(NX);
	}

	cpu_cost_function *cpucf = (cpu_cost_function*)&m_costFunction;

	cpucf->cpu_f_gradf(xk.data(), &fk, gk.data());

	size_t evals = 1;

	status stat = LBFGS_REACHED_MAX_ITER;

#ifdef LBFGS_VERBOSE
	std::cout << "lbfgs::cpu_lbfgs()" << std::endl;
#endif
	size_t it;
	for (it = 0; it < m_maxIter; ++it)
	{

#ifdef LBFGS_VERBOSE
		printf("f(x) = % 12e, ||grad||_2 = % 12e\n", fk, gk.norm());
#endif

		// Check for convergence
		// ---------------------

		floatdouble xSquaredNorm = std::max<floatdouble>(1.0f, xk.squaredNorm());

		if (gk.squaredNorm() < (m_gradientEps * m_gradientEps) * xSquaredNorm)
		{
			stat = LBFGS_BELOW_GRADIENT_EPS;
			break;
		}

		// Find search direction
		// ---------------------

		z = -gk;

		const size_t MAX_IDX = std::min<size_t>(it, HISTORY_SIZE);

		for (size_t i = 1; i <= MAX_IDX; ++i)
		{
			const size_t idx = index(it - i);

			alpha[idx] = s[idx].dot(z) * rho[idx];
			z -= alpha[idx] * y[idx];
		}

		z *= H0;

		for (size_t i = MAX_IDX; i > 0; --i)
		{
			const size_t idx = index(it - i);

			const floatdouble beta = rho[idx] * y[idx].dot(z);
			z += s[idx] * (alpha[idx] - beta);
		}

		// Perform backtracking line search
		// --------------------------------

		gkm1 = gk;
		fkm1 = fk;

		floatdouble step;

		if (!cpu_linesearch(xk, z, cpucf, fk, gk, evals, gkm1, fkm1, stat, step, m_maxEvals))
		{
			break;
		}

		// Update s, y, rho and H_0
		// ------------------------

		s[index(it)] = z * step;  // = x_k - x_{k-1}
		y[index(it)] = gk - gkm1;

		floatdouble yDotS = y[index(it)].dot(s[index(it)]);

		rho[index(it)] = 1.0f / yDotS;

		floatdouble yNorm2 = y[index(it)].squaredNorm();
		if (yNorm2 > 1e-5f)
			H0 = yDotS / yNorm2;
	}

	for (size_t i = 0; i < NX; ++i)
		h_x[i] = float(xk[i]);

	delete[] d_x; // xk holds its own copy of the data, so the scratch buffer can be freed

#ifdef LBFGS_VERBOSE
	std::cout << "Number of iterations: " << it << std::endl;
	std::cout << "Number of function/gradient evaluations: " << evals << std::endl;
	std::cout << "Reason for termination: " << statusToString(stat) << std::endl;
#endif

	return stat;
}
Example #19
 /**
  * Return the norm of the error vector.
  **/
 virtual Scalar getErrorNorm() const {
   return errorVector_.norm();
 }
Example #20
bool cpu_linesearch(VectorX &xk, VectorX &z,
					cpu_cost_function *cpucf, floatdouble &fk, VectorX &gk,
					size_t &evals, const VectorX &gkm1, const floatdouble &fkm1,
					lbfgs::status &stat, floatdouble &step, size_t maxEvals)
{
	const floatdouble c1 = 1e-4f;
	const floatdouble c2 = 0.9f;

	const floatdouble alpha_0     = 0.0;
	const floatdouble phi_0       = fk;
	const floatdouble phi_prime_0 = z.dot(gk);

	if (phi_prime_0 >= 0.0)
	{
		stat = lbfgs::LBFGS_LINE_SEARCH_FAILED;
		return false;
	}

	const floatdouble alpha_max = 1e8;

	floatdouble alpha     = 1.0;
	floatdouble alpha_old = 0.0;
	bool second_iter = false;

	floatdouble alpha_low, alpha_high;
	floatdouble phi_low, phi_high;
	floatdouble phi_prime_low, phi_prime_high;

	for (;;)
	{
		xk += (alpha - alpha_old) * z;

		cpucf->cpu_f_gradf(xk.data(), &fk, gk.data());

		++evals;

		const floatdouble phi_alpha = fk;
		const floatdouble phi_prime_alpha = z.dot(gk);

		const bool armijo_violated = (phi_alpha > phi_0 + c1 * alpha * phi_prime_0 || (second_iter && phi_alpha >= phi_0));
		const bool strong_wolfe    = (std::abs(phi_prime_alpha) <= -c2 * phi_prime_0);

		// If both Armijo and Strong Wolfe hold, we're done
		if (!armijo_violated && strong_wolfe)
		{
			step = alpha;
			return true;
		}

		if (evals >= maxEvals)
		{
			stat = lbfgs::LBFGS_REACHED_MAX_EVALS;
			return false;
		}

		// If the Armijo condition is violated, we've bracketed a viable minimum
		// Interval is [alpha_0, alpha]
		if (armijo_violated)
		{
			alpha_low      = alpha_0;
			alpha_high     = alpha;
			phi_low        = phi_0;
			phi_high       = phi_alpha;
			phi_prime_low  = phi_prime_0;
			phi_prime_high = phi_prime_alpha;
			break;
		}

		// If the directional derivative at alpha is positive, we've bracketed a viable minimum
		// Interval is [alpha, alpha_0]
		if (phi_prime_alpha >= 0)
		{
			alpha_low      = alpha;
			alpha_high     = alpha_0;
			phi_low        = phi_alpha;
			phi_high       = phi_0;
			phi_prime_low  = phi_prime_alpha;
			phi_prime_high = phi_prime_0;
			break;
		}

		// Else look to the "right" of alpha for a viable minimum
		floatdouble alpha_new = alpha + 4 * (alpha - alpha_old);
		alpha_old = alpha;
		alpha     = alpha_new;

		if (alpha > alpha_max)
		{
			stat = lbfgs::LBFGS_LINE_SEARCH_FAILED;
			return false;
		}

		second_iter = true;
	}

	// The minimum is now bracketed in [alpha_low, alpha_high]
	// Find it...
	size_t tries = 0;
	const size_t minTries = 10;

	while (true)
	{
		tries++;
		alpha_old = alpha;

		// Quadratic interpolation:
		// Least-squares fit a parabola to (alpha_low, phi_low),
		// (alpha_high, phi_high) with gradients phi_prime_low and
		// phi_prime_high and select the minimum of that parabola as
		// the new alpha

		alpha  = 0.5f * (alpha_low + alpha_high);
		alpha += (phi_high - phi_low) / (phi_prime_low - phi_prime_high);

		if (alpha < alpha_low || alpha > alpha_high)
			alpha = 0.5f * (alpha_low + alpha_high);

		xk += (alpha - alpha_old) * z;

		cpucf->cpu_f_gradf(xk.data(), &fk, gk.data());

		++evals;

		const floatdouble phi_j = fk;
		const floatdouble phi_prime_j = z.dot(gk);

		const bool armijo_violated = (phi_j > phi_0 + c1 * alpha * phi_prime_0 || phi_j >= phi_low);
		const bool strong_wolfe = (std::abs(phi_prime_j) <= -c2 * phi_prime_0);

		if (!armijo_violated && strong_wolfe)
		{
			// The Armijo and Strong Wolfe conditions hold
			step = alpha;
			break;
		}
		else if (std::abs(alpha_high - alpha_low) < 1e-5 && tries > minTries)
		{
			// The search interval has become too small
			stat = lbfgs::LBFGS_LINE_SEARCH_FAILED;
			return false;
		}
		else if (armijo_violated)
		{
			alpha_high     = alpha;
			phi_high       = phi_j;
			phi_prime_high = phi_prime_j;
		}
		else
		{
			if (phi_prime_j * (alpha_high - alpha_low) >= 0)
			{
				alpha_high     = alpha_low;
				phi_high       = phi_low;
				phi_prime_high = phi_prime_low;
			}

			alpha_low     = alpha;
			phi_low       = phi_j;
			phi_prime_low = phi_prime_j;
		}

		if (evals >= maxEvals)
		{
			stat = lbfgs::LBFGS_REACHED_MAX_EVALS;
			return false;
		}
	}

	return true;
}
Example #21
// attachment spring gradient: k*(current_length)*current_direction
void AttachmentConstraint::EvaluateGradient(const VectorX& x, VectorX& gradient)
{
	// LOOK
    EigenVector3 g_i = (*(m_p_stiffness))*(x.block_vector(m_p0) - m_fixd_point);
    gradient.block_vector(m_p0) += g_i;
}
Example #22
void TrajectoryWalkgen<Scalar>::setVelRefInWorldFrame(const VectorX& velRef)
{
  assert(velRef==velRef);
  assert(velRef.size()==noDynModel_.getNbSamples());
  velTrackingObj_.setVelRefInWorldFrame(velRef);
}
Example #23
void TrajectoryWalkgen<Scalar>::setPosRefInWorldFrame(const VectorX& posRef)
{
  assert(posRef==posRef);
  assert(posRef.size()==noDynModel_.getNbSamples());
  posTrackingObj_.setPosRefInWorldFrame(posRef);
}
std::vector<double> Configurator::eigenToStdVector(const VectorX vec) {
    return std::vector<double>(vec.data(), vec.data() + vec.size());
}