Example #1
TEST(ModelUtil, hessian) {
  
  int dim = 5;
  
  Eigen::VectorXd x(dim);
  double f;
  Eigen::VectorXd grad_f(dim);
  Eigen::MatrixXd hess_f(dim, dim);
  
  std::fstream data_stream(std::string("").c_str(), std::fstream::in);
  stan::io::dump data_var_context(data_stream);
  data_stream.close();
  
  valid_model_namespace::valid_model valid_model(data_var_context, &std::cout);
  EXPECT_NO_THROW(stan::model::hessian(valid_model, x, f, grad_f, hess_f));
  
  EXPECT_FLOAT_EQ(dim, x.size());
  EXPECT_FLOAT_EQ(dim, grad_f.size());
  EXPECT_FLOAT_EQ(dim, hess_f.rows());
  EXPECT_FLOAT_EQ(dim, hess_f.cols());
  
  // Incorporate once operands_and_partials has been generalized
  //domain_fail_namespace::domain_fail domain_fail_model(data_var_context, &std::cout);
  //EXPECT_THROW(stan::model::hessian(domain_fail_model, x, f, grad_f, hess_f), std::domain_error);
  
}
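A note for context (inferred from the assertions, not stated in the test itself): stan::model::hessian is expected to evaluate the model's log density and its derivatives at x, i.e. $f = \log p(x)$, $\texttt{grad\_f} = \nabla \log p(x)$, and $\texttt{hess\_f} = \nabla^2 \log p(x)$, which is why the test checks that grad_f has dim entries and hess_f is dim x dim.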
Example #2
int rechne() {
	double *richtung;
	double schritt = 0.0;
	double x = start_x;
	double y = start_y;
	double z = start_z;
	double x_neu = 0.0;
	double y_neu = 0.0;
	double z_neu = 0.0;
	int i = 0;

	while (i < 2000)
	{
		richtung = grad_f(x,y,z,n);
		schritt = armijo(x,y,z,n);

		x_neu = x - schritt*richtung[0];
		y_neu = y - schritt*richtung[1];
		z_neu = z - schritt*richtung[2];
		i++;
		// check for stationarity
		if (((x-x_neu)*(x-x_neu)+(y-y_neu)*(y-y_neu)+(z-z_neu)*(z-z_neu)) < 0.0001)
			break;
		x = x_neu;
		y = y_neu;
		z = z_neu;
	}

	x = x_neu;
	y = y_neu;
	z = z_neu;

	posx = x;
	posy = y;
	posz = z;

	return 0;
}
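Example #2 depends on names defined elsewhere in its project. Below is a minimal sketch of the assumed interface, with hypothetical signatures inferred only from the call sites above (in particular, grad_f is assumed to return a pointer to a 3-element gradient that stays valid until the next call):

// Hypothetical declarations; the real project's may differ.
extern double start_x, start_y, start_z;   // start position
extern double posx, posy, posz;            // result position written by rechne()
extern int n;                              // problem-size parameter

double  f(double x, double y, double z, int n);       // objective function
double* grad_f(double x, double y, double z, int n);  // 3-component gradient
double  armijo(double x, double y, double z, int n);  // step size (see Example #6)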
Example #3
    /** Computes the roots by a mixed Newton / interval-nesting scheme. d must be sorted. **/
    Vector roots()
    {
	assert(size(z) > 1);
	const double tol= 1.0e-6;
	Vector       start(resource(z)), lambda(resource(z));

	for (size_type i= 0; i < size(z); i++) {
	    // Equal poles -> eigenvalue 
	    if (i < size(z) - 1 && d[i] == d[i+1]) { 
		lambda[i]= d[i]; continue; }
	    
	    // If the root is too close to the pole (i.e. f(d[i]+eps) >= 0), take that point because we cannot get closer to the root
	    value_type next= minimal_increase(d[i]), lamb, old;
	    if (f(next) >= value_type(0)){ 
		lambda[i]= next; continue; }
		
	    if (i < size(z) - 1)
		old= lamb= start[i]= (d[i] + d[i+1]) / 2;  // start point between poles
	    else
		old= lamb= start[i]= 1.5 * d[i] - 0.5 * d[i-1];  // last start point: last pole plus half the distance to the second-last

   	    while (std::abs(f(lamb)) > tol) {
		if (lamb <= d[i])		   
		    start[i]= lamb= (d[i] + start[i]) / 2;  
		else 
		    lamb-= f(lamb) / grad_f(lamb);
		if (old == lamb) break;
		old= lamb;
	    }
	    lambda[i]= lamb;
	} 
	return lambda;
    }
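Read against the code: while the iterate stays to the right of the pole d[i], the loop takes a plain Newton step, $\lambda_{k+1} = \lambda_k - f(\lambda_k)/f'(\lambda_k)$ (with grad_f playing the role of $f'$); whenever a step overshoots past the pole (lamb <= d[i]), it falls back to interval nesting, halving toward the pole via start[i] = (d[i] + start[i]) / 2. The old == lamb test terminates the loop once the iteration reaches a floating-point fixed point.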
Example #4
std::vector<double> gradient_descent(
    const std::function<double(const std::vector<double>&)>& function,
    const std::vector<double>& initial_x, const double initial_step_size,
    const double tolerance, const int max_iterations, const double delta)
{
    /*
     * x    -- argument
     * f    -- target function
     * f_x  -- value of f at x, i.e. f(x)
     */

    std::vector<double> x {initial_x};
    double step_size = initial_step_size;
    double f_x {function(x)};

    for(int iteration = 0; iteration < max_iterations; ++iteration)
    {
        // calculate gradient
        // forward difference approximation is used
        std::vector<double> grad_f(x.size());
        for(size_t i = 0; i < x.size(); ++i)
        {
            std::vector<double> delta_x {x};
            delta_x[i] += delta;
            grad_f[i] = (function(delta_x) - f_x) / delta;
        }

        // update step_size (using one-dimensional optimization)
        step_size *= 0.9; // this technique could be improved

        // update x
        for(size_t i = 0; i < x.size(); ++i)
            x[i] -= step_size * grad_f[i];

        // update function value at x
        const double f_x_new {
            function(x)
        };

        // check for convergence
        if(std::abs(f_x_new - f_x) < tolerance)
        {
            std::cout << "Number of iterations to convergence: "
                      << iteration
                      << std::endl;
            std::cout << "Function value: "
                      << f_x_new
                      << std::endl;
            return x;
        }
        else
            f_x = f_x_new;
    }

    std::cerr << "gradient_descent: maximum number of iterations exceeded!"
              << std::endl;
    return x;
}
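A minimal, hypothetical usage sketch for Example #4 (the target function and every parameter value below are illustrative, not taken from the original project), minimizing the convex quadratic (x0 - 1)^2 + (x1 + 2)^2:

#include <functional>
#include <iostream>
#include <vector>

// Assumes the gradient_descent() definition from Example #4 is visible here.
int main()
{
    auto quadratic = [](const std::vector<double>& x) {
        return (x[0] - 1.0) * (x[0] - 1.0) + (x[1] + 2.0) * (x[1] + 2.0);
    };

    // function, initial_x, initial_step_size, tolerance, max_iterations, delta
    std::vector<double> minimum =
        gradient_descent(quadratic, {0.0, 0.0}, 0.5, 1e-10, 10000, 1e-6);

    std::cout << "minimum near (" << minimum[0] << ", " << minimum[1] << ")\n";
}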
Example #5
void RIMLS::Fit()
{
	double f = 0;

	Vec3d grad_f(0, 0, 0);
	do 
	{
		int i = 0;
		do 
		{
			// Accumulators: weight sum, weighted field values, weight gradients, normals
			double sumW, sumF;
			sumW = sumF = 0;
			Vec3d sumGW, sumGF, sumN;
			sumGW = sumGF = sumN = Vec3d(0.0, 0.0, 0.0);

			for (DataPoint& p : m_neighbors)
			{
				Vec3d px = m_x - p.pos();
				double fx = px.dot(p.normal());

				// Robust refitting weight: down-weights points whose residual or
				// normal deviates from the current estimate (first pass: no
				// estimate yet, so alpha stays 1)
				double alpha = 1.0;
				if (i > 0)
				{
					alpha = exp(-pow((fx - f) / m_sigmaR, 2)) *
						exp(-pow((p.normal() - grad_f).norm() / m_sigmaN, 2));
				}

				// Spatial Gaussian weight; grad_w is the gradient of
				// w = alpha*phi w.r.t. m_x, treating alpha as constant
				double phi = exp(-pow(px.norm() / m_sigmaT, 2));
				double w = alpha*phi;
				Vec3d grad_w = -2.0*alpha*phi*px / pow(m_sigmaT, 2);

				sumW += w;
				sumGW += grad_w;
				sumF += w*fx;
				sumGF += grad_w*fx;
				sumN += w*p.normal();
			}
			// Weighted-average field value and its gradient
			f = sumF / sumW;
			grad_f = (sumGF - f*sumGW + sumN) / sumW;
		} while (++i<m_iter && !convergence());

		// Project the point along the gradient of the implicit field
		m_x -= f*grad_f;

	} while ((f*grad_f).norm() > m_threshold);
}
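Reading Example #5's accumulators back into formulas: with per-point weight $w_i = \alpha_i \varphi_i$ (spatial Gaussian $\varphi_i$ times the robust weight $\alpha_i$), each inner pass computes

$f = \frac{\sum_i w_i f_i}{\sum_i w_i}, \qquad \nabla f = \frac{\sum_i f_i \nabla w_i - f \sum_i \nabla w_i + \sum_i w_i \mathbf{n}_i}{\sum_i w_i},$

where $f_i$ is the point-to-plane distance px.dot(p.normal()) and $\mathbf{n}_i$ the point normal; the outer loop then projects the query point along the field, m_x ← m_x − f·∇f.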
Example #6
// determine the Armijo step size
double armijo(double x, double y, double z, int n) {

	double schritt = 1;
	int i = 0;
	double ls = 0.0;
	double rs = 0.0;
	double *gradient;
	gradient = grad_f(x,y,z,n);

	while (i < 5000)
	{
		ls = f(x+schritt*(-gradient[0]),y+schritt*(-gradient[1]),z+schritt*(-gradient[2]),n);
		rs = f(x,y,z,n) + 0.5*schritt*(-gradient[0]*gradient[0]-gradient[1]*gradient[1]-gradient[2]*gradient[2]);
		if (ls <= rs)
			break;
		schritt = schritt / 2;
		i++;
	}

	return schritt;

}
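Written out, the loop halves the step $t$ until the Armijo condition with $\sigma = 0.5$ holds along the steepest-descent direction $-\nabla f(x)$:

$f\big(x - t\,\nabla f(x)\big) \;\le\; f(x) - \sigma\, t\, \lVert \nabla f(x) \rVert^2$

ls is the left-hand side, and rs expands the right-hand side with $-\lVert\nabla f\rVert^2 = -g_0^2 - g_1^2 - g_2^2$.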