template <class T>
CppAD::ADFun<T>* CondExp_vpvpFunc(const std::vector<CppAD::AD<T> >& X) {
    using namespace CppAD;
    using namespace std;

    assert(X.size() == 3);

    // parameter values
    AD<T> one = T(1.);
    AD<T> zero = T(0.);

    // dependent variable vector 
    std::vector< AD<T> > Y(5);

    // CondExp(variable, parameter, variable, parameter)
    Y[0] = CondExpLt(X[0], one, X[1] * X[2], zero);
    Y[1] = CondExpLe(X[0], one, X[1] * X[2], zero);
    Y[2] = CondExpEq(X[0], one, X[1] * X[2], zero);
    Y[3] = CondExpGe(X[0], one, X[1] * X[2], zero);
    Y[4] = CondExpGt(X[0], one, X[1] * X[2], zero);

    // create f: X -> Y 
    ADFun<T>* fun = new ADFun<T> (X, Y);
    // the 'no_conditional_skip' option is essential: it prevents an assertion
    // failure in CppAD and keeps the conditional branches from being skipped
    // when NDEBUG is defined
    fun->optimize("no_conditional_skip");

    return fun;
}
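
A minimal driver sketch (not part of the original example): it assumes T = double and that CondExp_vpvpFunc is visible in the same translation unit, records the three independent variables before the call, and checks two of the recorded conditional expressions with zero order forward mode.

#include <cassert>
#include <vector>
#include <cppad/cppad.hpp>

int main() {
    using CppAD::AD;

    // independent variable vector; values must be set before Independent
    std::vector< AD<double> > X(3);
    X[0] = 0.5;  X[1] = 2.0;  X[2] = 3.0;
    CppAD::Independent(X);

    // record the conditional expressions and optimize the tape
    CppAD::ADFun<double>* f = CondExp_vpvpFunc(X);

    // zero order forward mode at a point where x0 < 1
    std::vector<double> x(3), y(5);
    x[0] = 0.5;  x[1] = 2.0;  x[2] = 3.0;
    y = f->Forward(0, x);
    assert(y[0] == 6.0);  // CondExpLt: 0.5 < 1 is true,  so y0 = x1 * x2
    assert(y[4] == 0.0);  // CondExpGt: 0.5 > 1 is false, so y4 = zero

    delete f;
    return 0;
}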
Example #2
	checkpoint(const char* name, 
		Algo& algo, const ADVector& ax, ADVector& ay)
	: atomic_base<Base>(name)
	{	CheckSimpleVector< CppAD::AD<Base> , ADVector>();

		// make a copy of ax because Independent modifies AD information
		ADVector x_tmp(ax);
		// declare x_tmp as the independent variables
		Independent(x_tmp);
		// record mapping from x_tmp to ay
		algo(x_tmp, ay); 
		// create function f_ : x -> y
		f_.Dependent(ay);
		// suppress checking for nan in f_ results
		// (see optimize documentation for atomic functions)
		f_.check_for_nan(false);
		// now optimize (we expect to use this function many times).
		f_.optimize();
		// now disable checking of comparison operations
		// 2DO: add a debugging mode that checks for changes and aborts
		f_.compare_change_count(0);
	}
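
A usage sketch for the constructor above; the functor my_algo, the name "atom_mul", and Base = double are illustrative assumptions rather than part of the original. The checkpoint object records the algorithm once in its constructor and is then called like an atomic function while taping another ADFun.

#include <cassert>
#include <cppad/cppad.hpp>

typedef CPPAD_TESTVECTOR(CppAD::AD<double>) ADVector;

// hypothetical algorithm to be checkpointed: y0 = x0 * x1
void my_algo(const ADVector& ax, ADVector& ay)
{   ay[0] = ax[0] * ax[1]; }

int main()
{   // nominal argument and result vectors used only to record the algorithm
    ADVector ax(2), ay(1);
    ax[0] = 2.0;  ax[1] = 3.0;

    // runs the constructor shown above: tape my_algo, optimize, disable checks
    CppAD::checkpoint<double> atom_mul("atom_mul", my_algo, ax, ay);

    // use the checkpointed function while recording another tape
    ADVector au(2), av(1);
    au[0] = 1.0;  au[1] = 1.0;
    CppAD::Independent(au);
    atom_mul(au, av);
    CppAD::ADFun<double> g(au, av);

    // zero order forward evaluation of g(u) = u0 * u1
    CPPAD_TESTVECTOR(double) u(2), v(1);
    u[0] = 4.0;  u[1] = 5.0;
    v = g.Forward(0, u);
    assert(v[0] == 20.0);
    return 0;
}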
Example #3
template <class BAvector, class Fun>
void BenderQuad(
	const BAvector   &x     , 
	const BAvector   &y     , 
	Fun               fun   , 
	BAvector         &g     ,
	BAvector         &gx    ,
	BAvector         &gxx   )
{	// determine the base type
	typedef typename BAvector::value_type Base;

	// check that BAvector is a SimpleVector class
	CheckSimpleVector<Base, BAvector>();

	// declare the ADvector type
	typedef CPPAD_TESTVECTOR(AD<Base>) ADvector;

	// size of the x and y spaces
	size_t n = size_t(x.size());
	size_t m = size_t(y.size());

	// check the size of gx and gxx
	CPPAD_ASSERT_KNOWN(
		g.size() == 1,
		"BenderQuad: size of the vector g is not equal to 1"
	);
	CPPAD_ASSERT_KNOWN(
		size_t(gx.size()) == n,
		"BenderQuad: size of the vector gx is not equal to n"
	);
	CPPAD_ASSERT_KNOWN(
		size_t(gxx.size()) == n * n,
		"BenderQuad: size of the vector gxx is not equal to n * n"
	);

	// some temporary indices
	size_t i, j;

	// variable versions x
	ADvector vx(n);
	for(j = 0; j < n; j++)
		vx[j] = x[j];
	
	// declare the independent variables
	Independent(vx);

	// evaluate h = H(x, y) 
	ADvector h(m);
	h = fun.h(vx, y);

	// evaluate dy (x) = Newton step as a function of x through h only
	ADvector dy(m);
	dy = fun.dy(x, y, h);

	// variable version of y
	ADvector vy(m);
	for(j = 0; j < m; j++)
		vy[j] = y[j] + dy[j];

	// evaluate G~ (x) = F [ x , y + dy(x) ] 
	ADvector gtilde(1);
	gtilde = fun.f(vx, vy);

	// AD function object that corresponds to G~ (x)
	// We will make heavy use of this tape, so optimize it
	ADFun<Base> Gtilde;
	Gtilde.Dependent(vx, gtilde); 
	Gtilde.optimize();

	// value of G(x)
	g = Gtilde.Forward(0, x);

	// initial forward direction vector as zero
	BAvector dx(n);
	for(j = 0; j < n; j++)
		dx[j] = Base(0);

	// weight, first and second order derivative values
	BAvector dg(1), w(1), ddw(2 * n);
	w[0] = 1.;


	// the Jacobian and Hessian of G(x) are equal to those of Gtilde
	for(j = 0; j < n; j++)
	{	// compute partials in x[j] direction
		dx[j] = Base(1);
		dg    = Gtilde.Forward(1, dx);
		gx[j] = dg[0];

		// restore the dx vector to zero
		dx[j] = Base(0);

		// compute second partials w.r.t. x[j] and x[i], for i = 0, ..., n-1
		ddw = Gtilde.Reverse(2, w);
		for(i = 0; i < n; i++)
			gxx[ i * n + j ] = ddw[ i * 2 + 1 ];
	}

	return;
}
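
An end-to-end sketch of how BenderQuad is typically called; the Fun class below, with F(x, y) = (x - y)^2 / 2 + y^2 / 2, is an illustrative assumption and not part of the original. Here f returns F, h returns the partial of F with respect to y, and dy returns the exact Newton step, so G(x) = F[x, Y(x)] = x^2 / 4 and the returned value, gradient, and Hessian can be checked in closed form.

#include <cassert>
#include <cppad/cppad.hpp>

class Fun {
public:
    typedef CPPAD_TESTVECTOR(double)            BAvector;
    typedef CPPAD_TESTVECTOR(CppAD::AD<double>) ADvector;

    // F(x, y) = (x - y)^2 / 2 + y^2 / 2
    ADvector f(const ADvector& x, const ADvector& y)
    {   ADvector r(1);
        r[0] = (x[0] - y[0]) * (x[0] - y[0]) / 2. + y[0] * y[0] / 2.;
        return r;
    }
    // H(x, y) = partial of F with respect to y = 2 y - x
    ADvector h(const ADvector& x, const BAvector& y)
    {   ADvector r(1);
        r[0] = 2. * y[0] - x[0];
        return r;
    }
    // Newton step: dy = - H / (partial of H with respect to y) = - h / 2
    ADvector dy(const BAvector& x, const BAvector& y, const ADvector& h)
    {   ADvector r(1);
        r[0] = - h[0] / 2.;
        return r;
    }
};

int main()
{   typedef CPPAD_TESTVECTOR(double) BAvector;
    size_t n = 1, m = 1;

    Fun fun;
    BAvector x(n), y(m), g(1), gx(n), gxx(n * n);
    x[0] = 2.0;
    y[0] = 1.0;   // Y(x) = x / 2 solves H(x, y) = 0

    CppAD::BenderQuad(x, y, fun, g, gx, gxx);

    // G(x) = x^2 / 4, so G(2) = 1, G'(2) = 1, G''(2) = 1/2
    assert(CppAD::NearEqual(g[0],   1.0, 1e-10, 1e-10));
    assert(CppAD::NearEqual(gx[0],  1.0, 1e-10, 1e-10));
    assert(CppAD::NearEqual(gxx[0], 0.5, 1e-10, 1e-10));
    return 0;
}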