Code Example #1
/* $$
$head Use Atomic Function$$
$codep */
bool reciprocal(void)
{	bool ok = true;
	using CppAD::AD;
	using CppAD::NearEqual;
	double eps = 10. * CppAD::numeric_limits<double>::epsilon();
/* $$
$subhead Constructor$$
$codep */
	// --------------------------------------------------------------------
	// Create the atomic reciprocal object
	atomic_reciprocal afun("atomic_reciprocal");
/* $$
$subhead Recording$$
$codep */
	// Create the function f(x)
	//
	// domain space vector
	size_t n  = 1;
	double  x0 = 0.5;
	vector< AD<double> > ax(n);
	ax[0]     = x0;

	// declare independent variables and start tape recording
	CppAD::Independent(ax);

	// range space vector 
	size_t m = 1;
	vector< AD<double> > ay(m);

	// call user function and store reciprocal(x) in au[0] 
	vector< AD<double> > au(m);
	afun(ax, au);        // u = 1 / x

	// now use AD division to invert the operation
	ay[0] = 1.0 / au[0]; // y = 1 / u = x

	// create f: x -> y and stop tape recording
	CppAD::ADFun<double> f;
	f.Dependent (ax, ay);  // f(x) = x
/* $$
$subhead forward$$
$codep */
	// check function value 
	double check = x0;
	ok &= NearEqual( Value(ay[0]) , check,  eps, eps);

	// check zero order forward mode
	size_t p;
	vector<double> x_p(n), y_p(m);
	p      = 0;
	x_p[0] = x0;
	y_p    = f.Forward(p, x_p);
	ok &= NearEqual(y_p[0] , check,  eps, eps);

	// check first order forward mode
	p      = 1;
	x_p[0] = 1;
	y_p    = f.Forward(p, x_p);
	check  = 1.;
	ok &= NearEqual(y_p[0] , check,  eps, eps);

	// check second order forward mode
	p      = 2;
	x_p[0] = 0;
	y_p    = f.Forward(p, x_p);
	check  = 0.;
	ok &= NearEqual(y_p[0] , check,  eps, eps);
/* $$
$subhead reverse$$
$codep */
	// third order reverse mode 
	p     = 3;
	vector<double> w(m), dw(n * p);
	w[0]  = 1.;
	dw    = f.Reverse(p, w);
	check = 1.;
	ok &= NearEqual(dw[0] , check,  eps, eps);
	check = 0.;
	ok &= NearEqual(dw[1] , check,  eps, eps);
	ok &= NearEqual(dw[2] , check,  eps, eps);
/* $$
$subhead for_sparse_jac$$
$codep */
	// forward mode sparsity pattern
	size_t q = n;
	CppAD::vectorBool r1(n * q), s1(m * q);
	r1[0] = true;          // compute sparsity pattern for x[0]
	//
	afun.option( CppAD::atomic_base<double>::bool_sparsity_enum );
	s1    = f.ForSparseJac(q, r1);
	ok  &= s1[0] == true;  // f[0] depends on x[0]  
	//
	afun.option( CppAD::atomic_base<double>::set_sparsity_enum );
	s1    = f.ForSparseJac(q, r1);
	ok  &= s1[0] == true;  // f[0] depends on x[0]  
/* $$
$subhead rev_sparse_jac$$
$codep */
	// reverse mode sparsity pattern
	p = m;
	CppAD::vectorBool s2(p * m), r2(p * n);
	s2[0] = true;          // compute sparsity pattern for f[0]
	//
	afun.option( CppAD::atomic_base<double>::bool_sparsity_enum );
	r2    = f.RevSparseJac(p, s2);
	ok  &= r2[0] == true;  // f[0] depends on x[0]  
	//
	afun.option( CppAD::atomic_base<double>::set_sparsity_enum );
	r2    = f.RevSparseJac(p, s2);
	ok  &= r2[0] == true;  // f[0] depends on x[0]  
/* $$
$subhead rev_sparse_hes$$
$codep */
	// Hessian sparsity (using previous ForSparseJac call) 
	CppAD::vectorBool s3(m), h(q * n);
	s3[0] = true;        // compute sparsity pattern for f[0]
	//
	afun.option( CppAD::atomic_base<double>::bool_sparsity_enum );
	h     = f.RevSparseHes(q, s3);
	ok  &= h[0] == true; // second partial of f[0] w.r.t. x[0] may be non-zero
	//
	afun.option( CppAD::atomic_base<double>::set_sparsity_enum );
	h     = f.RevSparseHes(q, s3);
	ok  &= h[0] == true; // second partial of f[0] w.r.t. x[0] may be non-zero

	return ok;
}
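
A minimal driver sketch for running this test (added here for illustration; it assumes the atomic_reciprocal class and the reciprocal() function listed above are compiled into the same program, as in the full CppAD example file):

# include <cstdio>

extern bool reciprocal(void);   // the test function listed above

int main(void)
{	bool ok = reciprocal();
	std::printf("reciprocal example %s\n", ok ? "passed" : "failed");
	return ok ? 0 : 1;
}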
Code Example #2
File: old_tan.cpp  Project: amonal42/CppAD
bool old_tan(void)
{	bool ok = true;
	using CppAD::AD;
	using CppAD::NearEqual;
	float eps = 10.f * CppAD::numeric_limits<float>::epsilon();

	// domain space vector
	size_t n  = 1;
	float  x0 = 0.5;
	CppAD::vector< AD<float> > ax(n);
	ax[0]     = x0;

	// declare independent variables and start tape recording
	CppAD::Independent(ax);

	// range space vector 
	size_t m = 3;
	CppAD::vector< AD<float> > af(m);

	// temporary vector for old_tan computations
	// (old_tan computes tan or tanh and its square)
	CppAD::vector< AD<float> > az(2);

	// call user tan function and store tan(x) in f[0] (ignore tan(x)^2)
	size_t id = 0;
	old_tan(id, ax, az);
	af[0] = az[0];

	// call user tanh function and store tanh(x) in f[1] (ignore tanh(x)^2)
	id = 1;
	old_tan(id, ax, az);
	af[1] = az[0];

	// put a constant in f[2] = tanh(1.) (for sparsity pattern testing)
	CppAD::vector< AD<float> > one(1);
	one[0] = 1.;
	old_tan(id, one, az);
	af[2] = az[0]; 

	// create f: x -> f and stop tape recording
	CppAD::ADFun<float> F;
	F.Dependent(ax, af); 

	// check function value 
	float tan = std::tan(x0);
	ok &= NearEqual(af[0] , tan,  eps, eps);
	float tanh = std::tanh(x0);
	ok &= NearEqual(af[1] , tanh,  eps, eps);

	// check zero order forward
	CppAD::vector<float> x(n), f(m);
	x[0] = x0;
	f    = F.Forward(0, x);
	ok &= NearEqual(f[0] , tan,  eps, eps);
	ok &= NearEqual(f[1] , tanh,  eps, eps);

	// compute first partial of f w.r.t. x[0] using forward mode
	CppAD::vector<float> dx(n), df(m);
	dx[0] = 1.;
	df    = F.Forward(1, dx);

	// compute derivative of tan - tanh using reverse mode
	CppAD::vector<float> w(m), dw(n);
	w[0]  = 1.;
	w[1]  = 1.;
	w[2]  = 0.;
	dw    = F.Reverse(1, w);

	// tan'(x)   = 1 + tan(x)  * tan(x) 
	// tanh'(x)  = 1 - tanh(x) * tanh(x) 
	float tanp  = 1.f + tan * tan; 
	float tanhp = 1.f - tanh * tanh; 
	ok   &= NearEqual(df[0], tanp, eps, eps);
	ok   &= NearEqual(df[1], tanhp, eps, eps);
	ok   &= NearEqual(dw[0], w[0]*tanp + w[1]*tanhp, eps, eps);

	// compute second partial of f w.r.t. x[0] using forward mode
	CppAD::vector<float> ddx(n), ddf(m);
	ddx[0] = 0.;
	ddf    = F.Forward(2, ddx);

	// compute second derivative of tan - tanh using reverse mode
	CppAD::vector<float> ddw(2);
	ddw   = F.Reverse(2, w);

	// tan''(x)   = 2 *  tan(x) * tan'(x) 
	// tanh''(x)  = - 2 * tanh(x) * tanh'(x) 
	// Note that the second order Taylor coefficient is one half of the
	// corresponding second derivative.
	float two    = 2;
	float tanpp  =   two * tan * tanp;
	float tanhpp = - two * tanh * tanhp;
	ok   &= NearEqual(two * ddf[0], tanpp, eps, eps);
	ok   &= NearEqual(two * ddf[1], tanhpp, eps, eps);
	ok   &= NearEqual(ddw[0], w[0]*tanp  + w[1]*tanhp , eps, eps);
	ok   &= NearEqual(ddw[1], w[0]*tanpp + w[1]*tanhpp, eps, eps);

	// Forward mode computation of sparsity pattern for F.
	size_t p = n;
	// use vectorBool because m and n are small
	CppAD::vectorBool r1(p), s1(m * p);
	r1[0] = true;            // propagate sparsity for x[0]
	s1    = F.ForSparseJac(p, r1);
	ok  &= (s1[0] == true);  // f[0] depends on x[0]
	ok  &= (s1[1] == true);  // f[1] depends on x[0]
	ok  &= (s1[2] == false); // f[2] does not depend on x[0]

	// Reverse mode computation of sparsity pattern for F.
	size_t q = m;
	CppAD::vectorBool s2(q * m), r2(q * n);
	// Sparsity pattern for identity matrix
	size_t i, j;
	for(i = 0; i < q; i++)
	{	for(j = 0; j < m; j++)
			s2[i * q + j] = (i == j);
	}
	r2   = F.RevSparseJac(q, s2);
	ok  &= (r2[0] == true);  // f[0] depends on x[0]
	ok  &= (r2[1] == true);  // f[1] depends on x[0]
	ok  &= (r2[2] == false); // f[2] does not depend on x[0]

	// Hessian sparsity for f[0]
	CppAD::vectorBool s3(m), h(p * n);
	s3[0] = true;
	s3[1] = false;
	s3[2] = false;
	h    = F.RevSparseHes(p, s3);
	ok  &= (h[0] == true);  // Hessian is non-zero

	// Hessian sparsity for f[2]
	s3[0] = false;
	s3[2] = true;
	h    = F.RevSparseHes(p, s3);
	ok  &= (h[0] == false);  // Hessian is zero

	// check tanh results for a large value of x
	x[0]  = std::numeric_limits<float>::max() / two;
	f     = F.Forward(0, x);
	tanh  = 1.;
	ok   &= NearEqual(f[1], tanh, eps, eps);
	df    = F.Forward(1, dx);
	tanhp = 0.;
	ok   &= NearEqual(df[1], tanhp, eps, eps);
 
	// --------------------------------------------------------------------
	// Free all temporary work space associated with old_atomic objects. 
	// (If there are future calls to user atomic functions, they will 
	// create new temporary work space.)
	CppAD::user_atomic<float>::clear();

	return ok;
}
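
The factor of two in the second order checks above comes from the fact that order k forward mode returns the k-th Taylor coefficient, i.e. the k-th derivative divided by k!. A self-contained sketch of that relation using CppAD's built-in tan (this block is an added illustration with hypothetical names, not part of the original old_tan test):

# include <cmath>
# include <limits>
# include <cppad/cppad.hpp>

bool taylor_vs_derivative(void)
{	bool ok = true;
	using CppAD::AD;
	using CppAD::NearEqual;
	double eps = 10. * std::numeric_limits<double>::epsilon();

	// record y = tan(x) at x0 = 0.5
	double x0 = 0.5;
	CPPAD_TESTVECTOR( AD<double> ) ax(1), ay(1);
	ax[0] = x0;
	CppAD::Independent(ax);
	ay[0] = tan(ax[0]);
	CppAD::ADFun<double> f(ax, ay);

	// order k forward mode returns the k-th Taylor coefficient y^(k)(x0) / k!
	CPPAD_TESTVECTOR(double) xk(1), yk(1);
	xk[0] = x0; yk = f.Forward(0, xk);   // tan(x0)
	xk[0] = 1.; yk = f.Forward(1, xk);   // tan'(x0)
	double y1 = yk[0];
	xk[0] = 0.; yk = f.Forward(2, xk);   // tan''(x0) / 2
	double y2 = yk[0];

	double t   = std::tan(x0);
	double tp  = 1. + t * t;     // tan'(x)  = 1 + tan(x) * tan(x)
	double tpp = 2. * t * tp;    // tan''(x) = 2 * tan(x) * tan'(x)
	ok &= NearEqual(y1,      tp,  eps, eps);
	ok &= NearEqual(2. * y2, tpp, eps, eps);   // same factor of two as above
	return ok;
}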
Code Example #3
File: old_reciprocal.cpp  Project: barak/cppad
bool old_reciprocal(void)
{   bool ok = true;
    using CppAD::AD;
    using CppAD::NearEqual;
    double eps = 10. * CppAD::numeric_limits<double>::epsilon();

    // --------------------------------------------------------------------
    // Create the function f(x)
    //
    // domain space vector
    size_t n  = 1;
    double  x0 = 0.5;
    vector< AD<double> > ax(n);
    ax[0]     = x0;

    // declare independent variables and start tape recording
    CppAD::Independent(ax);

    // range space vector
    size_t m = 1;
    vector< AD<double> > ay(m);

    // call atomic function and store reciprocal(x) in au[0]
    vector< AD<double> > au(m);
    size_t id = 0;           // not used
    reciprocal(id, ax, au);  // u = 1 / x

    // call atomic function and store reciprocal(u) in ay[0]
    reciprocal(id, au, ay);  // y = 1 / u = x

    // create f: x -> y and stop tape recording
    CppAD::ADFun<double> f;
    f.Dependent (ax, ay);    // f(x) = x

    // --------------------------------------------------------------------
    // Check forward mode results
    //
    // check function value
    double check = x0;
    ok &= NearEqual( Value(ay[0]) , check,  eps, eps);

    // check zero order forward mode
    size_t q;
    vector<double> x_q(n), y_q(m);
    q      = 0;
    x_q[0] = x0;
    y_q    = f.Forward(q, x_q);
    ok &= NearEqual(y_q[0] , check,  eps, eps);

    // check first order forward mode
    q      = 1;
    x_q[0] = 1;
    y_q    = f.Forward(q, x_q);
    check  = 1.;
    ok &= NearEqual(y_q[0] , check,  eps, eps);

    // check second order forward mode
    q      = 2;
    x_q[0] = 0;
    y_q    = f.Forward(q, x_q);
    check  = 0.;
    ok &= NearEqual(y_q[0] , check,  eps, eps);

    // --------------------------------------------------------------------
    // Check reverse mode results
    //
    // third order reverse mode
    q     = 3;
    vector<double> w(m), dw(n * q);
    w[0]  = 1.;
    dw    = f.Reverse(q, w);
    check = 1.;
    ok &= NearEqual(dw[0] , check,  eps, eps);
    check = 0.;
    ok &= NearEqual(dw[1] , check,  eps, eps);
    ok &= NearEqual(dw[2] , check,  eps, eps);

    // --------------------------------------------------------------------
    // forward mode sparsity pattern
    size_t p = n;
    CppAD::vectorBool r1(n * p), s1(m * p);
    r1[0] = true;          // compute sparsity pattern for x[0]
    s1    = f.ForSparseJac(p, r1);
    ok  &= s1[0] == true;  // f[0] depends on x[0]

    // --------------------------------------------------------------------
    // reverse mode sparsity pattern
    q = m;
    CppAD::vectorBool s2(q * m), r2(q * n);
    s2[0] = true;          // compute sparsity pattern for f[0]
    r2    = f.RevSparseJac(q, s2);
    ok  &= r2[0] == true;  // f[0] depends on x[0]

    // --------------------------------------------------------------------
    // Hessian sparsity (using previous ForSparseJac call)
    CppAD::vectorBool s3(m), h(p * n);
    s3[0] = true;        // compute sparsity pattern for f[0]
    h     = f.RevSparseHes(p, s3);
    ok  &= h[0] == true; // second partial of f[0] w.r.t. x[0] may be non-zero

    // -----------------------------------------------------------------
    // Free all temporary work space associated with atomic_one objects.
    // (If there are future calls to atomic functions, they will
    // create new temporary work space.)
    CppAD::user_atomic<double>::clear();

    return ok;
}
Code Example #4
File: norm_sq.cpp  Project: fduffy/CppAD
/* %$$
$head Use Atomic Function$$
$srccode%cpp% */
bool norm_sq(void)
{	bool ok = true;
	using CppAD::AD;
	using CppAD::NearEqual;
	double eps = 10. * CppAD::numeric_limits<double>::epsilon();
/* %$$
$subhead Constructor$$
$srccode%cpp% */
	// --------------------------------------------------------------------
	// Create the atomic norm_sq object
	atomic_norm_sq afun("atomic_norm_sq");
/* %$$
$subhead Recording$$
$srccode%cpp% */
	// Create the function f(x)
	//
	// domain space vector
	size_t  n  = 2;
	double  x0 = 0.25;
	double  x1 = 0.75;
	vector< AD<double> > ax(n);
	ax[0]      = x0;
	ax[1]      = x1;

	// declare independent variables and start tape recording
	CppAD::Independent(ax);

	// range space vector
	size_t m = 1;
	vector< AD<double> > ay(m);

	// call user function and store norm_sq(x) in ay[0]
	afun(ax, ay);        // y_0 = x_0 * x_0 + x_1 * x_1

	// create f: x -> y and stop tape recording
	CppAD::ADFun<double> f;
	f.Dependent (ax, ay);
/* %$$
$subhead forward$$
$srccode%cpp% */
	// check function value
	double check = x0 * x0 + x1 * x1;
	ok &= NearEqual( Value(ay[0]) , check,  eps, eps);

	// check zero order forward mode
	size_t q;
	vector<double> x_q(n), y_q(m);
	q      = 0;
	x_q[0] = x0;
	x_q[1] = x1;
	y_q    = f.Forward(q, x_q);
	ok &= NearEqual(y_q[0] , check,  eps, eps);

	// check first order forward mode
	q      = 1;
	x_q[0] = 0.3;
	x_q[1] = 0.7;
	y_q    = f.Forward(q, x_q);
	check  = 2.0 * x0 * x_q[0] + 2.0 * x1 * x_q[1];
	ok &= NearEqual(y_q[0] , check,  eps, eps);

/* %$$
$subhead reverse$$
$srccode%cpp% */
	// first order reverse mode
	q     = 1;
	vector<double> w(m), dw(n * q);
	w[0]  = 1.;
	dw    = f.Reverse(q, w);
	check = 2.0 * x0;
	ok &= NearEqual(dw[0] , check,  eps, eps);
	check = 2.0 * x1;
	ok &= NearEqual(dw[1] , check,  eps, eps);
/* %$$
$subhead for_sparse_jac$$
$srccode%cpp% */
	// forward mode sparsity pattern
	size_t p = n;
	CppAD::vectorBool r1(n * p), s1(m * p);
	r1[0] = true;  r1[1] = false; // sparsity pattern identity
	r1[2] = false; r1[3] = true;
	//
	s1    = f.ForSparseJac(p, r1);
	ok  &= s1[0] == true;  // f[0] depends on x[0]
	ok  &= s1[1] == true;  // f[0] depends on x[1]
/* %$$
$subhead rev_sparse_jac$$
$srccode%cpp% */
	// reverse mode sparsity pattern
	q = m;
	CppAD::vectorBool s2(q * m), r2(q * n);
	s2[0] = true;          // compute sparsity pattern for f[0]
	//
	r2    = f.RevSparseJac(q, s2);
	ok  &= r2[0] == true;  // f[0] depends on x[0]
	ok  &= r2[1] == true;  // f[0] depends on x[1]
/* %$$
$subhead rev_sparse_hes$$
$srccode%cpp% */
	// Hessian sparsity (using previous ForSparseJac call)
	CppAD::vectorBool s3(m), h(p * n);
	s3[0] = true;        // compute sparsity pattern for f[0]
	//
	h     = f.RevSparseHes(p, s3);
	ok  &= h[0] == true;  // partial of f[0] w.r.t. x[0],x[0] is non-zero
	ok  &= h[1] == false; // partial of f[0] w.r.t. x[0],x[1] is zero
	ok  &= h[2] == false; // partial of f[0] w.r.t. x[1],x[0] is zero
	ok  &= h[3] == true;  // partial of f[0] w.r.t. x[1],x[1] is non-zero
	//
	return ok;
}
Code Example #5
File: sparse_hessian.cpp  Project: CSCsw/CppAD
bool sparse_hessian(void)
{	bool ok = true;
	using CppAD::AD;
	using CppAD::NearEqual;
	size_t i, j, k, ell;
	typedef CPPAD_TESTVECTOR(AD<double>)               a_vector;
	typedef CPPAD_TESTVECTOR(double)                     d_vector;
	typedef CPPAD_TESTVECTOR(size_t)                     i_vector;
	typedef CPPAD_TESTVECTOR(bool)                       b_vector;
	typedef CPPAD_TESTVECTOR(std::set<size_t>)         s_vector;
	double eps = 10. * CppAD::numeric_limits<double>::epsilon();

	// domain space vector
	size_t n = 12;  // must be greater than or equal to 3; see n_sweep below
	a_vector a_x(n);
	for(j = 0; j < n; j++)
		a_x[j] = AD<double> (0);

	// declare independent variables and start recording
	CppAD::Independent(a_x);

	// range space vector
	size_t m = 1;
	a_vector a_y(m);
	a_y[0] = a_x[0]*a_x[1];
	for(j = 0; j < n; j++)
		a_y[0] += a_x[j] * a_x[j] * a_x[j];

	// create f: x -> y and stop tape recording
	// (without executing zero order forward calculation)
	CppAD::ADFun<double> f;
	f.Dependent(a_x, a_y);

	// new value for the independent variable vector, and weighting vector
	d_vector w(m), x(n);
	for(j = 0; j < n; j++)
		x[j] = double(j);
	w[0] = 1.0;

	// vector used to check the value of the hessian
	d_vector check(n * n);
	for(ell = 0; ell < n * n; ell++)
		check[ell] = 0.0;
	ell        = 0 * n + 1;
	check[ell] = 1.0;
	ell        = 1 * n + 0;
	check[ell] = 1.0 ;
	for(j = 0; j < n; j++)
	{	ell = j * n + j;
		check[ell] = 6.0 * x[j];
	}

	// -------------------------------------------------------------------
	// second derivative of y[0] w.r.t x
	d_vector hes(n * n);
	hes = f.SparseHessian(x, w);
	for(ell = 0; ell < n * n; ell++)
		ok &=  NearEqual(w[0] * check[ell], hes[ell], eps, eps );

	// --------------------------------------------------------------------
	// example using vectors of bools to compute sparsity pattern for Hessian
	b_vector r_bool(n * n);
	for(i = 0; i < n; i++)
	{	for(j = 0; j < n; j++)
			r_bool[i * n + j] = false;
		r_bool[i * n + i] = true;
	}
	f.ForSparseJac(n, r_bool);
	//
	b_vector s_bool(m);
	for(i = 0; i < m; i++)
		s_bool[i] = w[i] != 0;
	b_vector p_bool = f.RevSparseHes(n, s_bool);

	hes = f.SparseHessian(x, w, p_bool);
	for(ell = 0; ell < n * n; ell++)
		ok &=  NearEqual(w[0] * check[ell], hes[ell], eps, eps );

	// --------------------------------------------------------------------
	// example using vectors of sets to compute sparsity pattern for Hessian
	s_vector r_set(n);
	for(i = 0; i < n; i++)
		r_set[i].insert(i);
	f.ForSparseJac(n, r_set);
	//
	s_vector s_set(m);
	for(i = 0; i < m; i++)
		if( w[i] != 0. )
			s_set[0].insert(i);
	s_vector p_set = f.RevSparseHes(n, s_set);

	// example passing sparsity pattern to SparseHessian
	hes = f.SparseHessian(x, w, p_set);
	for(ell = 0; ell < n * n; ell++)
		ok &=  NearEqual(w[0] * check[ell], hes[ell], eps, eps );

	// --------------------------------------------------------------------
	// use row and column indices to specify upper triangle of
	// non-zero elements of Hessian
	size_t K = n + 1;
	i_vector row(K), col(K);
	hes.resize(K);
	k = 0;
	for(j = 0; j < n; j++)
	{	// diagonal of Hessian
		row[k] = j;
		col[k] = j;
		k++;
	}
	// only off diagonal non-zero element in upper triangle
	row[k] = 0;
	col[k] = 1;
	k++;
	ok &= k == K;
	CppAD::sparse_hessian_work work;

	// can use p_set or p_bool.
	size_t n_sweep = f.SparseHessian(x, w, p_set, row, col, hes, work);
	for(k = 0; k < K; k++)
	{	ell = row[k] * n + col[k];
		ok &=  NearEqual(w[0] * check[ell], hes[k], eps, eps );
	}
	ok &= n_sweep == 2;

	// now recompute at a different x and w (using work from previous call)
	w[0]       = 2.0;
	x[1]       = 0.5;
	ell        = 1 * n + 1;
	check[ell] = 6.0 * x[1];
	s_vector   not_used;
	n_sweep    = f.SparseHessian(x, w, not_used, row, col, hes, work);
	for(k = 0; k < K; k++)
	{	ell = row[k] * n + col[k];
		ok &=  NearEqual(w[0] * check[ell], hes[k], eps, eps );
	}
	ok &= n_sweep == 2;



	return ok;
}
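
The (row, col, hes) form above only stores the requested upper triangle entries. A small helper like the one below (hypothetical, not part of the example; it uses std::vector for simplicity) scatters such a triplet into a dense row-major n by n matrix, mirroring each entry since the Hessian is symmetric:

# include <cstddef>
# include <vector>

std::vector<double> triplet_to_dense(
	size_t                      n   ,
	const std::vector<size_t>&  row ,
	const std::vector<size_t>&  col ,
	const std::vector<double>&  hes )
{	std::vector<double> dense(n * n, 0.0);
	for(size_t k = 0; k < row.size(); k++)
	{	dense[ row[k] * n + col[k] ] = hes[k];
		// symmetric mirror (diagonal entries get written twice, harmlessly)
		dense[ col[k] * n + row[k] ] = hes[k];
	}
	return dense;
}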
Code Example #6
File: set_sparsity.cpp  Project: barak/cppad
/* %$$
$head Test Atomic Function$$
$srccode%cpp% */
bool set_sparsity(void)
{   bool ok = true;
    using CppAD::AD;
    using CppAD::NearEqual;
    double eps = 10. * std::numeric_limits<double>::epsilon();
/* %$$
$subhead Constructor$$
$srccode%cpp% */
    // Create the atomic set_sparsity object
    atomic_set_sparsity afun("atomic_set_sparsity");
/* %$$
$subhead Recording$$
$srccode%cpp% */
    size_t n = 3;
    size_t m = 2;
    vector< AD<double> > ax(n), ay(m);
    for(size_t j = 0; j < n; j++)
        ax[j] = double(j + 1);

    // declare independent variables and start tape recording
    CppAD::Independent(ax);

    // call atomic function
    afun(ax, ay);

    // create f: x -> y and stop tape recording
    CppAD::ADFun<double> f;
    f.Dependent (ax, ay);  // f(x) = ( x_2 , x_0 * x_1 )

    // check function value
    ok &= NearEqual(ay[0] , ax[2],  eps, eps);
    ok &= NearEqual(ay[1] , ax[0] * ax[1],  eps, eps);

/* %$$
$subhead for_sparse_jac$$
$srccode%cpp% */
    // correct Jacobian result
    set_vector check_s(m);
    check_s[0].insert(2);
    check_s[1].insert(0);
    check_s[1].insert(1);
    // compute and test forward mode
    {   set_vector r(n), s(m);
        for(size_t i = 0; i < n; i++)
            r[i].insert(i);
        s = f.ForSparseJac(n, r);
        for(size_t i = 0; i < m; i++)
            ok &= s[i] == check_s[i];
    }
/* %$$
$subhead rev_sparse_jac$$
$srccode%cpp% */
    // compute and test reverse mode
    {   set_vector r(m), s(m);
        for(size_t i = 0; i < m; i++)
            r[i].insert(i);
        s = f.RevSparseJac(m, r);
        for(size_t i = 0; i < m; i++)
            ok &= s[i] == check_s[i];
    }
/* %$$
$subhead for_sparse_hes$$
$srccode%cpp% */
    // correct Hessian result
    set_vector check_h(n);
    check_h[0].insert(1);
    check_h[1].insert(0);
    // compute and test forward mode
    {   set_vector r(1), s(1), h(n);
        for(size_t i = 0; i < m; i++)
            s[0].insert(i);
        for(size_t j = 0; j < n; j++)
            r[0].insert(j);
        h = f.ForSparseHes(r, s);
        for(size_t i = 0; i < n; i++)
            ok &= h[i] == check_h[i];
    }
/* %$$
$subhead rev_sparse_hes$$
Note the previous call to $code ForSparseJac$$ above
stored its results in $icode f$$.
$srccode%cpp% */
    // compute and test reverse mode
    {   set_vector s(1), h(n);
        for(size_t i = 0; i < m; i++)
            s[0].insert(i);
        h = f.RevSparseHes(n, s);
        for(size_t i = 0; i < n; i++)
            ok &= h[i] == check_h[i];
    }
/* %$$
$subhead Test Result$$
$srccode%cpp% */
    return ok;
}
Code Example #7
File: sub_sparse_hes.cpp  Project: barak/CppAD-1
bool sub_sparse_hes(void)
{	bool ok = true;
	using CppAD::AD;
	typedef AD<double>   adouble;
	typedef AD<adouble> a2double;
	typedef vector< std::set<size_t> > pattern;
	double eps = 10. * std::numeric_limits<double>::epsilon();
	size_t i, j;

	// start recording with x = (u , v)
	size_t nu = 10;
	size_t nv = 5;
	size_t n  = nu + nv;
	vector<adouble> ax(n);
	for(j = 0; j < n; j++)
		ax[j] = adouble(j + 2);
	CppAD::Independent(ax);

	// extract u as independent variables
	vector<a2double> a2u(nu);
	for(j = 0; j < nu; j++)
		a2u[j] = a2double(j + 2);
	CppAD::Independent(a2u);

	// extract v as parameters
	vector<a2double> a2v(nv);
	for(j = 0; j < nv; j++)
		a2v[j] = ax[nu+j];

	// record g(u)
	vector<a2double> a2y(1);
	a2y[0] = f(a2u, a2v);
	CppAD::ADFun<adouble> g;
	g.Dependent(a2u, a2y);

	// compute sparsity pattern for Hessian of g(u)
	pattern r(nu), s(1);
	for(j = 0; j < nu; j++)
		r[j].insert(j);
	g.ForSparseJac(nu, r);
	s[0].insert(0);
	pattern p = g.RevSparseHes(nu, s);

	// Row and column indices for non-zeros in lower triangle of Hessian
	vector<size_t> row, col;
	for(i = 0; i < nu; i++)
	{	std::set<size_t>::const_iterator itr;
		for(itr = p[i].begin(); itr != p[i].end(); itr++)
		{	j = *itr;
			if( j <= i )
			{	row.push_back(i);
				col.push_back(j);
			}
		}
	}
	size_t K = row.size();
	CppAD::sparse_hessian_work work;
	vector<adouble> au(nu), ahes(K), aw(1);
	aw[0] = 1.0;
	for(j = 0; j < nu; j++)
		au[j] = ax[j];
	size_t n_sweep = g.SparseHessian(au, aw, p, row, col, ahes, work);

	// The Hessian w.r.t u is diagonal
	ok &= n_sweep == 1;

	// record H(u, v) = Hessian of f w.r.t u
	CppAD::ADFun<double> H(ax, ahes);

	// remove unnecessary operations
	H.optimize();

	// Now evaluate the Hessian at a particular value for u, v
	vector<double> u(nu), v(nv), x(n);
	for(j = 0; j < n; j++)
		x[j] = double(j + 2);
	vector<double> hes = H.Forward(0, x);

	// Now check the Hessian
	double sum_v = 0.0;
	for(j = 0; j < nv; j++)
		sum_v += x[nu + j];
	for(size_t k = 0; k < K; k++)
	{	i     = row[k];
		j     = col[k];
		ok   &= i == j;
		double check = sum_v * x[i];
		ok &= CppAD::NearEqual(hes[k], check, eps, eps);
	}
	return ok;
}
Code Example #8
File: sparsity.cpp  Project: ruby-eigen/CppAD
/* %$$
$head Use Atomic Function$$
$srccode%cpp% */
bool sparsity(void)
{	bool ok = true;
	using CppAD::AD;
	using CppAD::NearEqual;
	double eps = 10. * std::numeric_limits<double>::epsilon();
/* %$$
$subhead Constructor$$
$srccode%cpp% */
	// Create the atomic sparsity object
	atomic_sparsity afun("atomic_sparsity");
/* %$$
$subhead Recording$$
$srccode%cpp% */
	size_t n = 3;
	size_t m = 2;
	vector< AD<double> > ax(n), ay(m);
	for(size_t j = 0; j < n; j++)
		ax[j] = double(j + 1);

	// declare independent variables and start tape recording
	CppAD::Independent(ax);

	// call user function
	afun(ax, ay);

	// create f: x -> y and stop tape recording
	CppAD::ADFun<double> f;
	f.Dependent (ax, ay);  // f(x) = ( x_2 , x_0 * x_1 )

	// check function value
	ok &= NearEqual(ay[0] , ax[2],  eps, eps);
	ok &= NearEqual(ay[1] , ax[0] * ax[1],  eps, eps);

/* %$$
$subhead forsparse_jac and rev_sparse_jac$$
$srccode%cpp% */
	for(size_t dir = 0; dir < 2; dir++)
	{	size_t ell;
		if( dir == 0 )
			ell = n;
		else
			ell = m;

		// identity matrix
		vectorBool r(ell * ell);
		for(size_t i = 0; i < ell; i++)
			for(size_t j = 0; j < ell; j++)
				r[i * ell + j] = (i == j);

		vectorBool s;
		if( dir == 0 )
			s = f.ForSparseJac(n, r);
		else
			s = f.RevSparseJac(m, r);

		// check Jacobian result
		ok  &= s.size() == m * n;
		ok  &= s[0 * n + 0] == false;
		ok  &= s[0 * n + 1] == false;
		ok  &= s[0 * n + 2] == true;
		ok  &= s[1 * n + 0] == true;
		ok  &= s[1 * n + 1] == true;
		ok  &= s[1 * n + 2] == false;
	}
/* %$$
$subhead rev_sparse_hes$$
$srccode%cpp% */
	vectorBool s(m), h(n * n);
	s[0] = true;
	s[1] = true;
	h    = f.RevSparseHes(n, s);
	for(size_t i = 0; i < n; i++)
	{	for(size_t j = 0; j < n; j++)
		{	bool check = false;
			check     |= (i == 0) && (j == 1);
			check     |= (j == 0) && (i == 1);
			ok        &= h[ i * n + j] == check;
		}
	}
	//
	return ok;
}
Code Example #9
File: old_mat_mul.cpp  Project: amonal42/CppAD
bool old_mat_mul(void)
{	bool ok = true;
	using CppAD::AD;

	// matrix sizes for this test
	size_t nr_result = 2;
	size_t n_middle  = 2;
	size_t nc_result = 2;
	
	// declare the AD<double> vectors ax and ay and X 
	size_t n = nr_result * n_middle + n_middle * nc_result;
	size_t m = nr_result * nc_result;
	CppAD::vector< AD<double> > X(4), ax(n), ay(m);
	size_t i, j;
	for(j = 0; j < X.size(); j++)
		X[j] = (j + 1);

	// X is the vector of independent variables
	CppAD::Independent(X);
	// left matrix
	ax[0]  = X[0];  // left[0,0]   = x[0] = 1
	ax[1]  = X[1];  // left[0,1]   = x[1] = 2
	ax[2]  = 5.;    // left[1,0]   = 5
	ax[3]  = 6.;    // left[1,1]   = 6
	// right matrix
	ax[4]  = X[2];  // right[0,0]  = x[2] = 3
	ax[5]  = 7.;    // right[0,1]  = 7
	ax[6]  = X[3];  // right[1,0]  = x[3] = 4 
	ax[7]  = 8.;    // right[1,1]  = 8
	/*
	[ x0 , x1 ] * [ x2 , 7 ] = [ x0*x2 + x1*x3 , x0*7 + x1*8 ]
	[ 5  , 6 ]    [ x3 , 8 ]   [ 5*x2  + 6*x3  , 5*7 + 6*8 ]
	*/

	// The call back routines need to know the dimensions of the matrices.
	// Store information about the matrix multiply for this call to mat_mul.
	call_info info;
	info.nr_result = nr_result;
	info.n_middle  = n_middle;
	info.nc_result = nc_result;
	// info.vx gets set by forward during call to mat_mul below
	assert( info.vx.size() == 0 ); 
	size_t id      = info_.size();
	info_.push_back(info);

	// user defined AD<double> version of matrix multiply
	mat_mul(id, ax, ay);
	//----------------------------------------------------------------------
	// check AD<double>  results
	ok &= ay[0] == (1*3 + 2*4); ok &= Variable( ay[0] );
	ok &= ay[1] == (1*7 + 2*8); ok &= Variable( ay[1] );
	ok &= ay[2] == (5*3 + 6*4); ok &= Variable( ay[2] );
	ok &= ay[3] == (5*7 + 6*8); ok &= Parameter( ay[3] );
	//----------------------------------------------------------------------
	// use mat_mul to define a function g : X -> ay
	CppAD::ADFun<double> G;
	G.Dependent(X, ay);
	// g(x) = [ x0*x2 + x1*x3 , x0*7 + x1*8 , 5*x2  + 6*x3  , 5*7 + 6*8 ]^T
	//----------------------------------------------------------------------
	// Test zero order forward mode evaluation of g(x)
	CppAD::vector<double> x( X.size() ), y(m);
	for(j = 0; j <  X.size() ; j++)
		x[j] = j + 2;
	y = G.Forward(0, x);
	ok &= y[0] == x[0] * x[2] + x[1] * x[3];
	ok &= y[1] == x[0] * 7.   + x[1] * 8.;
	ok &= y[2] == 5. * x[2]   + 6. * x[3];
	ok &= y[3] == 5. * 7.     + 6. * 8.;

	//----------------------------------------------------------------------
	// Test first order forward mode evaluation of g'(x) * [1, 2, 3, 4]^T 
	// g'(x) = [ x2, x3, x0, x1 ]
	//         [ 7 ,  8,  0, 0  ]
	//         [ 0 ,  0,  5, 6  ]
	//         [ 0 ,  0,  0, 0  ] 
	CppAD::vector<double> dx( X.size() ), dy(m);
	for(j = 0; j <  X.size() ; j++)
		dx[j] = j + 1;
	dy = G.Forward(1, dx);
	ok &= dy[0] == 1. * x[2] + 2. * x[3] + 3. * x[0] + 4. * x[1];
	ok &= dy[1] == 1. * 7.   + 2. * 8.   + 3. * 0.   + 4. * 0.;
	ok &= dy[2] == 1. * 0.   + 2. * 0.   + 3. * 5.   + 4. * 6.;
	ok &= dy[3] == 1. * 0.   + 2. * 0.   + 3. * 0.   + 4. * 0.;

	//----------------------------------------------------------------------
	// Test second order forward mode 
	// g_0^2 (x) = [ 0, 0, 1, 0 ], g_0^2 (x) * [1] = [3]
	//             [ 0, 0, 0, 1 ]              [2]   [4]
	//             [ 1, 0, 0, 0 ]              [3]   [1]
	//             [ 0, 1, 0, 0 ]              [4]   [2]
	CppAD::vector<double> ddx( X.size() ), ddy(m);
	for(j = 0; j <  X.size() ; j++)
		ddx[j] = 0.;
	ddy = G.Forward(2, ddx);
	// [1, 2, 3, 4] * g_0^2 (x) * [1, 2, 3, 4]^T = 1*3 + 2*4 + 3*1 + 4*2
	ok &= 2. * ddy[0] == 1. * 3. + 2. * 4. + 3. * 1. + 4. * 2.; 
	// for i > 0, [1, 2, 3, 4] * g_i^2 (x) * [1, 2, 3, 4]^T = 0
	ok &= ddy[1] == 0.;
	ok &= ddy[2] == 0.;
	ok &= ddy[3] == 0.;

	//----------------------------------------------------------------------
	// Test second order reverse mode 
	CppAD::vector<double> w(m), dw(2 *  X.size() );
	for(i = 0; i < m; i++)
		w[i] = 0.;
	w[0] = 1.;
	dw = G.Reverse(2, w);
	// g_0'(x) = [ x2, x3, x0, x1 ]
	ok &= dw[0*2 + 0] == x[2];
	ok &= dw[1*2 + 0] == x[3];
	ok &= dw[2*2 + 0] == x[0];
	ok &= dw[3*2 + 0] == x[1];
	// g_0'(x)   * [1, 2, 3, 4]  = 1 * x2 + 2 * x3 + 3 * x0 + 4 * x1
	// g_0^2 (x) * [1, 2, 3, 4]  = [3, 4, 1, 2]
	ok &= dw[0*2 + 1] == 3.;
	ok &= dw[1*2 + 1] == 4.;
	ok &= dw[2*2 + 1] == 1.;
	ok &= dw[3*2 + 1] == 2.;

	//----------------------------------------------------------------------
	// Test forward and reverse Jacobian sparsity pattern
	/*
	[ x0 , x1 ] * [ x2 , 7 ] = [ x0*x2 + x1*x3 , x0*7 + x1*8 ]
	[ 5  , 6 ]    [ x3 , 8 ]   [ 5*x2  + 6*x3  , 5*7 + 6*8 ]
	so the sparsity pattern should be
	s[0] = {0, 1, 2, 3}
	s[1] = {0, 1}
	s[2] = {2, 3}
	s[3] = {}
	*/
	CppAD::vector< std::set<size_t> > r( X.size() ), s(m);
	for(j = 0; j <  X.size() ; j++)
	{	assert( r[j].empty() );
		r[j].insert(j);
	}
	s = G.ForSparseJac( X.size() , r);
	for(j = 0; j <  X.size() ; j++)
	{	// s[0] = {0, 1, 2, 3}
		ok &= s[0].find(j) != s[0].end();
		// s[1] = {0, 1}
		if( j == 0 || j == 1 )
			ok &= s[1].find(j) != s[1].end();
		else	ok &= s[1].find(j) == s[1].end();
		// s[2] = {2, 3}
		if( j == 2 || j == 3 )
			ok &= s[2].find(j) != s[2].end();
		else	ok &= s[2].find(j) == s[2].end();
	}
	// s[3] == {}
	ok &= s[3].empty();
	
	//----------------------------------------------------------------------
	// Test reverse Jacobian sparsity pattern
	/*
	[ x0 , x1 ] * [ x2 , 7 ] = [ x0*x2 + x1*x3 , x0*7 + x1*8 ]
	[ 5  , 6 ]    [ x3 , 8 ]   [ 5*x2  + 6*x3  , 5*7 + 6*8 ]
	so the sparsity pattern should be
	r[0] = {0, 1, 2, 3}
	r[1] = {0, 1}
	r[2] = {2, 3}
	r[3] = {}
	*/
	for(i = 0; i <  m; i++)
	{	s[i].clear();
		s[i].insert(i);
	}
	r = G.RevSparseJac(m, s);
	for(j = 0; j <  X.size() ; j++)
	{	// r[0] = {0, 1, 2, 3}
		ok &= r[0].find(j) != r[0].end();
		// r[1] = {0, 1}
		if( j == 0 || j == 1 )
			ok &= r[1].find(j) != r[1].end();
		else	ok &= r[1].find(j) == r[1].end();
		// r[2] = {2, 3}
		if( j == 2 || j == 3 )
			ok &= r[2].find(j) != r[2].end();
		else	ok &= r[2].find(j) == r[2].end();
	}
	// r[3] == {}
	ok &= r[3].empty();

	//----------------------------------------------------------------------
	/* Test reverse Hessian sparsity pattern
	g_0^2 (x) = [ 0, 0, 1, 0 ] and for i > 0, g_i^2 = 0
	            [ 0, 0, 0, 1 ]
	            [ 1, 0, 0, 0 ]
	            [ 0, 1, 0, 0 ]
	so the sparsity pattern for the first component of g is
	h[0] = {2}
	h[1] = {3}
	h[2] = {0}
	h[3] = {1}
	*/
	CppAD::vector< std::set<size_t> > h( X.size() ), t(1);
	t[0].clear();
	t[0].insert(0);
	h = G.RevSparseHes(X.size() , t);
	size_t check[] = {2, 3, 0, 1};
	for(j = 0; j <  X.size() ; j++)
	{	// h[j] = { check[j] }
		for(i = 0; i < n; i++) 
		{	if( i == check[j] )
				ok &= h[j].find(i) != h[j].end();
			else	ok &= h[j].find(i) == h[j].end();
		}
	}
	t[0].clear();
	for( j = 1; j < X.size(); j++)
			t[0].insert(j);
	h = G.RevSparseHes(X.size() , t);
	for(j = 0; j <  X.size() ; j++)
	{	// h[j] = { }
		for(i = 0; i < X.size(); i++) 
			ok &= h[j].find(i) == h[j].end();
	}

	// --------------------------------------------------------------------
	// Free temporary work space. (If there are future calls to 
	// old_mat_mul they would create new temporary work space.)
	CppAD::user_atomic<double>::clear();
	info_.clear();

	return ok;
}
Code Example #10
File: symb_cadtemp.cpp  Project: ZiiCee/OPTI
void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[])
{
	double *v, *x, sigma, *lambda, *pr;
	char *mode; int imode;

    //Check Inputs
    if(nrhs < 1) {
        printInfo();        
        return;
    }   
    
    if(mxIsEmpty(prhs[0]) || !mxIsChar(prhs[0])) {
        mexErrMsgTxt("The mode must be a string!");
        return;
    }
    
    //If we have x, check it
    if(nrhs > 1) {
        if(!mxIsEmpty(prhs[1])) {
            if(mxIsClass(prhs[1],"scipvar") || mxIsClass(prhs[1],"barvec")) {
                mexErrMsgTxt("SCIP and BARON cannot be used with this callback function - please specify 'mcode' via symbset as the cbmode.");
                return;
            }           
            if(!mxIsDouble(prhs[1]) || mxIsComplex(prhs[1]) || mxIsSparse(prhs[1])) {
                mexErrMsgTxt("The input vector must be a dense real double vector!");
                return;
            }
        }
        else {
            mexErrMsgTxt("The input vector must be a dense real double vector!");
            return;
        }
        //Check x input size
        if(mxGetNumberOfElements(prhs[1]) != getNoVar()) {
            mexErrMsgTxt("The input vector is not the right size!");
        }
        //Allocate memory, if required
        if(xvec.empty())
            xvec.resize(getNoVar());        
        //Get x and copy to xvec
        x = mxGetPr(prhs[1]);
        memcpy(&xvec[0],x,getNoVar()*sizeof(double));
    }
	
	//Determine input mode and setup return variable
	mode = mxArrayToString(prhs[0]);
	lower(mode);
	if(!strcmp(mode,"obj")) {
		imode = 0;
		plhs[0] = mxCreateDoubleMatrix(1,1, mxREAL);
		v = mxGetPr(plhs[0]);
	}
	else if(!strcmp(mode,"grad")) {
		imode = 1;
		plhs[0] = mxCreateDoubleMatrix(1,getNoVar(), mxREAL);
		v = mxGetPr(plhs[0]);
	}
	else if(!strcmp(mode,"con")) {
		imode = 2;
		plhs[0] = mxCreateDoubleMatrix(getNoCon(),1, mxREAL);
		v = mxGetPr(plhs[0]);
	}
	else if(!strcmp(mode,"jac")) {
		imode = 3;
        //Can't allocate here until we know sparsity pattern		
	}
    else if(!strcmp(mode,"jacstr")) {
		imode = 4;
        //Can't allocate here until we know sparsity pattern		
	}
    else if(!strcmp(mode,"hess")) {
		if(nrhs < 4) {
			mexErrMsgTxt("You must supply the callback mode, input vector, sigma and lambda for Hessian Evaluations.");
			return;
		}
		//Check length of Sigma
		if(mxIsEmpty(prhs[2]) || !mxIsDouble(prhs[2]) || mxIsComplex(prhs[2]) || mxGetNumberOfElements(prhs[2]) != 1)
			mexErrMsgTxt("Sigma must be a real, double scalar.");
		//Check length of Lambda
		if(!mxIsDouble(prhs[3]) || mxIsComplex(prhs[3]) || mxIsSparse(prhs[3]) || mxGetNumberOfElements(prhs[3]) != getNoCon())
			mexErrMsgTxt("Lambda must be a real, double, dense vector with ncon elements.");
		//Get Sigma, Lambda
		sigma = *mxGetPr(prhs[2]);
        lambda = mxGetPr(prhs[3]);
		imode = 5;		
        //Can't allocate here until we know sparsity pattern	
	}
    else if(!strcmp(mode,"hstr")) {
        imode = 6;
        //Can't allocate here until we know sparsity pattern	
    }
	else
		mexErrMsgTxt("Unknown mode - options are 'obj', 'grad', 'con', 'jac', 'jacstr', 'hess' or 'hstr'");
	mxFree(mode);
	
	//Ensure we did have x for normal callbacks
    if(imode != 4 && imode != 6 && nrhs < 2)
        mexErrMsgTxt("You must supply the callback mode and input vector.");
    
	//Call Req Callback
	switch(imode)
	{
		case 0: //objective            
			*v = objective(xvec);
			break;
		case 1: //gradient
            //Check if we have recorded the objective yet
            if(obj.Memory()==0) { //new, tape operations
                vector< CppAD::AD<double> > X(getNoVar());
                for(size_t i = 0; i < getNoVar(); i++)
                    X[i] = x[i];    // element-wise copy: AD<double> is not a raw double
                CppAD::Independent(X);
                vector< CppAD::AD<double> > Y(1);
                Y[0] = objective(X);     
                obj = CppAD::ADFun<double>(X, Y);
                //obj.optimize();
                mexAtExit(mexExit); //also register memory clear function
                //mexPrintf("Evaluated Tape for Gradient\n");
            }
            //Evaluate "Jacobian" for gradient
            memcpy(v,&(obj.Jacobian(xvec)[0]),getNoVar()*sizeof(double));
			break;
		case 2: //constraints
            //Check if we have constraint memory yet
            if(cvec.empty())
                cvec.resize(getNoCon()); //allocate it
            //Evaluate Constraints
			constraints(xvec,cvec);
            //Copy Out
            memcpy(v,&cvec[0],getNoCon()*sizeof(double));
			break;
		case 3: //jacobian
        case 4: //jacobian structure
			//Check if we have recorded the constraints yet
            if(con.Memory()==0){ //new, tape operations
                vector< CppAD::AD<double> > X(getNoVar());
                for(size_t i = 0; i < getNoVar(); i++)
                    X[i] = x[i];    // element-wise copy: AD<double> is not a raw double
                CppAD::Independent(X);
                vector< CppAD::AD<double> > Y(getNoCon());
                constraints(X,Y);     
                con = CppAD::ADFun<double>(X, Y);
                //con.optimize();
                mexAtExit(mexExit); //also register memory clear function
                //mexPrintf("Evaluated Tape for Jacobian\n");
            }
            //Check if we have the sparsity pattern yet
            if(jstr.empty()) {                
                vector<set<size_t>> r(getNoVar());
                for(size_t i = 0; i < getNoVar(); i++)
                    r[i].insert(i); //identity matrix 
                jstr.resize(getNoCon());
                jstr = con.ForSparseJac(getNoVar(),r,true); //note transpose
                //Determine nnzs
                for(int i = 0; i < jstr.size(); i++)
                    nnzJac += jstr[i].size();
                //Save ir, jc for jac
                jir = (mwIndex*)mxCalloc(nnzJac,sizeof(mwIndex));
                jjc = (mwIndex*)mxCalloc(getNoVar()+1,sizeof(mwIndex));                
                mexMakeMemoryPersistent(jir);
                mexMakeMemoryPersistent(jjc);
                jwork.clear(); //reset jacobian calculations
                //Col starts
                jjc[0] = 0;
                for(int i = 1; i <= getNoVar(); i++)
                    jjc[i] = (mwIndex)(jjc[i-1] + jstr[i-1].size());
                //Rows
                size_t idx = 0;
                for(int i = 0; i < jstr.size(); i++)
                    for (set<size_t>::iterator it=jstr[i].begin(); it!=jstr[i].end(); ++it)
                        jir[idx++] = (mwIndex)*it;
                //Build missing triplet so we can eval just the sparse elements of the Jacobian
                jrow.resize(nnzJac);
                jcol.resize(nnzJac);
                idx = 0;
                for(size_t i = 0; i < nnzJac; i++)
                    jrow[i] = jir[i];
                for(size_t i = 0; i < getNoVar();i++)
                    for(size_t j = jjc[i]; j < jjc[i+1]; j++)
                        jcol[idx++] = i;
                //Re-do with no transpose... (bad really...)
                jstr = con.ForSparseJac(getNoVar(),r,false); 
                //mexPrintf("Determined Jac Sparsity Structure (%d nzs)\n",nnzJac);
            }
            //Create Sparse Return Matrix
            plhs[0] = mxCreateSparse(getNoCon(),getNoVar(),nnzJac,mxREAL);   
            pr = mxGetPr(plhs[0]);
            memcpy(mxGetIr(plhs[0]),jir,nnzJac*sizeof(mwIndex));
            memcpy(mxGetJc(plhs[0]),jjc,(getNoVar()+1)*sizeof(mwIndex));
            //If we want the sparsity pattern only, fill in return matrix with 1s
            if(imode==4) {   
                for(int i = 0; i < nnzJac; i++)
                	pr[i] = 1.0;                
            }
            //Else, evaluate sparse jacobian and return as sparse matrix
            else {
                //Check if we have jacobian memory yet
                if(jac.empty())
                    jac.resize(nnzJac); //allocate it
                //If ndec > ncon, use reverse mode
                if(getNoVar() > getNoCon())                   
                    con.SparseJacobianReverse(xvec,jstr,jrow,jcol,jac,jwork);
                //else use forward
                else
                    con.SparseJacobianForward(xvec,jstr,jrow,jcol,jac,jwork);
                //Copy out
                memcpy(pr,&jac[0],nnzJac*sizeof(double));
            }
			break;
		case 5: //hessian of the lagrangian
        case 6: //hessian structure
            //Check if we have recorded the objective+constraints yet
            //Not sure if we can reuse ones we have done above??
            if(lag.Memory()==0){ //new, tape operations
                vector< CppAD::AD<double> > X(getNoVar());
                for(size_t i = 0; i < getNoVar(); i++)
                    X[i] = x[i];    // element-wise copy: AD<double> is not a raw double
                CppAD::Independent(X);
                //Output Array
                vector< CppAD::AD<double> > Y(1); 
                vector< CppAD::AD<double> > Yc(getNoCon()); 
                Y[0] = objective(X); //eval objective   
                if(getNoCon() > 0)
                    constraints(X,Yc); //eval constraints     
                Yc.insert(Yc.begin(),Y.begin(),Y.end());
                //Create ADFun
                lag.Dependent(Yc);
                //lag.optimize();
                mexAtExit(mexExit); //also register memory clear function
                //mexPrintf("Evaluated Tape for Hessian\n");
            }
            //Check if we have the sparsity pattern yet
            if(hstr.empty()) {        
                //First eval jac structure (RevSparseHes uses the pattern stored by ForSparseJac)
                vector< std::set<size_t> > r(getNoVar());
                for(size_t i = 0; i < getNoVar(); i++)
                    r[i].insert(i);
                lag.ForSparseJac(getNoVar(), r);
                //Now do Hessian structure
                vector<set<size_t>> s(1);
                for(size_t i = 0; i < getNoCon()+1; i++)
                    s[0].insert(i); //select every range component (objective + all constraints)
                hstr.resize(getNoVar());
                hstr = lag.RevSparseHes(getNoVar(),s); 
                //Determine total nnzs
                for(int i = 0; i < hstr.size(); i++)
                    nnzHess += hstr[i].size();
                //Determine nnzs in lower tri
                for(int i = 0; i < hstr.size(); i++)
                    for (set<size_t>::iterator it=hstr[i].begin(); it!=hstr[i].end(); ++it)
                        if(*it >= i)
                            nnzHessLT++;
                
                //Save ir, jc for hess
                hir = (mwIndex*)mxCalloc(nnzHessLT,sizeof(mwIndex));
                hjc = (mwIndex*)mxCalloc(getNoVar()+1,sizeof(mwIndex));                
                mexMakeMemoryPersistent(hir);
                mexMakeMemoryPersistent(hjc);
                hwork.clear(); //reset hessian calculations
                //Col & Row Starts
                size_t idx = 0;
                for(int i = 0; i < hstr.size(); i++) {
                    hjc[i] = idx;
                    for (set<size_t>::iterator it=hstr[i].begin(); it!=hstr[i].end(); ++it)
                        if(*it >= i)
                            hir[idx++] = (mwIndex)*it;
                }
                hjc[getNoVar()] = nnzHessLT;
                //Build missing triplet so we can eval just the sparse elements of the Hessian
                hrow.resize(nnzHessLT);
                hcol.resize(nnzHessLT);
                idx = 0;
                for(size_t i = 0; i < nnzHessLT; i++)
                    hrow[i] = hir[i];
                for(size_t i = 0; i < getNoVar();i++)
                    for(size_t j = hjc[i]; j < hjc[i+1]; j++)
                        hcol[idx++] = i;
                //mexPrintf("Determined Hess Sparsity Structure (%d nzs in tril)\n",nnzHessLT);
            }
            //Create Sparse Return Matrix
            plhs[0] = mxCreateSparse(getNoVar(),getNoVar(),nnzHessLT,mxREAL);   
            pr = mxGetPr(plhs[0]);
            memcpy(mxGetIr(plhs[0]),hir,nnzHessLT*sizeof(mwIndex));
            memcpy(mxGetJc(plhs[0]),hjc,(getNoVar()+1)*sizeof(mwIndex));
            //If we want the sparsity pattern only, fill in return matrix with 1s
            if(imode==6) {   
                for(int i = 0; i < nnzHessLT; i++)
                	pr[i] = 1.0;                
            }
            //Else, evaluate sparse hessian and return as sparse matrix
            else {
                //Check if we have hessian memory yet
                if(hes.empty())
                    hes.resize(nnzHessLT); //allocate it  
                if(w.empty())
                    w.resize(1+getNoCon()); //allocate it
                //Copy in Weights
                w[0] = sigma;
                for(int i = 0; i < getNoCon(); i++)
                    w[i+1] = lambda[i];
                //Evaluate the sparse Hessian of the Lagrangian
                lag.SparseHessian(xvec,w,hstr,hrow,hcol,hes,hwork);
                //Copy out elements
                memcpy(pr,&hes[0],nnzHessLT*sizeof(double));
            }
			break;
	}
}
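
The Jacobian branch above builds MATLAB's compressed column (jc/ir) arrays from the transposed set-based sparsity pattern inline. Isolated, the conversion amounts to the following sketch (hypothetical helper names, not part of the original MEX file; it assumes pattern[j] holds the non-zero row indices of column j):

#include <cstddef>
#include <set>
#include <vector>

// Convert a column-wise set pattern into compressed sparse column arrays:
// entries jc[j] .. jc[j+1]-1 belong to column j, ir holds their row indices.
void pattern_to_csc(
    const std::vector< std::set<size_t> >& pattern ,
    std::vector<size_t>&                   jc      ,
    std::vector<size_t>&                   ir      )
{
    size_t ncol = pattern.size();
    jc.assign(ncol + 1, 0);
    ir.clear();
    for(size_t j = 0; j < ncol; j++)
    {
        jc[j + 1] = jc[j] + pattern[j].size();
        for(std::set<size_t>::const_iterator it = pattern[j].begin();
            it != pattern[j].end(); ++it)
                ir.push_back(*it);  // std::set iterates in increasing row order
    }
}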