void put_check_for_nan(const CppAD::vector<Base>& vec, std::string& file_name)
{
    size_t char_size     = sizeof(Base) * vec.size();
    const char* char_ptr = reinterpret_cast<const char*>( vec.data() );
# if CPPAD_HAS_MKSTEMP
    char pattern[] = "/tmp/fileXXXXXX";
    int fd         = mkstemp(pattern);
    file_name      = pattern;
    write(fd, char_ptr, char_size);
    close(fd);
# else
# if CPPAD_HAS_TMPNAM_S
    std::vector<char> name(L_tmpnam_s);
    if( tmpnam_s( name.data(), L_tmpnam_s ) != 0 ) {
        CPPAD_ASSERT_KNOWN(
            false,
            "Cannot create a temporary file name"
        );
    }
    file_name = name.data();
# else
    file_name = tmpnam( CPPAD_NULL );
# endif
    std::fstream file_out(file_name.c_str(), std::ios::out|std::ios::binary );
    file_out.write(char_ptr, char_size);
    file_out.close();
# endif
    return;
}
/// inform CppAD that this information needs to be recomputed
void clear(void)
{
    user_row.clear();
    user_col.clear();
    sort_row.clear();
    sort_col.clear();
    color.clear();
}
/// inform CppAD that this information needs to be recomputed
void clear(void)
{
    color_method = "cppad.symmetric";
    row.clear();
    col.clear();
    order.clear();
    color.clear();
}
void ode_z(
    const Float&                t ,
    const CppAD::vector<Float>& z ,
    CppAD::vector<Float>&       h )
{
    // z   = [ y ; y_x ]
    // z_t = h(t, x, z) = [ y_t , y_x_t ]
    size_t i, j;
    size_t n = x_.size();
    CPPAD_ASSERT_UNKNOWN( z.size() == n + n * n );

    // y_t
    for(i = 0; i < n; i++) {
        h[i] = x_[i] * z[i];
        // initialize y_x_t as zero
        for(j = 0; j < n; j++)
            h[n + i * n + j] = 0.;
    }
    for(i = 0; i < n; i++) {
        // partial of g_i w.r.t y_i
        Float gi_yi = x_[i];
        // partial of g_i w.r.t x_i
        Float gi_xi = z[i];
        // partial of y_i w.r.t x_i
        Float yi_xi = z[n + i * n + i];
        // derivative of yi_xi with respect to t
        h[n + i * n + i] = gi_xi + gi_yi * yi_xi;
    }
}
void ode_evaluate(
    CppAD::vector<Float>& x  ,
    size_t                m  ,
    CppAD::vector<Float>& fm )
{
    typedef CppAD::vector<Float> Vector;

    size_t n = x.size();
    size_t ell;
    CPPAD_ASSERT_KNOWN( m == 0 || m == 1,
        "ode_evaluate: m is not zero or one"
    );
    CPPAD_ASSERT_KNOWN(
        ((m==0) & (fm.size()==n)) || ((m==1) & (fm.size()==n*n)),
        "ode_evaluate: the size of fm is not correct"
    );
    if( m == 0 )
        ell = n;
    else
        ell = n + n * n;

    // set up the case we are integrating
    Float ti   = 0.;
    Float tf   = 1.;
    Float smin = 1e-5;
    Float smax = 1.;
    Float scur = 1.;
    Float erel = 0.;
    vector<Float> yi(ell), eabs(ell);
    size_t i, j;
    for(i = 0; i < ell; i++) {
        eabs[i] = 1e-10;
        if( i < n )
            yi[i] = 1.;
        else
            yi[i] = 0.;
    }

    // return values
    Vector yf(ell), ef(ell), maxabs(ell);
    size_t nstep;

    // construct ode method for taking one step
    ode_evaluate_method<Float> method(m, x);

    // solve differential equation
    yf = OdeErrControl(method,
        ti, tf, yi, smin, smax, scur, eabs, erel, ef, maxabs, nstep);

    if( m == 0 ) {
        for(i = 0; i < n; i++)
            fm[i] = yf[i];
    }
    else {
        for(i = 0; i < n; i++)
            for(j = 0; j < n; j++)
                fm[i * n + j] = yf[n + i * n + j];
    }
    return;
}
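// A minimal usage sketch for the ode_evaluate routine above, assuming
// Float = double and that the routine is in scope (for example via CppAD's
// speed utilities). With m = 0 the result is y(1, x); with m = 1 it is the
// n x n Jacobian of y(1, x) with respect to x in row major order. The value
// n = 3 and the x values are illustrative only.
# include <cstdio>
# include <cppad/cppad.hpp>

void ode_evaluate_example(void)
{   size_t n = 3;
    CppAD::vector<double> x(n);
    for(size_t i = 0; i < n; i++)
        x[i] = 0.1 * double(i + 1);

    // function value y(1, x)
    CppAD::vector<double> f(n);
    ode_evaluate(x, 0, f);

    // Jacobian of y(1, x) with respect to x, size n * n
    CppAD::vector<double> jac(n * n);
    ode_evaluate(x, 1, jac);

    std::printf("y_0(1) = %g, dy_0/dx_0 = %g\n", f[0], jac[0 * n + 0]);
}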
// The following routine is not yet used or tested.
void cppad_colpack_symmetric(
    CppAD::vector<size_t>&               color         ,
    size_t                               n             ,
    const CppAD::vector<unsigned int*>&  adolc_pattern )
{
    size_t i, k;
    CPPAD_ASSERT_UNKNOWN( adolc_pattern.size() == n );

    // Use adolc sparsity pattern to create corresponding ColPack graph
    ColPack::GraphColoringInterface graph(
        SRC_MEM_ADOLC,
        adolc_pattern.data(),
        n
    );

    // Color the graph with the specified ordering
    // graph.Coloring("SMALLEST_LAST", "STAR") is slower in adolc testing
    graph.Coloring("SMALLEST_LAST", "ACYCLIC_FOR_INDIRECT_RECOVERY");

    // Use coloring information to create seed matrix
    int n_seed_row;
    int n_seed_col;
    double** seed_matrix = graph.GetSeedMatrix(&n_seed_row, &n_seed_col);
    CPPAD_ASSERT_UNKNOWN( size_t(n_seed_col) == n );

    // now return coloring in format required by CppAD
    for(i = 0; i < n; i++)
        color[i] = n;
    for(k = 0; k < size_t(n_seed_row); k++) {
        for(i = 0; i < n; i++) {
            if( seed_matrix[k][i] != 0.0 ) {
                CPPAD_ASSERT_UNKNOWN( color[i] == n );
                color[i] = k;
            }
        }
    }
# ifndef NDEBUG
    for(i = 0; i < n; i++)
        CPPAD_ASSERT_UNKNOWN(color[i] < n || adolc_pattern[i][0] == 0);

    // The coloring above will probably fail this test.
    // Check that no rows with the same color have overlapping entries:
    CppAD::vector<bool> found(n);
    for(k = 0; k < size_t(n_seed_row); k++) {
        size_t j, ell;
        for(j = 0; j < n; j++)
            found[j] = false;
        for(i = 0; i < n; i++) if( color[i] == k ) {
            for(ell = 0; ell < adolc_pattern[i][0]; ell++) {
                j = adolc_pattern[i][1 + ell];
                CPPAD_ASSERT_UNKNOWN( ! found[j] );
                found[j] = true;
            }
        }
    }
# endif
    return;
}
/*!
Change number of sets, set end, and initialize all sets as empty

Any memory currently allocated for this object is freed. If \a n_set
is zero, no new memory is allocated for the sets.
Otherwise, new memory may be allocated for the sets.

\param n_set
is the number of sets in this vector of sets.

\param end
is the maximum element plus one (the minimum element is 0).
*/
void resize(size_t n_set, size_t end)
{
    n_set_ = n_set;
    end_   = end;

    // free all memory connected with data_
    data_.resize(0);
    // now start a new vector with empty sets
    data_.resize(n_set_);

    // value that signifies past end of list
    next_index_ = n_set;
}
void get_check_for_nan(CppAD::vector<Base>& vec, const std::string& file_name)
{
    //
    size_t n         = vec.size();
    size_t char_size = sizeof(Base) * n;
    char* char_ptr   = reinterpret_cast<char*>( vec.data() );
    //
    std::fstream file_in(file_name.c_str(), std::ios::in|std::ios::binary );
    file_in.read(char_ptr, char_size);
    //
    return;
}
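// A round trip sketch for the two helpers above, assuming Base = double and
// that both templates are visible in the calling scope (they are internal
// helpers): put_check_for_nan writes a binary copy of a vector to a temporary
// file and reports the file name; get_check_for_nan reads the same number of
// values back.
# include <string>
# include <cassert>
# include <cppad/cppad.hpp>

void check_for_nan_round_trip(void)
{   CppAD::vector<double> vec(3), copy(3);
    vec[0] = 1.5;  vec[1] = -2.0;  vec[2] = 0.25;

    // write vec to a temporary file and get the file name
    std::string file_name;
    put_check_for_nan(vec, file_name);

    // read the values back into a vector of the same size
    get_check_for_nan(copy, file_name);

    for(size_t i = 0; i < vec.size(); i++)
        assert( copy[i] == vec[i] );
}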
VectorBase ADFun<Base>::SparseHessian(
    const VectorBase& x, const VectorBase& w, const VectorSet& p )
{
    size_t i, j, k;

    size_t n = Domain();
    VectorBase hes(n * n);

    CPPAD_ASSERT_KNOWN(
        size_t(x.size()) == n,
        "SparseHessian: size of x not equal domain size for f."
    );

    typedef typename VectorSet::value_type Set_type;
    typedef typename internal_sparsity<Set_type>::pattern_type Pattern_type;

    // initialize the return value as zero
    Base zero(0);
    for(i = 0; i < n; i++)
        for(j = 0; j < n; j++)
            hes[i * n + j] = zero;

    // arguments to SparseHessianCompute
    Pattern_type          s;
    CppAD::vector<size_t> row;
    CppAD::vector<size_t> col;
    sparse_hessian_work   work;
    bool transpose = false;
    sparsity_user2internal(s, p, n, n, transpose);
    k = 0;
    for(i = 0; i < n; i++) {
        s.begin(i);
        j = s.next_element();
        while( j != s.end() ) {
            row.push_back(i);
            col.push_back(j);
            k++;
            j = s.next_element();
        }
    }
    size_t K = k;
    VectorBase H(K);

    // now we have folded this into the following case
    SparseHessianCompute(x, w, s, row, col, H, work);

    // now set the non-zero return values
    for(k = 0; k < K; k++)
        hes[ row[k] * n + col[k] ] = H[k];

    return hes;
}
/*!
Change number of sets, set end, and initialize all sets as empty

If \c n_set_in is zero,
any memory currently allocated for this object is freed.
Otherwise, new memory may be allocated for the sets (if needed).

\param n_set_in
is the number of sets in this vector of sets.

\param end_in
is the maximum element plus one (the minimum element is 0).
*/
void resize(size_t n_set_in, size_t end_in)
{
    n_set_ = n_set_in;
    end_   = end_in;
    if( n_set_ == 0 ) {
        // free all memory connected with data_
        data_.clear();
        return;
    }
    // now start a new vector with empty sets
    data_.resize(n_set_);

    // value that signifies past end of list
    next_index_ = n_set_;
}
bool link_ode(
    size_t                 size     ,
    size_t                 repeat   ,
    CppAD::vector<double>& x        ,
    CppAD::vector<double>& jacobian )
{
    // -------------------------------------------------------------
    // setup
    size_t n = size;
    assert( x.size() == n );

    size_t m = 0;
    CppAD::vector<double> f(n);

    while(repeat--) {
        // choose next x value
        uniform_01(n, x);

        // evaluate function
        CppAD::ode_evaluate(x, m, f);
    }
    size_t i;
    for(i = 0; i < n; i++)
        jacobian[i] = f[i];
    return true;
}
bool is_tape_point_constant(size_t index)
{
    bool ok_index = (index <= tp_.size() - 2);
    if( !ok_index )
        return false;
    tape_point tp1 = tp_[index];
    tape_point tp2 = tp_[index + 1];
    const addr_t* op_arg;
    op_arg     = tp1.op_arg;
    int numarg = tp2.op_arg - op_arg;

    // Handle the user operator special case
    if( tp1.op == UsrrvOp || tp1.op == UsrrpOp ) {
        // Result of user atomic operation
        bool constant = true;
        size_t i = index;
        while( tp_[i].op != UserOp ) {
            i--;
            constant = constant && constant_tape_point_[i];
            if( tp_[i].op == UsrrvOp || tp_[i].op == UsrrpOp )
                break;
        }
        return constant;
    }

    if( numarg == 0 )
        return false; // E.g. begin or end operators
    bool ans = true;
    for(int i = 0; i < numarg; i++) {
        ans = ans && (
            constant_tape_point_[ var2op_[ op_arg[i] ] ] ||
            ( !isDepArg(&op_arg[i]) )
        );
    }
    return ans;
}
void ode_y(
    const Float&                t ,
    const CppAD::vector<Float>& y ,
    CppAD::vector<Float>&       g )
{
    // y_t = g(t, x, y)
    CPPAD_ASSERT_UNKNOWN( y.size() == x_.size() );

    size_t i;
    size_t n = x_.size();
    for(i = 0; i < n; i++)
        g[i] = x_[i] * y[i];

    // Because y_i(0) = 1, the solution of this equation is
    //  y_i (t) = exp(x_i * t)
    // for i = 0 , ... , n-1.
}
// Given that y_i (0) = x_i,
// the following y_i (t) satisfy the ODE below:
//  y_0 (t) = x[0]
//  y_1 (t) = x[1] + x[0] * t
//  y_2 (t) = x[2] + x[1] * t + x[0] * t^2/2
//  y_3 (t) = x[3] + x[2] * t + x[1] * t^2/2 + x[0] * t^3 / 3!
//  ...
void Ode(
    const Float&                t ,
    const CppAD::vector<Float>& y ,
    CppAD::vector<Float>&       f )
{
    size_t n = y.size();
    f[0] = 0.;
    for(size_t k = 1; k < n; k++)
        f[k] = y[k-1];
}
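// A minimal sketch that integrates the right hand side above with the
// documented CppAD::Runge45 utility, assuming Float = double; the functor
// name OdeFun, n = 4, and the 10 step count are illustrative assumptions.
// Since y_1(t) = x[1] + x[0] * t, the value at t = 1 can be checked against
// the closed form.
# include <cstdio>
# include <cppad/cppad.hpp>

class OdeFun {
public:
    // same right hand side as the Ode routine above, with Float = double
    void Ode(
        const double&                t ,
        const CppAD::vector<double>& y ,
        CppAD::vector<double>&       f )
    {   size_t n = y.size();
        f[0] = 0.;
        for(size_t k = 1; k < n; k++)
            f[k] = y[k-1];
    }
};

void ode_runge45_example(void)
{   size_t n = 4;
    CppAD::vector<double> x(n);
    for(size_t i = 0; i < n; i++)
        x[i] = double(i + 1);

    // integrate y' = f(t, y), y(0) = x, from t = 0 to t = 1 in 10 steps
    OdeFun fun;
    double ti = 0., tf = 1.;
    CppAD::vector<double> yf = CppAD::Runge45(fun, 10, ti, tf, x);

    std::printf("y_1(1) = %g, expected %g\n", yf[1], x[1] + x[0]);
}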
void sparse_hes_fun(
    size_t                       n    ,
    const FloatVector&           x    ,
    const CppAD::vector<size_t>& row  ,
    const CppAD::vector<size_t>& col  ,
    size_t                       p    ,
    FloatVector&                 fp   )
{
    // check numeric type specifications
    CheckNumericType<Float>();

    // check value of p
    CPPAD_ASSERT_KNOWN(
        p < 3,
        "sparse_hes_fun: p > 2"
    );

    size_t i, j, k;
    size_t size = 1;
    for(k = 0; k < p; k++)
        size *= n;
    for(k = 0; k < size; k++)
        fp[k] = Float(0);

    size_t K = row.size();
    Float t;
    Float dt_i;
    Float dt_j;
    for(k = 0; k < K; k++) {
        i    = row[k];
        j    = col[k];
        t    = exp( x[i] * x[j] );
        dt_i = t * x[j];
        dt_j = t * x[i];
        switch(p) {
            case 0:
            fp[0] += t;
            break;

            case 1:
            fp[i] += dt_i;
            fp[j] += dt_j;
            break;

            case 2:
            fp[i * n + i] += dt_i * x[j];
            fp[i * n + j] += t + dt_j * x[j];
            // fp[j * n + i] += t + dt_i * x[i];
            fp[j * n + j] += dt_j * x[i];
            break;
        }
    }
}
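// A minimal usage sketch for the routine above, assuming Float = double,
// FloatVector = CppAD::vector<double>, and that this sparse_hes_fun is the
// one in scope. With the (row, col) choice below the function is
//  f(x) = exp(x[0] * x[1]) + exp(x[2] * x[2]),
// p = 0 returns f(x) and p = 1 returns its gradient. The sizes and values
// are illustrative only.
# include <cstdio>
# include <cppad/cppad.hpp>

void sparse_hes_fun_example(void)
{   size_t n = 3;
    CppAD::vector<double> x(n);
    x[0] = 0.5;  x[1] = 1.0;  x[2] = 2.0;

    // possibly non-zero Hessian entries (0, 1) and (2, 2)
    CppAD::vector<size_t> row(2), col(2);
    row[0] = 0;  col[0] = 1;
    row[1] = 2;  col[1] = 2;

    // function value (fp has size 1 when p == 0)
    CppAD::vector<double> f(1);
    sparse_hes_fun<double>(n, x, row, col, 0, f);

    // gradient (fp has size n when p == 1)
    CppAD::vector<double> grad(n);
    sparse_hes_fun<double>(n, x, row, col, 1, grad);

    std::printf("f = %g, df/dx0 = %g\n", f[0], grad[0]);
}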
bool link_sparse_hessian(
    size_t                 repeat  ,
    CppAD::vector<double>& x       ,
    CppAD::vector<size_t>& i       ,
    CppAD::vector<size_t>& j       ,
    CppAD::vector<double>& hessian )
{
    // -----------------------------------------------------
    // setup
    using CppAD::vector;
    size_t order = 0;        // derivative order corresponding to function
    size_t n     = x.size(); // argument space dimension
    size_t ell   = i.size(); // size of index vectors
    vector<double> y(1);     // function value

    // temporaries
    size_t k;
    vector<double> tmp(2 * ell);

    // choose a value for x
    CppAD::uniform_01(n, x);

    // ------------------------------------------------------
    while(repeat--) {
        // get the next set of indices
        CppAD::uniform_01(2 * ell, tmp);
        for(k = 0; k < ell; k++) {
            i[k] = size_t( n * tmp[k] );
            i[k] = std::min(n-1, i[k]);
            //
            j[k] = size_t( n * tmp[k + ell] );
            j[k] = std::min(n-1, j[k]);
        }

        // computation of the function
        CppAD::sparse_evaluate(x, i, j, order, y);
    }
    hessian[0] = y[0];

    return true;
}
/**
 * Evaluates the Jacobian and the Hessian of the loop model
 *
 * @param individualColoring whether or not there are atomic
 *                           functions in the model
 */
inline void evalLoopModelJacobianHessian(bool individualColoring) {
    using namespace CppAD::extra;
    using CppAD::vector;

    ADFun<CG<Base> >& fun = model->getTape();
    const std::vector<IterEquationGroup<Base> >& eqGroups = model->getEquationsGroups();

    vector<vector<CG<Base> > > vw(1);
    vw[0].resize(w.size());

    vector<CG<Base> > y;

    size_t nEqGroups = equationGroups.size();

    vector<std::set<size_t> > empty;
    vector<std::map<size_t, CG<Base> > > emptyJac;

    for (size_t g = 0; g < nEqGroups; g++) {
        const IterEquationGroup<Base>& group = eqGroups[g];

        vector<std::map<size_t, std::map<size_t, CG<Base> > > > vhess;

        for (size_t i = 0; i < w.size(); i++) {
            vw[0][i] = Base(0);
        }

        for (size_t itI : group.tapeI) {
            vw[0][itI] = w[itI];
        }

        generateLoopForJacHes(fun, x, vw, y,
                              model->getJacobianSparsity(),
                              g == 0 ? evalJacSparsity : empty,
                              g == 0 ? dyiDzk : emptyJac,
                              model->getHessianSparsity(),
                              equationGroups[g].evalHessSparsity,
                              vhess,
                              individualColoring);

        // Hessian
        equationGroups[g].hess = vhess[0];
    }
}
virtual void zeroOrderDependency(const CppAD::vector<bool>& vx,
                                 CppAD::vector<bool>& vy) override {
    using CppAD::vector;

    size_t m = vy.size();
    size_t n = vx.size();

    vector<std::set<size_t> > rt(m);
    for (size_t j = 0; j < m; j++) {
        rt[j].insert(j);
    }
    vector<std::set<size_t> > st(n);

    rev_sparse_jac(m, rt, st);

    for (size_t j = 0; j < n; j++) {
        for (size_t i : st[j]) {
            if (vx[j]) {
                vy[i] = true;
            }
        }
    }
}
/*!
Create a two vector sparsity representation from a vector of maps.

\param sparse
Is a vector of maps representation of sparsity as well as
the index in the two vector representation. To be specific;
\verbatim
    for(i = 0; i < sparse.size(); i++)
    {   for(itr = sparse[i].begin(); itr != sparse[i].end(); itr++)
        {   j = itr->first;
            // (i, j) is a possibly non-zero entry in sparsity pattern
            // k == itr->second, is corresponding index in i_row and j_col
            k++;
        }
    }
\endverbatim

\param n_nz
is the total number of possibly non-zero entries.

\param i_row
The input size and element values for \c i_row do not matter.
On output, it has size \c n_nz and <tt>i_row[k]</tt> contains the row index
corresponding to the \c k-th possibly non-zero entry.

\param j_col
The input size and element values for \c j_col do not matter.
On output, it has size \c n_nz and <tt>j_col[k]</tt> contains the column index
corresponding to the \c k-th possibly non-zero entry.
*/
void sparse_map2vec(
    const CppAD::vector< std::map<size_t, size_t> > sparse,
    size_t&                                         n_nz  ,
    CppAD::vector<size_t>&                          i_row ,
    CppAD::vector<size_t>&                          j_col )
{
    size_t i, j, k, m;

    // number of rows in sparse
    m = sparse.size();

    // iterator for one row
    std::map<size_t, size_t>::const_iterator itr;

    // count the number of possibly non-zeros in sparse
    n_nz = 0;
    for(i = 0; i < m; i++)
        for(itr = sparse[i].begin(); itr != sparse[i].end(); itr++)
            ++n_nz;

    // resize the return vectors to accommodate n_nz entries
    i_row.resize(n_nz);
    j_col.resize(n_nz);

    // set the row and column indices and check assumptions on sparse
    k = 0;
    for(i = 0; i < m; i++) {
        for(itr = sparse[i].begin(); itr != sparse[i].end(); itr++) {
            j = itr->first;
            CPPAD_ASSERT_UNKNOWN( k == itr->second );
            i_row[k] = i;
            j_col[k] = j;
            ++k;
        }
    }
    return;
}
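// A small usage sketch, assuming sparse_map2vec (above) is visible from the
// calling code: build a 2 x 3 pattern with possibly non-zero entries
// (0,1), (0,2), (1,0) and convert it to row/column index vectors. Each map
// value must equal the running index k, as the assertion inside
// sparse_map2vec checks.
# include <map>
# include <cppad/cppad.hpp>

void sparse_map2vec_example(void)
{   CppAD::vector< std::map<size_t, size_t> > sparse(2);
    sparse[0][1] = 0;   // (0, 1) is the k = 0 entry
    sparse[0][2] = 1;   // (0, 2) is the k = 1 entry
    sparse[1][0] = 2;   // (1, 0) is the k = 2 entry

    size_t n_nz;
    CppAD::vector<size_t> i_row, j_col;
    sparse_map2vec(sparse, n_nz, i_row, j_col);
    // afterwards n_nz == 3, i_row == {0, 0, 1}, j_col == {1, 2, 0}
}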
void do_init(vector<double> x)
{
    UserFunctor<double> f;
    n = x.size();
    m = f(x).size();

    UserFunctor<AD<double> >                f0;
    UserFunctor<AD<AD<double> > >           f1;
    UserFunctor<AD<AD<AD<double> > > >      f2;
    UserFunctor<AD<AD<AD<AD<double> > > > > f3;

    vpf.resize(NTHREADS);
    for(int thread = 0; thread < NTHREADS; thread++) {
        vpf[thread].resize(4);
    }

    cpyADfunPointer(tape_symbol(f0, x), 0);
    cpyADfunPointer(tape_symbol(f1, x), 1);
    cpyADfunPointer(tape_symbol(f2, x), 2);
    cpyADfunPointer(tape_symbol(f3, x), 3);
}
void sparse_jac_fun(
    size_t                       m    ,
    size_t                       n    ,
    const FloatVector&           x    ,
    const CppAD::vector<size_t>& row  ,
    const CppAD::vector<size_t>& col  ,
    size_t                       p    ,
    FloatVector&                 fp   )
{
    // check numeric type specifications
    CheckNumericType<Float>();

    // check value of p
    CPPAD_ASSERT_KNOWN(
        p == 0 || p == 1,
        "sparse_jac_fun: p != 0 and p != 1"
    );
    size_t K = row.size();
    CPPAD_ASSERT_KNOWN(
        K >= m,
        "sparse_jac_fun: row.size() < m"
    );
    size_t i, j, k;

    if( p == 0 )
        for(i = 0; i < m; i++)
            fp[i] = Float(0);

    Float t;
    for(k = 0; k < K; k++) {
        i = row[k];
        j = col[k];
        t = exp( x[j] * x[j] / 2.0 );
        switch(p) {
            case 0:
            fp[i] += t;
            break;

            case 1:
            fp[k] = t * x[j];
            break;
        }
    }
}
void sparse_jac_fun(
    size_t                       m    ,
    size_t                       n    ,
    const FloatVector&           x    ,
    const CppAD::vector<size_t>& row  ,
    const CppAD::vector<size_t>& col  ,
    size_t                       p    ,
    FloatVector&                 fp   )
{
    // check numeric type specifications
    CheckNumericType<Float>();

    // check value of p
    CPPAD_ASSERT_KNOWN(
        p < 2,
        "sparse_jac_fun: p > 1"
    );
    size_t i, j, k;
    size_t size = m;
    if( p > 0 )
        size *= n;
    for(k = 0; k < size; k++)
        fp[k] = Float(0);

    size_t K = row.size();
    Float t;
    for(k = 0; k < K; k++) {
        i = row[k];
        j = col[k];
        t = exp( x[j] * x[j] / 2.0 );
        switch(p) {
            case 0:
            fp[i] += t;
            break;

            case 1:
            fp[i * n + j] += t * x[j];
            break;
        }
    }
}
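// A minimal usage sketch for the dense variant directly above, assuming
// Float = double, FloatVector = CppAD::vector<double>, and that this
// sparse_jac_fun is the one in scope. For p = 0 the result has size m;
// for p = 1 it is the m x n Jacobian in row major order. The sizes and
// index values are illustrative only.
# include <cstdio>
# include <cppad/cppad.hpp>

void sparse_jac_fun_example(void)
{   size_t m = 2, n = 3;
    CppAD::vector<double> x(n);
    x[0] = 0.1;  x[1] = 0.2;  x[2] = 0.3;

    // possibly non-zero Jacobian entries (0,0), (0,2), (1,1)
    CppAD::vector<size_t> row(3), col(3);
    row[0] = 0;  col[0] = 0;
    row[1] = 0;  col[1] = 2;
    row[2] = 1;  col[2] = 1;

    // function value, size m
    CppAD::vector<double> f(m);
    sparse_jac_fun<double>(m, n, x, row, col, 0, f);

    // Jacobian, size m * n
    CppAD::vector<double> jac(m * n);
    sparse_jac_fun<double>(m, n, x, row, col, 1, jac);

    std::printf("f_0 = %g, df_0/dx_2 = %g\n", f[0], jac[0 * n + 2]);
}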
bool link_sparse_hessian(
    size_t                        size    ,
    size_t                        repeat  ,
    CppAD::vector<double>&        x       ,
    const CppAD::vector<size_t>&  row     ,
    const CppAD::vector<size_t>&  col     ,
    CppAD::vector<double>&        hessian )
{
    // -----------------------------------------------------
    // setup
    typedef vector<double>              DblVector;
    typedef vector< std::set<size_t> >  SetVector;
    typedef CppAD::AD<double>           ADScalar;
    typedef vector<ADScalar>            ADVector;

    size_t i, j, k;
    size_t order = 0;        // derivative order corresponding to function
    size_t m = 1;            // number of dependent variables
    size_t n = size;         // number of independent variables
    size_t K = row.size();   // number of non-zeros in lower triangle
    ADVector  a_x(n);        // AD domain space vector
    ADVector  a_y(m);        // AD range space vector
    DblVector w(m);          // double range space vector
    DblVector hes(K);        // non-zeros in lower triangle
    CppAD::ADFun<double> f;  // AD function object

    // weights for hessian calculation (only one component of f)
    w[0] = 1.;

    // use the unspecified fact that size is non-decreasing between calls
    static size_t previous_size = 0;
    bool print = (repeat > 1) & (previous_size != size);
    previous_size = size;

    // declare sparsity pattern
# if USE_SET_SPARSITY
    SetVector sparsity(n);
# else
    typedef vector<bool> BoolVector;
    BoolVector sparsity(n * n);
# endif
    // initialize all entries as zero
    for(i = 0; i < n; i++) {
        for(j = 0; j < n; j++)
            hessian[ i * n + j] = 0.;
    }
    // ------------------------------------------------------
    extern bool global_retape;
    if( global_retape ) while(repeat--) {
        // choose a value for x
        CppAD::uniform_01(n, x);
        for(j = 0; j < n; j++)
            a_x[j] = x[j];

        // declare independent variables
        Independent(a_x);

        // AD computation of f(x)
        CppAD::sparse_hes_fun<ADScalar>(n, a_x, row, col, order, a_y);

        // create function object f : X -> Y
        f.Dependent(a_x, a_y);

        extern bool global_optimize;
        if( global_optimize ) {
            print_optimize(f, print, "cppad_sparse_hessian_optimize", size);
            print = false;
        }

        // calculate the Hessian sparsity pattern for this function
        calc_sparsity(sparsity, f);

        // structure that holds some of the work done by SparseHessian
        CppAD::sparse_hessian_work work;

        // calculate the Hessian at this x
        f.SparseHessian(x, w, sparsity, row, col, hes, work);

        for(k = 0; k < K; k++) {
            hessian[ row[k] * n + col[k] ] = hes[k];
            hessian[ col[k] * n + row[k] ] = hes[k];
        }
    }
    else {
        // choose a value for x
        CppAD::uniform_01(n, x);
        for(j = 0; j < n; j++)
            a_x[j] = x[j];

        // declare independent variables
        Independent(a_x);

        // AD computation of f(x)
        CppAD::sparse_hes_fun<ADScalar>(n, a_x, row, col, order, a_y);

        // create function object f : X -> Y
        f.Dependent(a_x, a_y);

        extern bool global_optimize;
        if( global_optimize ) {
            print_optimize(f, print, "cppad_sparse_hessian_optimize", size);
            print = false;
        }

        // calculate the Hessian sparsity pattern for this function
        calc_sparsity(sparsity, f);

        // declare structure that holds some of the work done by SparseHessian
        CppAD::sparse_hessian_work work;

        while(repeat--) {
            // choose a value for x
            CppAD::uniform_01(n, x);

            // calculate the Hessian at this x
            f.SparseHessian(x, w, sparsity, row, col, hes, work);

            for(k = 0; k < K; k++) {
                hessian[ row[k] * n + col[k] ] = hes[k];
                hessian[ col[k] * n + row[k] ] = hes[k];
            }
        }
    }
    return true;
}
bool link_sparse_jacobian(
    size_t                        size     ,
    size_t                        repeat   ,
    size_t                        m        ,
    const CppAD::vector<size_t>&  row      ,
    const CppAD::vector<size_t>&  col      ,
    CppAD::vector<double>&        x_return ,
    CppAD::vector<double>&        jacobian ,
    size_t&                       n_sweep  )
{
    if( global_atomic || (! global_colpack) )
        return false;
    if( global_memory || global_optimize )
        return false;
    // -----------------------------------------------------
    // setup
    typedef unsigned int* SizeVector;
    typedef double*       DblVector;
    typedef adouble       ADScalar;
    typedef ADScalar*     ADVector;

    size_t i, j, k;   // temporary indices
    size_t n = size;  // number of independent variables
    size_t order = 0; // derivative order corresponding to function

    // set up for thread_alloc memory allocator (fast and checks for leaks)
    using CppAD::thread_alloc; // the allocator
    size_t capacity;           // capacity of an allocation

    // tape identifier
    int tag = 0;
    // AD domain space vector
    ADVector a_x = thread_alloc::create_array<ADScalar>(n, capacity);
    // AD range space vector
    ADVector a_y = thread_alloc::create_array<ADScalar>(m, capacity);
    // argument value in double
    DblVector x = thread_alloc::create_array<double>(n, capacity);
    // function value in double
    DblVector y = thread_alloc::create_array<double>(m, capacity);

    // options that control sparse_jac
    int options[4];
    extern bool global_boolsparsity;
    if( global_boolsparsity )
        options[0] = 1; // sparsity by propagation of bit pattern
    else
        options[0] = 0; // sparsity pattern by index domains
    options[1] = 0; // (0 = safe mode, 1 = tight mode)
    options[2] = 0; // see changing to -1 and back to 0 below
    options[3] = 0; // (0 = column compression, 1 = row compression)

    // structure that holds some of the work done by sparse_jac
    int        nnz;                 // number of non-zero values
    SizeVector rind   = CPPAD_NULL; // row indices
    SizeVector cind   = CPPAD_NULL; // column indices
    DblVector  values = CPPAD_NULL; // Jacobian values

    // choose a value for x
    CppAD::uniform_01(n, x);

    // declare independent variables
    int keep = 0; // keep forward mode results
    trace_on(tag, keep);
    for(j = 0; j < n; j++)
        a_x[j] <<= x[j];

    // AD computation of f (x)
    CppAD::sparse_jac_fun<ADScalar>(m, n, a_x, row, col, order, a_y);

    // create function object f : x -> y
    for(i = 0; i < m; i++)
        a_y[i] >>= y[i];
    trace_off();

    // Retrieve n_sweep using undocumented feature of sparsedrivers.cpp
    int same_pattern = 0;
    options[2]       = -1;
    n_sweep = sparse_jac(tag, int(m), int(n),
        same_pattern, x, &nnz, &rind, &cind, &values, options
    );
    options[2]       = 0;
    // ----------------------------------------------------------------------
    if( ! global_onetape ) while(repeat--) {
        // choose a value for x
        CppAD::uniform_01(n, x);

        // declare independent variables
        trace_on(tag, keep);
        for(j = 0; j < n; j++)
            a_x[j] <<= x[j];

        // AD computation of f (x)
        CppAD::sparse_jac_fun<ADScalar>(m, n, a_x, row, col, order, a_y);

        // create function object f : x -> y
        for(i = 0; i < m; i++)
            a_y[i] >>= y[i];
        trace_off();

        // is this a repeat call with the same sparsity pattern
        same_pattern = 0;

        // calculate the jacobian at this x
        rind   = CPPAD_NULL;
        cind   = CPPAD_NULL;
        values = CPPAD_NULL;
        sparse_jac(tag, int(m), int(n),
            same_pattern, x, &nnz, &rind, &cind, &values, options
        );
        // only needed last time through loop
        if( repeat == 0 ) {
            size_t K = row.size();
            for(int ell = 0; ell < nnz; ell++) {
                i = size_t(rind[ell]);
                j = size_t(cind[ell]);
                for(k = 0; k < K; k++) {
                    if( row[k]==i && col[k]==j )
                        jacobian[k] = values[ell];
                }
            }
        }

        // free raw memory allocated by sparse_jac
        free(rind);
        free(cind);
        free(values);
    }
    else {
        while(repeat--)
bool link_sparse_hessian(
    size_t                        size     ,
    size_t                        repeat   ,
    const CppAD::vector<size_t>&  row      ,
    const CppAD::vector<size_t>&  col      ,
    CppAD::vector<double>&        x_return ,
    CppAD::vector<double>&        hessian  ,
    size_t&                       n_sweep  )
{
    if( global_atomic || (! global_colpack) )
        return false;
    if( global_memory || global_optimize || global_boolsparsity )
        return false;
    // -----------------------------------------------------
    // setup
    typedef unsigned int* SizeVector;
    typedef double*       DblVector;
    typedef adouble       ADScalar;
    typedef ADScalar*     ADVector;

    size_t i, j, k;   // temporary indices
    size_t order = 0; // derivative order corresponding to function
    size_t m = 1;     // number of dependent variables
    size_t n = size;  // number of independent variables

    // setup for thread_alloc memory allocator (fast and checks for leaks)
    using CppAD::thread_alloc; // the allocator
    size_t capacity;           // capacity of an allocation

    // tape identifier
    int tag = 0;
    // AD domain space vector
    ADVector a_x = thread_alloc::create_array<ADScalar>(n, capacity);
    // AD range space vector
    ADVector a_y = thread_alloc::create_array<ADScalar>(m, capacity);
    // double argument value
    DblVector x = thread_alloc::create_array<double>(n, capacity);
    // double function value
    double f;

    // options that control sparse_hess
    int options[2];
    options[0] = 0; // safe mode
    options[1] = 0; // indirect recovery

    // structure that holds some of the work done by sparse_hess
    int        nnz;                 // number of non-zero values
    SizeVector rind   = CPPAD_NULL; // row indices
    SizeVector cind   = CPPAD_NULL; // column indices
    DblVector  values = CPPAD_NULL; // Hessian values

    // ----------------------------------------------------------------------
    if( ! global_onetape ) while(repeat--) {
        // choose a value for x
        CppAD::uniform_01(n, x);

        // declare independent variables
        int keep = 0; // keep forward mode results
        trace_on(tag, keep);
        for(j = 0; j < n; j++)
            a_x[j] <<= x[j];

        // AD computation of f (x)
        CppAD::sparse_hes_fun<ADScalar>(n, a_x, row, col, order, a_y);

        // create function object f : x -> y
        a_y[0] >>= f;
        trace_off();

        // is this a repeat call with the same sparsity pattern
        int same_pattern = 0;

        // calculate the hessian at this x
        rind   = CPPAD_NULL;
        cind   = CPPAD_NULL;
        values = CPPAD_NULL;
        sparse_hess(tag, int(n),
            same_pattern, x, &nnz, &rind, &cind, &values, options
        );
        // only needed last time through loop
        if( repeat == 0 ) {
            size_t K = row.size();
            for(int ell = 0; ell < nnz; ell++) {
                i = size_t(rind[ell]);
                j = size_t(cind[ell]);
                for(k = 0; k < K; k++) {
                    if( (row[k]==i && col[k]==j) || (row[k]==j && col[k]==i) )
                        hessian[k] = values[ell];
                }
            }
        }

        // free raw memory allocated by sparse_hess
        free(rind);
        free(cind);
        free(values);
    }
    else {
        // choose a value for x
void color_general_cppad(
    const VectorSet&        pattern ,
    const VectorSize&       row     ,
    const VectorSize&       col     ,
    CppAD::vector<size_t>&  color   )
{
    size_t i, j, k, ell, r;

    size_t K = row.size();
    size_t m = pattern.n_set();
    size_t n = pattern.end();

    CPPAD_ASSERT_UNKNOWN( size_t( col.size() )   == K );
    CPPAD_ASSERT_UNKNOWN( size_t( color.size() ) == m );

    // We define the set of rows, columns, and pairs that appear
    // by the set ( row[k], col[k] ) for k = 0, ... , K-1.

    // initialize rows that appear
    CppAD::vector<bool> row_appear(m);
    for(i = 0; i < m; i++)
        row_appear[i] = false;

    // rows and columns that appear
    VectorSet c2r_appear, r2c_appear;
    c2r_appear.resize(n, m);
    r2c_appear.resize(m, n);
    for(k = 0; k < K; k++) {
        CPPAD_ASSERT_UNKNOWN( pattern.is_element(row[k], col[k]) );
        row_appear[ row[k] ] = true;
        c2r_appear.add_element(col[k], row[k]);
        r2c_appear.add_element(row[k], col[k]);
    }

    // for each column, which rows are non-zero and do not appear
    VectorSet not_appear;
    not_appear.resize(n, m);
    for(i = 0; i < m; i++) {
        typename VectorSet::const_iterator pattern_itr(pattern, i);
        j = *pattern_itr;
        while( j != pattern.end() ) {
            if( ! c2r_appear.is_element(j , i) )
                not_appear.add_element(j, i);
            j = *(++pattern_itr);
        }
    }

    // initial coloring
    color.resize(m);
    ell = 0;
    for(i = 0; i < m; i++) {
        if( row_appear[i] )
            color[i] = ell++;
        else
            color[i] = m;
    }
    /*
    See GreedyPartialD2Coloring Algorithm Section 3.6.2 of
        Graph Coloring in Optimization Revisited by
        Assefaw Gebremedhin, Fredrik Manne, Alex Pothen

    The algorithm above was modified (by Brad Bell) to take advantage of the
    fact that only the entries (subset of the sparsity pattern) specified by
    row and col need to be computed.
    */
    CppAD::vector<bool> forbidden(m);
    for(i = 1; i < m; i++) // for each row that appears
    if( color[i] < m )
    {
        // initialize all colors as ok for this row
        // (value of forbidden for ell > initial color[i] does not matter)
        for(ell = 0; ell <= color[i]; ell++)
            forbidden[ell] = false;

        // -----------------------------------------------------
        // Forbid colors for which this row would destroy results:
        //
        // for each column that is non-zero for this row
        typename VectorSet::const_iterator pattern_itr(pattern, i);
        j = *pattern_itr;
        while( j != pattern.end() ) {
            // for each row that appears with this column
            typename VectorSet::const_iterator c2r_itr(c2r_appear, j);
            r = *c2r_itr;
            while( r != c2r_appear.end() ) {
                // if this is not the same row, forbid its color
                if( (r < i) & (color[r] < m) )
                    forbidden[ color[r] ] = true;
                r = *(++c2r_itr);
            }
            j = *(++pattern_itr);
        }

        // -----------------------------------------------------
        // Forbid colors that destroy results needed for this row.
        //
        // for each column that appears with this row
        typename VectorSet::const_iterator r2c_itr(r2c_appear, i);
        j = *r2c_itr;
        while( j != r2c_appear.end() ) {
            // For each row that is non-zero for this column
            // (the appear rows have already been checked above).
            typename VectorSet::const_iterator not_itr(not_appear, j);
            r = *not_itr;
            while( r != not_appear.end() ) {
                // if this is not the same row, forbid its color
                if( (r < i) & (color[r] < m) )
                    forbidden[ color[r] ] = true;
                r = *(++not_itr);
            }
            j = *(++r2c_itr);
        }

        // pick the color with smallest index
        ell = 0;
        while( forbidden[ell] ) {
            ell++;
            CPPAD_ASSERT_UNKNOWN( ell <= color[i] );
        }
        color[i] = ell;
    }
    return;
}
/// inform CppAD that this information needs to be recomputed
void clear(void)
{
    order.clear();
    color.clear();
}
// ----------------------------------------------------------------------
void cppad_colpack_general(
    CppAD::vector<size_t>&               color         ,
    size_t                               m             ,
    size_t                               n             ,
    const CppAD::vector<unsigned int*>&  adolc_pattern )
{
    size_t i, k;
    CPPAD_ASSERT_UNKNOWN( adolc_pattern.size() == m );
    CPPAD_ASSERT_UNKNOWN( color.size() == m );

    // Use adolc sparsity pattern to create corresponding bipartite graph
    ColPack::BipartiteGraphPartialColoringInterface graph(
        SRC_MEM_ADOLC,
        adolc_pattern.data(),
        m,
        n
    );

    // row ordered Partial-Distance-Two-Coloring of the bipartite graph
    graph.PartialDistanceTwoColoring(
        "SMALLEST_LAST", "ROW_PARTIAL_DISTANCE_TWO"
    );

    // Use coloring information to create seed matrix
    int n_seed_row;
    int n_seed_col;
    double** seed_matrix = graph.GetSeedMatrix(&n_seed_row, &n_seed_col);
    CPPAD_ASSERT_UNKNOWN( size_t(n_seed_col) == m );

    // now return coloring in format required by CppAD
    for(i = 0; i < m; i++)
        color[i] = m;
    for(k = 0; k < size_t(n_seed_row); k++) {
        for(i = 0; i < m; i++) {
            if( seed_matrix[k][i] != 0.0 ) {
                // check that no row appears twice in the coloring
                CPPAD_ASSERT_UNKNOWN( color[i] == m );
                color[i] = k;
            }
        }
    }
# ifndef NDEBUG
    // check that all non-zero rows appear in the coloring
    for(i = 0; i < m; i++)
        CPPAD_ASSERT_UNKNOWN(color[i] < m || adolc_pattern[i][0] == 0);

    // check that no rows with the same color have overlapping entries
    CppAD::vector<bool> found(n);
    for(k = 0; k < size_t(n_seed_row); k++) {
        size_t j, ell;
        for(j = 0; j < n; j++)
            found[j] = false;
        for(i = 0; i < m; i++) if( color[i] == k ) {
            for(ell = 0; ell < adolc_pattern[i][0]; ell++) {
                j = adolc_pattern[i][1 + ell];
                CPPAD_ASSERT_UNKNOWN( ! found[j] );
                found[j] = true;
            }
        }
    }
# endif
    return;
}
void ForSparseJacSet(
    bool                        transpose        ,
    size_t                      q                ,
    const VectorSet&            r                ,
    VectorSet&                  s                ,
    size_t                      total_num_var    ,
    CppAD::vector<size_t>&      dep_taddr        ,
    CppAD::vector<size_t>&      ind_taddr        ,
    CppAD::player<Base>&        play             ,
    CPPAD_INTERNAL_SPARSE_SET&  for_jac_sparsity )
{
    // temporary indices
    size_t i, j;
    std::set<size_t>::const_iterator itr;

    // range and domain dimensions for F
    size_t m = dep_taddr.size();
    size_t n = ind_taddr.size();

    CPPAD_ASSERT_KNOWN(
        q > 0,
        "ForSparseJac: q is not greater than zero"
    );
    CPPAD_ASSERT_KNOWN(
        size_t(r.size()) == n || transpose,
        "ForSparseJac: size of r is not equal to n and transpose is false."
    );
    CPPAD_ASSERT_KNOWN(
        size_t(r.size()) == q || ! transpose,
        "ForSparseJac: size of r is not equal to q and transpose is true."
    );

    // allocate memory for the requested sparsity calculation
    for_jac_sparsity.resize(total_num_var, q);

    // set values corresponding to independent variables
    if( transpose ) {
        for(i = 0; i < q; i++) {
            // add the elements that are present
            itr = r[i].begin();
            while( itr != r[i].end() ) {
                j = *itr++;
                CPPAD_ASSERT_KNOWN(
                    j < n,
                    "ForSparseJac: transpose is true and element of the set\n"
                    "r[j] has value greater than or equal n."
                );
                CPPAD_ASSERT_UNKNOWN( ind_taddr[j] < total_num_var );
                // operator for j-th independent variable
                CPPAD_ASSERT_UNKNOWN( play.GetOp( ind_taddr[j] ) == InvOp );
                for_jac_sparsity.add_element( ind_taddr[j], i);
            }
        }
    }
    else {
        for(i = 0; i < n; i++) {
            CPPAD_ASSERT_UNKNOWN( ind_taddr[i] < total_num_var );
            // ind_taddr[i] is operator taddr for i-th independent variable
            CPPAD_ASSERT_UNKNOWN( play.GetOp( ind_taddr[i] ) == InvOp );

            // add the elements that are present
            itr = r[i].begin();
            while( itr != r[i].end() ) {
                j = *itr++;
                CPPAD_ASSERT_KNOWN(
                    j < q,
                    "ForSparseJac: an element of the set r[i] "
                    "has value greater than or equal q."
                );
                for_jac_sparsity.add_element( ind_taddr[i], j);
            }
        }
    }

    // evaluate the sparsity patterns
    ForJacSweep(
        n,
        total_num_var,
        &play,
        for_jac_sparsity
    );

    // return values corresponding to dependent variables
    CPPAD_ASSERT_UNKNOWN( size_t(s.size()) == m || transpose );
    CPPAD_ASSERT_UNKNOWN( size_t(s.size()) == q || ! transpose );
    for(i = 0; i < m; i++) {
        CPPAD_ASSERT_UNKNOWN( dep_taddr[i] < total_num_var );

        // extract results from for_jac_sparsity
        // and add corresponding elements to sets in s
        CPPAD_ASSERT_UNKNOWN( for_jac_sparsity.end() == q );
        for_jac_sparsity.begin( dep_taddr[i] );
        j = for_jac_sparsity.next_element();
        while( j < q ) {
            if( transpose )
                s[j].insert(i);
            else
                s[i].insert(j);
            j = for_jac_sparsity.next_element();
        }
    }
}
void ForSparseJacBool(
    bool                    transpose        ,
    size_t                  q                ,
    const VectorSet&        r                ,
    VectorSet&              s                ,
    size_t                  total_num_var    ,
    CppAD::vector<size_t>&  dep_taddr        ,
    CppAD::vector<size_t>&  ind_taddr        ,
    CppAD::player<Base>&    play             ,
    sparse_pack&            for_jac_sparsity )
{
    // temporary indices
    size_t i, j;

    // range and domain dimensions for F
    size_t m = dep_taddr.size();
    size_t n = ind_taddr.size();

    CPPAD_ASSERT_KNOWN(
        q > 0,
        "ForSparseJac: q is not greater than zero"
    );
    CPPAD_ASSERT_KNOWN(
        size_t(r.size()) == n * q,
        "ForSparseJac: size of r is not equal to\n"
        "q times domain dimension for ADFun object."
    );

    // allocate memory for the requested sparsity calculation result
    for_jac_sparsity.resize(total_num_var, q);

    // set values corresponding to independent variables
    for(i = 0; i < n; i++) {
        CPPAD_ASSERT_UNKNOWN( ind_taddr[i] < total_num_var );
        // ind_taddr[i] is operator taddr for i-th independent variable
        CPPAD_ASSERT_UNKNOWN( play.GetOp( ind_taddr[i] ) == InvOp );

        // set bits that are true
        if( transpose ) {
            for(j = 0; j < q; j++) if( r[ j * n + i ] )
                for_jac_sparsity.add_element( ind_taddr[i], j);
        }
        else {
            for(j = 0; j < q; j++) if( r[ i * q + j ] )
                for_jac_sparsity.add_element( ind_taddr[i], j);
        }
    }

    // evaluate the sparsity patterns
    ForJacSweep(
        n,
        total_num_var,
        &play,
        for_jac_sparsity
    );

    // return values corresponding to dependent variables
    CPPAD_ASSERT_UNKNOWN( size_t(s.size()) == m * q );
    for(i = 0; i < m; i++) {
        CPPAD_ASSERT_UNKNOWN( dep_taddr[i] < total_num_var );

        // extract the result from for_jac_sparsity
        if( transpose ) {
            for(j = 0; j < q; j++)
                s[ j * m + i ] = false;
        }
        else {
            for(j = 0; j < q; j++)
                s[ i * q + j ] = false;
        }
        CPPAD_ASSERT_UNKNOWN( for_jac_sparsity.end() == q );
        for_jac_sparsity.begin( dep_taddr[i] );
        j = for_jac_sparsity.next_element();
        while( j < q ) {
            if( transpose )
                s[j * m + i] = true;
            else
                s[i * q + j] = true;
            j = for_jac_sparsity.next_element();
        }
    }
}
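// For context, a sketch of the user level call that these internal routines
// support; this uses the documented ADFun<Base>::ForSparseJac API (not the
// internal entry points above) to propagate an identity pattern forward and
// obtain the Jacobian sparsity of a small function.
# include <cassert>
# include <cppad/cppad.hpp>

void for_sparse_jac_example(void)
{   using CppAD::AD;
    size_t n = 2;
    CppAD::vector< AD<double> > a_x(n), a_y(1);
    a_x[0] = 1.0;  a_x[1] = 2.0;
    CppAD::Independent(a_x);
    a_y[0] = a_x[0] * a_x[1];
    CppAD::ADFun<double> f(a_x, a_y);

    // r is the n x q identity pattern with q = n, stored row major
    size_t q = n;
    CppAD::vector<bool> r(n * q);
    for(size_t i = 0; i < n; i++)
        for(size_t j = 0; j < q; j++)
            r[i * q + j] = (i == j);

    // s is the m x q Jacobian sparsity pattern (here m = 1)
    CppAD::vector<bool> s = f.ForSparseJac(q, r);
    assert( s[0] && s[1] ); // y depends on both x[0] and x[1]
}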