void get_internal_sparsity(
    bool                          transpose         ,
    const pod_vector<size_t>&     internal_index    ,
    const InternalSparsity&       internal_pattern  ,
    sparse_rc<SizeVector>&        pattern_out       )
{   typedef typename InternalSparsity::const_iterator iterator;
    // number of variables
    size_t nr = internal_index.size();
    // column size of internal sparsity pattern
    size_t nc = internal_pattern.end();
    // determine nnz, the number of possibly non-zero index pairs
    size_t nnz = 0;
    for(size_t i = 0; i < nr; i++)
    {   CPPAD_ASSERT_UNKNOWN( internal_index[i] < internal_pattern.n_set() );
        iterator itr(internal_pattern, internal_index[i]);
        size_t j = *itr;
        while( j < nc )
        {   ++nnz;
            j = *(++itr);
        }
    }
    // transposed
    if( transpose )
    {   pattern_out.resize(nc, nr, nnz);
        //
        size_t k = 0;
        for(size_t i = 0; i < nr; i++)
        {   iterator itr(internal_pattern, internal_index[i]);
            size_t j = *itr;
            while( j < nc )
            {   pattern_out.set(k++, j, i);
                j = *(++itr);
            }
        }
        return;
    }
    // not transposed
    pattern_out.resize(nr, nc, nnz);
    //
    size_t k = 0;
    for(size_t i = 0; i < nr; i++)
    {   iterator itr(internal_pattern, internal_index[i]);
        size_t j = *itr;
        while( j < nc )
        {   pattern_out.set(k++, i, j);
            j = *(++itr);
        }
    }
    return;
}
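// ---------------------------------------------------------------------------
// A minimal standalone sketch (not part of the library source) of the
// conversion that get_internal_sparsity performs: a vector-of-sets sparsity
// representation is flattened into (row, col) index pairs.  To keep the
// sketch self-contained it uses std::set instead of CppAD's internal set
// classes and a plain std::vector of triplets instead of sparse_rc; the
// names SetVector, Triplet, and flatten_pattern are made up for this
// illustration.
# include <cstddef>
# include <set>
# include <vector>

namespace {

typedef std::vector< std::set<size_t> > SetVector;
struct Triplet { size_t row; size_t col; };

std::vector<Triplet> flatten_pattern(
    const SetVector& internal_pattern , bool transpose )
{   // first pass: count the possibly non-zero index pairs (nnz above)
    size_t nnz = 0;
    for(size_t i = 0; i < internal_pattern.size(); ++i)
        nnz += internal_pattern[i].size();
    //
    // second pass: fill the index pairs, swapping row and column when
    // the transposed pattern is requested
    std::vector<Triplet> pattern_out;
    pattern_out.reserve(nnz);
    for(size_t i = 0; i < internal_pattern.size(); ++i)
    {   std::set<size_t>::const_iterator itr;
        for(itr = internal_pattern[i].begin();
            itr != internal_pattern[i].end(); ++itr)
        {   Triplet t;
            t.row = transpose ? *itr : i;
            t.col = transpose ? i    : *itr;
            pattern_out.push_back(t);
        }
    }
    return pattern_out;
}

} // end empty namespace
// ---------------------------------------------------------------------------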
void ADFun<Base>::for_hes_sparsity(
    const BoolVector&            select_domain    ,
    const BoolVector&            select_range     ,
    bool                         internal_bool    ,
    sparse_rc<SizeVector>&       pattern_out      )
{   size_t n = Domain();
    size_t m = Range();
    //
    CPPAD_ASSERT_KNOWN(
        size_t( select_domain.size() ) == n,
        "for_hes_sparsity: size of select_domain is not equal to "
        "number of independent variables"
    );
    CPPAD_ASSERT_KNOWN(
        size_t( select_range.size() ) == m,
        "for_hes_sparsity: size of select_range is not equal to "
        "number of dependent variables"
    );
    // do not need transpose or dependency
    bool transpose  = false;
    bool dependency = false;
    //
    sparse_rc<SizeVector> pattern_tmp;
    if( internal_bool )
    {   // forward Jacobian sparsity pattern for independent variables
        local::sparse_pack internal_for_jac;
        internal_for_jac.resize(num_var_tape_, n + 1 );
        for(size_t j = 0; j < n; j++) if( select_domain[j] )
        {   CPPAD_ASSERT_UNKNOWN( ind_taddr_[j] < n + 1 );
            internal_for_jac.add_element( ind_taddr_[j] , ind_taddr_[j] );
        }
        // forward Jacobian sparsity for all variables on tape
        local::ForJacSweep(
            dependency, n, num_var_tape_, &play_, internal_for_jac
        );
        // reverse Jacobian sparsity pattern for select_range
        local::sparse_pack internal_rev_jac;
        internal_rev_jac.resize(num_var_tape_, 1);
        for(size_t i = 0; i < m; i++) if( select_range[i] )
        {   CPPAD_ASSERT_UNKNOWN( dep_taddr_[i] < num_var_tape_ );
            internal_rev_jac.add_element( dep_taddr_[i] , 0 );
        }
        // reverse Jacobian sparsity for all variables on tape
        local::RevJacSweep(
            dependency, n, num_var_tape_, &play_, internal_rev_jac
        );
        // internal vector of sets that will hold Hessian
        local::sparse_pack internal_for_hes;
        internal_for_hes.resize(n + 1, n + 1);
        //
        // compute forward Hessian sparsity pattern
        local::ForHesSweep(
            n, num_var_tape_, &play_, internal_for_jac, internal_rev_jac,
            internal_for_hes
        );
        //
        // put the result in pattern_tmp
        get_internal_sparsity(
            transpose, ind_taddr_, internal_for_hes, pattern_tmp
        );
    }
    else
    {   // forward Jacobian sparsity pattern for independent variables
        // (corresponds to D)
        local::sparse_list internal_for_jac;
        internal_for_jac.resize(num_var_tape_, n + 1 );
        for(size_t j = 0; j < n; j++) if( select_domain[j] )
        {   CPPAD_ASSERT_UNKNOWN( ind_taddr_[j] < n + 1 );
            internal_for_jac.add_element( ind_taddr_[j] , ind_taddr_[j] );
        }
        // forward Jacobian sparsity for all variables on tape
        local::ForJacSweep(
            dependency, n, num_var_tape_, &play_, internal_for_jac
        );
        // reverse Jacobian sparsity pattern for select_range
        // (corresponds to s)
        local::sparse_list internal_rev_jac;
        internal_rev_jac.resize(num_var_tape_, 1);
        for(size_t i = 0; i < m; i++) if( select_range[i] )
        {   CPPAD_ASSERT_UNKNOWN( dep_taddr_[i] < num_var_tape_ );
            internal_rev_jac.add_element( dep_taddr_[i] , 0 );
        }
        // reverse Jacobian sparsity for all variables on tape
        local::RevJacSweep(
            dependency, n, num_var_tape_, &play_, internal_rev_jac
        );
        // internal vector of sets that will hold Hessian
        local::sparse_list internal_for_hes;
        internal_for_hes.resize(n + 1, n + 1);
        //
        // compute forward Hessian sparsity pattern
        local::ForHesSweep(
            n, num_var_tape_, &play_, internal_for_jac, internal_rev_jac,
            internal_for_hes
        );
        //
        // put the result in pattern_tmp
        get_internal_sparsity(
            transpose, ind_taddr_, internal_for_hes, pattern_tmp
        );
    }
    // subtract 1 from all column values
    CPPAD_ASSERT_UNKNOWN( pattern_tmp.nr() == n );
    CPPAD_ASSERT_UNKNOWN( pattern_tmp.nc() == n + 1 );
    const SizeVector& row( pattern_tmp.row() );
    const SizeVector& col( pattern_tmp.col() );
    size_t nr  = n;
    size_t nc  = n;
    size_t nnz = pattern_tmp.nnz();
    pattern_out.resize(nr, nc, nnz);
    for(size_t k = 0; k < nnz; k++)
    {   CPPAD_ASSERT_UNKNOWN( 0 < col[k] );
        pattern_out.set(k, row[k], col[k] - 1);
    }
    return;
}
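// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the library source), assuming the
// public for_hes_sparsity interface defined above: the forward Hessian
// sparsity pattern of f(x) = x0 * x1 + sin(x2).  The example function, the
// name example_for_hes_sparsity, and the choice of vector types are made up
// for this illustration.
# include <cppad/cppad.hpp>

bool example_for_hes_sparsity(void)
{   using CppAD::AD;
    typedef CPPAD_TESTVECTOR(size_t) SizeVector;
    //
    // record f(x) = x0 * x1 + sin(x2)
    size_t n = 3;
    CPPAD_TESTVECTOR( AD<double> ) ax(n);
    for(size_t j = 0; j < n; ++j)
        ax[j] = double(j + 1);
    CppAD::Independent(ax);
    CPPAD_TESTVECTOR( AD<double> ) ay(1);
    ay[0] = ax[0] * ax[1] + sin( ax[2] );
    CppAD::ADFun<double> f(ax, ay);
    //
    // select the entire domain and range
    CPPAD_TESTVECTOR(bool) select_domain(n), select_range(1);
    for(size_t j = 0; j < n; ++j)
        select_domain[j] = true;
    select_range[0] = true;
    //
    // internal_bool = true chooses the sparse_pack (vector of bool) branch
    bool internal_bool = true;
    CppAD::sparse_rc<SizeVector> pattern_out;
    f.for_hes_sparsity(select_domain, select_range, internal_bool, pattern_out);
    //
    // the Hessian non-zeros for this f are (0,1), (1,0), and (2,2)
    return pattern_out.nr() == n && pattern_out.nc() == n
        && pattern_out.nnz() == 3;
}
// ---------------------------------------------------------------------------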
void ADFun<Base,RecBase>::subgraph_sparsity(
    const BoolVector&            select_domain    ,
    const BoolVector&            select_range     ,
    bool                         transpose        ,
    sparse_rc<SizeVector>&       pattern_out      )
{   // compute the sparsity pattern in row, col
    local::pod_vector<size_t> row;
    local::pod_vector<size_t> col;
    //
    // dispatch on the address type used by the operation sequence player
    switch( play_.address_type() )
    {
        case local::play::unsigned_short_enum:
        local::subgraph::subgraph_sparsity<unsigned short>(
            &play_,
            subgraph_info_,
            dep_taddr_,
            select_domain,
            select_range,
            row,
            col
        );
        break;

        case local::play::unsigned_int_enum:
        local::subgraph::subgraph_sparsity<unsigned int>(
            &play_,
            subgraph_info_,
            dep_taddr_,
            select_domain,
            select_range,
            row,
            col
        );
        break;

        case local::play::size_t_enum:
        local::subgraph::subgraph_sparsity<size_t>(
            &play_,
            subgraph_info_,
            dep_taddr_,
            select_domain,
            select_range,
            row,
            col
        );
        break;

        default:
        CPPAD_ASSERT_UNKNOWN(false);
    }
    CPPAD_ASSERT_UNKNOWN( row.size() == col.size() );
    // return the sparsity pattern
    size_t nr  = dep_taddr_.size();
    size_t nc  = ind_taddr_.size();
    size_t nnz = row.size();
    if( transpose )
    {   pattern_out.resize(nc, nr, nnz);
        for(size_t k = 0; k < nnz; k++)
            pattern_out.set(k, col[k], row[k]);
    }
    else
    {   pattern_out.resize(nr, nc, nnz);
        for(size_t k = 0; k < nnz; k++)
            pattern_out.set(k, row[k], col[k]);
    }
    return;
}
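// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the library source), assuming the
// public subgraph_sparsity interface defined above: the subgraph Jacobian
// sparsity pattern of f(x) = ( x0 * x1 , x2 * x2 ).  The example function,
// the name example_subgraph_sparsity, and the choice of vector types are
// made up for this illustration.
# include <cppad/cppad.hpp>

bool example_subgraph_sparsity(void)
{   using CppAD::AD;
    typedef CPPAD_TESTVECTOR(size_t) SizeVector;
    //
    // record f(x) = ( x0 * x1 , x2 * x2 )
    size_t n = 3, m = 2;
    CPPAD_TESTVECTOR( AD<double> ) ax(n);
    for(size_t j = 0; j < n; ++j)
        ax[j] = double(j + 1);
    CppAD::Independent(ax);
    CPPAD_TESTVECTOR( AD<double> ) ay(m);
    ay[0] = ax[0] * ax[1];
    ay[1] = ax[2] * ax[2];
    CppAD::ADFun<double> f(ax, ay);
    //
    // select the entire domain and range, do not transpose the result
    CPPAD_TESTVECTOR(bool) select_domain(n), select_range(m);
    for(size_t j = 0; j < n; ++j)
        select_domain[j] = true;
    for(size_t i = 0; i < m; ++i)
        select_range[i] = true;
    bool transpose = false;
    //
    CppAD::sparse_rc<SizeVector> pattern_out;
    f.subgraph_sparsity(select_domain, select_range, transpose, pattern_out);
    //
    // the Jacobian non-zeros for this f are (0,0), (0,1), and (1,2)
    return pattern_out.nr() == m && pattern_out.nc() == n
        && pattern_out.nnz() == 3;
}
// ---------------------------------------------------------------------------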