void get_internal_sparsity( bool transpose , const pod_vector<size_t>& internal_index , const InternalSparsity& internal_pattern , sparse_rc<SizeVector>& pattern_out ) { typedef typename InternalSparsity::const_iterator iterator; // number variables size_t nr = internal_index.size(); // column size of interanl sparstiy pattern size_t nc = internal_pattern.end(); // determine nnz, the number of possibly non-zero index pairs size_t nnz = 0; for(size_t i = 0; i < nr; i++) { CPPAD_ASSERT_UNKNOWN( internal_index[i] < internal_pattern.n_set() ); iterator itr(internal_pattern, internal_index[i]); size_t j = *itr; while( j < nc ) { ++nnz; j = *(++itr); } } // transposed if( transpose ) { pattern_out.resize(nc, nr, nnz); // size_t k = 0; for(size_t i = 0; i < nr; i++) { iterator itr(internal_pattern, internal_index[i]); size_t j = *itr; while( j < nc ) { pattern_out.set(k++, j, i); j = *(++itr); } } return; } // not transposed pattern_out.resize(nr, nc, nnz); // size_t k = 0; for(size_t i = 0; i < nr; i++) { iterator itr(internal_pattern, internal_index[i]); size_t j = *itr; while( j < nc ) { pattern_out.set(k++, i, j); j = *(++itr); } } return; }
void set_internal_sparsity( bool zero_empty , bool input_empty , bool transpose , const pod_vector<size_t>& internal_index , InternalSparsity& internal_pattern , const sparse_rc<SizeVector>& pattern_in ) { size_t nr = internal_index.size(); # ifndef NDEBUG size_t nc = internal_pattern.end(); if( transpose ) { CPPAD_ASSERT_UNKNOWN( pattern_in.nr() == nc ); CPPAD_ASSERT_UNKNOWN( pattern_in.nc() == nr ); } else { CPPAD_ASSERT_UNKNOWN( pattern_in.nr() == nr ); CPPAD_ASSERT_UNKNOWN( pattern_in.nc() == nc ); } if( input_empty ) for(size_t i = 0; i < nr; i++) { size_t i_var = internal_index[i]; CPPAD_ASSERT_UNKNOWN( internal_pattern.number_elements(i_var) == 0 ); } # endif const SizeVector& row( pattern_in.row() ); const SizeVector& col( pattern_in.col() ); size_t nnz = row.size(); for(size_t k = 0; k < nnz; k++) { size_t r = row[k]; size_t c = col[k]; if( transpose ) std::swap(r, c); // size_t i_var = internal_index[r]; CPPAD_ASSERT_UNKNOWN( i_var < internal_pattern.n_set() ); CPPAD_ASSERT_UNKNOWN( c < nc ); bool ignore = zero_empty && i_var == 0; if( ! ignore ) internal_pattern.post_element( internal_index[r], c ); } // process posts for(size_t i = 0; i < nr; ++i) internal_pattern.process_post( internal_index[i] ); }
// Forward-mode Jacobian sparsity calculation.
// pattern_in:    sparsity pattern for the matrix R (or R^T when transpose)
// transpose:     if true, pattern_in and pattern_out are transposed
// dependency:    if true, compute a dependency pattern instead of sparsity
// internal_bool: if true use the vector-of-bits representation,
//                otherwise the vector-of-sets representation
// pattern_out:   sparsity pattern for J(x) = F'(x) * R (or its transpose)
void ADFun<Base>::for_jac_sparsity(
    const sparse_rc<SizeVector>& pattern_in       ,
    bool                         transpose        ,
    bool                         dependency       ,
    bool                         internal_bool    ,
    sparse_rc<SizeVector>&       pattern_out      )
{   // dimensions of pattern_in
    size_t nr_in = pattern_in.nr();
    size_t nc_in = pattern_in.nc();
    //
    // n:   number of independent variables (rows of R)
    // ell: number of columns in the sparsity calculation (columns of R)
    size_t n   = nr_in;
    size_t ell = nc_in;
    if( transpose )
        std::swap(n, ell);
    //
    CPPAD_ASSERT_KNOWN(
        n == Domain() ,
        "for_jac_sparsity: number rows in R "
        "is not equal number of independent variables."
    );
    bool zero_empty  = true;
    bool input_empty = true;
    if( internal_bool )
    {   // allocate memory for the boolean sparsity calculation
        // (a resize leaves the sparsity pattern empty); the unused
        // set representation is released
        for_jac_sparse_pack_.resize(num_var_tape_, ell);
        for_jac_sparse_set_.resize(0, 0);
        //
        // sparsity pattern for the independent variables
        local::set_internal_sparsity(
            zero_empty, input_empty, transpose,
            ind_taddr_, for_jac_sparse_pack_, pattern_in
        );
        // propagate sparsity to the other variables on the tape
        local::ForJacSweep(
            dependency, n, num_var_tape_, &play_, for_jac_sparse_pack_
        );
        // extract the rows for the dependent variables
        local::get_internal_sparsity(
            transpose, dep_taddr_, for_jac_sparse_pack_, pattern_out
        );
    }
    else
    {   // allocate memory for the set sparsity calculation
        // (a resize leaves the sparsity pattern empty); the unused
        // boolean representation is released
        for_jac_sparse_set_.resize(num_var_tape_, ell);
        for_jac_sparse_pack_.resize(0, 0);
        //
        // sparsity pattern for the independent variables
        local::set_internal_sparsity(
            zero_empty, input_empty, transpose,
            ind_taddr_, for_jac_sparse_set_, pattern_in
        );
        // propagate sparsity to the other variables on the tape
        local::ForJacSweep(
            dependency, n, num_var_tape_, &play_, for_jac_sparse_set_
        );
        // extract the rows for the dependent variables
        local::get_internal_sparsity(
            transpose, dep_taddr_, for_jac_sparse_set_, pattern_out
        );
    }
    return;
}
// Forward-mode Hessian sparsity calculation for
// sum_i ( select_range[i] * F_i(x) ).
// select_domain: which independent variables to include
// select_range:  which dependent variables to include
// internal_bool: if true use the vector-of-bits representation,
//                otherwise the vector-of-sets representation
// pattern_out:   n by n Hessian sparsity pattern
void ADFun<Base>::for_hes_sparsity(
    const BoolVector&            select_domain    ,
    const BoolVector&            select_range     ,
    bool                         internal_bool    ,
    sparse_rc<SizeVector>&       pattern_out      )
{   size_t n = Domain();
    size_t m = Range();
    //
    CPPAD_ASSERT_KNOWN(
        size_t( select_domain.size() ) == n,
        "for_hes_sparsity: size of select_domain is not equal to "
        "number of independent variables"
    );
    CPPAD_ASSERT_KNOWN(
        size_t( select_range.size() ) == m,
        "for_hes_sparsity: size of select_range is not equal to "
        "number of dependent variables"
    );
    // neither transpose nor dependency applies to this calculation
    bool transpose  = false;
    bool dependency = false;
    //
    // holds the raw Hessian pattern before the column shift below
    sparse_rc<SizeVector> pattern_tmp;
    if( internal_bool )
    {   // forward Jacobian sparsity pattern for the independent variables
        // (corresponds to D)
        local::sparse_pack internal_for_jac;
        internal_for_jac.resize(num_var_tape_, n + 1 );
        for(size_t j = 0; j < n; j++) if( select_domain[j] )
        {   CPPAD_ASSERT_UNKNOWN( ind_taddr_[j] < n + 1 );
            internal_for_jac.add_element( ind_taddr_[j] , ind_taddr_[j] );
        }
        // forward Jacobian sparsity for every variable on the tape
        local::ForJacSweep(
            dependency, n, num_var_tape_, &play_, internal_for_jac
        );
        // reverse Jacobian sparsity pattern for select_range
        // (corresponds to s)
        local::sparse_pack internal_rev_jac;
        internal_rev_jac.resize(num_var_tape_, 1);
        for(size_t i = 0; i < m; i++) if( select_range[i] )
        {   CPPAD_ASSERT_UNKNOWN( dep_taddr_[i] < num_var_tape_ );
            internal_rev_jac.add_element( dep_taddr_[i] , 0 );
        }
        // reverse Jacobian sparsity for every variable on the tape
        local::RevJacSweep(
            dependency, n, num_var_tape_, &play_, internal_rev_jac
        );
        // internal representation that will hold the Hessian pattern
        local::sparse_pack internal_for_hes;
        internal_for_hes.resize(n + 1, n + 1);
        //
        // compute the forward Hessian sparsity pattern
        local::ForHesSweep(
            n, num_var_tape_, &play_,
            internal_for_jac, internal_rev_jac, internal_for_hes
        );
        //
        // extract the rows for the independent variables into pattern_tmp
        get_internal_sparsity(
            transpose, ind_taddr_, internal_for_hes, pattern_tmp
        );
    }
    else
    {   // same computation as the branch above, using the vector-of-sets
        // representation (sparse_list) instead of sparse_pack
        //
        // forward Jacobian sparsity pattern for the independent variables
        // (corresponds to D)
        local::sparse_list internal_for_jac;
        internal_for_jac.resize(num_var_tape_, n + 1 );
        for(size_t j = 0; j < n; j++) if( select_domain[j] )
        {   CPPAD_ASSERT_UNKNOWN( ind_taddr_[j] < n + 1 );
            internal_for_jac.add_element( ind_taddr_[j] , ind_taddr_[j] );
        }
        // forward Jacobian sparsity for every variable on the tape
        local::ForJacSweep(
            dependency, n, num_var_tape_, &play_, internal_for_jac
        );
        // reverse Jacobian sparsity pattern for select_range
        // (corresponds to s)
        local::sparse_list internal_rev_jac;
        internal_rev_jac.resize(num_var_tape_, 1);
        for(size_t i = 0; i < m; i++) if( select_range[i] )
        {   CPPAD_ASSERT_UNKNOWN( dep_taddr_[i] < num_var_tape_ );
            internal_rev_jac.add_element( dep_taddr_[i] , 0 );
        }
        // reverse Jacobian sparsity for every variable on the tape
        local::RevJacSweep(
            dependency, n, num_var_tape_, &play_, internal_rev_jac
        );
        // internal representation that will hold the Hessian pattern
        local::sparse_list internal_for_hes;
        internal_for_hes.resize(n + 1, n + 1);
        //
        // compute the forward Hessian sparsity pattern
        local::ForHesSweep(
            n, num_var_tape_, &play_,
            internal_for_jac, internal_rev_jac, internal_for_hes
        );
        //
        // extract the rows for the independent variables into pattern_tmp
        get_internal_sparsity(
            transpose, ind_taddr_, internal_for_hes, pattern_tmp
        );
    }
    // pattern_tmp has one extra leading column that is never used
    // (the assert below shows column index zero never appears);
    // shift every column index down by one to obtain the n by n result
    CPPAD_ASSERT_UNKNOWN( pattern_tmp.nr() == n );
    CPPAD_ASSERT_UNKNOWN( pattern_tmp.nc() == n + 1 );
    const SizeVector& row( pattern_tmp.row() );
    const SizeVector& col( pattern_tmp.col() );
    size_t nr  = n;
    size_t nc  = n;
    size_t nnz = pattern_tmp.nnz();
    pattern_out.resize(nr, nc, nnz);
    for(size_t k = 0; k < nnz; k++)
    {   CPPAD_ASSERT_UNKNOWN( 0 < col[k] );
        pattern_out.set(k, row[k], col[k] - 1);
    }
    return;
}
// Subgraph-based Jacobian sparsity (dependency) calculation.
// select_domain: which independent variables to include
// select_range:  which dependent variables to include
// transpose:     if true, pattern_out is the transpose of the pattern
// pattern_out:   resulting sparsity pattern
void ADFun<Base,RecBase>::subgraph_sparsity(
    const BoolVector&            select_domain    ,
    const BoolVector&            select_range     ,
    bool                         transpose        ,
    sparse_rc<SizeVector>&       pattern_out      )
{   // row and column index vectors filled in by the subgraph algorithm
    local::pod_vector<size_t> row;
    local::pod_vector<size_t> col;
    //
    // dispatch on the address type used by this recording
    switch( play_.address_type() )
    {
        case local::play::unsigned_short_enum:
        local::subgraph::subgraph_sparsity<unsigned short>(
            &play_,
            subgraph_info_,
            dep_taddr_,
            select_domain,
            select_range,
            row,
            col
        );
        break;

        case local::play::unsigned_int_enum:
        local::subgraph::subgraph_sparsity<unsigned int>(
            &play_,
            subgraph_info_,
            dep_taddr_,
            select_domain,
            select_range,
            row,
            col
        );
        break;

        case local::play::size_t_enum:
        local::subgraph::subgraph_sparsity<size_t>(
            &play_,
            subgraph_info_,
            dep_taddr_,
            select_domain,
            select_range,
            row,
            col
        );
        break;

        default:
        CPPAD_ASSERT_UNKNOWN(false);
    }
    CPPAD_ASSERT_UNKNOWN( row.size() == col.size() );
    //
    // copy (row, col) pairs into pattern_out, swapping when transposed
    size_t nr  = dep_taddr_.size();
    size_t nc  = ind_taddr_.size();
    size_t nnz = row.size();
    if( transpose )
    {   pattern_out.resize(nc, nr, nnz);
        for(size_t k = 0; k < nnz; k++)
            pattern_out.set(k, col[k], row[k]);
    }
    else
    {   pattern_out.resize(nr, nc, nnz);
        for(size_t k = 0; k < nnz; k++)
            pattern_out.set(k, row[k], col[k]);
    }
    return;
}