/*
 * Run one writer pass over the given reader:
 *   1. initialize a fresh context,
 *   2. size the job from the reader (calc_work),
 *   3. allocate the working buffers (allocate_work),
 *   4. consume the reader's data (read),
 *   5. release all context resources (context_finalize).
 *
 * reader: source to consume; passed through unchanged to calc_work/read.
 *
 * BUG FIX: the read() call previously passed &reader (a reader_t**,
 * the address of the local parameter) while the sibling call
 * calc_work(&context, reader) passes the reader_t* itself.  read()
 * now receives the same pointer as calc_work().
 */
void writer_run(const reader_t* reader) {
    context_t context;

    context_initialize(&context);
    calc_work(&context, reader);
    allocate_work(&context);
    read(&context, reader);
    context_finalize(&context);
}
/*!
  Compute the inverse of this matrix from its QR decomposition using
  LAPACK: A = Q*R, hence A^-1 = R^-1 * Q^T.

  Each LAPACK routine is called twice: first with LWORK = -1 (a
  workspace-size query), then again with the workspace returned by
  allocate_work() to do the actual computation.

  \return the inverse matrix.
  \throws vpMatrixException::badValue if a LAPACK routine reports an
          illegal argument (info < 0) or a singular R factor (dtrtri_,
          info > 0).
*/
vpMatrix vpMatrix::inverseByQRLapack() const{
  int rowNum_ = (int)this->getRows();
  int colNum_ = (int)this->getCols();
  int lda = (int)rowNum_; //lda is the number of rows because we don't use a submatrix
  int dimTau = std::min(rowNum_,colNum_);
  int dimWork = -1;                 // -1 => first LAPACK call is a workspace-size query
  double *tau = new double[dimTau];
  double *work = new double[1];     // placeholder; replaced via allocate_work() after the query
  int info;
  vpMatrix C;
  vpMatrix A = *this;               // dgeqrf_ factorizes in place, so work on a copy

  try{
    //1) Extract householder reflections (useful to compute Q) and R
    //   First call: workspace-size query only (dimWork == -1).
    dgeqrf_(
            &rowNum_,  //The number of rows of the matrix A. M >= 0.
            &colNum_,  //The number of columns of the matrix A. N >= 0.
            A.data,    /*On entry, the M-by-N matrix A.
                         On exit, the elements on and above the diagonal of the
                         array contain the min(M,N)-by-N upper trapezoidal
                         matrix R (R is upper triangular if m >= n); the
                         elements below the diagonal, with the array TAU,
                         represent the orthogonal matrix Q as a product of
                         min(m,n) elementary reflectors. */
            &lda,      //The leading dimension of the array A. LDA >= max(1,M).
            tau,       /*Dimension (min(M,N))
                         The scalar factors of the elementary reflectors */
            work,      //Internal working array. dimension (MAX(1,LWORK))
            &dimWork,  //The dimension of the array WORK. LWORK >= max(1,N).
            &info      //status
            );

    if(info != 0){
      std::cout << "dgeqrf_:Preparation:" << -info << "th element had an illegal value" << std::endl;
      throw vpMatrixException::badValue;
    }

    // Grow the workspace to the optimal size reported by the query.
    dimWork = allocate_work(&work);

    // Second call: perform the actual QR factorization.
    dgeqrf_(
            &rowNum_,  //The number of rows of the matrix A. M >= 0.
            &colNum_,  //The number of columns of the matrix A. N >= 0.
            A.data,    /*On entry, the M-by-N matrix A.
                         On exit, the elements on and above the diagonal of the
                         array contain the min(M,N)-by-N upper trapezoidal
                         matrix R (R is upper triangular if m >= n); the
                         elements below the diagonal, with the array TAU,
                         represent the orthogonal matrix Q as a product of
                         min(m,n) elementary reflectors. */
            &lda,      //The leading dimension of the array A. LDA >= max(1,M).
            tau,       /*Dimension (min(M,N))
                         The scalar factors of the elementary reflectors */
            work,      //Internal working array. dimension (MAX(1,LWORK))
            &dimWork,  //The dimension of the array WORK. LWORK >= max(1,N).
            &info      //status
            );

    if(info != 0){
      std::cout << "dgeqrf_:" << -info << "th element had an illegal value" << std::endl;
      throw vpMatrixException::badValue;
    }

    //A now contains the R matrix in its upper triangular (in lapack convention)
    C = A;

    //2) Invert R  ("U" = upper triangular, "N" = non-unit diagonal)
    dtrtri_((char*)"U",(char*)"N",&dimTau,C.data,&lda,&info);
    if(info!=0){
      if(info < 0)
        std::cout << "dtrtri_:"<< -info << "th element had an illegal value" << std::endl;
      else if(info > 0){
        // info > 0 => R(info,info) is zero: R is singular, no inverse exists.
        std::cout << "dtrtri_:R("<< info << "," <<info << ")"<< " is exactly zero. The triangular matrix is singular and its inverse can not be computed." << std::endl;
        std::cout << "R=" << std::endl << C << std::endl;
      }
      throw vpMatrixException::badValue;
    }

    //3) Zero-fill R^-1
    //the matrix is upper triangular for lapack but lower triangular for visp
    //we fill it with zeros above the diagonal (where we don't need the values)
    for(unsigned int i=0;i<C.getRows();i++)
      for(unsigned int j=0;j<C.getRows();j++)
        if(j>i) C[i][j] = 0.;

    dimWork = -1;    // new workspace-size query, this time for dormqr_
    int ldc = lda;

    //4) Transpose Q and left-multiply it by R^-1
    //get R^-1*tQ
    //C contains R^-1
    //A contains Q (as Householder reflectors, together with tau)
    // Query call (dimWork == -1): "R" = apply Q from the right, "T" = transposed.
    dormqr_((char*)"R", (char*)"T", &rowNum_, &colNum_, &dimTau, A.data, &lda,
            tau, C.data, &ldc, work, &dimWork, &info);
    if(info != 0){
      std::cout << "dormqr_:Preparation"<< -info << "th element had an illegal value" << std::endl;
      throw vpMatrixException::badValue;
    }

    dimWork = allocate_work(&work);

    // Actual multiplication: C := C * Q^T = R^-1 * Q^T = A^-1.
    dormqr_((char*)"R", (char*)"T", &rowNum_, &colNum_, &dimTau, A.data, &lda,
            tau, C.data, &ldc, work, &dimWork, &info);

    if(info != 0){
      std::cout << "dormqr_:"<< -info << "th element had an illegal value" << std::endl;
      throw vpMatrixException::badValue;
    }

    delete[] tau;
    delete[] work;
  }catch(vpMatrixException&){
    // Release the LAPACK workspaces before propagating the error.
    delete[] tau;
    delete[] work;
    throw;
  }

  return C;
}
/*!
  Link from the reverse Hessian sparsity sweep to this atomic function.

  Converts the tape's internal sparsity patterns into the representation
  selected by sparsity_ (pack / bool / set), invokes the user-defined
  rev_sparse_hes callback, and copies the resulting Hessian sparsity for
  the argument variables back into rev_hes_sparsity.  Finally propagates
  the reverse-mode Jacobian flag from the results y back to the
  arguments x.

  \param x                parameter/variable values for this call
                          (forwarded to the callback overload that takes x).
  \param x_index          variable indices for the arguments; index 0 marks
                          a parameter (not a variable).
  \param y_index          variable indices for the results.
  \param for_jac_sparsity forward Jacobian sparsity (input).
  \param rev_jac_flag     per-variable flag; read for y, or-ed into for x.
  \param rev_hes_sparsity reverse Hessian sparsity (updated for x).
  \return the value returned by the user callback (asserts if false).
*/
bool atomic_base<Base>::rev_sparse_hes(
    const vector<Base>&              x                ,
    const local::pod_vector<size_t>& x_index          ,
    const local::pod_vector<size_t>& y_index          ,
    const InternalSparsity&          for_jac_sparsity ,
    bool*                            rev_jac_flag     ,
    InternalSparsity&                rev_hes_sparsity )
{
    // both patterns must use the same column dimension q
    CPPAD_ASSERT_UNKNOWN( for_jac_sparsity.end() == rev_hes_sparsity.end() );
    size_t q           = rev_hes_sparsity.end();
    size_t n           = x_index.size();   // number of arguments
    size_t m           = y_index.size();   // number of results
    bool   ok          = false;
    size_t thread      = thread_alloc::thread_num();
    allocate_work(thread);                 // ensure work_[thread] exists
    bool   zero_empty  = true;
    bool   input_empty = false;
    bool   transpose   = false;
    //
    // vx: vx[j] is true iff argument j is a variable (index 0 = parameter)
    vector<bool> vx(n);
    for(size_t j = 0; j < n; j++)
        vx[j] = x_index[j] != 0;
    //
    // note that s and t are vectors so transpose does not matter for bool case
    vector<bool> bool_s( work_[thread]->bool_s );
    vector<bool> bool_t( work_[thread]->bool_t );
    //
    bool_s.resize(m);
    bool_t.resize(n);
    //
    // seed s with the reverse Jacobian flags of the result variables
    for(size_t i = 0; i < m; i++)
    {
        if( y_index[i] > 0 )
            bool_s[i] = rev_jac_flag[ y_index[i] ];
    }
    //
    std::string msg = ": atomic_base.rev_sparse_hes: returned false";
    if( sparsity_ == pack_sparsity_enum )
    {
        // packed-boolean (bit vector) representation
        vectorBool& pack_r( work_[thread]->pack_r );
        vectorBool& pack_u( work_[thread]->pack_u );
        vectorBool& pack_v( work_[thread]->pack_h );
        //
        pack_v.resize(n * q);
        //
        local::get_internal_sparsity(
            transpose, x_index, for_jac_sparsity, pack_r
        );
        local::get_internal_sparsity(
            transpose, y_index, rev_hes_sparsity, pack_u
        );
        //
        // try the overload that takes x first; fall back to the overload
        // without x when the user implemented only that one
        ok = rev_sparse_hes(vx, bool_s, bool_t, q, pack_r, pack_u, pack_v, x);
        if( ! ok )
            ok = rev_sparse_hes(vx, bool_s, bool_t, q, pack_r, pack_u, pack_v);
        if( ! ok )
        {
            msg = afun_name() + msg + " sparsity = pack_sparsity_enum";
            CPPAD_ASSERT_KNOWN(false, msg.c_str());
        }
        local::set_internal_sparsity(zero_empty, input_empty,
            transpose, x_index, rev_hes_sparsity, pack_v
        );
    }
    else if( sparsity_ == bool_sparsity_enum )
    {
        // vector<bool> representation
        vector<bool>& bool_r( work_[thread]->bool_r );
        vector<bool>& bool_u( work_[thread]->bool_u );
        vector<bool>& bool_v( work_[thread]->bool_h );
        //
        bool_v.resize(n * q);
        //
        local::get_internal_sparsity(
            transpose, x_index, for_jac_sparsity, bool_r
        );
        local::get_internal_sparsity(
            transpose, y_index, rev_hes_sparsity, bool_u
        );
        //
        // overload with x first, then the fallback without x (see above)
        ok = rev_sparse_hes(vx, bool_s, bool_t, q, bool_r, bool_u, bool_v, x);
        if( ! ok )
            ok = rev_sparse_hes(vx, bool_s, bool_t, q, bool_r, bool_u, bool_v);
        if( ! ok )
        {
            msg = afun_name() + msg + " sparsity = bool_sparsity_enum";
            CPPAD_ASSERT_KNOWN(false, msg.c_str());
        }
        local::set_internal_sparsity(zero_empty, input_empty,
            transpose, x_index, rev_hes_sparsity, bool_v
        );
    }
    else
    {
        // set-of-indices representation: one std::set per row, so only n sets
        CPPAD_ASSERT_UNKNOWN( sparsity_ == set_sparsity_enum );
        vector< std::set<size_t> >& set_r( work_[thread]->set_r );
        vector< std::set<size_t> >& set_u( work_[thread]->set_u );
        vector< std::set<size_t> >& set_v( work_[thread]->set_h );
        //
        set_v.resize(n);
        //
        local::get_internal_sparsity(
            transpose, x_index, for_jac_sparsity, set_r
        );
        local::get_internal_sparsity(
            transpose, y_index, rev_hes_sparsity, set_u
        );
        //
        // overload with x first, then the fallback without x (see above)
        ok = rev_sparse_hes(vx, bool_s, bool_t, q, set_r, set_u, set_v, x);
        if( ! ok )
            ok = rev_sparse_hes(vx, bool_s, bool_t, q, set_r, set_u, set_v);
        if( ! ok )
        {
            msg = afun_name() + msg + " sparsity = set_sparsity_enum";
            CPPAD_ASSERT_KNOWN(false, msg.c_str());
        }
        local::set_internal_sparsity(zero_empty, input_empty,
            transpose, x_index, rev_hes_sparsity, set_v
        );
    }
    // propagate the reverse Jacobian flag from results back to arguments
    for(size_t j = 0; j < n; j++)
    {
        if( x_index[j] > 0 )
            rev_jac_flag[ x_index[j] ] |= bool_t[j];
    }
    return ok;
}