vector_type solve(const matrix_type& A, const vector_type& y) {
    typedef typename matrix_type::size_type size_type;
    namespace ublas = boost::numeric::ublas;

    matrix_type Q(A.size1(), A.size2()), R(A.size1(), A.size2());
    qr(A, Q, R);

    vector_type b = prod(trans(Q), y);
    vector_type result;
    if (R.size1() > R.size2()) {
        // Overdetermined system: back-substitute on the leading
        // square block of R.
        size_type min = (R.size1() < R.size2() ? R.size1() : R.size2());
        result = ublas::solve(subrange(R, 0, min, 0, min),
                              subrange(b, 0, min),
                              ublas::upper_tag());
    } else {
        result = ublas::solve(R, b, ublas::upper_tag());
    }
    return result;
}
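// A minimal usage sketch for solve() above: a least-squares fit of an
// overdetermined 3x2 system. It assumes matrix_type and vector_type are
// boost::numeric::ublas::matrix<double> and vector<double>, which the
// ublas calls above suggest but the snippet does not pin down.
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>

void solve_example() {
    boost::numeric::ublas::matrix<double> A(3, 2);
    A(0,0) = 1; A(0,1) = 0;
    A(1,0) = 0; A(1,1) = 1;
    A(2,0) = 1; A(2,1) = 1;

    boost::numeric::ublas::vector<double> y(3);
    y(0) = 1; y(1) = 2; y(2) = 3;

    // Minimizes ||A*x - y||_2; here the system is consistent and x = (1, 2).
    boost::numeric::ublas::vector<double> x = solve(A, y);
}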
/**
 * 13.
 * @brief Copy the contents of a matrix.
 * @param[out] A A is overwritten with B.
 * @param[in] B The matrix to be copied.
 */
static void copy(matrix_type& A, const matrix_type& B) {
    /** Result matrices should always be of the right size */
    A.Resize(B.Height(), B.Width());
    /** Note the argument order: Elemental's Copy takes (source, destination) */
    elem::Copy(B, A);
}
/**
 * 15.
 * @brief Extract the diagonal elements of a matrix.
 * @param[in] A The (square) matrix whose diagonal is to be extracted.
 * @param[out] B A diagonal matrix containing entries from A.
 */
static void diag(const matrix_type& A, matrix_type& B) {
    /* Create a zero matrix of the right dimension and assign to B */
    const int n = A.Width();
    B = zeros(n, n);
    /* Set the diagonal of B */
    for (int i = 0; i < n; ++i)
        B.Set(i, i, A.Get(i, i));
}
/**
 * 6.
 * @brief Compute the matrix transpose.
 * @param[in] A The matrix to be transposed.
 * @param[out] B B is overwritten with A^{T}
 */
static void transpose(const matrix_type& A, matrix_type& B) {
    /** Result matrices should always have sufficient space */
    B.Resize(A.Width(), A.Height());
    /** Compute transpose */
    elem::Transpose(A, B);
}
/**
 * 5.
 * @brief Multiply one matrix with another.
 * @param[in] A The first matrix
 * @param[in] B The second matrix
 * @param[out] C C is overwritten with (A*B)
 */
static void multiply(const matrix_type& A, const matrix_type& B, matrix_type& C) {
    /** Result matrices should always have sufficient space */
    C.Resize(A.Height(), B.Width());
    /** We have to do a GEMM: C = 1.0*A*B + 0.0*C */
    elem::Gemm(elem::NORMAL, elem::NORMAL, 1.0, A, B, 0.0, C);
}
KOKKOS_INLINE_FUNCTION
void operator()(size_type inode) const {
    // Apply a Dirichlet boundary condition to 'inode'.
    // To maintain the symmetry of the original global stiffness
    // matrix, zero out the columns that correspond to boundary
    // conditions, and adjust the load vector accordingly.
    const size_type iBeg = matrix.graph.row_map[inode];
    const size_type iEnd = matrix.graph.row_map[inode+1];

    const ScalarCoordType z = node_coords(inode, 2);
    const bool bc_lower = z <= bc_lower_z;
    const bool bc_upper = bc_upper_z <= z;

    if (bc_lower || bc_upper) {
        const ScalarType bc_value = bc_lower ? bc_lower_value : bc_upper_value;

        rhs(inode) = bc_value; // set the rhs vector

        // Zero each value on the row, and leave a one on the diagonal.
        for (size_type i = iBeg; i < iEnd; i++) {
            matrix.coefficients(i) =
                (int) inode == matrix.graph.entries(i) ? 1 : 0;
        }
    }
    else {
        // Find any columns that are boundary conditions.
        // Clear them and adjust the load vector.
        for (size_type i = iBeg; i < iEnd; i++) {
            const size_type cnode = matrix.graph.entries(i);
            const ScalarCoordType zc = node_coords(cnode, 2);
            const bool c_bc_lower = zc <= bc_lower_z;
            const bool c_bc_upper = bc_upper_z <= zc;

            if (c_bc_lower || c_bc_upper) {
                const ScalarType c_bc_value =
                    c_bc_lower ? bc_lower_value : bc_upper_value;
                rhs(inode) -= c_bc_value * matrix.coefficients(i);
                matrix.coefficients(i) = 0;
            }
        }
    }
}
/** @brief Constructs a tensor with a matrix
 *
 * \note Initially the tensor will be two-dimensional.
 *
 * @param v matrix to be copied.
 */
BOOST_UBLAS_INLINE
tensor(const matrix_type& v)
    : tensor_expression_type<self_type>()
    , extents_()
    , strides_()
    , data_(v.data()) {
    if (!data_.empty()) {
        extents_ = extents_type{v.size1(), v.size2()};
        strides_ = strides_type(extents_);
    }
}
// Vectorizes (stacks the columns of) an input matrix.
state_type Vec(const matrix_type& A) {
    state_type vectorized(A.size1() * A.size2());
    // Column j of A occupies entries [j*rows, (j+1)*rows) of the result.
    for (std::size_t j = 0; j < A.size2(); j++) {
        for (std::size_t i = 0; i < A.size1(); i++) {
            vectorized(j * A.size1() + i) = A(i, j);
        }
    }
    return vectorized;
}
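// A small sketch of the layout Vec() produces, assuming matrix_type is
// boost::numeric::ublas::matrix<double> and state_type is
// boost::numeric::ublas::vector<double> (names not pinned down by the
// snippet itself). For A = [1 3 5; 2 4 6], Vec(A) = (1, 2, 3, 4, 5, 6).
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>

void vec_example() {
    boost::numeric::ublas::matrix<double> A(2, 3);
    A(0,0) = 1; A(0,1) = 3; A(0,2) = 5;
    A(1,0) = 2; A(1,1) = 4; A(1,2) = 6;
    boost::numeric::ublas::vector<double> v = Vec(A); // v(2) == 3, v(5) == 6
}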
/**
 * 7.
 * @brief Compute element-wise negation of a matrix.
 * @param[in] A The matrix to be negated.
 * @param[out] B B is overwritten with -1.0*A
 */
static void negation(const matrix_type& A, matrix_type& B) {
    /** Result matrices should always have sufficient space */
    B.Resize(A.Height(), A.Width());
    /** Copy over the matrix */
    elem::Copy(A, B);
    /** Multiply by -1.0 */
    elem::Scal(-1.0, B);
}
static void calc(matrix_type& a, vector_type& v, const char& jobz) {
    namespace impl = boost::numeric::bindings::lapack::detail;
    char uplo = 'L';
    integer_t n = a.size2();
    integer_t lda = a.size1();
    integer_t lwork = -1;
    integer_t info = 0;
    value_type dlwork;
    // Workspace query: with lwork == -1, syev only reports the optimal
    // workspace size in dlwork and computes nothing else.
    impl::syev(jobz, uplo, n, mtraits::data(a), lda, vtraits::data(v),
               &dlwork, lwork, info);
    // Allocate the workspace and run the actual eigendecomposition
    // (same call, now with a real work buffer; needs <vector>).
    lwork = static_cast<integer_t>(dlwork);
    std::vector<value_type> work(lwork);
    impl::syev(jobz, uplo, n, mtraits::data(a), lda, vtraits::data(v),
               work.data(), lwork, info);
}
/**
 * Apply the sketching transform columnwise to A, writing the result
 * to sketch_of_A.
 */
void apply(const matrix_type& A,
           output_matrix_type& sketch_of_A,
           columnwise_tag dimension) const {
    const value_type *a = A.LockedBuffer();
    El::Int lda = A.LDim();

    value_type *sa = sketch_of_A.Buffer();
    El::Int ldsa = sketch_of_A.LDim();

    // Row i of the sketch is row _samples[i] of A.
    for (El::Int j = 0; j < A.Width(); j++)
        for (El::Int i = 0; i < data_type::_S; i++)
            sa[j * ldsa + i] = a[j * lda + data_type::_samples[i]];
}
/**
 * Apply the sketching transform rowwise to A, writing the result
 * to sketch_of_A.
 */
void apply(const matrix_type& A,
           output_matrix_type& sketch_of_A,
           rowwise_tag dimension) const {
    const value_type *a = A.LockedBuffer();
    El::Int lda = A.LDim();

    value_type *sa = sketch_of_A.Buffer();
    El::Int ldsa = sketch_of_A.LDim();

    // Column j of the sketch is column _samples[j] of A.
    for (El::Int j = 0; j < data_type::_S; j++)
        for (El::Int i = 0; i < A.Height(); i++)
            sa[j * ldsa + i] = a[data_type::_samples[j] * lda + i];
}
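// Both apply() overloads above reduce to plain sampling: the columnwise
// variant keeps rows _samples[0 .. _S) of A, and the rowwise variant keeps
// the corresponding columns, so no arithmetic beyond the copy is required.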
/**
 * 4.
 * @brief Subtract one matrix from another.
 * @param[in] A The first matrix
 * @param[in] B The second matrix
 * @param[out] C C is overwritten with (A-B)
 */
static void minus(const matrix_type& A, const matrix_type& B, matrix_type& C) {
    /** Result matrices should always have sufficient space */
    C.Resize(A.Height(), A.Width());
    /** First, copy the matrix over */
    elem::Copy(A, C);
    /** Now, subtract the other matrix in */
    elem::Axpy(-1.0, B, C);
}
bool is_positive_definite(const matrix<T,N,A>& m) {
    typedef matrix<T,N,A> matrix_type;
    typedef typename matrix_type::range_type range_type;

    if (m.row() != m.col()) return false;

    // Sylvester's criterion: every leading principal minor, up to and
    // including the full determinant, must be strictly positive.
    // (The original loop stopped at i != m.row(), which skipped the
    // full determinant.)
    for (std::size_t i = 1; i <= m.row(); ++i) {
        const matrix_type a{ m, range_type{0, i}, range_type{0, i} };
        if (a.det() <= T(0)) return false;
    }

    return true;
}
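// A quick sanity check of is_positive_definite() via Sylvester's
// criterion. The construction below is hypothetical: it assumes the same
// matrix library offers a dynamically sized matrix<double> with a
// (rows, cols) constructor and operator() element access.
void is_positive_definite_example() {
    matrix<double> good(2, 2);   // [[2,-1],[-1,2]]: minors 2 and 3 > 0, so PD
    good(0,0) =  2; good(0,1) = -1;
    good(1,0) = -1; good(1,1) =  2;

    matrix<double> bad(2, 2);    // [[1,2],[2,1]]: det = -3 < 0, so not PD
    bad(0,0) = 1; bad(0,1) = 2;
    bad(1,0) = 2; bad(1,1) = 1;

    bool a = is_positive_definite(good); // expected: true
    bool b = is_positive_definite(bad);  // expected: false
    (void)a; (void)b;
}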
// --------------------------------------------------------------------
static matrix_type _compute_rooted_fa(const matrix_type& fa) {
    const size_t K = fa.get_height();
    const size_t J = fa.get_width();
    assert(K > 1);

    // Express rows 1..K-1 relative to row 0 (the root).
    matrix_type rooted_fa (K - 1, J);
    for (size_t k = 0; k + 1 < K; k++)
        for (size_t j = 0; j < J; j++)
            rooted_fa(k, j) = fa(k + 1, j) - fa(0, j);

    return rooted_fa;
}
/**
 * Apply the sketching transform on A and write to sketch_of_A.
 * Implementation for columnwise.
 */
void apply_impl_vdist(const matrix_type& A,
                      output_matrix_type& sketch_of_A,
                      skylark::sketch::columnwise_tag tag) const {
    // Just a local operation on the Matrix
    _local.apply(A.LockedMatrix(), sketch_of_A.Matrix(), tag);
}
// Tensor product. Probably a better way of doing this.
matrix_type Kronecker(const matrix_type& A, const matrix_type& B) {
    matrix_type C(A.size1() * B.size1(), A.size2() * B.size2());
    for (std::size_t i = 0; i < A.size1(); i++) {
        for (std::size_t j = 0; j < A.size2(); j++) {
            for (std::size_t k = 0; k < B.size1(); k++) {
                for (std::size_t l = 0; l < B.size2(); l++) {
                    // Block (i,j) of C is A(i,j) * B.
                    C(i*B.size1() + k, j*B.size2() + l) = A(i,j) * B(k,l);
                }
            }
        }
    }
    return C;
}
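// A minimal sketch of Kronecker(), assuming matrix_type is
// boost::numeric::ublas::matrix<double> as the size1()/size2() calls
// suggest: I_2 (x) B is the block-diagonal matrix diag(B, B).
#include <boost/numeric/ublas/matrix.hpp>

void kronecker_example() {
    namespace ublas = boost::numeric::ublas;
    ublas::matrix<double> I(ublas::identity_matrix<double>(2)), B(2, 2);
    B(0,0) = 1; B(0,1) = 2;
    B(1,0) = 3; B(1,1) = 4;
    ublas::matrix<double> C = Kronecker(I, B); // 4x4: C(0,0)==1, C(2,3)==2, C(0,2)==0
}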
static scalar_type r(const matrix_type& m) {
    BOOST_STATIC_ASSERT(Row >= 0);
    BOOST_STATIC_ASSERT(Row < rows);
    BOOST_STATIC_ASSERT(Col >= 0);
    BOOST_STATIC_ASSERT(Col < cols);
    // The underlying matrix is indexed (column, row), hence the swap.
    return m.at(Col, Row);
}
static scalar_type ir(int row, int col, const matrix_type& m) {
    BOOST_ASSERT(row >= 0);
    BOOST_ASSERT(row < rows);
    BOOST_ASSERT(col >= 0);
    BOOST_ASSERT(col < cols);
    // Runtime-checked variant of r(); same (column, row) index order.
    return m.at(col, row);
}
void apply(const matrix_type& A,
           output_matrix_type& sketch_of_A,
           Dimension dimension) const {
    // TODO do we allow different communicators and different roots?

    // If on root: just a local operation on the Matrix
    if (skylark::utility::get_communicator(A).rank() == 0)
        _local.apply(A.LockedMatrix(), sketch_of_A.Matrix(), dimension);
}
void fill_from_file(matrix_type& tens, const std::string& str) {
    std::ifstream ifs(str);
    // Entries are read in column-major order: all of column j before j+1.
    for (int j = 0; j < tens.extent<1>(); ++j)
        for (int i = 0; i < tens.extent<0>(); ++i) {
            ifs >> tens.at(i, j);
        }
}
// ----------------------------------------------------------------
explicit q_data(const matrix_type& d)
    : d_ij(), d_ik(), d_jk(), i(), j() {
    static const auto k_0_5 = value_type(0.5);
    const auto n = d.get_height();
    const auto k_n_2 = value_type(n - 2);

    //
    // Cache the row sums; column sums would work equally well
    // because the matrix is symmetric.
    //
    std::vector<value_type> sigma;
    for (size_t c = 0; c < n; c++)
        sigma.push_back(d.get_row_sum(c));

    //
    // Compute the values of the Q matrix.
    //
    matrix_type q (n, n);
    for (size_t r = 0; r < n; r++)
        for (size_t c = 0; c < r; c++)
            q(r, c) = k_n_2 * d(r, c) - sigma[r] - sigma[c];

    //
    // Find the cell with the minimum value.
    //
    i = 1, j = 0;
    for (size_t r = 2; r < n; r++)
        for (size_t c = 0; c < r; c++)
            if (q(r, c) < q(i, j))
                i = r, j = c;

    //
    // Compute distances between the new nodes.
    //
    d_ij = d(i, j);
    d_ik = k_0_5 * (d_ij + ((sigma[i] - sigma[j]) / k_n_2));
    d_jk = d_ij - d_ik;
}
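// For reference, the quantities computed above are the standard
// neighbor-joining update (Saitou & Nei): with sigma_x the sum of row x
// of the distance matrix and k the new node joining i and j,
//
//   Q(i, j) = (n - 2) * d(i, j) - sigma_i - sigma_j
//   d(i, k) = d(i, j) / 2 + (sigma_i - sigma_j) / (2 * (n - 2))
//   d(j, k) = d(i, j) - d(i, k)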
int operator()() {
    if (make_preprocessing()) {
        assert(!"Failed to make preprocessing.");
        return 1;
    }
    if (make_initialization_preprocessing()) {
        assert(!"Failed to make initialization preprocessing.");
        return 1;
    }

    unknown_parameters = make_unknown_parameters();
    if (!unknown_parameters) {
        assert(!"Failed to make unknown parameters.");
        return 1;
    }

    fitting_array = make_fitting_array_initial_guess();
    if (fitting_array.size() != unknown_parameters) {
        assert(!"Failed to make initial guess.");
        return 1;
    }

    max_iteration = make_max_iteration();
    if (!max_iteration) {
        assert(!"Failed to make max iteration.");
        return 1;
    }

    eps = make_eps();
    if (eps < value_type{0}) {
        assert(!"Failed to make eps.");
        return 1;
    }

    if (make_initialization_postprocessing()) {
        assert(!"Failed to make initialization postprocessing.");
        return 1;
    }

    merit_function = make_merit_function();
    if (!merit_function) {
        assert(!"Failed to make merit function.");
        return 1;
    }

    jacobian_matrix_function.resize(1, unknown_parameters);
    for (size_type index = 0; index != unknown_parameters; ++index) {
        jacobian_matrix_function[0][index] = make_jacobian_matrix_function(index);
        if (!jacobian_matrix_function[0][index]) {
            assert(!"Failed to make jacobian matrix function.");
            return 1;
        }
    }

    hessian_matrix_function.resize(unknown_parameters, unknown_parameters);
    for (size_type index = 0; index != unknown_parameters; ++index)
        for (size_type jndex = 0; jndex <= index; ++jndex) {
            hessian_matrix_function[index][jndex] =
                make_hessian_matrix_function(index, jndex);
            if (!hessian_matrix_function[index][jndex]) {
                assert(!"Failed to make hessian matrix function.");
                return 1;
            }
            // The Hessian is symmetric; mirror the lower triangle.
            if (index != jndex) {
                hessian_matrix_function[jndex][index] =
                    hessian_matrix_function[index][jndex];
            }
        }

    if (make_iteration_preprocessing()) {
        assert(!"Failed to make iteration preprocessing.");
        return 1;
    }

    for (size_type step_index = 0; step_index != max_iteration; ++step_index) {
        if (make_every_iteration_preprocessing()) {
            assert(!"Failed to make every iteration preprocessing.");
            return 1;
        }
        if (make_iteration()) {
            assert(!"Failed to make iteration.");
            return 1;
        }
        if (!make_fitting_flag()) { /* eps reached */ break; }
        if (make_every_iteration_postprocessing()) {
            assert(!"Failed to make every iteration postprocessing.");
            return 1;
        }
    }

    if (make_iteration_postprocessing()) {
        assert(!"Failed to make iteration postprocessing.");
        return 1;
    }
    if (make_postprocessing()) {
        assert(!"Failed to make postprocessing.");
        return 1;
    }

    return 0;
}
/**
 * Parametrized constructor.
 * \param aBelief The belief-state of the gaussian distribution.
 */
gaussian_pdf(const BeliefState& aBelief)
    : mean_state(aBelief.get_mean_state()),
      E_inv(aBelief.get_covariance().get_inverse_matrix()),
      factor(-1) {
    factor = determinant_Cholesky(E_inv);
    if (fabs(factor) < std::numeric_limits<scalar_type>::epsilon()) {
        // Degenerate covariance: keep the negative sentinel value.
        factor = scalar_type(-1);
    } else {
        factor = scalar_type(1) / factor;
        // Multiply in one factor of 2*pi per dimension.
        for (size_type i = 0; i < E_inv.get_row_count(); ++i)
            factor *= scalar_type(6.28318530718);
    }
}
box<dimension>::box(matrix_type const& edges) {
    if (edges.size1() != dimension || edges.size2() != dimension) {
        throw std::invalid_argument("edge vectors have invalid dimensionality");
    }
    // Keep only the diagonal; any off-diagonal entry must be zero.
    edges_ = zero_matrix_type(dimension, dimension);
    for (unsigned int i = 0; i < dimension; ++i) {
        edges_(i, i) = edges(i, i);
    }
    if (norm_inf(edges_ - edges) != 0) {
        throw std::invalid_argument("non-cuboid geometries are not implemented");
    }
    for (unsigned int i = 0; i < dimension; ++i) {
        length_[i] = edges_(i, i);
    }
    length_half_ = 0.5 * length_;
    LOG("edge lengths of simulation domain: " << length_);
}
constexpr auto operator*(matrix_type<S1> m1, matrix_type<S2> m2) {
    // Each output row pairs one row of m1 with every column of m2
    // via scalar_prod.
    auto storage = fmap(
        [=](auto row) {
            return zip_with(scalar_prod,
                            repeat_n(m2.ncolumns(), row),
                            columns(m2));
        },
        rows(m1));
    return matrix_type<decltype(storage)>{storage};
}
void qr(const matrix_type& A, matrix_type& Q, matrix_type& R) {
    using namespace boost::numeric::ublas;
    typedef typename matrix_type::size_type size_type;
    typedef typename matrix_type::value_type value_type;

    int m = A.size1();
    int n = A.size2();

    identity_matrix<value_type> ident(m);
    if (Q.size1() != ident.size1() || Q.size2() != ident.size2())
        Q = matrix_type(m, m);
    Q.assign(ident);

    R.clear();
    R = A;

    for (size_type k = 0; k < R.size1() && k < R.size2(); k++) {
        slice s1(k, 1, m - k);
        slice s2(k, 0, m - k);

        // x = R(k:m, k): the column to annihilate below the diagonal.
        matrix_vector_slice<matrix_type> x(R, s1, s2);
        matrix_type F(x.size(), x.size());
        // Build the Householder reflector for x.
        Reflector(x, F);

        matrix_type temp = subrange(R, k, m, k, n);
        subrange(R, k, m, k, n) = prod(F, temp);

        // Accumulate Q: embed F in the lower-right corner of an identity.
        identity_matrix<value_type> iqk(A.size1());
        matrix_type Q_k(iqk);
        subrange(Q_k,
                 Q_k.size1() - F.size1(), Q_k.size1(),
                 Q_k.size2() - F.size2(), Q_k.size2()) = F;
        Q = prod(Q, Q_k);
    }
}
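// A minimal check of qr() above, using the classic 3x3 Householder
// example matrix (ublas types assumed, matching the calls in qr itself).
#include <boost/numeric/ublas/matrix.hpp>

void qr_example() {
    boost::numeric::ublas::matrix<double> A(3, 3), Q(3, 3), R(3, 3);
    A(0,0) = 12; A(0,1) = -51; A(0,2) =   4;
    A(1,0) =  6; A(1,1) = 167; A(1,2) = -68;
    A(2,0) = -4; A(2,1) =  24; A(2,2) = -41;

    qr(A, Q, R);
    // Q is orthogonal, R is upper triangular, and prod(Q, R)
    // reproduces A up to rounding error.
    boost::numeric::ublas::matrix<double> QR = prod(Q, R);
    (void)QR;
}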
/**
 * 8.
 * @brief Compute the inverse of a matrix.
 * @param[in] A The (non-singular and square) matrix to be inverted.
 * @param[out] B B is overwritten with A's inverse.
 */
static void inv(const matrix_type& A, matrix_type& B) {
    /** Assume that the matrix has an inverse */
    const int n = A.Height();
    /** First, copy over an identity as the right-hand side */
    B = eye(n);
    /** Solve the linear system using LU */
    elem::lu::SolveAfter(elem::NORMAL, A, B);
}
// --------------------------------------------------------------------
static matrix_type _create_b_vec(
        const matrix_type& current_values,
        const size_t row_padding) {
    assert(current_values.is_vector());

    const auto K = current_values.get_length();
    matrix_type b_vec (K + K + row_padding, 1);

    // Stack the values, then their complements against 1, then zeros.
    for (size_t k = 0; k < K; k++) {
        b_vec[k] = current_values[k];
        b_vec[k + K] = value_type(1) - current_values[k];
    }
    for (size_t k = K + K; k < b_vec.get_height(); k++)
        b_vec[k] = value_type(0);

    return b_vec;
}
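// A worked example of the layout _create_b_vec produces: for
// current_values = (0.2, 0.7) and row_padding = 1, the result is the
// column vector (0.2, 0.7, 0.8, 0.3, 0)^T -- the values, their
// complements against 1, then the zero padding.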
value_type iterate(matrix_type const& initial_matrix, matrix_type& result_matrix) {
    triple_homotopy_fitting<value_type> thf{ug_size};
    size_type const tilt_number = diag_matrix.row();
    matrix_type intensity{ intensity_matrix.col(), 1 };

    for (size_type index = 0; index != tilt_number; ++index) {
        std::copy(intensity_matrix.row_begin(index),
                  intensity_matrix.row_end(index),
                  intensity.col_begin(0));
        // TODO -- optimization here
        thf.register_entry(
            ar,
            // C1 approximation
            alpha(progress_ratio),
            make_coefficient_matrix(thickness,
                                    diag_matrix.row_begin(index),
                                    diag_matrix.row_end(index),
                                    column_index),
            // C/2 * C/2 approximation
            beta(progress_ratio),
            make_coefficient_matrix(thickness / 2.0,
                                    diag_matrix.row_begin(index),
                                    diag_matrix.row_end(index)),
            expm(make_structure_matrix(ar, initial_matrix,
                                       diag_matrix.row_begin(index),
                                       diag_matrix.row_end(index)),
                 thickness / 2.0, column_index),
            // standard expm
            gamma(progress_ratio),
            make_scattering_matrix(ar, initial_matrix,
                                   diag_matrix.row_begin(index),
                                   diag_matrix.row_end(index),
                                   thickness, column_index),
            intensity,
            column_index);
    }

    result_matrix.resize(ug_size, 1);
    value_type const residual = thf.output(result_matrix.begin());
    /*
    std::cout << "\n current residual is " << residual << "\n";
    std::cout << "\n current ug is \n" << result_matrix.transpose() << "\n";
    */
    return residual;
}