/** @brief Applies the ILU0 preconditioner in place: forward-substitutes with L, divides by diag(U), then back-substitutes with U.
*
* When level scheduling is enabled, the precomputed multifrontal buffers are substituted
* directly (the original code runs the identical substitution in both memory-domain
* branches, so it is hoisted here). Otherwise the triangular solves run on the host,
* so a vector living outside main memory is temporarily migrated there and back.
*/
void apply(vector<ScalarType> & vec) const
{
  if (tag_.use_level_scheduling())
  {
    // Multifrontal substitution path (same code regardless of the active memory domain):
    detail::level_scheduling_substitute(vec,
                                        multifrontal_L_row_index_arrays_,
                                        multifrontal_L_row_buffers_,
                                        multifrontal_L_col_buffers_,
                                        multifrontal_L_element_buffers_,
                                        multifrontal_L_row_elimination_num_list_);

    vec = viennacl::linalg::element_div(vec, multifrontal_U_diagonal_);

    detail::level_scheduling_substitute(vec,
                                        multifrontal_U_row_index_arrays_,
                                        multifrontal_U_row_buffers_,
                                        multifrontal_U_col_buffers_,
                                        multifrontal_U_element_buffers_,
                                        multifrontal_U_row_elimination_num_list_);
    return;
  }

  if (vec.handle().get_active_handle_id() != viennacl::MAIN_MEMORY)
  {
    // Triangular solves below run on the host: migrate the vector, solve, migrate back.
    viennacl::memory_types original_domain = viennacl::memory_domain(vec);
    viennacl::switch_memory_domain(vec, viennacl::MAIN_MEMORY);
    viennacl::linalg::inplace_solve(LU, vec, unit_lower_tag());
    viennacl::linalg::inplace_solve(LU, vec, upper_tag());
    viennacl::switch_memory_domain(vec, original_domain);
  }
  else // vector already resides in main memory: apply ILU0 directly
  {
    viennacl::linalg::inplace_solve(LU, vec, unit_lower_tag());
    viennacl::linalg::inplace_solve(LU, vec, upper_tag());
  }
}
/** @brief Solves A x = b in place, where mat holds an LU factorization (unit lower factor L, upper factor U).
*
* @param mat  The square LU-factorized matrix
* @param vec  Right-hand side on entry; overwritten with the solution
*/
void lu_substitute(matrix<SCALARTYPE, F, ALIGNMENT> const & mat, vector<SCALARTYPE, VEC_ALIGNMENT> & vec)
{
  // Carry a diagnostic message in the assert, consistent with the sibling overload:
  assert(mat.size1() == mat.size2() && bool("Matrix must be square"));

  // Forward substitution (implicit unit diagonal in L), then backward substitution:
  inplace_solve(mat, vec, unit_lower_tag());
  inplace_solve(mat, vec, upper_tag());
}
/** @brief In-place LU substitution: forward-solve with the unit-lower factor of A, then back-solve with its upper factor.
*
* @param A    Square matrix holding the LU factorization
* @param vec  Right-hand side; replaced by the solution vector
*/
void lu_substitute(matrix<SCALARTYPE, F, ALIGNMENT> const & A, vector<SCALARTYPE, VEC_ALIGNMENT> & vec)
{
  assert(A.size1() == A.size2() && bool("Matrix must be square"));

  // L-solve first (unit diagonal assumed), then U-solve:
  inplace_solve(A, vec, unit_lower_tag());
  inplace_solve(A, vec, upper_tag());
}
/** @brief Applies the incomplete Cholesky preconditioner: solves L z = vec, then L^T y = z, in place.
*
* Note: L is stored in a column-oriented fashion, i.e. transposed w.r.t. the row-oriented
* layout. Thus, the factorization A = L L^T holds L in the upper triangular part of A,
* hence trans(LLT) with lower_tag() for the forward solve and LLT with upper_tag() for
* the backward solve.
*
* The triangular solves execute on the host, so a vector in another memory domain is
* migrated to main memory for the solves and restored afterwards.
* (The previous in-line comment wrongly labeled this path "ILU0".)
*/
void apply(vector<ScalarType> & vec) const
{
  if (viennacl::memory_domain(vec) != viennacl::MAIN_MEMORY)
  {
    viennacl::memory_types old_memory_location = viennacl::memory_domain(vec);
    viennacl::switch_memory_domain(vec, viennacl::MAIN_MEMORY);
    viennacl::linalg::inplace_solve(trans(LLT), vec, lower_tag());
    viennacl::linalg::inplace_solve(LLT, vec, upper_tag());
    viennacl::switch_memory_domain(vec, old_memory_location);
  }
  else // vector already in main memory: apply the Cholesky factor directly
  {
    viennacl::linalg::inplace_solve(trans(LLT), vec, lower_tag());
    viennacl::linalg::inplace_solve(LLT, vec, upper_tag());
  }
}
/** @brief Solves A X = B in place for the LU-factorized square matrix A, overwriting B with the solution.
*
* @param A  Square matrix holding the LU factors (unit lower L, upper U)
* @param B  Right-hand side matrix; overwritten with the solution
*/
void lu_substitute(matrix<SCALARTYPE, F1, ALIGNMENT_A> const & A, matrix<SCALARTYPE, F2, ALIGNMENT_B> & B)
{
  assert(A.size1() == A.size2() && bool("Matrix must be square"));
  // Fixed copy-paste defect: the second assert merely duplicated the squareness check.
  // It should verify that the right-hand side is dimension-compatible with A:
  assert(A.size1() == B.size1() && bool("Size mismatch between matrix and right-hand side"));

  inplace_solve(A, B, unit_lower_tag());
  inplace_solve(A, B, upper_tag());
}
/** @brief In-place application of the incomplete Cholesky factor: solves L z = vec, then L^T y = z.
*
* L is stored column-oriented, i.e. transposed w.r.t. the row-oriented layout, so the
* factorization A = L L^T keeps L in the upper triangular part of LLT; hence the forward
* solve uses trans(LLT)/lower_tag() and the backward solve uses LLT/upper_tag().
* The solves run on the CPU, so vectors residing outside main memory are switched to a
* host context for the duration of the solves.
*/
void apply(vector<NumericT> & vec) const
{
  viennacl::context original_ctx = viennacl::traits::context(vec);
  bool needs_migration = (original_ctx.memory_type() != viennacl::MAIN_MEMORY);

  if (needs_migration)
  {
    viennacl::context host_ctx(viennacl::MAIN_MEMORY);
    viennacl::switch_memory_context(vec, host_ctx);
  }

  viennacl::linalg::inplace_solve(trans(LLT), vec, lower_tag());
  viennacl::linalg::inplace_solve(LLT, vec, upper_tag());

  if (needs_migration)
    viennacl::switch_memory_context(vec, original_ctx);
}
/** @brief Applies the ILUT preconditioner in place: substitutes with L, then with U.
*
* A device-resident vector uses the multifrontal level-scheduling buffers when level
* scheduling is enabled; otherwise it is migrated to main memory for the host-based
* triangular solves and moved back. A vector already in main memory is solved directly
* (note: the level-scheduling flag is NOT consulted on the host path — this mirrors
* the original control flow).
*/
void apply(viennacl::vector<NumericT> & vec) const
{
  bool on_host = (vec.handle().get_active_handle_id() == viennacl::MAIN_MEMORY);

  if (!on_host && tag_.use_level_scheduling())
  {
    // Multifrontal substitution on the device:
    detail::level_scheduling_substitute(vec,
                                        multifrontal_L_row_index_arrays_,
                                        multifrontal_L_row_buffers_,
                                        multifrontal_L_col_buffers_,
                                        multifrontal_L_element_buffers_,
                                        multifrontal_L_row_elimination_num_list_);

    vec = viennacl::linalg::element_div(vec, multifrontal_U_diagonal_);

    detail::level_scheduling_substitute(vec,
                                        multifrontal_U_row_index_arrays_,
                                        multifrontal_U_row_buffers_,
                                        multifrontal_U_col_buffers_,
                                        multifrontal_U_element_buffers_,
                                        multifrontal_U_row_elimination_num_list_);
  }
  else if (!on_host)
  {
    // Host-only triangular solves: move the vector to main memory first.
    viennacl::context host_context(viennacl::MAIN_MEMORY);
    viennacl::context old_context = viennacl::traits::context(vec);
    viennacl::switch_memory_context(vec, host_context);
    viennacl::linalg::inplace_solve(LU_, vec, unit_lower_tag());
    viennacl::linalg::inplace_solve(LU_, vec, upper_tag());
    viennacl::switch_memory_context(vec, old_context);
  }
  else // vector already resides in main memory: apply ILUT directly
  {
    viennacl::linalg::inplace_solve(LU_, vec, unit_lower_tag());
    viennacl::linalg::inplace_solve(LU_, vec, upper_tag());
  }
}
/** @brief Host-based application of the incomplete Cholesky factor to a generic vector type.
*
* Extracts the raw CSR arrays of LLT and invokes the generic host-based triangular
* solvers directly. L is stored column-oriented (upper triangular part of LLT), so the
* forward solve is a transposed solve with lower_tag() and the backward solve a direct
* solve with upper_tag().
*/
void apply(VectorType & vec) const
{
  namespace host_detail = viennacl::linalg::host_based::detail;

  unsigned int const * rows = host_detail::extract_raw_pointer<unsigned int>(LLT.handle1());
  unsigned int const * cols = host_detail::extract_raw_pointer<unsigned int>(LLT.handle2());
  ScalarType   const * vals = host_detail::extract_raw_pointer<ScalarType>(LLT.handle());

  // Forward solve with L (stored transposed), then backward solve with L^T:
  host_detail::csr_trans_inplace_solve<ScalarType>(rows, cols, vals, vec, LLT.size2(), lower_tag());
  host_detail::csr_inplace_solve<ScalarType>(rows, cols, vals, vec, LLT.size2(), upper_tag());
}
/** @brief Host-based ILU application for an arbitrary vector type.
*
* Since vec can be a rather arbitrary vector type, the more generic host-based backend
* routines are called manually on the raw CSR arrays of LU_: forward substitution with
* the implicit-unit-diagonal lower factor, then backward substitution with the upper factor.
*/
void apply(VectorT & vec) const
{
  namespace host_detail = viennacl::linalg::host_based::detail;

  unsigned int const * rows = host_detail::extract_raw_pointer<unsigned int>(LU_.handle1());
  unsigned int const * cols = host_detail::extract_raw_pointer<unsigned int>(LU_.handle2());
  NumericType  const * vals = host_detail::extract_raw_pointer<NumericType>(LU_.handle());

  host_detail::csr_inplace_solve<NumericType>(rows, cols, vals, vec, LU_.size2(), unit_lower_tag());
  host_detail::csr_inplace_solve<NumericType>(rows, cols, vals, vec, LU_.size2(), upper_tag());
}
/** @brief Host-based ILU application: forward-solves with the unit-lower factor and back-solves with the upper factor of LU.
*
* Operates directly on the raw CSR arrays of LU via the generic host-based backend solvers.
*/
void apply(VectorType & vec) const
{
  namespace host_detail = viennacl::linalg::host_based::detail;

  unsigned int const * rows = host_detail::extract_raw_pointer<unsigned int>(LU.handle1());
  unsigned int const * cols = host_detail::extract_raw_pointer<unsigned int>(LU.handle2());
  ScalarType   const * vals = host_detail::extract_raw_pointer<ScalarType>(LU.handle());

  host_detail::csr_inplace_solve<ScalarType>(rows, cols, vals, vec, LU.size2(), unit_lower_tag());
  host_detail::csr_inplace_solve<ScalarType>(rows, cols, vals, vec, LU.size2(), upper_tag());
}