void pipelined_bicgstab_update_s(vector_base<NumericT> & s,
                                 vector_base<NumericT> & r,
                                 vector_base<NumericT> const & Ap,
                                 vector_base<NumericT> & inner_prod_buffer,
                                 vcl_size_t buffer_chunk_size,
                                 vcl_size_t buffer_chunk_offset)
{
  switch (viennacl::traits::handle(s).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_bicgstab_update_s(s, r, Ap, inner_prod_buffer, buffer_chunk_size, buffer_chunk_offset);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_bicgstab_update_s(s, r, Ap, inner_prod_buffer, buffer_chunk_size, buffer_chunk_offset);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_bicgstab_update_s(s, r, Ap, inner_prod_buffer, buffer_chunk_size, buffer_chunk_offset);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
    typename viennacl::enable_if< viennacl::is_any_sparse_matrix<SparseMatrixType>::value>::type
    prod_impl(const SparseMatrixType & mat,
              const viennacl::vector_base<ScalarType> & vec,
                    viennacl::vector_base<ScalarType> & result)
    {
      assert( (mat.size1() == result.size()) && bool("Size check failed for compressed matrix-vector product: size1(mat) != size(result)"));
      assert( (mat.size2() == vec.size())    && bool("Size check failed for compressed matrix-vector product: size2(mat) != size(x)"));

      switch (viennacl::traits::handle(mat).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::prod_impl(mat, vec, result);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::prod_impl(mat, vec, result);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::prod_impl(mat, vec, result);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
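
For reference, this sparse prod_impl() dispatcher is not normally called directly; the high-level viennacl::linalg::prod() entry point routes sparse matrix-vector products to it. A minimal usage sketch (types and sizes chosen purely for illustration):

#include <viennacl/compressed_matrix.hpp>
#include <viennacl/vector.hpp>
#include <viennacl/linalg/prod.hpp>

void sparse_mv_example()
{
  viennacl::compressed_matrix<double> A(1000, 1000);  // sparse matrix in the active memory domain
  viennacl::vector<double> x(1000), y(1000);

  // y = A * x; prod() forwards to a prod_impl() dispatcher like the one above,
  // which selects the host, OpenCL, or CUDA backend from A's active handle.
  y = viennacl::linalg::prod(A, x);
}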
Example #3
    void inplace_solve(const matrix_base<NumericT> & A,
                       matrix_expression< const matrix_base<NumericT>, const matrix_base<NumericT>, op_trans> proxy_B,
                       SOLVERTAG)
    {
      assert( (viennacl::traits::size1(A) == viennacl::traits::size2(A))       && bool("Size check failed in inplace_solve(): size1(A) != size2(A)"));
      assert( (viennacl::traits::size1(A) == viennacl::traits::size1(proxy_B)) && bool("Size check failed in inplace_solve(): size1(A) != size1(B^T)"));

      switch (viennacl::traits::handle(A).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::inplace_solve(A, false, const_cast<matrix_base<NumericT> &>(proxy_B.lhs()), true, SOLVERTAG());
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::inplace_solve(A, false, const_cast<matrix_base<NumericT> &>(proxy_B.lhs()), true, SOLVERTAG());
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::inplace_solve(A, false, const_cast<matrix_base<NumericT> &>(proxy_B.lhs()), true, SOLVERTAG());
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
Example #4
void ilu_scale(compressed_matrix<NumericT> const & A,
               compressed_matrix<NumericT>       & L,
               compressed_matrix<NumericT>       & U)
{
  switch (viennacl::traits::handle(A).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::ilu_scale(A, L, U);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::ilu_scale(A, L, U);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::ilu_scale(A, L, U);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
      typename viennacl::enable_if< viennacl::is_any_sparse_matrix<SparseMatrixType>::value>::type
      block_inplace_solve(const matrix_expression<const SparseMatrixType, const SparseMatrixType, op_trans> & mat,
                          viennacl::backend::mem_handle const & block_index_array, vcl_size_t num_blocks,
                          viennacl::vector_base<ScalarType> const & mat_diagonal,
                          viennacl::vector_base<ScalarType> & vec,
                          SOLVERTAG tag)
      {
        assert( (mat.size1() == mat.size2()) && bool("Size check failed for triangular solve on transposed compressed matrix: size1(mat) != size2(mat)"));
        assert( (mat.size1() == vec.size())  && bool("Size check failed for transposed compressed matrix triangular solve: size1(mat) != size(x)"));

        switch (viennacl::traits::handle(mat.lhs()).get_active_handle_id())
        {
          case viennacl::MAIN_MEMORY:
            viennacl::linalg::host_based::detail::block_inplace_solve(mat, block_index_array, num_blocks, mat_diagonal, vec, tag);
            break;
  #ifdef VIENNACL_WITH_OPENCL
          case viennacl::OPENCL_MEMORY:
            viennacl::linalg::opencl::detail::block_inplace_solve(mat, block_index_array, num_blocks, mat_diagonal, vec, tag);
            break;
  #endif
  #ifdef VIENNACL_WITH_CUDA
          case viennacl::CUDA_MEMORY:
            viennacl::linalg::cuda::detail::block_inplace_solve(mat, block_index_array, num_blocks, mat_diagonal, vec, tag);
            break;
  #endif
          case viennacl::MEMORY_NOT_INITIALIZED:
            throw memory_exception("not initialised!");
          default:
            throw memory_exception("not implemented");
        }
      }
Example #6
typename viennacl::enable_if< viennacl::is_any_sparse_matrix<SparseMatrixType>::value>::type
assign_to_dense(SparseMatrixType const & A,
                viennacl::matrix_base<NumericT> & B)
{
  assert( (A.size1() == B.size1()) && bool("Size check failed for assignment to dense matrix: size1(A) != size1(B)"));
  assert( (A.size2() == B.size2()) && bool("Size check failed for assignment to dense matrix: size2(A) != size2(B)"));

  switch (viennacl::traits::handle(A).get_active_handle_id())
  {
    case viennacl::MAIN_MEMORY:
      viennacl::linalg::host_based::amg::assign_to_dense(A, B);
      break;
#ifdef VIENNACL_WITH_OPENCL
    case viennacl::OPENCL_MEMORY:
      viennacl::linalg::opencl::amg::assign_to_dense(A, B);
      break;
#endif
#ifdef VIENNACL_WITH_CUDA
    case viennacl::CUDA_MEMORY:
      viennacl::linalg::cuda::amg::assign_to_dense(A, B);
      break;
#endif
    case viennacl::MEMORY_NOT_INITIALIZED:
      throw memory_exception("not initialised!");
    default:
      throw memory_exception("not implemented");
  }
}
Example #7
void level_scheduling_substitute(vector<ScalarType> & vec,
                                 viennacl::backend::mem_handle const & row_index_array,
                                 viennacl::backend::mem_handle const & row_buffer,
                                 viennacl::backend::mem_handle const & col_buffer,
                                 viennacl::backend::mem_handle const & element_buffer,
                                 vcl_size_t num_rows
                                )
{
    assert( viennacl::traits::handle(vec).get_active_handle_id() == row_index_array.get_active_handle_id() && bool("Incompatible memory domains"));
    assert( viennacl::traits::handle(vec).get_active_handle_id() ==      row_buffer.get_active_handle_id() && bool("Incompatible memory domains"));
    assert( viennacl::traits::handle(vec).get_active_handle_id() ==      col_buffer.get_active_handle_id() && bool("Incompatible memory domains"));
    assert( viennacl::traits::handle(vec).get_active_handle_id() ==  element_buffer.get_active_handle_id() && bool("Incompatible memory domains"));

    switch (viennacl::traits::handle(vec).get_active_handle_id())
    {
    case viennacl::MAIN_MEMORY:
        viennacl::linalg::host_based::detail::level_scheduling_substitute(vec, row_index_array, row_buffer, col_buffer, element_buffer, num_rows);
        break;
#ifdef VIENNACL_WITH_OPENCL
    case viennacl::OPENCL_MEMORY:
        viennacl::linalg::opencl::detail::level_scheduling_substitute(vec, row_index_array, row_buffer, col_buffer, element_buffer, num_rows);
        break;
#endif
#ifdef VIENNACL_WITH_CUDA
    case viennacl::CUDA_MEMORY:
        viennacl::linalg::cuda::detail::level_scheduling_substitute(vec, row_index_array, row_buffer, col_buffer, element_buffer, num_rows);
        break;
#endif
    case viennacl::MEMORY_NOT_INITIALIZED:
        throw memory_exception("not initialised!");
    default:
        throw memory_exception("not implemented");
    }
}
Example #8
    void prod_impl(const matrix_expression< const matrix_base<NumericT>, const matrix_base<NumericT>, op_trans> & mat_trans,
                   const vector_base<NumericT> & vec,
                         vector_base<NumericT> & result)
    {
      assert( (viennacl::traits::size1(mat_trans.lhs()) == viennacl::traits::size(vec))    && bool("Size check failed at v1 = trans(A) * v2: size1(A) != size(v2)"));
      assert( (viennacl::traits::size2(mat_trans.lhs()) == viennacl::traits::size(result)) && bool("Size check failed at v1 = trans(A) * v2: size2(A) != size(v1)"));

      switch (viennacl::traits::handle(mat_trans.lhs()).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::prod_impl(mat_trans.lhs(), true, vec, result);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::prod_impl(mat_trans.lhs(), true, vec, result);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::prod_impl(mat_trans.lhs(), true, vec, result);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
Example #9
    void prod_impl(const viennacl::matrix_expression< const matrix_base<NumericT>, const matrix_base<NumericT>, op_trans> & A,
                   const viennacl::matrix_expression< const matrix_base<NumericT>, const matrix_base<NumericT>, op_trans> & B,
                   matrix_base<NumericT> & C,
                   ScalarType alpha,
                   ScalarType beta)
    {
      assert(viennacl::traits::size2(A.lhs()) == viennacl::traits::size1(C)       && bool("Size check failed at C = prod(trans(A), trans(B)): size2(A) != size1(C)"));
      assert(viennacl::traits::size1(A.lhs()) == viennacl::traits::size2(B.lhs()) && bool("Size check failed at C = prod(trans(A), trans(B)): size1(A) != size2(B)"));
      assert(viennacl::traits::size1(B.lhs()) == viennacl::traits::size2(C)       && bool("Size check failed at C = prod(trans(A), trans(B)): size1(B) != size2(C)"));

      switch (viennacl::traits::handle(A.lhs()).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::prod_impl(A.lhs(), true, B.lhs(), true, C, alpha, beta);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::prod_impl(A.lhs(), true, B.lhs(), true, C, alpha, beta);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::prod_impl(A.lhs(), true, B.lhs(), true, C, alpha, beta);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
void pipelined_gmres_update_result(vector_base<T> & result,
                                   vector_base<T> const & residual,
                                   vector_base<T> const & krylov_basis,
                                   vcl_size_t v_k_size,
                                   vcl_size_t v_k_internal_size,
                                   vector_base<T> const & coefficients,
                                   vcl_size_t k)
{
  switch (viennacl::traits::handle(result).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_gmres_update_result(result, residual, krylov_basis, v_k_size, v_k_internal_size, coefficients, k);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_gmres_update_result(result, residual, krylov_basis, v_k_size, v_k_internal_size, coefficients, k);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_gmres_update_result(result, residual, krylov_basis, v_k_size, v_k_internal_size, coefficients, k);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
void pipelined_cg_prod(MatrixT const & A,
                       vector_base<NumericT> const & p,
                       vector_base<NumericT> & Ap,
                       vector_base<NumericT> & inner_prod_buffer)
{
  switch (viennacl::traits::handle(p).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_cg_prod(A, p, Ap, inner_prod_buffer);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_cg_prod(A, p, Ap, inner_prod_buffer);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_cg_prod(A, p, Ap, inner_prod_buffer);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
void pipelined_gmres_gram_schmidt_stage2(vector_base<T> & device_krylov_basis,
                                         vcl_size_t v_k_size,
                                         vcl_size_t v_k_internal_size,
                                         vcl_size_t k,
                                         vector_base<T> const & vi_in_vk_buffer,
                                         vector_base<T> & R_buffer,
                                         vcl_size_t krylov_dim,
                                         vector_base<T> & inner_prod_buffer,
                                         vcl_size_t buffer_chunk_size)
{
  switch (viennacl::traits::handle(device_krylov_basis).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_gmres_gram_schmidt_stage2(device_krylov_basis, v_k_size, v_k_internal_size, k, vi_in_vk_buffer, R_buffer, krylov_dim, inner_prod_buffer, buffer_chunk_size);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_gmres_gram_schmidt_stage2(device_krylov_basis, v_k_size, v_k_internal_size, k, vi_in_vk_buffer, R_buffer, krylov_dim, inner_prod_buffer, buffer_chunk_size);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_gmres_gram_schmidt_stage2(device_krylov_basis, v_k_size, v_k_internal_size, k, vi_in_vk_buffer, R_buffer, krylov_dim, inner_prod_buffer, buffer_chunk_size);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
void pipelined_gmres_normalize_vk(vector_base<T> & v_k,
                                  vector_base<T> const & residual,
                                  vector_base<T> & R_buffer,
                                  vcl_size_t offset_in_R,
                                  vector_base<T> const & inner_prod_buffer,
                                  vector_base<T> & r_dot_vk_buffer,
                                  vcl_size_t buffer_chunk_size,
                                  vcl_size_t buffer_chunk_offset)
{
  switch (viennacl::traits::handle(v_k).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_gmres_normalize_vk(v_k, residual, R_buffer, offset_in_R, inner_prod_buffer, r_dot_vk_buffer, buffer_chunk_size, buffer_chunk_offset);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_gmres_normalize_vk(v_k, residual, R_buffer, offset_in_R, inner_prod_buffer, r_dot_vk_buffer, buffer_chunk_size, buffer_chunk_offset);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_gmres_normalize_vk(v_k, residual, R_buffer, offset_in_R, inner_prod_buffer, r_dot_vk_buffer, buffer_chunk_size, buffer_chunk_offset);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
void pipelined_bicgstab_vector_update(vector_base<NumericT> & result, NumericT alpha, vector_base<NumericT> & p, NumericT omega, vector_base<NumericT> const & s,
                                      vector_base<NumericT> & residual, vector_base<NumericT> const & As,
                                      NumericT beta, vector_base<NumericT> const & Ap,
                                      vector_base<NumericT> const & r0star,
                                      vector_base<NumericT> & inner_prod_buffer,
                                      vcl_size_t buffer_chunk_size)
{
  switch (viennacl::traits::handle(s).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_bicgstab_vector_update(result, alpha, p, omega, s, residual, As, beta, Ap, r0star, inner_prod_buffer, buffer_chunk_size);
    break;
  #ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_bicgstab_vector_update(result, alpha, p, omega, s, residual, As, beta, Ap, r0star, inner_prod_buffer, buffer_chunk_size);
    break;
  #endif
  #ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_bicgstab_vector_update(result, alpha, p, omega, s, residual, As, beta, Ap, r0star, inner_prod_buffer, buffer_chunk_size);
    break;
  #endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
Example #15
    void direct(const viennacl::matrix<SCALARTYPE, viennacl::row_major, ALIGNMENT>& in,
        viennacl::matrix<SCALARTYPE, viennacl::row_major, ALIGNMENT>& out, vcl_size_t size,
        vcl_size_t stride, vcl_size_t batch_num, SCALARTYPE sign = -1.0f,
        viennacl::linalg::host_based::detail::fft::FFT_DATA_ORDER::DATA_ORDER data_order =
            viennacl::linalg::host_based::detail::fft::FFT_DATA_ORDER::ROW_MAJOR)
    {

      switch (viennacl::traits::handle(in).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::direct(in, out, size, stride, batch_num, sign, data_order);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::direct(viennacl::traits::opencl_handle(in), viennacl::traits::opencl_handle(out),
                                           size, stride, batch_num, sign, data_order);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::direct(in, out, size, stride, batch_num, sign, data_order);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
Example #16
    void element_op(matrix_base<T> & A,
                    matrix_expression<const matrix_base<T>, const matrix_base<T>, OP> const & proxy)
    {
      assert( (viennacl::traits::size1(A) == viennacl::traits::size1(proxy)) && bool("Size check failed at A = element_op(B): size1(A) != size1(B)"));
      assert( (viennacl::traits::size2(A) == viennacl::traits::size2(proxy)) && bool("Size check failed at A = element_op(B): size2(A) != size2(B)"));

      switch (viennacl::traits::handle(A).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::element_op(A, proxy);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::element_op(A, proxy);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::element_op(A, proxy);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
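
element_op() is typically reached through ViennaCL's element-wise convenience functions rather than being called directly. A small sketch, assuming the element_prod/element_div wrappers (matrix names are illustrative):

#include <viennacl/matrix.hpp>

void elementwise_example()
{
  viennacl::matrix<double> A(64, 64), B(64, 64), C(64, 64);

  // Element-wise product and division; both build a matrix_expression that is
  // evaluated by an element_op() dispatcher like the one above.
  A = viennacl::linalg::element_prod(B, C);
  A = viennacl::linalg::element_div(B, C);
}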
Example #17
void amg_interpol(compressed_matrix<NumericT> const & A,
                  compressed_matrix<NumericT>       & P,
                  AMGContextT & amg_context,
                  amg_tag & tag)
{
  switch (viennacl::traits::handle(A).get_active_handle_id())
  {
    case viennacl::MAIN_MEMORY:
      viennacl::linalg::host_based::amg::amg_interpol(A, P, amg_context, tag);
      break;
#ifdef VIENNACL_WITH_OPENCL
    case viennacl::OPENCL_MEMORY:
      viennacl::linalg::opencl::amg::amg_interpol(A, P, amg_context, tag);
      break;
#endif
#ifdef VIENNACL_WITH_CUDA
    case viennacl::CUDA_MEMORY:
      viennacl::linalg::cuda::amg::amg_interpol(A, P, amg_context, tag);
      break;
#endif
    case viennacl::MEMORY_NOT_INITIALIZED:
      throw memory_exception("not initialised!");
    default:
      throw memory_exception("not implemented");
  }
}
Example #18
    void scaled_rank_1_update(matrix_base<NumericT> & mat1,
                              S1 const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha,
                              const vector_base<NumericT> & vec1,
                              const vector_base<NumericT> & vec2)
    {
      switch (viennacl::traits::handle(mat1).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::scaled_rank_1_update(mat1,
                                                             alpha, len_alpha, reciprocal_alpha, flip_sign_alpha,
                                                             vec1, vec2);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::scaled_rank_1_update(mat1,
                                                         alpha, len_alpha, reciprocal_alpha, flip_sign_alpha,
                                                         vec1, vec2);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::scaled_rank_1_update(mat1,
                                                       alpha, len_alpha, reciprocal_alpha, flip_sign_alpha,
                                                       vec1, vec2);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
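
scaled_rank_1_update() backs the outer-product update A += alpha * x * y^T; the length/reciprocal/sign flags describe how the scalar alpha is stored. A hedged sketch of the corresponding user-level expression (names illustrative):

#include <viennacl/matrix.hpp>
#include <viennacl/vector.hpp>
#include <viennacl/linalg/matrix_operations.hpp>

void rank1_update_example()
{
  viennacl::matrix<double> A(32, 32);
  viennacl::vector<double> x(32), y(32);
  double alpha = 0.5;

  // A += alpha * x * y^T; outer_prod() builds the expression and the compound
  // assignment is routed through scaled_rank_1_update() as shown above.
  A += alpha * viennacl::linalg::outer_prod(x, y);
}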
Example #19
void smooth_jacobi(unsigned int iterations,
                   compressed_matrix<NumericT> const & A,
                   vector<NumericT> & x,
                   vector<NumericT> & x_backup,
                   vector<NumericT> const & rhs_smooth,
                   NumericT weight)
{
  switch (viennacl::traits::handle(A).get_active_handle_id())
  {
    case viennacl::MAIN_MEMORY:
      viennacl::linalg::host_based::amg::smooth_jacobi(iterations, A, x, x_backup, rhs_smooth, weight);
      break;
#ifdef VIENNACL_WITH_OPENCL
    case viennacl::OPENCL_MEMORY:
      viennacl::linalg::opencl::amg::smooth_jacobi(iterations, A, x, x_backup, rhs_smooth, weight);
      break;
#endif
#ifdef VIENNACL_WITH_CUDA
    case viennacl::CUDA_MEMORY:
      viennacl::linalg::cuda::amg::smooth_jacobi(iterations, A, x, x_backup, rhs_smooth, weight);
      break;
#endif
    case viennacl::MEMORY_NOT_INITIALIZED:
      throw memory_exception("not initialised!");
    default:
      throw memory_exception("not implemented");
  }
}
Example #20
    void ambm(matrix_base<NumericT> & mat1,
              matrix_base<NumericT> const & mat2, ScalarType1 const & alpha, vcl_size_t len_alpha, bool reciprocal_alpha, bool flip_sign_alpha,
              matrix_base<NumericT> const & mat3, ScalarType2 const & beta,  vcl_size_t len_beta,  bool reciprocal_beta,  bool flip_sign_beta)
    {
      switch (viennacl::traits::handle(mat1).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::ambm(mat1,
                                             mat2, alpha, len_alpha, reciprocal_alpha, flip_sign_alpha,
                                             mat3,  beta, len_beta,  reciprocal_beta,  flip_sign_beta);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::ambm(mat1,
                                         mat2, alpha, len_alpha, reciprocal_alpha, flip_sign_alpha,
                                         mat3,  beta, len_beta,  reciprocal_beta,  flip_sign_beta);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::ambm(mat1,
                                       mat2, alpha, len_alpha, reciprocal_alpha, flip_sign_alpha,
                                       mat3,  beta, len_beta,  reciprocal_beta,  flip_sign_beta);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
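
ambm() implements the fused update mat1 = alpha * mat2 + beta * mat3; the len/reciprocal/flip_sign arguments encode how each scalar is passed (host or device, inverted, negated). At the user level this corresponds to a plain matrix expression, sketched below (names illustrative):

#include <viennacl/matrix.hpp>

void ambm_example()
{
  viennacl::matrix<double> A(16, 16), B(16, 16), C(16, 16);
  double alpha = 2.0, beta = -1.0;

  // C = alpha * A + beta * B is folded into a single ambm() call and
  // dispatched to the host, OpenCL, or CUDA backend as above.
  C = alpha * A + beta * B;
}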
Example #21
  /** @brief Switches the currently active handle. If no support for that backend is provided, an exception is thrown. */
  void switch_active_handle_id(memory_types new_id)
  {
    if (new_id != active_handle_)
    {
      if (active_handle_ == MEMORY_NOT_INITIALIZED)
        active_handle_ = new_id;
      else if (active_handle_ == MAIN_MEMORY)
      {
        active_handle_ = new_id;
      }
      else if (active_handle_ == OPENCL_MEMORY)
      {
#ifdef VIENNACL_WITH_OPENCL
        active_handle_ = new_id;
#else
        throw memory_exception("compiled without OpenCL suppport!");
#endif
      }
      else if (active_handle_ == CUDA_MEMORY)
      {
#ifdef VIENNACL_WITH_CUDA
        active_handle_ = new_id;
#else
        throw memory_exception("compiled without CUDA suppport!");
#endif
      }
      else
        throw memory_exception("invalid new memory region!");
    }
  }
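
The guarded switch above prevents a handle from claiming a backend the binary was not compiled with. A minimal sketch of driving it directly through the low-level handle class (the header path viennacl/backend/mem_handle.hpp is assumed):

#include <viennacl/backend/mem_handle.hpp>

void handle_switch_example()
{
  viennacl::backend::mem_handle h;                      // starts as MEMORY_NOT_INITIALIZED
  h.switch_active_handle_id(viennacl::MAIN_MEMORY);     // always available
#ifdef VIENNACL_WITH_OPENCL
  h.switch_active_handle_id(viennacl::OPENCL_MEMORY);   // only meaningful in an OpenCL build
#endif
  // switch_active_handle_id() throws memory_exception when the handle's current
  // state refers to a backend (OpenCL/CUDA) that was not compiled in.
}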
Example #22
    void reorder(viennacl::vector<SCALARTYPE, ALIGNMENT>& in, vcl_size_t size, vcl_size_t stride,
        vcl_size_t bits_datasize, vcl_size_t batch_num,
        viennacl::linalg::host_based::detail::fft::FFT_DATA_ORDER::DATA_ORDER data_order =
            viennacl::linalg::host_based::detail::fft::FFT_DATA_ORDER::ROW_MAJOR)
    {
      switch (viennacl::traits::handle(in).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::reorder(in, size, stride, bits_datasize, batch_num, data_order);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::reorder<SCALARTYPE>(viennacl::traits::opencl_handle(in), size, stride,
                                                        bits_datasize, batch_num, data_order);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::reorder(in, size, stride, bits_datasize, batch_num, data_order);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
Example #23
void ilu_chow_patel_sweep(compressed_matrix<NumericT>       & L,
                          vector<NumericT>            const & aij_L,
                          compressed_matrix<NumericT>       & U_trans,
                          vector<NumericT>            const & aij_U_trans)
{
  switch (viennacl::traits::handle(L).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::ilu_chow_patel_sweep(L, aij_L, U_trans, aij_U_trans);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::ilu_chow_patel_sweep(L, aij_L, U_trans, aij_U_trans);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::ilu_chow_patel_sweep(L, aij_L, U_trans, aij_U_trans);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
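
ilu_chow_patel_sweep() performs one fixed-point sweep of the Chow-Patel parallel incomplete-LU factorization and, together with ilu_scale() shown earlier, backs the corresponding preconditioner. A hedged usage sketch, assuming the chow_patel_tag / chow_patel_ilu_precond names from ViennaCL 1.7:

#include <viennacl/compressed_matrix.hpp>
#include <viennacl/vector.hpp>
#include <viennacl/linalg/ilu.hpp>
#include <viennacl/linalg/bicgstab.hpp>

void chow_patel_example(viennacl::compressed_matrix<double> const & A,
                        viennacl::vector<double> const & b)
{
  // Setting up the preconditioner runs ilu_scale() and a number of
  // ilu_chow_patel_sweep() iterations on the scaled factors.
  viennacl::linalg::chow_patel_tag tag;
  viennacl::linalg::chow_patel_ilu_precond< viennacl::compressed_matrix<double> > precond(A, tag);

  // Use the preconditioner inside an iterative solver.
  viennacl::vector<double> x = viennacl::linalg::solve(A, b, viennacl::linalg::bicgstab_tag(), precond);
}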
Example #24
    void bluestein(viennacl::vector<SCALARTYPE, ALIGNMENT>& in,
        viennacl::vector<SCALARTYPE, ALIGNMENT>& out, vcl_size_t /*batch_num*/)
    {

      switch (viennacl::traits::handle(in).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::bluestein(in, out, 1);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::bluestein(in, out, 1);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::bluestein(in, out, 1);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
      typename viennacl::enable_if< viennacl::is_any_sparse_matrix<SparseMatrixType>::value >::type
      row_info(SparseMatrixType const & mat,
               vector<SCALARTYPE, VEC_ALIGNMENT> & vec,
               row_info_types info_selector)
      {
        switch (viennacl::traits::handle(mat).get_active_handle_id())
        {
          case viennacl::MAIN_MEMORY:
            viennacl::linalg::host_based::detail::row_info(mat, vec, info_selector);
            break;
#ifdef VIENNACL_WITH_OPENCL
          case viennacl::OPENCL_MEMORY:
            viennacl::linalg::opencl::detail::row_info(mat, vec, info_selector);
            break;
#endif
#ifdef VIENNACL_WITH_CUDA
          case viennacl::CUDA_MEMORY:
            viennacl::linalg::cuda::detail::row_info(mat, vec, info_selector);
            break;
#endif
          case viennacl::MEMORY_NOT_INITIALIZED:
            throw memory_exception("not initialised!");
          default:
            throw memory_exception("not implemented");
        }
      }
Example #26
    void transpose(viennacl::matrix<SCALARTYPE, viennacl::row_major, ALIGNMENT> const & input,
        viennacl::matrix<SCALARTYPE, viennacl::row_major, ALIGNMENT> & output)
    {
      switch (viennacl::traits::handle(input).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::transpose(input, output);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::transpose(input, output);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::transpose(input, output);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
Example #27
    void inplace_solve(const matrix_base<NumericT> & mat,
                             vector_base<NumericT> & vec,
                       SOLVERTAG)
    {
      assert( (mat.size1() == vec.size()) && bool("Size check failed in inplace_solve(): size1(A) != size(b)"));
      assert( (mat.size2() == vec.size()) && bool("Size check failed in inplace_solve(): size2(A) != size(b)"));

      switch (viennacl::traits::handle(mat).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::inplace_solve(mat, false, vec, SOLVERTAG());
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::inplace_solve(mat, false, vec, SOLVERTAG());
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::inplace_solve(mat, false, vec, SOLVERTAG());
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
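
inplace_solve() overwrites the right-hand side with the solution of a triangular system; the SOLVERTAG selects which triangle (and unit-diagonal variant) is used. A minimal sketch (matrix assumed upper triangular, names illustrative):

#include <viennacl/matrix.hpp>
#include <viennacl/vector.hpp>
#include <viennacl/linalg/direct_solve.hpp>

void triangular_solve_example()
{
  viennacl::matrix<double> A(8, 8);   // assumed upper triangular
  viennacl::vector<double> b(8);

  // Solves A * x = b in place: b is overwritten with x.
  // upper_tag / lower_tag / unit_upper_tag / unit_lower_tag select the variant.
  viennacl::linalg::inplace_solve(A, b, viennacl::linalg::upper_tag());
}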
Example #28
    void reverse(viennacl::vector_base<SCALARTYPE>& in)
    {
      switch (viennacl::traits::handle(in).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::reverse(in);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::reverse(in);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::reverse(in);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
Example #29
    void nmf(viennacl::matrix_base<ScalarType> const & V, viennacl::matrix_base<ScalarType> & W,
        viennacl::matrix_base<ScalarType> & H, viennacl::linalg::nmf_config const & conf)
    {
      assert(V.size1() == W.size1() && V.size2() == H.size2() && bool("Dimensions of W and H don't allow for V = W * H"));
      assert(W.size2() == H.size1() && bool("Dimensions of W and H don't match, prod(W, H) impossible"));

      switch (viennacl::traits::handle(V).get_active_handle_id())
      {
        case viennacl::MAIN_MEMORY:
          viennacl::linalg::host_based::nmf(V, W, H, conf);
          break;
#ifdef VIENNACL_WITH_OPENCL
        case viennacl::OPENCL_MEMORY:
          viennacl::linalg::opencl::nmf(V, W, H, conf);
          break;
#endif
#ifdef VIENNACL_WITH_CUDA
        case viennacl::CUDA_MEMORY:
          viennacl::linalg::cuda::nmf(V, W, H, conf);
          break;
#endif
        case viennacl::MEMORY_NOT_INITIALIZED:
          throw memory_exception("not initialised!");
        default:
          throw memory_exception("not implemented");
      }
    }
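
nmf() computes a non-negative matrix factorization V ≈ W * H, with nmf_config controlling tolerance and iteration limits. A hedged usage sketch (sizes illustrative):

#include <viennacl/matrix.hpp>
#include <viennacl/linalg/nmf.hpp>

void nmf_example()
{
  vcl_size_t m = 100, n = 80, k = 10;
  viennacl::matrix<double> V(m, n), W(m, k), H(k, n);

  // W and H must be pre-sized (and typically seeded with positive values)
  // so that the dimension asserts in nmf() above are satisfied.
  viennacl::linalg::nmf_config conf;   // default tolerance / iteration settings
  viennacl::linalg::nmf(V, W, H, conf);
}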
void pipelined_cg_vector_update(vector_base<NumericT> & result,
                                NumericT alpha,
                                vector_base<NumericT> & p,
                                vector_base<NumericT> & r,
                                vector_base<NumericT> const & Ap,
                                NumericT beta,
                                vector_base<NumericT> & inner_prod_buffer)
{
  switch (viennacl::traits::handle(result).get_active_handle_id())
  {
  case viennacl::MAIN_MEMORY:
    viennacl::linalg::host_based::pipelined_cg_vector_update(result, alpha, p, r, Ap, beta, inner_prod_buffer);
    break;
#ifdef VIENNACL_WITH_OPENCL
  case viennacl::OPENCL_MEMORY:
    viennacl::linalg::opencl::pipelined_cg_vector_update(result, alpha, p, r, Ap, beta, inner_prod_buffer);
    break;
#endif
#ifdef VIENNACL_WITH_HSA
  case viennacl::HSA_MEMORY:
    viennacl::linalg::hsa::pipelined_cg_vector_update(result, alpha, p, r, Ap, beta, inner_prod_buffer);
    break;
#endif
#ifdef VIENNACL_WITH_CUDA
  case viennacl::CUDA_MEMORY:
    viennacl::linalg::cuda::pipelined_cg_vector_update(result, alpha, p, r, Ap, beta, inner_prod_buffer);
    break;
#endif
  case viennacl::MEMORY_NOT_INITIALIZED:
    throw memory_exception("not initialised!");
  default:
    throw memory_exception("not implemented");
  }
}
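
The pipelined_* dispatchers in these examples are internal building blocks of ViennaCL's fused iterative solvers; users reach them through viennacl::linalg::solve() with the corresponding solver tag. A minimal sketch with cg_tag as a representative tag (tolerance and iteration count are illustrative):

#include <viennacl/compressed_matrix.hpp>
#include <viennacl/vector.hpp>
#include <viennacl/linalg/cg.hpp>

viennacl::vector<double> solve_example(viennacl::compressed_matrix<double> const & A,
                                       viennacl::vector<double> const & b)
{
  // Runs the conjugate gradient solver; the per-iteration updates are routed
  // through functions such as pipelined_cg_prod() and pipelined_cg_vector_update()
  // shown above. bicgstab_tag / gmres_tag work analogously.
  viennacl::linalg::cg_tag tag(1e-8, 500);   // relative tolerance, max. iterations
  return viennacl::linalg::solve(A, b, tag);
}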