    void prod_impl(const viennacl::compressed_matrix<TYPE, ALIGNMENT> & mat,
                   const viennacl::vector<TYPE, VECTOR_ALIGNMENT> & vec,
                         viennacl::vector<TYPE, VECTOR_ALIGNMENT> & result,
                   size_t NUM_THREADS = 0)   // NUM_THREADS is not used by this OpenCL code path
    {
      assert(mat.size1() == result.size());
      assert(mat.size2() == vec.size());

      // Fetch the "vec_mul" OpenCL kernel registered for compressed_matrix (CSR format)
      viennacl::ocl::kernel & k = viennacl::ocl::get_kernel(viennacl::linalg::kernels::compressed_matrix<TYPE, ALIGNMENT>::program_name(), "vec_mul");

      // Kernel arguments: row offsets (handle1), column indices (handle2), nonzero values (handle), x, y, number of rows
      viennacl::ocl::enqueue(k(mat.handle1(), mat.handle2(), mat.handle(), vec, result, static_cast<cl_uint>(mat.size1())));
    }
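
For context, here is a minimal, hedged sketch of how a sparse matrix-vector product reaches a backend routine like the prod_impl above; the sizes and host data are made up for illustration, and the public entry point is viennacl::linalg::prod:

#include <map>
#include <vector>

#include "viennacl/compressed_matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/prod.hpp"

int main()
{
  std::size_t n = 4;
  std::vector< std::map<unsigned int, double> > cpu_A(n);   // host matrix, one map per row
  std::vector<double> cpu_x(n, 1.0);
  for (unsigned int i = 0; i < n; ++i)
    cpu_A[i][i] = 2.0;                                       // simple diagonal test matrix

  viennacl::compressed_matrix<double> A(n, n);
  viennacl::vector<double> x(n), y(n);
  viennacl::copy(cpu_A, A);                                  // host -> device, builds the CSR buffers
  viennacl::copy(cpu_x.begin(), cpu_x.end(), x.begin());

  y = viennacl::linalg::prod(A, x);                          // sparse matrix-vector product
}
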
      void copy(viennashe::math::sparse_matrix<NumericT> const & assembled_matrix,
                viennacl::compressed_matrix<NumericT>          &       vcl_matrix)
      {
        // Convert the assembled system matrix to CSR: row offsets, column indices, nonzero values
        std::size_t nonzeros = assembled_matrix.nnz();
        viennacl::backend::typesafe_host_array<unsigned int> row_buffer(vcl_matrix.handle1(), assembled_matrix.size1() + 1);
        viennacl::backend::typesafe_host_array<unsigned int> col_buffer(vcl_matrix.handle2(), nonzeros);
        std::vector<NumericT> elements(nonzeros);

        std::size_t data_index = 0;

        for (std::size_t i  = 0;
                         i != assembled_matrix.size1();
                       ++i)
        {
          typedef typename viennashe::math::sparse_matrix<NumericT>::const_iterator2   AlongRowIterator;
          typedef typename viennashe::math::sparse_matrix<NumericT>::row_type          RowType;

          row_buffer.set(i, data_index);               // CSR row offset: first nonzero of row i
          RowType const & row_i = assembled_matrix.row(i);

          for (AlongRowIterator col_it  = row_i.begin();
                                col_it != row_i.end();
                              ++col_it)
          {
            col_buffer.set(data_index, col_it->first); // column index
            elements[data_index] = col_it->second;     // nonzero value
            ++data_index;
          }
        }
        row_buffer.set(assembled_matrix.size1(), data_index);   // terminating offset equals nnz

        vcl_matrix.set(row_buffer.get(),
                       col_buffer.get(),
                       &elements[0],
                       assembled_matrix.size1(),
                       assembled_matrix.size2(),
                       nonzeros);
      }
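
The conversion above writes the standard CSR triplet (row offsets, column indices, values) and hands it to compressed_matrix::set(). The following is an illustrative sketch of the same pattern with hard-coded data for a 2x2 matrix; the helper name build_csr_example is invented for this example:

#include <vector>

#include "viennacl/compressed_matrix.hpp"

// Illustrative only: build the 2x2 matrix [[4, 1], [0, 3]] directly in CSR form.
void build_csr_example(viennacl::compressed_matrix<double> & vcl_matrix)
{
  std::size_t nonzeros = 3;
  viennacl::backend::typesafe_host_array<unsigned int> row_buffer(vcl_matrix.handle1(), 2 + 1);
  viennacl::backend::typesafe_host_array<unsigned int> col_buffer(vcl_matrix.handle2(), nonzeros);
  std::vector<double> elements(nonzeros);

  row_buffer.set(0, 0);                      // row 0 starts at entry 0
  col_buffer.set(0, 0);  elements[0] = 4.0;  // A(0,0) = 4
  col_buffer.set(1, 1);  elements[1] = 1.0;  // A(0,1) = 1
  row_buffer.set(1, 2);                      // row 1 starts at entry 2
  col_buffer.set(2, 1);  elements[2] = 3.0;  // A(1,1) = 3
  row_buffer.set(2, 3);                      // size1()+1 offsets; the last one equals nnz

  vcl_matrix.set(row_buffer.get(), col_buffer.get(), &elements[0], 2, 2, nonzeros);
}
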
Example No. 3
NumericT setup_w(viennacl::compressed_matrix<NumericT> const & A,
                 SizeT row,
                 SparseVectorT & w)
{
  assert( (A.handle1().get_active_handle_id() == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ILUT") );
  assert( (A.handle2().get_active_handle_id() == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ILUT") );
  assert( (A.handle().get_active_handle_id()  == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ILUT") );

  NumericT     const * elements   = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericT>(A.handle());
  unsigned int const * row_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle1());
  unsigned int const * col_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle2());

  // Scatter row 'row' of A into the sparse work vector w, accumulating its squared 2-norm on the way
  SizeT row_i_begin = static_cast<SizeT>(row_buffer[row]);
  SizeT row_i_end   = static_cast<SizeT>(row_buffer[row+1]);
  NumericT row_norm = 0;
  for (SizeT buf_index_i = row_i_begin; buf_index_i < row_i_end; ++buf_index_i) //Note: We do not assume that the column indices within a row are sorted
  {
    NumericT entry = elements[buf_index_i];
    w[col_buffer[buf_index_i]] = entry;
    row_norm += entry * entry;
  }
  return std::sqrt(row_norm);
}
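
setup_w scatters row 'row' of A into the sparse work vector w and returns that row's 2-norm; in ILUT the norm feeds the relative drop tolerance. A rough, self-contained sketch of such a drop rule (the std::map representation and the names drop_small_entries and tau are assumptions, not ViennaCL's actual internals):

#include <cmath>
#include <map>

typedef std::map<unsigned int, double> SparseRow;   // column index -> value

// Drop entries that are small relative to the row's 2-norm (as returned by setup_w).
void drop_small_entries(SparseRow & w, double row_norm, double tau)
{
  for (SparseRow::iterator it = w.begin(); it != w.end(); )
  {
    if (std::fabs(it->second) < tau * row_norm)   // negligible compared to the row
      w.erase(it++);                              // discard the fill-in candidate
    else
      ++it;
  }
}
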
Example No. 4
 static size_t get(const ::viennacl::compressed_matrix<V> &A) {
     return A.nnz();
 }
Example No. 5
 static size_t size(const viennacl::compressed_matrix<ScalarType, Amat> & lhs,
                    const viennacl::vector<ScalarType, A> & rhs) { return lhs.size1(); }
Example No. 6
    void precondition(viennacl::compressed_matrix<ScalarType> & A, ichol0_tag const & /* tag */)
    {
      assert( (viennacl::memory_domain(A) == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ICHOL0") );
      
      ScalarType         * elements   = viennacl::linalg::host_based::detail::extract_raw_pointer<ScalarType>(A.handle());
      unsigned int const * row_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle1());
      unsigned int const * col_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle2());
      
      //std::cout << A.size1() << std::endl;
      for (std::size_t i=0; i<A.size1(); ++i)
      {
        unsigned int row_i_begin = row_buffer[i];
        unsigned int row_i_end   = row_buffer[i+1];
        
        // get a_ii:
        ScalarType a_ii = 0;
        for (unsigned int buf_index_aii = row_i_begin; buf_index_aii < row_i_end; ++buf_index_aii)
        {
          if (col_buffer[buf_index_aii] == i)
          {
            a_ii = std::sqrt(elements[buf_index_aii]);
            elements[buf_index_aii] = a_ii;
            break;
          }
        }
          
        // Now scale column/row i, i.e. A(k, i) /= A(i, i)
        for (unsigned int buf_index_aii = row_i_begin; buf_index_aii < row_i_end; ++buf_index_aii)
        {
          if (col_buffer[buf_index_aii] > i)
            elements[buf_index_aii] /= a_ii;
        }

        // Now compute A(k, j) -= A(k, i) * A(j, i) for all nonzero k, j in column i:
        for (unsigned int buf_index_j = row_i_begin; buf_index_j < row_i_end; ++buf_index_j)
        {
          unsigned int j = col_buffer[buf_index_j];
          if (j <= i)
            continue;
          
          ScalarType a_ji = elements[buf_index_j];
          
          for (unsigned int buf_index_k = row_i_begin; buf_index_k < row_i_end; ++buf_index_k)
          {
            unsigned int k = col_buffer[buf_index_k];
            if (k < j)
              continue;
            
            ScalarType a_ki = elements[buf_index_k];
            
            //Now check whether A(k, j) is in nonzero pattern:
            unsigned int row_j_begin = row_buffer[j];
            unsigned int row_j_end   = row_buffer[j+1];
            for (unsigned int buf_index_kj = row_j_begin; buf_index_kj < row_j_end; ++buf_index_kj)
            {
              if (col_buffer[buf_index_kj] == k)
              {
                elements[buf_index_kj] -= a_ki * a_ji;
                break;
              }
            }
          }
        }
        
      }
      
    }
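
The in-place factorization above is the host-side core behind ViennaCL's incomplete Cholesky preconditioner. A hedged usage sketch, assuming A is symmetric positive definite and rhs is already set up:

#include "viennacl/compressed_matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/ichol.hpp"
#include "viennacl/linalg/cg.hpp"

// Illustrative only: precondition a CG solve with ICHOL0.
void solve_with_ichol0(viennacl::compressed_matrix<double> const & A,
                       viennacl::vector<double> const & rhs,
                       viennacl::vector<double> & x)
{
  viennacl::linalg::ichol0_tag ichol_config;
  viennacl::linalg::ichol0_precond< viennacl::compressed_matrix<double> > precond(A, ichol_config);

  x = viennacl::linalg::solve(A, rhs, viennacl::linalg::cg_tag(), precond);
}
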
Example No. 7
    void precondition(viennacl::compressed_matrix<ScalarType> & A, ilu0_tag const & /* tag */)
    {
      assert( (A.handle1().get_active_handle_id() == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ILU0") );
      assert( (A.handle2().get_active_handle_id() == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ILU0") );
      assert( (A.handle().get_active_handle_id() == viennacl::MAIN_MEMORY) && bool("System matrix must reside in main memory for ILU0") );

      ScalarType         * elements   = viennacl::linalg::host_based::detail::extract_raw_pointer<ScalarType>(A.handle());
      unsigned int const * row_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle1());
      unsigned int const * col_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle2());

      // Note: Line numbers in the following refer to the ILU(0) algorithm in Saad's book (Iterative Methods for Sparse Linear Systems)

      for (std::size_t i=1; i<A.size1(); ++i)  // Line 1
      {
        unsigned int row_i_begin = row_buffer[i];
        unsigned int row_i_end   = row_buffer[i+1];
        for (unsigned int buf_index_k = row_i_begin; buf_index_k < row_i_end; ++buf_index_k) //Note: We do not assume that the column indices within a row are sorted
        {
          unsigned int k = col_buffer[buf_index_k];
          if (k >= i)
            continue; //Note: We do not assume that the column indices within a row are sorted

          unsigned int row_k_begin = row_buffer[k];
          unsigned int row_k_end   = row_buffer[k+1];

          // get a_kk:
          ScalarType a_kk = 0;
          for (unsigned int buf_index_akk = row_k_begin; buf_index_akk < row_k_end; ++buf_index_akk)
          {
            if (col_buffer[buf_index_akk] == k)
            {
              a_kk = elements[buf_index_akk];
              break;
            }
          }

          ScalarType & a_ik = elements[buf_index_k];
          a_ik /= a_kk;                                 //Line 3

          for (unsigned int buf_index_j = row_i_begin; buf_index_j < row_i_end; ++buf_index_j) //Note: We do not assume that the column indices within a row are sorted
          {
            unsigned int j = col_buffer[buf_index_j];
            if (j <= k)
              continue;

            // determine a_kj:
            ScalarType a_kj = 0;
            for (unsigned int buf_index_akj = row_k_begin; buf_index_akj < row_k_end; ++buf_index_akj)
            {
              if (col_buffer[buf_index_akj] == j)
              {
                a_kj = elements[buf_index_akj];
                break;
              }
            }

            //a_ij -= a_ik * a_kj
            elements[buf_index_j] -= a_ik * a_kj;  //Line 5
          }
        }
      }

    }
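
Likewise, the ILU(0) factorization above backs viennacl::linalg::ilu0_precond. A hedged usage sketch with a BiCGStab solve, assuming A and rhs are already assembled:

#include "viennacl/compressed_matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/ilu.hpp"
#include "viennacl/linalg/bicgstab.hpp"

// Illustrative only: precondition a BiCGStab solve with ILU0.
void solve_with_ilu0(viennacl::compressed_matrix<double> const & A,
                     viennacl::vector<double> const & rhs,
                     viennacl::vector<double> & x)
{
  viennacl::linalg::ilu0_tag ilu_config;
  viennacl::linalg::ilu0_precond< viennacl::compressed_matrix<double> > precond(A, ilu_config);

  x = viennacl::linalg::solve(A, rhs, viennacl::linalg::bicgstab_tag(), precond);
}
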