Example #1
template <typename T>
matrix_map<T, ptrdiff_t>
map(const boost::numeric::ublas::compressed_matrix<T, boost::numeric::ublas::row_major> &A) {
    return matrix_map<T, ptrdiff_t>(
            A.size1(),
            A.size2(),
            // amgcl expects a signed type for the row and column indices.
            // The cast below works for reasonably sized matrices:
            reinterpret_cast<const ptrdiff_t*>(A.index1_data().begin()),
            reinterpret_cast<const ptrdiff_t*>(A.index2_data().begin()),
            A.value_data().begin()
            );
}
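The reinterpret_cast above is only safe because size_t and ptrdiff_t have the same width on common platforms and the stored indices stay below PTRDIFF_MAX. A compile-time guard one might place next to this adapter (a sketch, not part of the original snippet):

// Sanity check for the pointer cast in map() above: the signed and unsigned
// index types must have the same width for the reinterpretation to be valid.
static_assert(sizeof(size_t) == sizeof(ptrdiff_t),
        "size_t and ptrdiff_t must have the same width");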
Example #2
template <typename T>
std::tuple<
    size_t,
    boost::iterator_range<const size_t*>,
    boost::iterator_range<const size_t*>,
    boost::iterator_range<const T*>
    >
map(const boost::numeric::ublas::compressed_matrix<T, boost::numeric::ublas::row_major> &A) {
    // Expose the matrix as (rows, row_pointers, columns, values) without copying.
    return std::make_tuple(
            A.size1(),
            boost::make_iterator_range(
                A.index1_data().begin(), A.index1_data().end()
                ),
            boost::make_iterator_range(
                A.index2_data().begin(), A.index2_data().end()
                ),
            boost::make_iterator_range(
                A.value_data().begin(), A.value_data().end()
                )
            );
}
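A minimal usage sketch for the tuple adapter above (assumptions: C++11 or later, T deduced as double, and the map() overload from Example #2 visible in this translation unit):

#include <cstddef>
#include <tuple>
#include <iostream>
#include <boost/numeric/ublas/matrix_sparse.hpp>

int main() {
    namespace ublas = boost::numeric::ublas;

    // Assemble a small tridiagonal matrix; compressed_matrix stores it in CRS form.
    const size_t n = 3;
    ublas::compressed_matrix<double, ublas::row_major> A(n, n);
    for (size_t i = 0; i < n; ++i) {
        if (i > 0)     A(i, i - 1) = -1;
        A(i, i) = 2;
        if (i + 1 < n) A(i, i + 1) = -1;
    }

    // The returned tuple is a zero-copy view into the matrix arrays.
    auto view = map(A);
    std::cout << "rows: " << std::get<0>(view) << "\n";
}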
Example #3
// Sparse matrix-vector product y = A * x over the raw CRS arrays of a
// Boost.uBLAS compressed_matrix ("real" is a scalar typedef, e.g. double,
// defined elsewhere).
void axpy(
        const boost::numeric::ublas::compressed_matrix<real, boost::numeric::ublas::row_major> &A,
        const boost::numeric::ublas::vector<real> &x,
        boost::numeric::ublas::vector<real> &y
        )
{
    const ptrdiff_t n = A.size1();

    // Raw pointers into the CRS arrays of the uBLAS matrix: row pointers,
    // column indices, and nonzero values.
    const size_t *Arow = A.index1_data().begin();
    const size_t *Acol = A.index2_data().begin();
    const real   *Aval = A.value_data().begin();

    // Rows are independent, so the outer loop parallelizes; dynamic scheduling
    // balances the work when row lengths vary.
#pragma omp parallel for schedule(dynamic, 1024)
    for(ptrdiff_t i = 0; i < n; ++i) {
        real buf = 0;
        for(size_t j = Arow[i], e = Arow[i + 1]; j < e; ++j)
            buf += Aval[j] * x[Acol[j]];
        y[i] = buf;
    }
}
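A hypothetical driver for the axpy() routine above (assumptions, not part of the original: real is a typedef for double, and the code is built with OpenMP enabled, e.g. -fopenmp):

#include <cstddef>
#include <iostream>
#include <boost/numeric/ublas/matrix_sparse.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/io.hpp>

typedef double real; // assumed to match the typedef used by axpy()

int main() {
    namespace ublas = boost::numeric::ublas;
    const size_t n = 4;

    // Small 1D Poisson-style matrix: 2 on the diagonal, -1 off the diagonal.
    ublas::compressed_matrix<real, ublas::row_major> A(n, n);
    for (size_t i = 0; i < n; ++i) {
        if (i > 0)     A(i, i - 1) = -1;
        A(i, i) = 2;
        if (i + 1 < n) A(i, i + 1) = -1;
    }

    ublas::vector<real> x(n), y(n);
    for (size_t i = 0; i < n; ++i) x[i] = 1;

    axpy(A, x, y); // computes y = A * x
    std::cout << y << std::endl;
}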