Example #1
void SymmetricEuclideanDistanceMatrix(El::UpperOrLower uplo, direction_t dir,
    T alpha, const El::Matrix<T> &A, T beta, El::Matrix<T> &C) {

    T *c = C.Buffer();
    int ldC = C.LDim();

    if (dir == base::COLUMNS) {
        El::Herk(uplo, El::ADJOINT, -2.0 * alpha, A, beta, C);
        //El::Gemm(El::ADJOINT, El::NORMAL, T(-2.0) * alpha, A, A, beta, C);

        El::Matrix<T> N;
        ColumnNrm2(A, N);
        T *nn = N.Buffer();

        El::Int n = base::Width(A);

        for(El::Int j = 0; j < n; j++)
            for(El::Int i = ((uplo == El::UPPER) ? 0 : j);
                i < ((uplo == El::UPPER) ? (j + 1) : n); i++)
                c[j * ldC + i] += alpha * (nn[i] * nn[i] + nn[j] * nn[j]);

    }

    // TODO the rest of the cases.
}
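With alpha = 1 and beta = 0 the routine fills the requested triangle of C with squared Euclidean distances, using the identity ||a_i - a_j||^2 = ||a_i||^2 + ||a_j||^2 - 2 a_i^H a_j: the Herk call supplies the cross terms and the norm loop adds the corrections. Below is a minimal usage sketch with a hypothetical driver name, assuming Elemental is initialized, the template above is instantiated for double, and base::COLUMNS is visible as in the surrounding namespace; the Gaussian fill is only illustrative.

void squared_distance_demo() {
    El::Int d = 3, n = 100;                  // 100 points of dimension 3, one per column
    El::Matrix<double> A, C;
    El::Gaussian(A, d, n);                   // illustrative random data
    El::Zeros(C, n, n);

    // Upper triangle of C receives the pairwise squared Euclidean distances.
    SymmetricEuclideanDistanceMatrix(El::UPPER, base::COLUMNS, 1.0, A, 0.0, C);
}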
Example #2
void SymmetricL1DistanceMatrix(El::UpperOrLower uplo, direction_t dir, T alpha,
    const El::Matrix<T> &A, T beta, El::Matrix<T> &C) {

    const T *a = A.LockedBuffer();
    El::Int ldA = A.LDim();

    T *c = C.Buffer();
    El::Int ldC = C.LDim();

    El::Int n = A.Width();
    El::Int d = A.Height();

    /* Not the most efficient way... but mimicking BLAS is too much work! */
    if (dir == base::COLUMNS) {
        for (El::Int j = 0; j < n; j++)
            for(El::Int i = ((uplo == El::UPPER) ? 0 : j);
                i < ((uplo == El::UPPER) ? (j + 1) : n); i++) {
                T v = 0.0;
                for (El::Int k = 0; k < d; k++)
                    v += std::abs(a[j * ldA + k] - a[i * ldA + k]);
                c[j * ldC + i] = beta * c[j * ldC + i] + alpha * v;
            }

    }

    // TODO the rest of the cases.
}
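Same calling convention as Example #1, but with the l1 (Manhattan) metric and no squaring involved. A sketch under the same assumptions, again with a hypothetical driver name:

void l1_symmetric_demo() {
    El::Matrix<double> A, C;
    El::Gaussian(A, 3, 100);                 // 100 columns, each a 3-dimensional point
    El::Zeros(C, 100, 100);

    // Lower triangle of C receives the pairwise l1 distances of the columns of A.
    SymmetricL1DistanceMatrix(El::LOWER, base::COLUMNS, 1.0, A, 0.0, C);
}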
Example #3
void L1DistanceMatrix(direction_t dirA, direction_t dirB, T alpha,
    const El::Matrix<T> &A, const El::Matrix<T> &B,
    T beta, El::Matrix<T> &C) {

    // TODO verify sizes

    const T *a = A.LockedBuffer();
    El::Int ldA = A.LDim();

    const T *b = B.LockedBuffer();
    El::Int ldB = B.LDim();

    T *c = C.Buffer();
    El::Int ldC = C.LDim();

    El::Int d = A.Height();

    /* Not the most efficient way... but mimicking BLAS is too much work! */
    if (dirA == base::COLUMNS && dirB == base::COLUMNS) {
        for (El::Int j = 0; j < B.Width(); j++)
            for (El::Int i = 0; i < A.Width(); i++) {
                T v = 0.0;
                for (El::Int k = 0; k < d; k++)
                    v += std::abs(b[j * ldB + k] - a[i * ldA + k]);
                c[j * ldC + i] = beta * c[j * ldC + i] + alpha * v;
            }

    }

    // TODO the rest of the cases.
}
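For the rectangular case C must be Width(A) x Width(B): entry (i, j) gets beta times its old value plus alpha times the l1 distance between column i of A and column j of B. A sketch under the same assumptions, with a hypothetical driver name:

void l1_rectangular_demo() {
    El::Matrix<double> A, B, C;
    El::Gaussian(A, 3, 50);                  // 50 points
    El::Gaussian(B, 3, 80);                  // 80 points
    El::Zeros(C, 50, 80);

    // C(i, j) = ||A(:, i) - B(:, j)||_1
    L1DistanceMatrix(base::COLUMNS, base::COLUMNS, 1.0, A, B, 0.0, C);
}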
Example #4
void EuclideanDistanceMatrix(direction_t dirA, direction_t dirB, T alpha,
    const El::Matrix<T> &A, const El::Matrix<T> &B,
    T beta, El::Matrix<T> &C) {

    T *c = C.Buffer();
    El::Int ldC = C.LDim();

    if (dirA == base::COLUMNS && dirB == base::COLUMNS) {
        base::Gemm(El::ADJOINT, El::NORMAL, T(-2.0) * alpha, A, B, beta, C);

        El::Matrix<T> NA, NB;
        ColumnNrm2(A, NA);
        ColumnNrm2(B, NB);
        T *na = NA.Buffer(), *nb = NB.Buffer();

        El::Int m = base::Width(A);
        El::Int n = base::Width(B);

        for(El::Int j = 0; j < n; j++)
            for(El::Int i = 0; i < m; i++)
                c[j * ldC + i] += alpha * (na[i] * na[i] + nb[j] * nb[j]);

    }

    // TODO the rest of the cases.
}
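As in Example #1, the result is assembled from -2 * A^H * B plus the column norms, so with alpha = 1 and beta = 0 the entries are squared distances; take an elementwise square root afterwards if plain Euclidean distances are needed. A sketch under the same assumptions:

void euclidean_rectangular_demo() {
    El::Matrix<double> A, B, C;
    El::Gaussian(A, 3, 50);
    El::Gaussian(B, 3, 80);
    El::Zeros(C, 50, 80);

    EuclideanDistanceMatrix(base::COLUMNS, base::COLUMNS, 1.0, A, B, 0.0, C);
    // C(i, j) now holds the squared distance ||A(:, i) - B(:, j)||^2.
}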
Example #5
    void apply_inverse_impl(El::Matrix<ValueType>& A,
                            skylark::sketch::columnwise_tag) const {
        ValueType* AA = A.Buffer();
        int j;

#       ifdef SKYLARK_HAVE_OPENMP
#       pragma omp parallel for private(j)
#       endif
        for (j = 0; j < A.Width(); j++)
            ExecuteFun(_plan_inverse, AA + j * A.LDim(), AA + j * A.LDim());
    }
Example #6
    void apply_impl(El::Matrix<value_type>& A,
                    skylark::sketch::columnwise_tag) const {
        value_type* AA = A.Buffer();
        int j;

#       ifdef SKYLARK_HAVE_OPENMP
#       pragma omp parallel for private(j)
#       endif
        for (j = 0; j < A.Width(); j++)
            wht_apply(_tree, 1, AA + j * A.LDim());
    }
Example #7
    void apply_inverse_impl(El::Matrix<value_type>& A,
        skylark::sketch::rowwise_tag) const {
        value_type* AA = A.Buffer();
        int j;

#       ifdef SKYLARK_HAVE_OPENMP
#       pragma omp parallel for private(j)
#       endif
        // Apply the transform to each of the A.Height() rows: row j starts at
        // AA + j and consecutive elements of a row are A.LDim() apart in
        // column-major storage.
        for (j = 0; j < A.Height(); j++)
            wht_apply(_tree, A.LDim(), AA + j);
    }
Example #8
    void apply_impl(El::Matrix<ValueType>& A,
                    skylark::sketch::rowwise_tag) const {
        // Using transpositions instead of moving to the advanced interface
        // of FFTW
        El::Matrix<ValueType> matrix;
        El::Transpose(A, matrix);
        ValueType* matrix_buffer = matrix.Buffer();
        int j;

#       ifdef SKYLARK_HAVE_OPENMP
#       pragma omp parallel for private(j)
#       endif
        for (j = 0; j < matrix.Width(); j++)
            ExecuteFun(_plan, matrix_buffer + j * matrix.LDim(),
                matrix_buffer + j * matrix.LDim());
        El::Transpose(matrix, A);
    }
Example #9
inline void Gemv(El::Orientation oA,
    T alpha, const sparse_matrix_t<T>& A, const El::Matrix<T>& x,
    T beta, El::Matrix<T>& y) {
    // TODO verify sizes etc.

    const int* indptr = A.indptr();
    const int* indices = A.indices();
    const T *values = A.locked_values();
    T *yd = y.Buffer();
    const T *xd = x.LockedBuffer();

    int n = A.width();

    if (oA == El::NORMAL) {
        El::Scale(beta, y);

#       if SKYLARK_HAVE_OPENMP
#       pragma omp parallel for
#       endif
        for(int col = 0; col < n; col++) {
            T xv = alpha * xd[col];
            for (int j = indptr[col]; j < indptr[col + 1]; j++) {
                int row = indices[j];
                T val = values[j];
                yd[row] += val * xv;
            }
        }

    } else {

#       if SKYLARK_HAVE_OPENMP
#       pragma omp parallel for
#       endif
        for(int col = 0; col < n; col++) {
            T yv = beta * yd[col];
            for (int j = indptr[col]; j < indptr[col + 1]; j++) {
                int row = indices[j];
                T val = values[j];
                yv += alpha * val * xd[row];
            }
            yd[col] = yv;
        }

    }
}
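The loops assume a compressed-sparse-column layout: indptr holds width + 1 offsets, and the nonzeros of column col occupy positions indptr[col] .. indptr[col + 1] - 1 of indices and values. Below is a small standalone sketch of the same y = A * x traversal on hand-built CSC arrays, in plain C++ and independent of sparse_matrix_t; the matrix shown is made up for illustration.

#include <cstdio>

int main() {
    // 3 x 3 matrix [[1, 0, 2], [0, 3, 0], [4, 0, 5]] in CSC form.
    const int    indptr[]  = {0, 2, 3, 5};          // column start offsets
    const int    indices[] = {0, 2, 1, 0, 2};       // row index of each nonzero
    const double values[]  = {1.0, 4.0, 3.0, 2.0, 5.0};

    const double x[] = {1.0, 1.0, 1.0};
    double y[] = {0.0, 0.0, 0.0};

    // y += A * x, traversed column by column as in the El::NORMAL branch above.
    for (int col = 0; col < 3; col++)
        for (int j = indptr[col]; j < indptr[col + 1]; j++)
            y[indices[j]] += values[j] * x[col];

    std::printf("%g %g %g\n", y[0], y[1], y[2]);    // prints: 3 3 9
    return 0;
}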
Example #10
int max(const El::Matrix<double>& Y) {
    // Returns the largest entry of the first column of Y, truncated to int.
    int k = (int) *std::max_element(Y.LockedBuffer(), Y.LockedBuffer() + Y.Height());
    return k;
}
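A minimal usage sketch with a hypothetical driver name, assuming Y is a single-column matrix of scores (only the first Height() entries are examined by the buffer arithmetic above):

void max_demo() {
    El::Matrix<double> Y(4, 1);
    Y.Set(0, 0, 0.5);
    Y.Set(1, 0, 7.2);
    Y.Set(2, 0, 3.0);
    Y.Set(3, 0, -1.0);

    int k = max(Y);   // k == 7: the largest entry, truncated to int
}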