// C := beta * C + alpha * D, where D(i, j) = ||A(:, i) - A(:, j)||_1 and only
// the triangle of C selected by uplo is referenced and updated.
void SymmetricL1DistanceMatrix(El::UpperOrLower uplo, direction_t dir, T alpha,
    const El::Matrix<T> &A, T beta, El::Matrix<T> &C) {

    const T *a = A.LockedBuffer();
    El::Int ldA = A.LDim();

    T *c = C.Buffer();
    El::Int ldC = C.LDim();

    El::Int n = A.Width();
    El::Int d = A.Height();

    /* Not the most efficient way... but mimicking BLAS is too much work! */
    if (dir == base::COLUMNS) {
        for (El::Int j = 0; j < n; j++)
            for (El::Int i = ((uplo == El::UPPER) ? 0 : j);
                 i < ((uplo == El::UPPER) ? (j + 1) : n); i++) {
                T v = 0.0;
                for (El::Int k = 0; k < d; k++)
                    v += std::abs(a[j * ldA + k] - a[i * ldA + k]);
                c[j * ldC + i] = beta * c[j * ldC + i] + alpha * v;
            }
    }

    // TODO the rest of the cases.
}
void L1DistanceMatrix(direction_t dirA, direction_t dirB, T alpha,
    const El::Matrix<T> &A, const El::Matrix<T> &B, T beta, El::Matrix<T> &C) {

    // TODO verify sizes

    const T *a = A.LockedBuffer();
    El::Int ldA = A.LDim();

    const T *b = B.LockedBuffer();
    El::Int ldB = B.LDim();

    T *c = C.Buffer();
    El::Int ldC = C.LDim();

    El::Int d = A.Height();

    /* Not the most efficient way... but mimicking BLAS is too much work! */
    if (dirA == base::COLUMNS && dirB == base::COLUMNS) {
        for (El::Int j = 0; j < B.Width(); j++)
            for (El::Int i = 0; i < A.Width(); i++) {
                T v = 0.0;
                for (El::Int k = 0; k < d; k++)
                    v += std::abs(b[j * ldB + k] - a[i * ldA + k]);
                c[j * ldC + i] = beta * c[j * ldC + i] + alpha * v;
            }
    }

    // TODO the rest of the cases.
}
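// Minimal usage sketch for the COLUMNS/COLUMNS case above (hypothetical driver,
// not part of the library). It assumes T is instantiated as double, that
// base::COLUMNS is the direction tag used throughout this file, and that the
// Elemental headers are already included; El::Gaussian / El::Zeros only
// generate placeholder data.
inline void l1_distance_matrix_usage_sketch() {
    El::Matrix<double> A, B, C;
    El::Gaussian(A, 5, 10);   // 10 points of dimension 5, stored as columns
    El::Gaussian(B, 5, 7);    // 7 points of dimension 5
    El::Zeros(C, 10, 7);      // C(i, j) will receive ||A(:, i) - B(:, j)||_1

    // beta = 0 overwrites C; alpha = 1 gives the plain L1 distances.
    L1DistanceMatrix(base::COLUMNS, base::COLUMNS, 1.0, A, B, 0.0, C);
}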
void lbann_callback_dump_minibatch_sample_indices::dump_to_file(model *m, Layer *l, int64_t step) {
    // Print minibatch sample indices of input layers
    auto *input = dynamic_cast<generic_input_layer*>(l);
    if (input != nullptr) {
        El::Matrix<El::Int>* indices = l->get_sample_indices_per_mb();
        if (indices == nullptr
            || indices->Height() == 0
            || indices->Width() == 0) {
            return;
        }

        std::ostringstream s;
        s << "mkdir -p " << m_basename;
        const int dir = system(s.str().c_str());
        if (dir < 0) {
            LBANN_ERROR("callback_dump_minibatch_sample_indices is unable to create the target directory");
        }

        const std::string file
            = (m_basename
               + _to_string(m->get_execution_mode())
               + "-model" + std::to_string(m->get_comm()->get_trainer_rank())
               + "-rank" + std::to_string(m->get_comm()->get_rank_in_trainer())
               + "-epoch" + std::to_string(m->get_cur_epoch())
               + "-step" + std::to_string(m->get_cur_step())
               + "-" + l->get_name()
               + "-MB_Sample_Indices");
        El::Write(*indices, file, El::ASCII);
    }
}
void apply_inverse_impl(El::Matrix<value_type>& A,
                        skylark::sketch::rowwise_tag) const {

    ValueType* AA = A.Buffer();
    int j;

#   ifdef SKYLARK_HAVE_OPENMP
#   pragma omp parallel for private(j)
#   endif
    for (j = 0; j < A.Width(); j++)
        wht_apply(_tree, A.Height(), AA + j);  // Not sure stride is used correctly here.
}
inline void ColumnView(El::Matrix<T>& A, El::Matrix<T>& B, int j, int width) {
    El::View(A, B, 0, j, B.Height(), width);
}

inline const El::Matrix<T> ColumnView(const El::Matrix<T>& B, int j, int width) {
    El::Matrix<T> A;
    El::LockedView(A, B, 0, j, B.Height(), width);
    return A;
}
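// Usage sketch (hypothetical, assuming T = double and the Elemental headers
// already included by this file). The mutable overload aliases B's storage, so
// writes through the view modify B; the const overload returns a locked view.
inline void column_view_usage_sketch() {
    El::Matrix<double> B;
    El::Uniform(B, 4, 8);            // 4 x 8 matrix with random entries

    El::Matrix<double> V;
    ColumnView(V, B, 2, 3);          // view of columns 2..4 of B
    El::Scale(2.0, V);               // scales those three columns of B in place

    const El::Matrix<double> W = ColumnView(B, 6, 2);  // read-only view of the last two columns
}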
int Height(const El::Matrix<T>& A) { return A.Height(); }
// Assumes Y is a single-column vector; returns its largest entry truncated to
// int. std::max_element requires <algorithm>.
int max(const El::Matrix<double>& Y) {
    int k = (int) *std::max_element(Y.LockedBuffer(), Y.LockedBuffer() + Y.Height());
    return k;
}
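// Usage sketch (hypothetical): fill a small column vector and take its maximum.
inline void max_usage_sketch() {
    El::Matrix<double> Y(4, 1);      // column vector of labels/scores
    Y.Set(0, 0, 1.0);
    Y.Set(1, 0, 7.0);
    Y.Set(2, 0, 3.0);
    Y.Set(3, 0, 2.0);

    int k = max(Y);                  // k == 7
    (void) k;
}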