Eigen::VectorXd LayerAdapter::gradient(std::vector<int>::const_iterator startN,
                                       std::vector<int>::const_iterator endN)
{
  // Assumes that we want to compute the gradient of the whole training set
  OPENANN_CHECK_EQUALS(*startN, 0);
  OPENANN_CHECK_EQUALS(endN - startN, input.rows());
  Eigen::MatrixXd* output; // set by forwardPropagate below
  layer.forwardPropagate(&input, output, false);
  Eigen::MatrixXd diff = *output - desired;
  Eigen::MatrixXd* e = &diff;
  // Backpropagation fills the registered derivative pointers
  layer.backpropagate(e, e);
  Eigen::VectorXd derivs(dimension());
  std::vector<double*>::const_iterator it = derivatives.begin();
  for(int i = 0; i < dimension(); i++, it++)
    derivs(i) = **it;
  return derivs;
}
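// A minimal sketch (not from the OpenANN sources) of the central-difference
// check that a gradient() implementation like the one above is typically
// verified against. finiteDifferenceGradient and its eps parameter are
// hypothetical; f is assumed to evaluate the error for a parameter vector.
#include <Eigen/Dense>
#include <functional>

Eigen::VectorXd finiteDifferenceGradient(
    const std::function<double(const Eigen::VectorXd&)>& f,
    Eigen::VectorXd w, double eps = 1e-6)
{
  Eigen::VectorXd g(w.size());
  for(int i = 0; i < w.size(); i++)
  {
    const double orig = w(i);
    w(i) = orig + eps;
    const double ep = f(w); // E(w + eps * e_i)
    w(i) = orig - eps;
    const double em = f(w); // E(w - eps * e_i)
    w(i) = orig;
    g(i) = (ep - em) / (2.0 * eps); // central difference
  }
  return g;
}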
void Subsampling::forwardPropagate(Eigen::MatrixXd* x, Eigen::MatrixXd*& y,
                                   bool dropout, double* error)
{
  const int N = x->rows();
  this->a.conservativeResize(N, Eigen::NoChange);
  this->y.conservativeResize(N, Eigen::NoChange);
  this->x = x;
  OPENANN_CHECK_EQUALS(x->cols(), fm * inRows * inCols);
  OPENANN_CHECK_EQUALS(this->y.cols(), fm * outRows * outCols);

  a.setZero();
  #pragma omp parallel for
  for(int n = 0; n < N; n++)
  {
    int outputIdx = 0;
    for(int fmo = 0; fmo < fm; fmo++) // for each feature map
    {
      for(int ri = 0, ro = 0; ri < maxRow; ri += kernelRows, ro++)
      {
        int rowBase = fmo * fmInSize + ri * inCols;
        for(int ci = 0, co = 0; ci < maxCol; ci += kernelCols, co++, outputIdx++)
        {
          // Weighted sum over the kernelRows x kernelCols window
          for(int kr = 0; kr < kernelRows; kr++)
          {
            // Row kr of the pooling window starts inCols entries further
            for(int kc = 0, inputIdx = rowBase + kr * inCols + ci; kc < kernelCols; kc++)
              a(n, outputIdx) += (*x)(n, inputIdx++) * W[fmo](ro, co);
          }
          if(bias)
            a(n, outputIdx) += Wb[fmo](ro, co);
        }
      }
    }
  }
  activationFunction(act, a, this->y);

  // Optional L1/L2 penalties on the pooling weights
  if(error && regularization.l1Penalty > 0.0)
    for(int fmo = 0; fmo < fm; fmo++)
      *error += regularization.l1Penalty * W[fmo].array().abs().sum();
  if(error && regularization.l2Penalty > 0.0)
    for(int fmo = 0; fmo < fm; fmo++)
      *error += regularization.l2Penalty * W[fmo].array().square().sum() / 2.0;

  y = &(this->y);
}
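// Hypothetical standalone illustration (not OpenANN code) of the index
// arithmetic above for a single feature map: a 4x4 input stored row-major
// in one matrix row, subsampled with a 2x2 kernel into a 2x2 output. With
// the shared weight fixed at 0.25, each output is the mean of its block.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int inRows = 4, inCols = 4, kernelRows = 2, kernelCols = 2;
  const double weight = 0.25; // one shared weight per output unit here
  Eigen::MatrixXd x(1, inRows * inCols);
  for(int i = 0; i < inRows * inCols; i++)
    x(0, i) = i; // 0, 1, ..., 15
  Eigen::VectorXd a(4);
  a.setZero();
  int outputIdx = 0;
  for(int ri = 0; ri < inRows; ri += kernelRows)
    for(int ci = 0; ci < inCols; ci += kernelCols, outputIdx++)
      for(int kr = 0; kr < kernelRows; kr++)
        for(int kc = 0; kc < kernelCols; kc++)
          a(outputIdx) += x(0, (ri + kr) * inCols + ci + kc) * weight;
  std::cout << a.transpose() << std::endl; // 2.5 4.5 10.5 12.5
}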
void save(const Eigen::MatrixXd& in, const Eigen::MatrixXd& out, std::ostream& stream)
{
  OPENANN_CHECK_EQUALS(in.rows(), out.rows());
  const int N = in.rows();
  const int D = in.cols();
  const int F = out.cols();
  // Header: number of samples, input dimension, output dimension,
  // followed by alternating input and output rows
  stream << N << " " << D << " " << F << std::endl;
  for(int n = 0; n < N; n++)
  {
    stream << in.row(n) << std::endl;
    stream << out.row(n) << std::endl;
  }
}
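// A hedged usage sketch: writing a two-sample dataset with the save()
// overload above. The file name and data are made up for illustration,
// and the save() declaration is assumed to be visible in this scope.
#include <Eigen/Dense>
#include <fstream>

int main()
{
  Eigen::MatrixXd in(2, 3), out(2, 1);
  in  << 0.0, 1.0, 2.0,
         3.0, 4.0, 5.0;
  out << 1.0,
         0.0;
  std::ofstream file("dataset.txt");
  save(in, out, file); // header "2 3 1", then alternating input/output rows
}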
void MaxPooling::forwardPropagate(Eigen::MatrixXd* x, Eigen::MatrixXd*& y, bool dropout)
{
  const int N = x->rows();
  this->y.conservativeResize(N, Eigen::NoChange);
  this->x = x;
  OPENANN_CHECK_EQUALS(x->cols(), fm * inRows * inCols);
  OPENANN_CHECK_EQUALS(this->y.cols(), fm * outRows * outCols);

  #pragma omp parallel for
  for(int n = 0; n < N; n++)
  {
    int outputIdx = 0;
    for(int fmo = 0; fmo < fm; fmo++) // for each feature map
    {
      for(int ri = 0, ro = 0; ri < maxRow; ri += kernelRows, ro++)
      {
        int rowBase = fmo * fmInSize + ri * inCols;
        for(int ci = 0, co = 0; ci < maxCol; ci += kernelCols, co++, outputIdx++)
        {
          // Maximum over the kernelRows x kernelCols window
          double m = -std::numeric_limits<double>::max();
          for(int kr = 0; kr < kernelRows; kr++)
          {
            int inputIdx = rowBase + kr * inCols + ci; // row kr of the window
            for(int kc = 0; kc < kernelCols; kc++, inputIdx++)
              m = std::max(m, (*x)(n, inputIdx));
          }
          this->y(n, outputIdx) = m;
        }
      }
    }
  }
  y = &(this->y);
}
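// Hypothetical illustration (not OpenANN code) of the window maximum
// computed above: 2x2 max pooling over a 4x4 single feature map stored
// row-major in one matrix row.
#include <Eigen/Dense>
#include <algorithm>
#include <iostream>
#include <limits>

int main()
{
  const int inRows = 4, inCols = 4, kernelRows = 2, kernelCols = 2;
  Eigen::MatrixXd x(1, inRows * inCols);
  for(int i = 0; i < inRows * inCols; i++)
    x(0, i) = i; // 0, 1, ..., 15
  for(int ri = 0; ri < inRows; ri += kernelRows)
    for(int ci = 0; ci < inCols; ci += kernelCols)
    {
      double m = -std::numeric_limits<double>::max();
      for(int kr = 0; kr < kernelRows; kr++)
        for(int kc = 0; kc < kernelCols; kc++)
          m = std::max(m, x(0, (ri + kr) * inCols + ci + kc));
      std::cout << m << " "; // prints 5 7 13 15
    }
  std::cout << std::endl;
}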
void save(const Eigen::MatrixXd& in, const Eigen::MatrixXd& out, std::ostream& stream)
{
  OPENANN_CHECK_EQUALS(in.rows(), out.rows());
  for(int i = 0; i < in.rows(); ++i)
  {
    // Label: index of the maximum for one-of-c encoded outputs,
    // otherwise the raw output value
    if(out.cols() > 1)
    {
      int index;
      out.row(i).maxCoeff(&index);
      stream << index;
    }
    else
      stream << out(i, 0);
    // Sparse "index:value" pairs, 1-based as LibSVM requires;
    // near-zero entries are skipped to keep the output sparse
    for(int j = 0; j < in.cols(); ++j)
    {
      if(std::fabs(in(i, j)) > 1.0e-20)
        stream << " " << j + 1 << ":" << in(i, j);
    }
    stream << std::endl;
  }
}
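// A hedged usage sketch of the LibSVM-style save() above: one-of-c encoded
// outputs become class indices and inputs become sparse 1-based index:value
// pairs. Assumes the save() overload above is visible in this scope.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd in(2, 3), out(2, 2);
  in  << 0.5, 0.0, 1.5,
         0.0, 2.0, 0.0;
  out << 1.0, 0.0,   // class 0
         0.0, 1.0;   // class 1
  save(in, out, std::cout);
  // Prints:
  // 0 1:0.5 3:1.5
  // 1 2:2
}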