void Subsampling::backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout,
                                bool backpropToPrevious)
{
  const int N = a.rows();
  yd.conservativeResize(N, Eigen::NoChange);
  e.conservativeResize(N, Eigen::NoChange);
  // Derivative of the activation function
  activationFunctionDerivative(act, y, yd);
  deltas = yd.cwiseProduct(*ein);

  e.setZero();
  // Reset the accumulated weight and bias derivatives
  for(int fmo = 0; fmo < fm; fmo++)
  {
    Wd[fmo].setZero();
    if(bias)
      Wbd[fmo].setZero();
  }

  // Accumulate the error for the previous layer and the weight derivatives
  // over all instances of the mini-batch
  for(int n = 0; n < N; n++)
  {
    int outputIdx = 0;
    for(int fmo = 0; fmo < fm; fmo++)
    {
      // (ri, ci) is the top-left corner of a pooling region in the input
      // feature map fmo, (ro, co) the corresponding output position
      for(int ri = 0, ro = 0; ri < maxRow; ri += kernelRows, ro++)
      {
        int rowBase = fmo * fmInSize + ri * inCols;
        for(int ci = 0, co = 0; ci < maxCol;
            ci += kernelCols, co++, outputIdx++)
        {
          // Delta of the output unit that pools this region
          const double d = deltas(n, outputIdx);
          for(int kr = 0; kr < kernelRows; kr++)
          {
            for(int kc = 0, inputIdx = rowBase + ci; kc < kernelCols;
                kc++, inputIdx++)
            {
              // Propagate the error to the previous layer and accumulate
              // the weight derivative
              e(n, inputIdx) += W[fmo](ro, co) * d;
              Wd[fmo](ro, co) += d * (*x)(n, inputIdx);
            }
          }
          if(bias)
            Wbd[fmo](ro, co) += d;
        }
      }
    }
  }

  if(regularization.l1Penalty > 0.0)
  {
    // Gradient of the L1 penalty: l1Penalty * sign(W)
    for(int fmo = 0; fmo < fm; fmo++)
      Wd[fmo].array() += regularization.l1Penalty * W[fmo].array()
                         / W[fmo].array().abs();
  }
  if(regularization.l2Penalty > 0.0)
  {
    // Gradient of the L2 penalty: l2Penalty * W
    for(int fmo = 0; fmo < fm; fmo++)
      Wd[fmo] += regularization.l2Penalty * W[fmo];
  }

  eout = &e;
}
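
For reference, the following stand-alone sketch (not taken from the library) shows the forward operation that this backward pass differentiates: every non-overlapping kernelRows x kernelCols block of an input feature map is summed, scaled by a single trainable weight per output unit, shifted by an optional bias and passed through the activation function. The function name pool, the per-map matrix layout and the logistic activation are assumptions made for illustration only.

#include <Eigen/Dense>
#include <cmath>

// Weighted sum-pooling of one feature map: one trainable weight and bias
// per output unit, shared across its kernelRows x kernelCols region
Eigen::MatrixXd pool(const Eigen::MatrixXd& in,  // input feature map
                     const Eigen::MatrixXd& W,   // one weight per output unit
                     const Eigen::MatrixXd& Wb,  // one bias per output unit
                     int kernelRows, int kernelCols)
{
  const int outRows = static_cast<int>(in.rows()) / kernelRows;
  const int outCols = static_cast<int>(in.cols()) / kernelCols;
  Eigen::MatrixXd out(outRows, outCols);
  for(int ro = 0; ro < outRows; ro++)
    for(int co = 0; co < outCols; co++)
    {
      // Sum the pooling region, scale it with the unit's weight, add the
      // bias and apply a logistic activation (used here as an example)
      const double sum = in.block(ro * kernelRows, co * kernelCols,
                                  kernelRows, kernelCols).sum();
      out(ro, co) = 1.0 / (1.0 + std::exp(-(W(ro, co) * sum + Wb(ro, co))));
    }
  return out;
}
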
void FullyConnected::backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout)
{
  const int N = a.rows();
  yd.conservativeResize(N, Eigen::NoChange);
  // Derivative of the activation function
  activationFunctionDerivative(act, y, yd);
  deltas = yd.cwiseProduct(*ein);
  // Weight derivatives
  Wd = deltas.transpose() * *x;
  if(bias)
    bd = deltas.colwise().sum().transpose();
  // Prepare error signals for previous layer
  e = deltas * W;
  eout = &e;
}
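
The fully connected backward pass is the textbook chain rule: deltas is the activation derivative times the incoming error dE/dy, Wd = deltas^T * x, bd is the column sum of deltas, and e = deltas * W is dE/dx for the layer below. The stand-alone sketch below (logistic activation, sum-of-squared-errors loss and all variable names are assumptions for illustration) checks the analytic gradient Wd = deltas.transpose() * x against a finite-difference approximation.

#include <Eigen/Dense>
#include <iostream>

// Coefficient-wise logistic function
static Eigen::MatrixXd logistic(const Eigen::MatrixXd& a)
{
  return (1.0 + (-a.array()).exp()).inverse().matrix();
}

int main()
{
  const int N = 5, D = 3, F = 2; // instances, inputs, outputs
  Eigen::MatrixXd x = Eigen::MatrixXd::Random(N, D);
  Eigen::MatrixXd t = Eigen::MatrixXd::Random(N, F);
  Eigen::MatrixXd W = Eigen::MatrixXd::Random(F, D);

  // E(W) = 0.5 * ||logistic(x * W^T) - t||^2
  auto error = [&](const Eigen::MatrixXd& Wc) {
    return 0.5 * (logistic(x * Wc.transpose()) - t).squaredNorm();
  };

  // Analytic gradient, computed as in FullyConnected::backpropagate
  Eigen::MatrixXd y = logistic(x * W.transpose());
  Eigen::MatrixXd ein = y - t;                                   // dE/dy
  Eigen::MatrixXd yd = (y.array() * (1.0 - y.array())).matrix(); // logistic'
  Eigen::MatrixXd deltas = yd.cwiseProduct(ein);
  Eigen::MatrixXd Wd = deltas.transpose() * x;

  // Finite-difference approximation for one weight
  const double eps = 1e-6;
  Eigen::MatrixXd Wp = W, Wm = W;
  Wp(0, 0) += eps;
  Wm(0, 0) -= eps;
  std::cout << "analytic " << Wd(0, 0)
            << ", numeric " << (error(Wp) - error(Wm)) / (2.0 * eps)
            << std::endl;
  return 0;
}
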
void RBM::backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout,
                        bool backpropToPrevious)
{
  const int N = ph.rows();
  phd.conservativeResize(N, Eigen::NoChange);
  // Derivative of the logistic activation of the hidden units
  activationFunctionDerivative(LOGISTIC, ph, phd);
  deltas = phd.cwiseProduct(*ein);
  // Compute weight and bias derivatives only if the RBM's parameters are
  // fine-tuned during backpropagation
  if(backprop)
  {
    Wd = deltas.transpose() * v;
    bhd = deltas.colwise().sum().transpose();
  }
  // Prepare error signals for previous layer
  if(backpropToPrevious)
    e = deltas * W;
  eout = &e;
}
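
The eout reference to a pointer that all three versions share is how error signals are chained through a network: each layer stores its error in the member e and publishes its address via eout, and the caller passes that pointer as ein to the layer below. The backpropToPrevious flag, where it is checked (as in the RBM version above), tells a layer that it does not need to compute an error signal for a previous layer. The following stand-alone sketch illustrates the calling convention with a made-up Layer interface and a toy layer; it is not the library's network code.

#include <Eigen/Dense>
#include <vector>

// Assumed minimal interface mirroring the signature used above
struct Layer
{
  virtual ~Layer() {}
  virtual void backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout,
                             bool backpropToPrevious) = 0;
};

// Toy layer whose backward pass only scales the incoming error; it exists
// solely to show the pointer passing
struct ScaleLayer : Layer
{
  double factor;
  Eigen::MatrixXd e;
  explicit ScaleLayer(double factor) : factor(factor) {}
  void backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout,
                     bool backpropToPrevious)
  {
    if(backpropToPrevious)
      e = factor * (*ein);
    eout = &e;
  }
};

int main()
{
  std::vector<Layer*> layers;
  layers.push_back(new ScaleLayer(2.0));
  layers.push_back(new ScaleLayer(3.0));

  Eigen::MatrixXd dEdY = Eigen::MatrixXd::Ones(1, 4); // dE/dy of the top layer
  Eigen::MatrixXd* error = &dEdY;
  for(int l = (int) layers.size() - 1; l >= 0; l--)
  {
    Eigen::MatrixXd* next = 0;
    // The bottom layer does not have to produce an error signal
    layers[l]->backpropagate(error, next, l > 0);
    error = next;
  }
  for(size_t l = 0; l < layers.size(); l++)
    delete layers[l];
  return 0;
}
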