// Fully connected layer with I = info.outputs() inputs and J outputs.
// W/Wd hold the weight matrix and its derivative, b/bd the bias, a the
// activations, y the outputs, yd the activation derivatives, deltas the
// backpropagated errors and e the error w.r.t. the layer input.
FullyConnected::FullyConnected(OutputInfo info, int J, bool bias,
                               ActivationFunction act, double stdDev,
                               double maxSquaredWeightNorm)
  : I(info.outputs()), J(J), bias(bias), act(act), stdDev(stdDev),
    maxSquaredWeightNorm(maxSquaredWeightNorm), W(J, I), Wd(J, I), b(J), bd(J),
    x(0), a(1, J), y(1, J), yd(1, J), deltas(1, J), e(1, I)
{
}
// Subsampling layer: reads the number of feature maps and the input
// resolution from the previous layer's OutputInfo; the output geometry and
// the trainable weights are set up later in initialize().
Subsampling::Subsampling(OutputInfo info, int kernelRows, int kernelCols,
                         bool bias, ActivationFunction act, double stdDev,
                         Regularization regularization)
  : I(info.outputs()), fm(info.dimensions[0]), inRows(info.dimensions[1]),
    inCols(info.dimensions[2]), kernelRows(kernelRows), kernelCols(kernelCols),
    bias(bias), act(act), stdDev(stdDev), x(0), e(1, I), fmInSize(-1),
    outRows(-1), outCols(-1), fmOutSize(-1), maxRow(-1), maxCol(-1),
    regularization(regularization)
{
}
OutputInfo MaxPooling::initialize(std::vector<double*>& parameterPointers,
                                  std::vector<double*>& parameterDerivativePointers)
{
  OutputInfo info;
  info.dimensions.push_back(fm);
  outRows = inRows / kernelRows;
  outCols = inCols / kernelCols;
  fmOutSize = outRows * outCols;
  info.dimensions.push_back(outRows);
  info.dimensions.push_back(outCols);
  fmInSize = inRows * inCols;
  maxRow = inRows - kernelRows + 1;
  maxCol = inCols - kernelCols + 1;

  y.resize(1, info.outputs());
  deltas.resize(1, info.outputs());

  if(info.outputs() < 1)
    throw OpenANNException("Number of outputs in max-pooling layer is below"
                           " 1. You should either choose a smaller filter"
                           " size or generate a bigger input.");

  return info;
}
// Dropout layer: keeps the input dimension, stores the dropout probability
// and allocates the output, dropout mask and error buffers.
Dropout::Dropout(OutputInfo info, double dropoutProbability)
  : info(info), I(info.outputs()), dropoutProbability(dropoutProbability),
    y(1, I), dropoutMask(1, I), e(1, I)
{
}
OutputInfo Subsampling::initialize(std::vector<double*>& parameterPointers,
                                   std::vector<double*>& parameterDerivativePointers)
{
  OutputInfo info;
  info.dimensions.push_back(fm);
  outRows = inRows / kernelRows;
  outCols = inCols / kernelCols;
  fmOutSize = outRows * outCols;
  info.dimensions.push_back(outRows);
  info.dimensions.push_back(outCols);
  fmInSize = inRows * inCols;
  maxRow = inRows - kernelRows + 1;
  maxCol = inCols - kernelCols + 1;

  // One trainable weight (and optionally one bias) per output unit and
  // feature map; numParams matches the number of pointers registered below.
  W.resize(fm, Eigen::MatrixXd(outRows, outCols));
  Wd.resize(fm, Eigen::MatrixXd(outRows, outCols));
  int numParams = fm * outRows * outCols;
  if(bias)
  {
    Wb.resize(fm, Eigen::MatrixXd(outRows, outCols));
    Wbd.resize(fm, Eigen::MatrixXd(outRows, outCols));
    numParams += fm * outRows * outCols;
  }
  parameterPointers.reserve(parameterPointers.size() + numParams);
  parameterDerivativePointers.reserve(parameterDerivativePointers.size() + numParams);

  // Register every parameter and its derivative with the optimizer.
  for(int fmo = 0; fmo < fm; fmo++)
  {
    for(int r = 0; r < outRows; r++)
    {
      for(int c = 0; c < outCols; c++)
      {
        parameterPointers.push_back(&W[fmo](r, c));
        parameterDerivativePointers.push_back(&Wd[fmo](r, c));
        if(bias)
        {
          parameterPointers.push_back(&Wb[fmo](r, c));
          parameterDerivativePointers.push_back(&Wbd[fmo](r, c));
        }
      }
    }
  }

  initializeParameters();

  a.resize(1, info.outputs());
  y.resize(1, info.outputs());
  yd.resize(1, info.outputs());
  deltas.resize(1, info.outputs());

  if(info.outputs() < 1)
    throw OpenANNException("Number of outputs in subsampling layer is below"
                           " 1. You should either choose a smaller filter"
                           " size or generate a bigger input.");

  OPENANN_CHECK(fmInSize > 0);
  OPENANN_CHECK(outRows > 0);
  OPENANN_CHECK(outCols > 0);
  OPENANN_CHECK(fmOutSize > 0);
  OPENANN_CHECK(maxRow > 0);
  OPENANN_CHECK(maxCol > 0);
  return info;
}
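// --- Worked example (not part of the library source) -----------------------
// For a hypothetical configuration with fm = 4 feature maps, a 24x24 input
// and a 2x2 kernel, initialize() yields outRows = outCols = 12, so
// 4 * 12 * 12 = 576 weights are registered (1152 with bias), one per output
// unit and feature map, independent of the kernel size.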
MaxPooling::MaxPooling(OutputInfo info, int kernelRows, int kernelCols)
  : I(info.outputs()), fm(info.dimensions[0]), inRows(info.dimensions[1]),
    inCols(info.dimensions[2]), kernelRows(kernelRows), kernelCols(kernelCols),
    x(0), e(1, I)
{
}
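// --- Usage sketch (not part of the library source) --------------------------
// A minimal sketch of how the layers above are typically chained: each
// layer's initialize() returns the OutputInfo that parameterizes the next
// layer's constructor, while all parameter pointers are collected in two flat
// vectors. The concrete input size (1x28x28), the activation value TANH and
// the call to FullyConnected::initialize() are assumptions for illustration,
// not taken from this file; the surrounding OpenANN headers are assumed to be
// included.
void buildTinyStack()
{
  OutputInfo input;
  input.dimensions.push_back(1);   // feature maps
  input.dimensions.push_back(28);  // input rows
  input.dimensions.push_back(28);  // input columns

  std::vector<double*> parameters;
  std::vector<double*> derivatives;

  // 2x2 max-pooling halves each spatial dimension (28x28 -> 14x14).
  MaxPooling pool(input, 2, 2);
  OutputInfo pooled = pool.initialize(parameters, derivatives);

  // Fully connected layer mapping the 196 pooled values to 10 outputs,
  // with bias, assumed tanh activation, stdDev 0.05 and no weight norm limit.
  FullyConnected dense(pooled, 10, true, TANH, 0.05, 0.0);
  dense.initialize(parameters, derivatives);
}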