// Backpropagates through a rectified-linear unit: the incoming gradient
// `delta` passes through wherever the forward input `x` was positive and is
// zeroed elsewhere (x_gradient = delta * 1[x > 0]).
void operator ()(const tensor_type &delta, const tensor_type &x, tensor_type &x_gradient) {
  CHECK_EQ(x.order(), delta.order());
  // TODO(robertsdionne): Support arbitrary tensor order with an n-dimensional iterator.
  const auto rows = x.shape().at(0);
  const auto columns = x.shape().at(1);
  for (auto row = 0; row < rows; ++row) {
    for (auto column = 0; column < columns; ++column) {
      // (x > 0) yields a 0/1 mask that gates the gradient, exactly as the
      // forward ReLU gated the activation.
      const auto pass_through = x.at({row, column}) > F(0);
      x_gradient.set({row, column}, delta.at({row, column}) * pass_through);
    }
  }
}
// Applies a rectified-linear unit elementwise over a 2-D tensor:
// y[i][j] = max(0, x[i][j]).
void operator ()(const tensor_type &x, tensor_type &y) {
  using std::max;
  CHECK_EQ(x.order(), y.order());
  // TODO(robertsdionne): Support arbitrary tensor order with an n-dimensional iterator.
  const auto rows = x.shape().at(0);
  const auto columns = x.shape().at(1);
  for (auto row = 0; row < rows; ++row) {
    for (auto column = 0; column < columns; ++column) {
      y.set({row, column}, max(F(0), x.at({row, column})));
    }
  }
}
// Computes a 2-D "valid" convolution with a per-channel bias: y = F ∗ x + b.
// filter_ is indexed {s, t, u, v}: spatial offsets (s, t), output channel u,
// input channel v; bias_ is indexed by output channel.
// NOTE(review): accumulates on top of y's existing contents, so y is assumed
// to be initialized (typically zeroed) by the caller — TODO confirm.
//
// Improvements over the original: removes the no-op F(1) * identity
// multiplications; loads/stores the accumulator once per output element
// instead of once per (s, t) filter offset; folds the bias addition into the
// same pass instead of a second sweep over y. The per-element addition order
// (initial y value, then s-major/t/v filter terms, then bias) is unchanged.
void operator ()(const tensor_type &x, tensor_type &y) {
  const auto height = y.shape().at(0);
  const auto width = y.shape().at(1);
  const auto output_channels = y.shape().at(2);
  const auto filter_height = filter_.shape().at(0);
  const auto filter_width = filter_.shape().at(1);
  const auto input_channels = filter_.shape().at(3);
  for (auto i = 0; i < height; ++i) {
    for (auto j = 0; j < width; ++j) {
      for (auto u = 0; u < output_channels; ++u) {
        // Load the running sum once; the original re-read and re-wrote
        // y({i, j, u}) for every (s, t) offset.
        F output_value = y.at({i, j, u});
        for (auto s = 0; s < filter_height; ++s) {
          for (auto t = 0; t < filter_width; ++t) {
            for (auto v = 0; v < input_channels; ++v) {
              output_value += filter_.at({s, t, u, v}) * x.at({i + s, j + t, v});
            }
          }
        }
        // Bias folded into the same element visit; per element this matches
        // the original's separate bias loop exactly.
        y.set({i, j, u}, output_value + bias_.at({u}));
      }
    }
  }
}