ImageBatch Convolution::PoolingBackward(ImageBatch diff, ImageBatch top, ImageBatch bottom, PoolingInfo info) {
  CHECK_EQ(diff.Size(), top.Size()) << "input sizes mismatch";
  CHECK_EQ(diff.GetNumImages(), bottom.GetNumImages()) << "#images mismatch";
  CHECK_EQ(diff.GetNumFeatureMaps(), bottom.GetNumFeatureMaps()) << "#channels mismatch";
  // Recompute the pooled output size (ceiling division), clip windows that would
  // start entirely inside the padding, and verify that `top`/`diff` match it.
  int pooled_height = (bottom.GetHeight() + 2 * info.pad_height - info.height + info.stride_vertical - 1) / info.stride_vertical + 1;
  int pooled_width = (bottom.GetWidth() + 2 * info.pad_width - info.width + info.stride_horizontal - 1) / info.stride_horizontal + 1;
  if (0 <= (pooled_height - 1) * info.stride_vertical - bottom.GetHeight() - info.pad_height) {
    --pooled_height;
  }
  if (0 <= (pooled_width - 1) * info.stride_horizontal - bottom.GetWidth() - info.pad_width) {
    --pooled_width;
  }
  CHECK_EQ(top.GetHeight(), pooled_height) << "height mismatch";
  CHECK_EQ(top.GetWidth(), pooled_width) << "width mismatch";
  PoolingBackwardOp* op = new PoolingBackwardOp();
  op->closure = {
    info.algorithm,
    info.height,
    info.width,
    info.stride_vertical,
    info.stride_horizontal,
    info.pad_height,
    info.pad_width
  };
  return NArray::ComputeOne({diff, top, bottom}, bottom.Size(), op);
}
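
// Illustrative shape check for PoolingBackward above (example values, not from
// the original source): for a `bottom` batch of 28x28 feature maps pooled with
// a 3x3 window, stride 2 and no padding, the forward size is
// (28 + 0 - 3 + 2 - 1) / 2 + 1 = 14, and the clip test
// (14 - 1) * 2 - 28 - 0 = -2 < 0 leaves it at 14, so `top` and `diff` must both
// be 14x14 here, while the returned gradient has `bottom`'s shape.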
ImageBatch Convolution::PoolingForward(ImageBatch src, PoolingInfo info) {
  // Output size uses ceiling division, so a partially covered window at the
  // border still produces an output element.
  int pooled_height = (src.GetHeight() + 2 * info.pad_height - info.height + info.stride_vertical - 1) / info.stride_vertical + 1;
  int pooled_width = (src.GetWidth() + 2 * info.pad_width - info.width + info.stride_horizontal - 1) / info.stride_horizontal + 1;
  // Drop the last window if it would start entirely inside the padding.
  if (0 <= (pooled_height - 1) * info.stride_vertical - src.GetHeight() - info.pad_height) {
    --pooled_height;
  }
  if (0 <= (pooled_width - 1) * info.stride_horizontal - src.GetWidth() - info.pad_width) {
    --pooled_width;
  }
  // Scale is ordered {width, height, feature maps, images}, matching ConvForward below.
  Scale new_size {
    pooled_width,
    pooled_height,
    src.GetNumFeatureMaps(),
    src.GetNumImages()
  };
  PoolingForwardOp* op = new PoolingForwardOp();
  op->closure = {
    info.algorithm,
    info.height,
    info.width,
    info.stride_vertical,
    info.stride_horizontal,
    info.pad_height,
    info.pad_width
  };
  return NArray::ComputeOne({src}, new_size, op);
}
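
// Worked examples for the pooled-size arithmetic above (illustrative values,
// not from the original source):
//   13x13 input, 3x3 window, stride 2, pad 0:
//     (13 + 0 - 3 + 2 - 1) / 2 + 1 = 6; clip test (6 - 1) * 2 - 13 - 0 = -3 < 0,
//     so the output is 6x6.
//   5x5 input, 2x2 window, stride 2, pad 1:
//     (5 + 2 - 2 + 2 - 1) / 2 + 1 = 4; clip test (4 - 1) * 2 - 5 - 1 = 0 >= 0,
//     so the last window would start entirely in the padding and the output is
//     clipped to 3x3.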
Filter Convolution::ConvBackwardFilter(ImageBatch diff, ImageBatch bottom, Filter filter, ConvInfo info) {
  CHECK_EQ(diff.GetNumImages(), bottom.GetNumImages()) << "#images mismatch";
  /*
   * The filter size cannot be inferred from `diff` and `bottom` alone when
   * (top + 2 * pad) % stride != 0, so it is taken from `filter` instead of:
   *
   * Scale new_size {
   *   -(diff.GetWidth() - 1) * info.stride_horizontal + bottom.GetWidth() + 2 * info.pad_width,
   *   -(diff.GetHeight() - 1) * info.stride_vertical + bottom.GetHeight() + 2 * info.pad_height,
   *   bottom.GetNumFeatureMaps(),
   *   diff.GetNumFeatureMaps()
   * };
   */
  ConvBackwardFilterOp* op = new ConvBackwardFilterOp();
  op->closure = {
    info.pad_height,
    info.pad_width,
    info.stride_vertical,
    info.stride_horizontal
  };
  return NArray::ComputeOne({diff, bottom}, filter.Size(), op);
}
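
// Illustrative example of the ambiguity noted above (hypothetical values): with
// a bottom width of 7, pad 0 and stride 2, a filter of width 2 gives a top
// width of (7 - 2) / 2 + 1 = 3, and a filter of width 3 gives
// (7 - 3) / 2 + 1 = 3 as well, so the filter size cannot be recovered from
// `diff` and `bottom` and must come from `filter.Size()`.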
ImageBatch Convolution::ConvForward(ImageBatch src, Filter filter, NArray bias, ConvInfo info) {
  CHECK_EQ(src.GetNumFeatureMaps(), filter.GetNumInputs()) << "#input channels mismatch";
  CHECK_EQ(bias.Size().NumDims(), 1) << "bias dimension mismatch";
  CHECK_EQ(bias.Size()[0], filter.GetNumOutputs()) << "bias size mismatch";
  // No divisibility requirement: the output size below rounds down, so
  // (src + 2 * pad - filter) does not have to be a multiple of the stride.
  //CHECK_EQ((src.GetHeight() + 2 * info.pad_height - filter.GetHeight()) % info.stride_vertical, 0) << "filter height mismatch";
  //CHECK_EQ((src.GetWidth() + 2 * info.pad_width - filter.GetWidth()) % info.stride_horizontal, 0) << "filter width mismatch";
  Scale new_size {
    (src.GetWidth() + 2 * info.pad_width - filter.GetWidth()) / info.stride_horizontal + 1,
    (src.GetHeight() + 2 * info.pad_height - filter.GetHeight()) / info.stride_vertical + 1,
    filter.GetNumOutputs(),
    src.GetNumImages()
  };
  ConvForwardOp* op = new ConvForwardOp();
  op->closure = {
    info.pad_height,
    info.pad_width,
    info.stride_vertical,
    info.stride_horizontal
  };
  return NArray::ComputeOne({src, filter, bias}, new_size, op);
}
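
// Worked examples for the output-size formula above (illustrative values, not
// from the original source):
//   227x227 input, 11x11 filter, stride 4, pad 0:
//     (227 + 0 - 11) / 4 + 1 = 55, i.e. a 55x55 output per filter.
//   224x224 input, 3x3 filter, stride 2, pad 1:
//     (224 + 2 - 3) / 2 + 1 = 112 even though 223 is not divisible by 2, which
//     is why the divisibility CHECKs above are disabled.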