void blobFromProto(const caffe::BlobProto &pbBlob, cv::Mat &dstBlob)
{
    MatShape shape;
    blobShapeFromProto(pbBlob, shape);

    dstBlob.create((int)shape.size(), &shape[0], CV_32F);
    if (pbBlob.data_size())
    {
        // Single precision floats.
        CV_Assert(pbBlob.data_size() == (int)dstBlob.total());

        CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
        Mat(dstBlob.dims, &dstBlob.size[0], CV_32F, (void*)pbBlob.data().data()).copyTo(dstBlob);
    }
    else
    {
        CV_Assert(pbBlob.has_raw_data());
        const std::string& raw_data = pbBlob.raw_data();
        if (pbBlob.raw_data_type() == caffe::FLOAT16)
        {
            // Half precision floats.
            CV_Assert(raw_data.size() / 2 == (int)dstBlob.total());

            Mat halfs((int)shape.size(), &shape[0], CV_16SC1, (void*)raw_data.c_str());
            convertFp16(halfs, dstBlob);
        }
        else if (pbBlob.raw_data_type() == caffe::FLOAT)
        {
            CV_Assert(raw_data.size() / 4 == (int)dstBlob.total());
            Mat((int)shape.size(), &shape[0], CV_32FC1, (void*)raw_data.c_str()).copyTo(dstBlob);
        }
        else
            CV_Error(Error::StsNotImplemented, "Unexpected blob data type");
    }
}
static inline MatShape shape(int a0, int a1=-1, int a2=-1, int a3=-1)
{
    int dims[] = {a0, a1, a2, a3};
    MatShape s = shape(dims, 4);
    s.erase(std::remove_if(s.begin(), s.end(), is_neg), s.end());
    return s;
}
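// A minimal usage sketch (not part of the original sources), assuming the
// helper above and `is_neg` are visible in this translation unit. It only
// illustrates that the unused trailing default dimensions (-1) are dropped,
// so one overload can build 1D..4D shapes.
static void shapeHelperExample()
{
    MatShape s2 = shape(10, 3);          // -> {10, 3}
    MatShape s4 = shape(1, 3, 224, 224); // -> {1, 3, 224, 224}
    CV_Assert(s2.size() == 2 && s4.size() == 4);
}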
void blobFromProto(const caffe::BlobProto &pbBlob, cv::Mat &dstBlob)
{
    MatShape shape;
    blobShapeFromProto(pbBlob, shape);

    dstBlob.create((int)shape.size(), &shape[0], CV_32F);
    float *dstData = dstBlob.ptr<float>();
    if (pbBlob.data_size())
    {
        // Single precision floats.
        CV_Assert(pbBlob.data_size() == (int)dstBlob.total());

        CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);

        for (int i = 0; i < pbBlob.data_size(); i++)
            dstData[i] = pbBlob.data(i);
    }
    else
    {
        // Half precision floats.
        CV_Assert(pbBlob.raw_data_type() == caffe::FLOAT16);
        std::string raw_data = pbBlob.raw_data();

        CV_Assert(raw_data.size() / 2 == (int)dstBlob.total());

        Mat halfs((int)shape.size(), &shape[0], CV_16SC1, (void*)raw_data.c_str());
        convertFp16(halfs, dstBlob);
    }
}
static inline MatShape concat(const MatShape& a, const MatShape& b)
{
    MatShape c = a;
    c.insert(c.end(), b.begin(), b.end());
    return c;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
                     const int requiredOutputs,
                     std::vector<MatShape> &outputs,
                     std::vector<MatShape> &internals) const
{
    bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
    MatShape shape = inputs[0];
    int cAxis = clamp(axisRaw, shape.size());
    shape[cAxis] = 1;
    internals.assign(1, shape);
    return inplace;
}
static MatShape getBufferShape(const MatShape& shape)
{
    if (shape.size() == 2 || shape.size() == 4)
    {
        int w, h, c, n;
        getCanonicalSize(shape, &w, &h, &c, &n);
        return {w, h, c, n};
    }
    else
    {
        MatShape bufferShape(shape);
        std::reverse(bufferShape.begin(), bufferShape.end());
        return bufferShape;
    }
}
inline void print(const MatShape& shape, const String& name = "")
{
    printf("%s: [", name.c_str());
    size_t i, n = shape.size();
    for (i = 0; i < n; i++)
        printf(" %d", shape[i]);
    printf(" ]\n");
}
static inline int total(const MatShape& shape, int start = -1, int end = -1)
{
    if (start == -1) start = 0;
    if (end == -1) end = (int)shape.size();

    if (shape.empty())
        return 0;

    int elems = 1;
    CV_Assert(start <= (int)shape.size() && end <= (int)shape.size() &&
              start <= end);
    for (int i = start; i < end; i++)
    {
        elems *= shape[i];
    }
    return elems;
}
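// A small sketch (not from the original sources) of the total() semantics,
// assuming the helpers above are visible here: with the default arguments it
// multiplies all dimensions, and an explicit [start, end) restricts the
// product to a sub-range of axes.
static void totalExample()
{
    MatShape s = shape(2, 3, 4, 5);
    CV_Assert(total(s) == 120);      // product of all dims
    CV_Assert(total(s, 1, 3) == 12); // 3 * 4
}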
bool getMemoryShapes(const std::vector<MatShape> &inputs,
                     const int requiredOutputs,
                     std::vector<MatShape> &outputs,
                     std::vector<MatShape> &internals) const
{
    CV_Assert(inputs.size() == 2);

    MatShape dstShape = inputs[0];
    int start = clamp(startAxis, dstShape);
    for (int i = start; i < dstShape.size(); i++)
    {
        dstShape[i] = inputs[1][i];
    }
    outputs.resize(1, dstShape);
    return false;
}
static inline std::string toString(const MatShape& shape, const String& name = "")
{
    std::ostringstream ss;
    if (!name.empty())
        ss << name << ' ';
    ss << '[';
    for (size_t i = 0, n = shape.size(); i < n; ++i)
        ss << ' ' << shape[i];
    ss << " ]";
    return ss.str();
}
bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
{
    std::vector<UMat> inputs;
    std::vector<UMat> outputs;

    inps.getUMatVector(inputs);
    outs.getUMatVector(outputs);

    for (size_t i = 0; i < inputs.size(); i++)
    {
        UMat srcBlob = inputs[i];
        void *src_handle = inputs[i].handle(ACCESS_READ);
        void *dst_handle = outputs[i].handle(ACCESS_WRITE);
        if (src_handle != dst_handle)
        {
            MatShape outShape = shape(outputs[i]);
            UMat umat = srcBlob.reshape(1, (int)outShape.size(), &outShape[0]);
            umat.copyTo(outputs[i]);
        }
    }
    outs.assign(outputs);

    return true;
}
void getCanonicalSize(const MatShape& shape, int* width, int* height, int* channels, int* batch)
{
    const int dims = shape.size();
    CV_Assert(dims == 2 || dims == 4);
    *batch = shape[0];
    *channels = shape[1];
    if (dims == 4)
    {
        *width = shape[3];
        *height = shape[2];
    }
    else
    {
        *width = 1;
        *height = 1;
    }
}
void blobShapeFromProto(const caffe::BlobProto &pbBlob, MatShape& shape)
{
    shape.clear();
    if (pbBlob.has_num() || pbBlob.has_channels() || pbBlob.has_height() || pbBlob.has_width())
    {
        shape.push_back(pbBlob.num());
        shape.push_back(pbBlob.channels());
        shape.push_back(pbBlob.height());
        shape.push_back(pbBlob.width());
    }
    else if (pbBlob.has_shape())
    {
        const caffe::BlobShape &_shape = pbBlob.shape();
        for (int i = 0; i < _shape.dim_size(); i++)
            shape.push_back((int)_shape.dim(i));
    }
    else
        shape.resize(1, 1);  // Is a scalar.
}
static void computeShapeByReshapeMask(const MatShape &srcShape,
                                      const MatShape &maskShape,
                                      Range srcRange /*= Range::all()*/,
                                      MatShape& dstShape)
{
    int srcShapeSize = (int)srcShape.size();
    int maskShapeSize = (int)maskShape.size();

    if (srcRange == Range::all())
        srcRange = Range(0, srcShapeSize);
    else
    {
        int sz = srcRange.size();
        srcRange.start = clamp(srcRange.start, srcShapeSize);
        srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
    }

    bool explicitMask = !maskShape.empty();  // All mask values are positive.
    for (int i = 0, n = maskShape.size(); i < n && explicitMask; ++i)
    {
        explicitMask = maskShape[i] > 0;
    }
    // Working range of source shape is a range where area(src) == area(mask).
    if (explicitMask)
    {
        int maskTotal = total(maskShape);
        // Go from the end of mask until we collect required total.
        bool matched = false;
        for (int i = srcRange.end - 1; i >= srcRange.start; --i)
        {
            if (matched)
            {
                if (i == 0 || total(srcShape, i, srcRange.end) != maskTotal)
                {
                    srcRange.start = i + 1;
                    break;
                }
            }
            else
            {
                matched = total(srcShape, i, srcRange.end) == maskTotal;
            }
        }
        CV_Assert(total(srcShape, srcRange.start, srcRange.end) == maskTotal);
    }

    CV_Assert(0 <= srcRange.start && srcRange.start <= srcRange.end && srcRange.end <= srcShapeSize);
    int dstShapeSize = srcShapeSize - srcRange.size() + maskShapeSize;
    dstShape.resize(dstShapeSize);

    std::copy(srcShape.begin(), srcShape.begin() + srcRange.start, dstShape.begin());
    std::copy(srcShape.begin() + srcRange.end, srcShape.begin() + srcShapeSize,
              dstShape.begin() + srcRange.start + maskShapeSize);

    int inferDim = -1;
    for (int i = 0; i < maskShapeSize; i++)
    {
        if (maskShape[i] > 0)
        {
            dstShape[srcRange.start + i] = maskShape[i];
        }
        else if (maskShape[i] == 0)
        {
            if (srcRange.start + i >= srcShapeSize)
                CV_Error(Error::StsBadArg, format("Copy dim[%d] (which has zero size) is out of the source shape bounds",
                                                  srcRange.start + i));
            dstShape[srcRange.start + i] = srcShape[srcRange.start + i];
        }
        else if (maskShape[i] == -1)
        {
            if (inferDim != -1)
                CV_Error(Error::StsAssert, "Duplicate of inferred dim (which is denoted by -1)");
            inferDim = srcRange.start + i;
            dstShape[inferDim] = 1;
        }
        else
            CV_Error(Error::StsBadArg, "maskShape[i] >= -1");
    }

    size_t srcTotal = total(srcShape);
    size_t dstTotal = total(dstShape);

    if (inferDim != -1)
    {
        if (srcTotal % dstTotal != 0)
            CV_Error(Error::StsBackTrace, "Can't infer a dim denoted by -1");

        dstShape[inferDim] = (int)(srcTotal / dstTotal);
    }
    else
    {
        CV_Assert(srcTotal == dstTotal);
    }
}
static void computeShapeByReshapeMask(const MatShape &srcShape,
                                      const MatShape &maskShape,
                                      Range srcRange /*= Range::all()*/,
                                      MatShape& dstShape)
{
    int srcShapeSize = (int)srcShape.size();
    int maskShapeSize = (int)maskShape.size();

    if (srcRange == Range::all())
        srcRange = Range(0, srcShapeSize);
    else
    {
        int sz = srcRange.size();
        srcRange.start = clamp(srcRange.start, srcShapeSize);
        srcRange.end = srcRange.end == INT_MAX ? srcShapeSize : srcRange.start + sz;
    }

    CV_Assert(0 <= srcRange.start && srcRange.start <= srcRange.end && srcRange.end <= srcShapeSize);
    int dstShapeSize = srcShapeSize - srcRange.size() + maskShapeSize;
    dstShape.resize(dstShapeSize);

    std::copy(srcShape.begin(), srcShape.begin() + srcRange.start, dstShape.begin());
    std::copy(srcShape.begin() + srcRange.end, srcShape.begin() + srcShapeSize,
              dstShape.begin() + srcRange.start + maskShapeSize);

    int inferDim = -1;
    for (int i = 0; i < maskShapeSize; i++)
    {
        if (maskShape[i] > 0)
        {
            dstShape[srcRange.start + i] = maskShape[i];
        }
        else if (maskShape[i] == 0)
        {
            if (srcRange.start + i >= srcShapeSize)
                CV_Error(Error::StsBadArg, format("Copy dim[%d] (which has zero size) is out of the source shape bounds",
                                                  srcRange.start + i));
            dstShape[srcRange.start + i] = srcShape[srcRange.start + i];
        }
        else if (maskShape[i] == -1)
        {
            if (inferDim != -1)
                CV_Error(Error::StsAssert, "Duplicate of inferred dim (which is denoted by -1)");
            inferDim = srcRange.start + i;
            dstShape[inferDim] = 1;
        }
        else
            CV_Error(Error::StsBadArg, "maskShape[i] >= -1");
    }

    size_t srcTotal = total(srcShape);
    size_t dstTotal = total(dstShape);

    if (inferDim != -1)
    {
        if (srcTotal % dstTotal != 0)
            CV_Error(Error::StsBackTrace, "Can't infer a dim denoted by -1");

        dstShape[inferDim] = (int)(srcTotal / dstTotal);
    }
    else
    {
        CV_Assert(srcTotal == dstTotal);
    }
}
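// A minimal sketch (not from the original sources) of the reshape-mask
// semantics implemented by the two variants above, assuming the function is
// visible in this translation unit: a positive mask entry sets the output
// dimension, 0 copies the corresponding source dimension, and a single -1 is
// inferred from the remaining total.
static void reshapeMaskExample()
{
    MatShape src = shape(2, 3, 4, 5);  // 120 elements
    MatShape mask(2);
    mask[0] = 0;                       // keep source dim 0
    mask[1] = -1;                      // infer the rest
    MatShape dst;
    computeShapeByReshapeMask(src, mask, Range::all(), dst);
    CV_Assert(dst == shape(2, 60));
}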
static inline MatShape shape(const int* dims, const int n)
{
    MatShape shape;
    shape.assign(dims, dims + n);
    return shape;
}
void Mat_to_MatShape(cv::Mat& mat, MatShape& matshape)
{
    matshape.clear();
    CHECK_MAT(mat.type() == CV_32SC1 && mat.cols == 1);
    matshape = (MatShape) mat;
}
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();
    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);

    // List of internal blobs shapes.
    std::map<std::string, MatShape> outShapes;
    // Add all the input shapes. This includes constant blobs as well as the network's input shapes.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name())
    {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end())
        {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for (int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;

        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("global_pooling", true);
        }
        else if (layer_type == "Add" || layer_type == "Sum")
        {
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1)
                {
                    layerParams.type = "Power";
                    layerParams.set("shift", blob.at<float>(0));
                }
                else
                {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back(blob);
                }
            }
            else
            {
                layerParams.type = "Eltwise";
            }
        }
        else if (layer_type == "Sub")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() == 1)
            {
                layerParams.type = "Power";
                layerParams.set("shift", -blob.at<float>(0));
            }
            else
            {
                layerParams.type = "Scale";
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
            }
        }
        else if (layer_type == "Div")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(blob.type() == CV_32F, blob.total());
            if (blob.total() == 1)
            {
                layerParams.set("scale", 1.0f / blob.at<float>(0));
                layerParams.type = "Power";
            }
            else
            {
                layerParams.type = "Scale";
                divide(1.0, blob, blob);
                layerParams.blobs.push_back(blob);
                layerParams.set("bias_term", false);
            }
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1, layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++)
                {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else
            {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData = getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty())
            {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            }
            else
            {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty())
            {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2));  // biasData
            }
            else
            {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB"))
            {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3)
            {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul")
        {
            CV_Assert(node_proto.input_size() == 2);
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1)
                {
                    layerParams.set("scale", blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else
                {
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else
            {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "prod");
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++)
            {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++)
            {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            Mat input = getBlob(node_proto, constBlobs, 0);

            DictValue axes = layerParams.get("axes");
            std::vector<int> dims;
            for (int j = 0; j < input.dims; j++)
            {
                dims.push_back(input.size[j]);
            }
            CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
            for (int j = 0; j < axes.size(); j++)
            {
                dims.insert(dims.begin() + axes.getIntValue(j), 1);
            }

            Mat out = input.reshape(0, dims);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2)
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                if (layer_id.find(node_proto.input(0)) == layer_id.end())
                {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, static_cast<std::vector<int> >(blob));
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));
            }
            else
            {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++)
                {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end())
                {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));
            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                Ptr<Layer> concat = ConcatLayer::create(layerParams);
                runLayer(concat, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++)
            {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }

        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
        layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));

        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
        for (int j = 0; j < node_proto.input_size(); j++)
        {
            layerId = layer_id.find(node_proto.input(j));
            if (layerId != layer_id.end())
            {
                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                // Collect input shapes.
                shapeIt = outShapes.find(node_proto.input(j));
                CV_Assert(shapeIt != outShapes.end());
                layerInpShapes.push_back(shapeIt->second);
            }
        }

        // Compute shape of output blob for this layer.
        Ptr<Layer> layer = dstNet.getLayer(id);
        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
        CV_Assert(!layerOutShapes.empty());
        outShapes[layerParams.name] = layerOutShapes[0];
    }
}
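// A minimal usage sketch (not part of the importer itself) of loading an ONNX
// model through the public dnn API that this importer backs; the file name,
// image name, and 224x224 input size are hypothetical placeholders.
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");   // hypothetical model path
    cv::Mat img = cv::imread("input.jpg");                       // hypothetical image
    // Convert the image to a 4D NCHW blob; the spatial size is an assumption.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(224, 224));
    net.setInput(blob);
    cv::Mat out = net.forward();
    return 0;
}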
inline int clamp(int ax, const MatShape& shape)
{
    return clamp(ax, (int)shape.size());
}
bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
{
    std::vector<UMat> inputs;
    std::vector<UMat> outputs;
    std::vector<UMat> internals;

    inputs_.getUMatVector(inputs);
    outputs_.getUMatVector(outputs);
    internals_.getUMatVector(internals);

    CV_Assert(inputs.size() == 1 && outputs.size() == 1);
    CV_Assert(inputs[0].total() == outputs[0].total());

    const UMat& inp0 = inputs[0];
    UMat& buffer = internals[0];
    size_t num = inp0.size[0];
    size_t channels = inp0.size[1];
    size_t channelSize = inp0.total() / (num * channels);

    for (size_t i = 0; i < num; ++i)
    {
        MatShape s = shape(channels, channelSize);
        UMat src = inputs[i].reshape(1, s.size(), &s[0]);
        UMat dst = outputs[i].reshape(1, s.size(), &s[0]);

        UMat abs_mat;
        absdiff(src, cv::Scalar::all(0), abs_mat);
        pow(abs_mat, pnorm, buffer);

        if (acrossSpatial)
        {
            // add eps to avoid overflow
            float absSum = sum(buffer)[0] + epsilon;
            float norm = pow(absSum, 1.0f / pnorm);
            multiply(src, 1.0f / norm, dst);
        }
        else
        {
            Mat norm;
            reduce(buffer, norm, 0, REDUCE_SUM);
            norm += epsilon;

            // compute the inverted norm so we can call multiply instead of divide
            cv::pow(norm, -1.0f / pnorm, norm);

            repeat(norm, channels, 1, buffer);
            multiply(src, buffer, dst);
        }

        if (!blobs.empty())
        {
            // scale the output
            Mat scale = blobs[0];
            if (scale.total() == 1)
            {
                // _scale: 1 x 1
                multiply(dst, scale.at<float>(0, 0), dst);
            }
            else
            {
                // _scale: _channels x 1
                CV_Assert(scale.total() == channels);

                repeat(scale, 1, dst.cols, buffer);
                multiply(dst, buffer, dst);
            }
        }
    }
    return true;
}