template<>
Ptr<Layer> createLayerFromCaffe<EltwiseLayer>(LayerParams& params)
{
    EltwiseLayer::EltwiseOp op = EltwiseLayer::SUM;
    if (params.has("operation"))
    {
        String operation = params.get<String>("operation").toLowerCase();
        if (operation == "prod")
            op = EltwiseLayer::PROD;
        else if (operation == "sum")
            op = EltwiseLayer::SUM;
        else if (operation == "max")
            op = EltwiseLayer::MAX;
        else
            CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\"");
    }

    std::vector<int> coeffs;
    if (params.has("coeff"))
    {
        DictValue paramCoeff = params.get("coeff");
        coeffs.resize(paramCoeff.size(), 1);
        for (int i = 0; i < paramCoeff.size(); i++)
        {
            coeffs[i] = paramCoeff.get<int>(i);
        }
    }
    return Ptr<Layer>(EltwiseLayer::create(op, coeffs));
}
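// Usage sketch (illustrative, not part of the importer; the function name is
// hypothetical): builds a LayerParams equivalent to a Caffe Eltwise message
// with the MAX operation and instantiates the layer through the factory above.
static Ptr<Layer> exampleCreateEltwiseMax()
{
    LayerParams lp;
    lp.set("operation", "max");  // parsed case-insensitively above
    int coeffVals[] = {1, 1};    // one coefficient per input blob
    lp.set("coeff", DictValue::arrayInt(coeffVals, 2));
    return createLayerFromCaffe<EltwiseLayer>(lp);
}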
ResizeNearestNeighborLayerImpl(const LayerParams& params)
{
    setParamsFrom(params);
    CV_Assert(params.has("width"), params.has("height"));
    outWidth = params.get<float>("width");
    outHeight = params.get<float>("height");
    alignCorners = params.get<bool>("align_corners", false);
    if (alignCorners)
        CV_Error(Error::StsNotImplemented,
                 "Nearest-neighbor resize with align_corners=true is not implemented");
}
bool ONNXImporter::isCeilMode(const LayerParams& layerParams)
{
    if (!layerParams.has("pad_mode"))
    {
        if (layerParams.has("pad_h"))
        {
            // Asymmetric padding (top != bottom or left != right) can only be
            // produced by ceil rounding of the output size.
            return layerParams.get<int>("pad_h") != layerParams.get<int>("pad_b") ||
                   layerParams.get<int>("pad_w") != layerParams.get<int>("pad_r");
        }
        else
            return false;  // all pads == 0
    }
    return true;
}
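// Example of the heuristic above: an exporter that used ceil rounding for a
// SAME-like pooling typically emits asymmetric pads, e.g.
//   pad_h = 0, pad_b = 1, pad_w = 0, pad_r = 1  -> isCeilMode() == true
// while symmetric explicit pads
//   pad_h = pad_b = 1, pad_w = pad_r = 1        -> isCeilMode() == false.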
template<>
Ptr<Layer> createLayerFromCaffe<PoolingLayer>(LayerParams &params)
{
    int type = PoolingLayer::MAX;
    Size kernel, stride, pad;
    bool globalPooling;
    cv::String padMode;

    if (params.has("pool"))
    {
        String pool = params.get<String>("pool").toLowerCase();
        if (pool == "max")
            type = PoolingLayer::MAX;
        else if (pool == "ave")
            type = PoolingLayer::AVE;
        else if (pool == "stochastic")
            type = PoolingLayer::STOCHASTIC;
        else
            CV_Error(Error::StsBadArg, "Unknown pooling type \"" + pool + "\"");
    }

    getPoolingKernelParams(params, kernel.height, kernel.width, globalPooling,
                           pad.height, pad.width, stride.height, stride.width, padMode);
    //getCaffeConvParams(params, kernel, pad, stride);

    if (!globalPooling)
        return Ptr<Layer>(PoolingLayer::create(type, kernel, stride, pad, padMode));
    else
        return Ptr<Layer>(PoolingLayer::createGlobal(type));
}
ConvolutionLayer::ConvolutionLayer(LayerParams &params) : Layer(params)
{
    getKernelParams(params, kerH, kerW, padH, padW, strideH, strideW);

    numOutput = params.get<int>("num_output");
    bias = params.get<bool>("bias_term", true);
    group = params.get<int>("group", 1);
    CV_Assert(numOutput % group == 0);

    CV_Assert(!bias || blobs.size() == 2);
    CV_Assert( bias || blobs.size() == 1);

    const Blob &wgtBlob = blobs[0];
    CV_Assert(wgtBlob.dims() == 4 && wgtBlob.cols() == kerW && wgtBlob.rows() == kerH);

    if (bias)
    {
        Blob &biasBlob = blobs[1];
        CV_Assert(biasBlob.total() == (size_t)numOutput);
    }

    //TBD
    useOpenCL = params.has("use_opencl");

#if HAVE_CBLAS
    {
        if (getBlasThreads() != cv::getThreadNum())
        {
            setBlasThreads(cv::getThreadNum());
        }
    }
#endif
}
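// Note on the assertions above: the weight blob follows the Caffe layout
// [numOutput x inputChannels/group x kerH x kerW], so for e.g. num_output=64,
// group=1, a 3-channel input and a 3x3 kernel, the blob has shape 64x3x3x3
// and the optional bias blob holds exactly 64 values.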
PriorBoxLayerImpl(const LayerParams &params)
{
    setParamsFrom(params);
    _minSize = getParameter<unsigned>(params, "min_size");
    CV_Assert(_minSize > 0);

    _flip = getParameter<bool>(params, "flip");
    _clip = getParameter<bool>(params, "clip");

    _aspectRatios.clear();
    _aspectRatios.push_back(1.);

    getAspectRatios(params);
    getVariance(params);

    _numPriors = _aspectRatios.size();

    _maxSize = -1;
    if (params.has("max_size"))
    {
        _maxSize = params.get("max_size").get<float>(0);
        CV_Assert(_maxSize > _minSize);
        _numPriors += 1;
    }

    if (params.has("step_h") || params.has("step_w"))
    {
        CV_Assert(!params.has("step"));
        _stepY = getParameter<float>(params, "step_h");
        CV_Assert(_stepY > 0.);
        _stepX = getParameter<float>(params, "step_w");
        CV_Assert(_stepX > 0.);
    }
    else if (params.has("step"))
    {
        const float step = getParameter<float>(params, "step");
        CV_Assert(step > 0);
        _stepY = step;
        _stepX = step;
    }
    else
    {
        _stepY = 0;
        _stepX = 0;
    }
}
bool getParameterDict(const LayerParams &params,
                      const std::string &parameterName,
                      DictValue& result)
{
    if (!params.has(parameterName))
    {
        return false;
    }
    result = params.get(parameterName);
    return true;
}
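// Usage sketch (illustrative; "dilation" is just an example key and the
// function name is hypothetical): probes an optional parameter without
// throwing, unlike params.get() on a missing key.
static int exampleReadDilation(const LayerParams &params)
{
    DictValue dilation;
    if (getParameterDict(params, "dilation", dilation))
        return dilation.get<int>(0);  // first element if a list was given
    return 1;                         // assumed default when absent
}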
SplitLayerImpl(const LayerParams &params)
{
    setParamsFrom(params);
    //TODO: maybe "top_count" param is useless because it can be determined by output connections number
    if (params.has("top_count"))
    {
        outputsCount = params.get<int>("top_count");
        CV_Assert(outputsCount >= 0);
    }
    else
    {
        outputsCount = -1;
    }
}
template<>
Ptr<Layer> createLayerFromCaffe<SplitLayer>(LayerParams &params)
{
    int outputsCount;

    //TODO: maybe "top_count" param is useless because it can be determined by output connections number
    if (params.has("top_count"))
    {
        outputsCount = params.get<int>("top_count");
        CV_Assert(outputsCount >= 0);
    }
    else
    {
        outputsCount = -1;
    }

    return Ptr<Layer>(SplitLayer::create(outputsCount));
}
ReshapeLayerImpl(const LayerParams& params)
{
    setParamsFrom(params);
    int axis = params.get<int>("axis", 0);
    int numAxes = params.get<int>("num_axes", -1);
    CV_Assert(numAxes >= -1);
    newShapeRange = (numAxes == -1) ? Range(axis, INT_MAX) : Range(axis, axis + numAxes);

    newShapeDesc.clear();
    if (params.has("dim"))
    {
        const DictValue &paramShape = params.get("dim");
        int i, dims = paramShape.size();
        newShapeDesc.resize(dims);
        for (i = 0; i < dims; i++)
            newShapeDesc[i] = paramShape.get<int>(i);
    }
}
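// Example of the range semantics above (standard Caffe Reshape behavior):
// axis=1, num_axes=2 and dim=[6] replace input axes [1, 3), so a blob of
// shape [N, 2, 3, H, W] becomes [N, 6, H, W]; with the defaults (axis=0,
// num_axes=-1) the whole shape is replaced.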
template<>
Ptr<Layer> createLayerFromCaffe<SliceLayer>(LayerParams& params)
{
    int axis = params.get<int>("axis", 1);

    if (!params.has("slice_point"))
    {
        return Ptr<Layer>(SliceLayer::create(axis));
    }
    else
    {
        const DictValue &indicesValue = params.get("slice_point");
        std::vector<int> sliceIndices(indicesValue.size());
        for (int i = 0; i < indicesValue.size(); i++)
            sliceIndices[i] = indicesValue.get<int>(i);

        return Ptr<Layer>(SliceLayer::create(axis, sliceIndices));
    }
}
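// Example of the slice_point semantics above: axis=1 with slice_point=[2, 5]
// splits an [N, 8, H, W] blob into three outputs of 2, 3 and 3 channels;
// without slice_point the blob is split evenly between the outputs.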
PaddingLayerImpl(const LayerParams &params)
{
    setParamsFrom(params);
    paddingValue = params.get<float>("value", 0);
    inputDims = params.get<int>("input_dims", -1);
    paddingType = params.get<String>("type", "constant");

    CV_Assert(params.has("paddings"));
    const DictValue& paddingsParam = params.get("paddings");
    CV_Assert((paddingsParam.size() & 1) == 0);

    paddings.resize(paddingsParam.size() / 2);
    for (int i = 0; i < paddings.size(); ++i)
    {
        paddings[i].first = paddingsParam.get<int>(i * 2);       // Pad before.
        paddings[i].second = paddingsParam.get<int>(i * 2 + 1);  // Pad after.
        CV_Assert(paddings[i].first >= 0, paddings[i].second >= 0);
    }
}
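// Example of the "paddings" encoding above: the list stores {before, after}
// pairs per dimension in order, so paddings=[0, 0, 0, 0, 1, 1, 2, 2] on a
// 4-D NCHW blob adds one row on top/bottom and two columns on left/right,
// filled with paddingValue for the "constant" type.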
template<>
Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
{
    int axis = params.get<int>("axis", 0);
    int numAxes = params.get<int>("num_axes", -1);
    bool enableReordering = params.get<bool>("reorder_dims", false);
    CV_Assert(numAxes >= -1);
    Range applyingRange = (numAxes == -1) ? Range(axis, INT_MAX) : Range(axis, axis + numAxes);

    Shape newShape;
    if (params.has("dim"))
    {
        const DictValue &paramShape = params.get("dim");
        newShape = Shape::all(paramShape.size());
        for (int i = 0; i < paramShape.size(); i++)
            newShape[i] = paramShape.get<int>(i);
    }
    else
        newShape = Shape::all(0);

    return Ptr<Layer>(ReshapeLayer::create(newShape, applyingRange, enableReordering));
}
PriorBoxLayerImpl(const LayerParams &params)
    : _boxWidth(0), _boxHeight(0)
{
    setParamsFrom(params);
    _minSize = getParameter<float>(params, "min_size", 0, false, 0);
    _flip = getParameter<bool>(params, "flip", 0, false, true);
    _clip = getParameter<bool>(params, "clip", 0, false, true);
    _bboxesNormalized = getParameter<bool>(params, "normalized_bbox", 0, false, true);

    _scales.clear();
    _aspectRatios.clear();

    getAspectRatios(params);
    getVariance(params);
    getParams("scales", params, &_scales);
    getParams("width", params, &_widths);
    getParams("height", params, &_heights);
    _explicitSizes = !_widths.empty();
    CV_Assert(_widths.size() == _heights.size());

    if (_explicitSizes)
    {
        CV_Assert(_aspectRatios.empty(), !params.has("min_size"), !params.has("max_size"));
        _numPriors = _widths.size();
    }
    else
    {
        CV_Assert(!_aspectRatios.empty(), _minSize > 0);
        _numPriors = _aspectRatios.size() + 1;  // + 1 for an aspect ratio 1.0
    }

    _maxSize = -1;
    if (params.has("max_size"))
    {
        _maxSize = params.get("max_size").get<float>(0);
        CV_Assert(_maxSize > _minSize);
        _numPriors += 1;
    }

    if (params.has("step_h") || params.has("step_w"))
    {
        CV_Assert(!params.has("step"));
        _stepY = getParameter<float>(params, "step_h");
        CV_Assert(_stepY > 0.);
        _stepX = getParameter<float>(params, "step_w");
        CV_Assert(_stepX > 0.);
    }
    else if (params.has("step"))
    {
        const float step = getParameter<float>(params, "step");
        CV_Assert(step > 0);
        _stepY = step;
        _stepX = step;
    }
    else
    {
        _stepY = 0;
        _stepX = 0;
    }

    if (params.has("offset_h") || params.has("offset_w"))
    {
        CV_Assert(!params.has("offset"), params.has("offset_h"), params.has("offset_w"));
        getParams("offset_h", params, &_offsetsY);
        getParams("offset_w", params, &_offsetsX);
        CV_Assert(_offsetsX.size() == _offsetsY.size());
        _numPriors *= std::max((size_t)1, 2 * (_offsetsX.size() - 1));
    }
    else
    {
        float offset = getParameter<float>(params, "offset", 0, false, 0.5);
        _offsetsX.assign(1, offset);
        _offsetsY.assign(1, offset);
    }
}
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();
    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // List of internal blobs shapes.
    std::map<std::string, MatShape> outShapes;
    // Add all the input shapes. This includes both constant blobs and the network's input shapes.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for (int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;

        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("global_pooling", true);
        }
        else if (layer_type == "Add" || layer_type == "Sum")
        {
            // A constant second operand becomes a Power (scalar) or Scale (per-channel) layer.
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", blob.at<float>(0));
                }
                else {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back(blob);
                }
            }
            else {
                layerParams.type = "Eltwise";
            }
        }
        else if (layer_type == "Sub")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() == 1) {
                layerParams.type = "Power";
                layerParams.set("shift", -blob.at<float>(0));
            }
            else {
                layerParams.type = "Scale";
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
            }
        }
        else if (layer_type == "Div")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(blob.type() == CV_32F, blob.total());
            if (blob.total() == 1)
            {
                layerParams.set("scale", 1.0f / blob.at<float>(0));
                layerParams.type = "Power";
            }
            else
            {
                layerParams.type = "Scale";
                divide(1.0, blob, blob);
                layerParams.blobs.push_back(blob);
                layerParams.set("bias_term", false);
            }
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1, layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");
            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData = getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2));  // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul")
        {
            CV_Assert(node_proto.input_size() == 2);
            if (layer_id.find(node_proto.input(1)) == layer_id.end()) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.set("scale", blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else {
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "prod");
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            Mat input = getBlob(node_proto, constBlobs, 0);

            DictValue axes = layerParams.get("axes");
            std::vector<int> dims;
            for (int j = 0; j < input.dims; j++) {
                dims.push_back(input.size[j]);
            }
            CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
            for (int j = 0; j < axes.size(); j++) {
                dims.insert(dims.begin() + axes.getIntValue(j), 1);
            }

            Mat out = input.reshape(0, dims);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    // Constant input: fold the reshape at import time.
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, static_cast<std::vector<int> >(blob));
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;  // make it a 1-D vector

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));

            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                // All inputs are constant: run the layer now and store the result.
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                Ptr<Layer> concat = ConcatLayer::create(layerParams);
                runLayer(concat, inputs, concatenated);
                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
        }

        int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
        layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));

        std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
        for (int j = 0; j < node_proto.input_size(); j++) {
            layerId = layer_id.find(node_proto.input(j));
            if (layerId != layer_id.end()) {
                dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                // Collect input shapes.
                shapeIt = outShapes.find(node_proto.input(j));
                CV_Assert(shapeIt != outShapes.end());
                layerInpShapes.push_back(shapeIt->second);
            }
        }

        // Compute shape of output blob for this layer.
        Ptr<Layer> layer = dstNet.getLayer(id);
        layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
        CV_Assert(!layerOutShapes.empty());
        outShapes[layerParams.name] = layerOutShapes[0];
    }
}
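// Usage sketch (illustrative; "model.onnx" is a placeholder path): the
// importer above is driven by the readNetFromONNX entry point.
static Mat exampleRunOnnxModel(const Mat &inputBlob)
{
    Net net = readNetFromONNX("model.onnx");  // internally calls populateNet
    net.setInput(inputBlob);                  // blob prepared e.g. via blobFromImage
    return net.forward();                     // shapes propagated via outShapes above
}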
PriorBoxLayerImpl(const LayerParams &params)
{
    setParamsFrom(params);
    _minSize = getParameter<float>(params, "min_size", 0, false, 0);
    _flip = getParameter<bool>(params, "flip", 0, false, true);
    _clip = getParameter<bool>(params, "clip", 0, false, true);
    _bboxesNormalized = getParameter<bool>(params, "normalized_bbox", 0, false, true);

    _aspectRatios.clear();

    getAspectRatios(params);
    getVariance(params);

    _maxSize = -1;
    if (params.has("max_size"))
    {
        _maxSize = params.get("max_size").get<float>(0);
        CV_Assert(_maxSize > _minSize);
    }

    std::vector<float> widths, heights;
    getParams("width", params, &widths);
    getParams("height", params, &heights);
    _explicitSizes = !widths.empty();
    CV_Assert(widths.size() == heights.size());

    if (_explicitSizes)
    {
        CV_Assert(_aspectRatios.empty());
        CV_Assert(!params.has("min_size"));
        CV_Assert(!params.has("max_size"));
        _boxWidths = widths;
        _boxHeights = heights;
    }
    else
    {
        CV_Assert(_minSize > 0);
        _boxWidths.resize(1 + (_maxSize > 0 ? 1 : 0) + _aspectRatios.size());
        _boxHeights.resize(_boxWidths.size());
        _boxWidths[0] = _boxHeights[0] = _minSize;

        int i = 1;
        if (_maxSize > 0)
        {
            // second prior: aspect_ratio = 1, size = sqrt(min_size * max_size)
            _boxWidths[i] = _boxHeights[i] = sqrt(_minSize * _maxSize);
            i += 1;
        }

        // rest of priors
        for (size_t r = 0; r < _aspectRatios.size(); ++r)
        {
            float arSqrt = sqrt(_aspectRatios[r]);
            _boxWidths[i + r] = _minSize * arSqrt;
            _boxHeights[i + r] = _minSize / arSqrt;
        }
    }
    CV_Assert(_boxWidths.size() == _boxHeights.size());
    _numPriors = _boxWidths.size();

    if (params.has("step_h") || params.has("step_w"))
    {
        CV_Assert(!params.has("step"));
        _stepY = getParameter<float>(params, "step_h");
        CV_Assert(_stepY > 0.);
        _stepX = getParameter<float>(params, "step_w");
        CV_Assert(_stepX > 0.);
    }
    else if (params.has("step"))
    {
        const float step = getParameter<float>(params, "step");
        CV_Assert(step > 0);
        _stepY = step;
        _stepX = step;
    }
    else
    {
        _stepY = 0;
        _stepX = 0;
    }

    if (params.has("offset_h") || params.has("offset_w"))
    {
        CV_Assert(!params.has("offset"), params.has("offset_h"), params.has("offset_w"));
        getParams("offset_h", params, &_offsetsY);
        getParams("offset_w", params, &_offsetsX);
        CV_Assert(_offsetsX.size() == _offsetsY.size());
        _numPriors *= std::max((size_t)1, 2 * (_offsetsX.size() - 1));
    }
    else
    {
        float offset = getParameter<float>(params, "offset", 0, false, 0.5);
        _offsetsX.assign(1, offset);
        _offsetsY.assign(1, offset);
    }
}
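// Worked example for the implicit-size branch above: min_size=30, max_size=60
// and aspect ratios {2, 0.5} (i.e. aspect_ratio=2 with flip=true) produce box
// sizes 30x30, ~42.4x42.4 (sqrt(30*60)), ~42.4x21.2 and ~21.2x42.4, so
// _numPriors == 4 before the offset multiplier is applied.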