Example #1
void fuseConvWeights(const std::shared_ptr<InferenceEngine::ConvolutionLayer>& conv,
                     const Mat& w, const Mat& b)
{
    CV_Assert(!w.empty() || !b.empty());
    if (!w.empty())
    {
        // Get the convolution's weights. Clone the data because Inference Engine may own it,
        // and conv->_weights->allocate() below would deallocate it.
        Mat originWeights = infEngineBlobToMat(conv->_weights).clone();

        // Create new weights blob.
        conv->_weights = InferenceEngine::make_shared_blob<float>(
                            InferenceEngine::Precision::FP32, conv->_weights->dims());
        conv->_weights->allocate();

        // Convolution weights have OIHW data layout.
        // Fusing a per-channel scale w and shift b2 into the convolution:
        // (conv(I) + b1) * w + b2  ==  (w * conv)(I) + (b1 * w + b2)
        Mat fusedWeights = infEngineBlobToMat(conv->_weights);

        const int numChannels = fusedWeights.size[0];
        CV_Assert(w.total() == (size_t)numChannels);
        CV_Assert(b.empty() || b.total() == (size_t)numChannels);
        for (int i = 0; i < numChannels; ++i)
        {
            cv::multiply(slice(originWeights, i), w.at<float>(i), slice(fusedWeights, i));
        }
    }
    if (conv->_biases)
    {
        // The same for biases.
        Mat originBiases = infEngineBlobToMat(conv->_biases).clone();

        conv->_biases = InferenceEngine::make_shared_blob<float>(
                            InferenceEngine::Precision::FP32, conv->_biases->dims());
        conv->_biases->allocate();
        Mat fusedBiases = infEngineBlobToMat(conv->_biases);
        originBiases.copyTo(fusedBiases);

        if (!w.empty())
            cv::multiply(w.reshape(1, fusedBiases.dims, &fusedBiases.size[0]), fusedBiases, fusedBiases);
        if (!b.empty())
            cv::add(fusedBiases, b.reshape(1, fusedBiases.dims, &fusedBiases.size[0]), fusedBiases);
    }
    else
        conv->_biases = wrapToInfEngineBlob(b);
}
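
For reference, here is a minimal standalone sketch of the same per-channel fusion algebra on plain cv::Mat data, with no Inference Engine involved; the tensor sizes and values (numOut, kernelArea, etc.) are made up purely for illustration:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // Conv weights flattened per output channel: O x (I*H*W), plus per-channel bias b1.
    const int numOut = 2, kernelArea = 3 * 3;
    cv::Mat W(numOut, kernelArea, CV_32F, cv::Scalar(1.0f));
    cv::Mat b1 = (cv::Mat_<float>(numOut, 1) << 0.5f, -0.5f);

    // Per-channel scale w and shift b2 of the layer being fused in.
    cv::Mat w  = (cv::Mat_<float>(numOut, 1) << 2.0f, 3.0f);
    cv::Mat b2 = (cv::Mat_<float>(numOut, 1) << 1.0f, 0.0f);

    // (conv(I) + b1) * w + b2  ==  (w * W)(I) + (w * b1 + b2)
    cv::Mat fusedW(W.size(), CV_32F);
    cv::Mat fusedB(numOut, 1, CV_32F);
    for (int i = 0; i < numOut; ++i)
    {
        cv::multiply(W.row(i), w.at<float>(i), fusedW.row(i));
        fusedB.at<float>(i) = w.at<float>(i) * b1.at<float>(i) + b2.at<float>(i);
    }

    std::cout << "fused weights:\n" << fusedW
              << "\nfused biases:\n" << fusedB << std::endl;
    return 0;
}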
Example #2
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
    {
#ifdef HAVE_INF_ENGINE
        // Inference Engine has no layer for biases alone. Emulate it with a
        // ScaleShift (linear transformation) layer whose weights are all ones.
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "ScaleShift";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));

        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
                                                                {blobs[0].total()});
        weights->allocate();

        std::vector<float> ones(blobs[0].total(), 1);
        weights->set(ones);
        ieLayer->_weights = weights;

        ieLayer->_biases = wrapToInfEngineBlob(blobs[0]);
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }
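
The "all-ones weights" trick above relies on ScaleShift being a per-channel linear transform y = w .* x + b, so unit weights reduce it to a pure bias addition. A tiny numeric check in plain OpenCV (the values are illustrative only):

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat x = (cv::Mat_<float>(1, 4) << 1, 2, 3, 4);        // input activations
    cv::Mat w = cv::Mat::ones(1, 4, CV_32F);                  // the "ones" weights
    cv::Mat b = (cv::Mat_<float>(1, 4) << 10, 20, 30, 40);    // biases (blobs[0])

    cv::Mat y = x.mul(w) + b;  // ScaleShift: y = w .* x + b; with w == 1 this is x + b
    std::cout << y << std::endl;
    return 0;
}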
Example #3
InfEngineBackendWrapper::InfEngineBackendWrapper(int targetId, const cv::Mat& m)
    : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE, targetId)
{
    // Expose the Mat to Inference Engine both as a data node (graph metadata)
    // and as a blob that shares the Mat's memory.
    dataPtr = wrapToInfEngineDataNode(m);
    blob = wrapToInfEngineBlob(m);
}
Example #4
InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m)
{
    // Reverse the shape: cv::Mat stores dimensions from outermost to innermost
    // (e.g. NCHW), while the blob created by the overload below expects the
    // opposite order.
    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
    std::reverse(reversedShape.begin(), reversedShape.end());
    return wrapToInfEngineBlob(m, reversedShape);
}
Example #5
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout)
{
    // Same shape reversal as above, with an explicit memory layout forwarded.
    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
    std::reverse(reversedShape.begin(), reversedShape.end());
    return wrapToInfEngineBlob(m, reversedShape, layout);
}
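
Both overloads above share the same shape handling. A standalone sketch of just that reversal step, with a made-up 1x3x224x224 input shape:

#include <opencv2/core.hpp>
#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    const int sz[] = {1, 3, 224, 224};                 // NCHW, as cv::Mat stores it
    cv::Mat m(4, sz, CV_32F, cv::Scalar(0));

    std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
    std::reverse(reversedShape.begin(), reversedShape.end());

    for (size_t d : reversedShape)
        std::cout << d << ' ';                         // prints: 224 224 3 1
    std::cout << std::endl;
    return 0;
}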