virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
{
#ifdef HAVE_INF_ENGINE
    // Mirror this layer as an Inference Engine "Reshape" layer carrying
    // the precomputed target shape descriptor.
    InferenceEngine::LayerParams params;
    params.name = name;
    params.type = "Reshape";
    params.precision = InferenceEngine::Precision::FP32;

    std::shared_ptr<InferenceEngine::ReshapeLayer> reshapeLayer(new InferenceEngine::ReshapeLayer(params));
    reshapeLayer->shape = newShapeDesc;
    return Ptr<BackendNode>(new InfEngineBackendNode(reshapeLayer));
#endif  // HAVE_INF_ENGINE
    // Backend not compiled in: signal "unsupported" with an empty node.
    return Ptr<BackendNode>();
}
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
{
#ifdef HAVE_INF_ENGINE
    // Inference Engine has no bias-only layer, so express the bias as a
    // ScaleShift (y = w*x + b) whose scale weights are all ones.
    InferenceEngine::LayerParams params;
    params.name = name;
    params.type = "ScaleShift";
    params.precision = InferenceEngine::Precision::FP32;

    std::shared_ptr<InferenceEngine::ScaleShiftLayer> scaleShiftLayer(new InferenceEngine::ScaleShiftLayer(params));

    // Identity scale: a weights blob filled with 1.0f, one per bias element.
    const size_t numElems = blobs[0].total();
    auto onesWeights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32, {numElems});
    onesWeights->allocate();
    std::vector<float> unitScales(numElems, 1);
    onesWeights->set(unitScales);

    scaleShiftLayer->_weights = onesWeights;
    scaleShiftLayer->_biases = wrapToInfEngineBlob(blobs[0]);
    return Ptr<BackendNode>(new InfEngineBackendNode(scaleShiftLayer));
#endif  // HAVE_INF_ENGINE
    // Backend not compiled in: signal "unsupported" with an empty node.
    return Ptr<BackendNode>();
}