ConvolutionLayer::ConvolutionLayer(LayerParams &params) : Layer(params)
{
    // Parse kernel, padding and stride sizes from the layer parameters.
    getKernelParams(params, kerH, kerW, padH, padW, strideH, strideW);

    numOutput = params.get<int>("num_output");
    bias = params.get<bool>("bias_term", true);
    group = params.get<int>("group", 1);
    CV_Assert(numOutput % group == 0);

    // Expect exactly one weight blob, plus a bias blob when bias_term is set.
    CV_Assert(!bias || blobs.size() == 2);
    CV_Assert( bias || blobs.size() == 1);

    // The weight blob must be 4-dimensional, with rows()/cols() equal to the kernel size.
    const Blob &wgtBlob = blobs[0];
    CV_Assert(wgtBlob.dims() == 4 && wgtBlob.cols() == kerW && wgtBlob.rows() == kerH);

    if (bias)
    {
        // One bias value per output channel.
        Blob &biasBlob = blobs[1];
        CV_Assert(biasBlob.total() == (size_t)numOutput);
    }

    //TBD: OpenCL support; currently only records whether the flag is present.
    useOpenCL = params.has("use_opencl");

    #if HAVE_CBLAS
    {
        // Match the BLAS thread setting to cv::getThreadNum() when CBLAS is available.
        if (getBlasThreads() != cv::getThreadNum())
        {
            setBlasThreads(cv::getThreadNum());
        }
    }
    #endif
}
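
For context, here is a minimal sketch of the LayerParams setup that the checks above imply. The key names follow the Caffe convention that getKernelParams appears to parse and are assumptions, not taken from this file; the blobs themselves would normally be filled in by a model importer rather than by hand.

// Sketch only: illustrates the constraints the constructor asserts.
LayerParams lp;
lp.set("num_output", 64);          // assumption: 64 output channels; must be divisible by "group"
lp.set("group", 1);
lp.set("bias_term", true);
lp.set("kernel_h", 3);             // kerH: blobs[0].rows() must equal this
lp.set("kernel_w", 3);             // kerW: blobs[0].cols() must equal this
lp.set("pad_h", 1);    lp.set("pad_w", 1);
lp.set("stride_h", 1); lp.set("stride_w", 1);
// lp.blobs[0]: 4-D weight blob, e.g. 64 x (inputChannels / group) x 3 x 3
// lp.blobs[1]: bias blob with total() == 64, required only because bias_term is true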
Example 2
ShiftLayer::ShiftLayer(LayerParams &params) : Layer(params)
{
    // A shift layer expects exactly one blob, which holds the shift values.
    CV_Assert(blobs.size() == 1);

    #ifdef HAVE_LAPACK
    {
        // Match the BLAS thread setting to cv::getThreadNum() when LAPACK is available.
        if (getBlasThreads() != cv::getThreadNum())
        {
            setBlasThreads(cv::getThreadNum());
        }
    }
    #endif
}
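
Under the same assumptions as the previous sketch, a ShiftLayer only needs a single blob attached to its LayerParams:

// Sketch only: the constructor checks nothing beyond the blob count.
LayerParams shiftParams;
// shiftParams.blobs[0]: the shift values; filled in by the importer
// ShiftLayer shift(shiftParams);  // would fail the assert above until blobs[0] is set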