Example #1
    void ConvolutionLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
    {
        Blob &wgtBlob = blobs[0];

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            Blob &inpBlob = *inputs[ii];
            Blob &outBlob = outputs[ii];

            for (int n = 0; n < inpBlob.num(); n++)
            {
                for (int g = 0; g < group; g++)
                {
                    // unfold the input patches for sample n, group g into colMat (ksize x outH*outW)
                    im2col(inpBlob, n, g);

                    // kerMat: this group's filters as an [outGroupCn x ksize] matrix; dstMat: its output channels
                    Mat kerMat(outGroupCn, ksize, wgtBlob.type(), wgtBlob.ptr(g*outGroupCn));
                    Mat dstMat(outGroupCn, outH*outW, outBlob.type(), outBlob.ptr(n, g*outGroupCn));

                    // convolution as a single matrix product: dstMat = kerMat * colMat
                    gemmCPU(kerMat, colMat, 1, dstMat, 0);
                    
                    if (bias)
                    {
                        // broadcast each output channel's bias over all spatial positions: dstMat += biasMat * biasOnesMat
                        float *biasPtr = blobs[1].ptrf() + g*outGroupCn;
                        Mat biasMat(outGroupCn, 1, CV_32F, biasPtr);
                        gemmCPU(biasMat, biasOnesMat, 1, dstMat, 1); //TODO: gemv
                    }
                }
            }
        }
    }
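
The bias step above is a rank-1 GEMM trick: an [outGroupCn x 1] column of per-channel biases multiplied by a row of ones (biasOnesMat) produces a matrix with constant rows, which is then accumulated into dstMat. A minimal standalone sketch of the same idea, using cv::gemm as a stand-in for the module's internal gemmCPU and made-up toy sizes:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        const int channels = 3, spatial = 4;   // toy stand-ins for outGroupCn and outH*outW

        cv::Mat dst  = cv::Mat::zeros(channels, spatial, CV_32F);         // pretend this already holds kerMat*colMat
        cv::Mat bias = (cv::Mat_<float>(channels, 1) << 0.1f, 0.2f, 0.3f);
        cv::Mat ones = cv::Mat::ones(1, spatial, CV_32F);                 // plays the role of biasOnesMat

        // out = 1 * (bias * ones) + 1 * dst: every channel row gets its own constant bias added
        cv::Mat out;
        cv::gemm(bias, ones, 1.0, dst, 1.0, out);

        std::cout << out << std::endl;   // row i is filled with bias[i]
        return 0;
    }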
Example #2
    void DeConvolutionLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
    {
        Blob &wghtBlob = blobs[0];

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            Blob &convBlob = *inputs[ii];
            Blob &decnBlob = outputs[ii];

            for (int n = 0; n < convBlob.num(); n++)
            {
                for (int g = 0; g < group; g++)
                {
                    Mat dstMat(inpGroupCn, inpH*inpW, decnBlob.type(), decnBlob.ptr(n, g*inpGroupCn));

                    // for 1x1 kernels no unfolding is needed, so the GEMM can write straight into the output
                    if (is1x1())
                        colMat = dstMat;

                    Mat convMat(outGroupCn, outH*outW, convBlob.type(), convBlob.ptr(n, g*outGroupCn));
                    Mat wghtMat(outGroupCn, ksize, wghtBlob.type(), wghtBlob.ptr(g*outGroupCn));
                    // reuse the forward-convolution weights transposed: colMat = wghtMat^T * convMat
                    gemmCPU(wghtMat, convMat, 1, colMat, 0, GEMM_1_T);

                    // scatter-add the columns back into the deconvolution output
                    col2im(dstMat);

                    if (bias)
                    {
                        float *biasPtr = blobs[1].ptrf() + g*inpGroupCn;
                        Mat biasMat(inpGroupCn, 1, CV_32F, biasPtr);
                        gemmCPU(biasMat, biasOnesMat, 1, dstMat, 1); //TODO: gemv
                    }
                }
            }
        }
    }
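
Example #2 runs the convolution weights through the GEMM transposed: with the GEMM_1_T flag, the same [outGroupCn x ksize] weight matrix used by the forward convolution yields colMat = wghtMat^T * convMat, whose columns col2im then scatters back into the image. A small shape check with cv::gemm and its cv::GEMM_1_T flag (again standing in for gemmCPU, with arbitrary toy dimensions):

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        const int outGroupCn = 2, ksize = 6, spatial = 3;   // arbitrary toy sizes

        cv::Mat wght(outGroupCn, ksize, CV_32F);    // weights laid out exactly as in the forward convolution
        cv::Mat conv(outGroupCn, spatial, CV_32F);  // activations coming from the convolution output
        cv::randu(wght, cv::Scalar(-1), cv::Scalar(1));
        cv::randu(conv, cv::Scalar(-1), cv::Scalar(1));

        // col = wght^T * conv -> [ksize x spatial]; col2im would then scatter these columns into the image
        cv::Mat col;
        cv::gemm(wght, conv, 1.0, cv::noArray(), 0.0, col, cv::GEMM_1_T);

        std::cout << col.rows << " x " << col.cols << std::endl;   // prints "6 x 3"
        return 0;
    }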
Example #3
glm::mat4 Math::BiasMat(){
	glm::mat4 biasMat(
	0.5, 0.0, 0.0, 0.0,
	0.0, 0.5, 0.0, 0.0,
	0.0, 0.0, 0.5, 0.0,
	0.5, 0.5, 0.5, 1.0);
	return biasMat;
}
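
The matrix returned by Math::BiasMat() is the usual shadow-mapping bias matrix: it remaps normalized device coordinates from [-1, 1] to the [0, 1] range used for depth-texture lookups. Note that GLM constructors are column-major, so the last four values form the translation column. A small sketch of how it behaves, assuming GLM is available:

    #include <glm/glm.hpp>
    #include <cstdio>

    int main()
    {
        // Same values as Math::BiasMat(): scale by 0.5, then translate by 0.5 on x, y and z.
        glm::mat4 biasMat(
            0.5f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.5f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.5f, 0.0f,
            0.5f, 0.5f, 0.5f, 1.0f);

        glm::vec4 ndcMin(-1.0f, -1.0f, -1.0f, 1.0f);
        glm::vec4 ndcMax( 1.0f,  1.0f,  1.0f, 1.0f);

        glm::vec4 a = biasMat * ndcMin;   // -> (0, 0, 0, 1)
        glm::vec4 b = biasMat * ndcMax;   // -> (1, 1, 1, 1)

        std::printf("(%.1f, %.1f, %.1f) (%.1f, %.1f, %.1f)\n", a.x, a.y, a.z, b.x, b.y, b.z);
        return 0;
    }

In a shadow-mapping pass this matrix is typically composed as biasMat * lightProjection * lightView * model before sampling the depth map.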
Example #4
    void FullyConnectedLayer::forward(std::vector<Blob*> &input, std::vector<Blob> &output)
    {
        for (size_t i = 0; i < input.size(); i++)
        {
            int M = (int)input[i]->total(0, axis);
            int N = numOutputs;
            int K = innerSize;

            Mat srcMat(M, K, input[i]->type(), input[i]->ptrf());
            Mat weight(N, K, blobs[0].type(), blobs[0].ptrf());
            Mat dstMat(M, N, output[i].type(), output[i].ptrf());

            // important: for performance reasons, Caffe stores the weights transposed ([numOutputs x innerSize]), hence GEMM_2_T below
            gemmCPU(srcMat, weight, 1, dstMat, 0, GEMM_2_T);

            if (bias)
            {
                Mat biasOnesMat = Mat::ones(M, 1, CV_32F);
                Mat biasMat(1, N, CV_32F, blobs[1].ptrf());
                gemmCPU(biasOnesMat, biasMat, 1, dstMat, 1);
            }
        }
    }
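
The fully connected forward pass is a single [M x K] by [K x N] product; because Caffe stores the weights as [N x K], the GEMM_2_T flag transposes the second operand, and the bias is appended by the outer product of a ones column with the bias row, which adds the same row to every sample. A standalone sketch of the same computation with cv::gemm (standing in for gemmCPU) and toy dimensions:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        const int M = 2, K = 4, N = 3;   // M samples, K inputs per sample, N outputs

        cv::Mat src(M, K, CV_32F), weight(N, K, CV_32F), bias(1, N, CV_32F);
        cv::randu(src,    cv::Scalar(-1), cv::Scalar(1));
        cv::randu(weight, cv::Scalar(-1), cv::Scalar(1));
        cv::randu(bias,   cv::Scalar(-1), cv::Scalar(1));

        // dst = src * weight^T -> [M x N]; the weights are stored [N x K], Caffe-style
        cv::Mat dst;
        cv::gemm(src, weight, 1.0, cv::noArray(), 0.0, dst, cv::GEMM_2_T);

        // out = ones(M,1) * bias(1,N) + dst: the same bias row is added to every sample
        cv::Mat ones = cv::Mat::ones(M, 1, CV_32F);
        cv::Mat out;
        cv::gemm(ones, bias, 1.0, dst, 1.0, out);

        std::cout << out << std::endl;
        return 0;
    }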