Example #1: TestRectStdDev::process()
bool TestRectStdDev::process()
{
    NCVStatus ncvStat;
    bool rcode = false;

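    // Size of the output (normalization) ROI: every anchor position at which the
    // rect still lies entirely inside the image. A non-positive size means the
    // rect does not fit, so there is nothing to test.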
    Ncv32s _normWidth = (Ncv32s)this->width - this->rect.x - this->rect.width + 1;
    Ncv32s _normHeight = (Ncv32s)this->height - this->rect.y - this->rect.height + 1;
    if (_normWidth <= 0 || _normHeight <= 0)
    {
        return true;
    }
    Ncv32u normWidth = (Ncv32u)_normWidth;
    Ncv32u normHeight = (Ncv32u)_normHeight;
    NcvSize32u szNormRoi(normWidth, normHeight);

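    // Integral images carry one extra row and column relative to the source image.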
    Ncv32u widthII = this->width + 1;
    Ncv32u heightII = this->height + 1;
    Ncv32u widthSII = this->width + 1;
    Ncv32u heightSII = this->height + 1;

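    // Allocate the source image, the integral and squared-integral images, and the
    // std-dev output on both device and host; h_norm_d receives the device result.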
    NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_img.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_img.isMemAllocated(), false);

    NCVMatrixAlloc<Ncv32u> d_imgII(*this->allocatorGPU.get(), widthII, heightII);
    ncvAssertReturn(d_imgII.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32u> h_imgII(*this->allocatorCPU.get(), widthII, heightII);
    ncvAssertReturn(h_imgII.isMemAllocated(), false);

    NCVMatrixAlloc<Ncv64u> d_imgSII(*this->allocatorGPU.get(), widthSII, heightSII);
    ncvAssertReturn(d_imgSII.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv64u> h_imgSII(*this->allocatorCPU.get(), widthSII, heightSII);
    ncvAssertReturn(h_imgSII.isMemAllocated(), false);

    NCVMatrixAlloc<Ncv32f> d_norm(*this->allocatorGPU.get(), normWidth, normHeight);
    ncvAssertReturn(d_norm.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32f> h_norm(*this->allocatorCPU.get(), normWidth, normHeight);
    ncvAssertReturn(h_norm.isMemAllocated(), false);
    NCVMatrixAlloc<Ncv32f> h_norm_d(*this->allocatorCPU.get(), normWidth, normHeight);
    ncvAssertReturn(h_norm_d.isMemAllocated(), false);

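    // Query the device scratch-buffer sizes for both integral computations and
    // allocate a single buffer large enough for either of them.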
    Ncv32u bufSizeII, bufSizeSII;
    ncvStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &bufSizeII, this->devProp);
    ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
    ncvStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &bufSizeSII, this->devProp);
    ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
    Ncv32u bufSize = bufSizeII > bufSizeSII ? bufSizeII : bufSizeSII;
    NCVVectorAlloc<Ncv8u> d_tmpBuf(*this->allocatorGPU.get(), bufSize);
    ncvAssertReturn(d_tmpBuf.isMemAllocated(), false);

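    // Skip the actual computation when the GPU allocator is only counting the
    // memory it would need.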
    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
    NCV_SKIP_COND_BEGIN
    ncvAssertReturn(this->src.fill(h_img), false);

    ncvStat = h_img.copySolid(d_img, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

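    // Device path: integral image, squared integral image, then the windowed
    // standard deviation over the rect.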
    ncvStat = nppiStIntegral_8u32u_C1R(d_img.ptr(), d_img.pitch(),
                                       d_imgII.ptr(), d_imgII.pitch(),
                                       NcvSize32u(this->width, this->height),
                                       d_tmpBuf.ptr(), bufSize, this->devProp);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    ncvStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),
                                          d_imgSII.ptr(), d_imgSII.pitch(),
                                          NcvSize32u(this->width, this->height),
                                          d_tmpBuf.ptr(), bufSize, this->devProp);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    ncvStat = nppiStRectStdDev_32f_C1R(d_imgII.ptr(), d_imgII.pitch(),
                                       d_imgSII.ptr(), d_imgSII.pitch(),
                                       d_norm.ptr(), d_norm.pitch(),
                                       szNormRoi, this->rect,
                                       this->scaleFactor,
                                       this->bTextureCache);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    ncvStat = d_norm.copySolid(h_norm_d, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

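    // Host (reference) path: the same pipeline computed on the CPU.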
    ncvStat = nppiStIntegral_8u32u_C1R_host(h_img.ptr(), h_img.pitch(),
                                          h_imgII.ptr(), h_imgII.pitch(),
                                          NcvSize32u(this->width, this->height));
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    ncvStat = nppiStSqrIntegral_8u64u_C1R_host(h_img.ptr(), h_img.pitch(),
                                             h_imgSII.ptr(), h_imgSII.pitch(),
                                             NcvSize32u(this->width, this->height));
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    ncvStat = nppiStRectStdDev_32f_C1R_host(h_imgII.ptr(), h_imgII.pitch(),
                                          h_imgSII.ptr(), h_imgSII.pitch(),
                                          h_norm.ptr(), h_norm.pitch(),
                                          szNormRoi, this->rect,
                                          this->scaleFactor);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    NCV_SKIP_COND_END

    // compare the device result against the host reference using a relative-error tolerance
    bool bLoopVirgin = true;

    NCV_SKIP_COND_BEGIN
    const Ncv64f relEPS = 0.005;
    for (Ncv32u i=0; bLoopVirgin && i < h_norm.height(); i++)
    {
        for (Ncv32u j=0; bLoopVirgin && j < h_norm.width(); j++)
        {
            Ncv64f absErr = fabs(h_norm.ptr()[h_norm.stride()*i+j] - h_norm_d.ptr()[h_norm_d.stride()*i+j]);
            Ncv64f relErr = absErr / h_norm.ptr()[h_norm.stride()*i+j];

            if (relErr > relEPS)
            {
                bLoopVirgin = false;
            }
        }
    }
    NCV_SKIP_COND_END

    if (bLoopVirgin)
    {
        rcode = true;
    }

    return rcode;
}
Example #2: TestIntegralImage<T_in, T_out>::process()
template <class T_in, class T_out>
bool TestIntegralImage<T_in, T_out>::process()
{
    NCVStatus ncvStat;
    bool rcode = false;

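    // The integral image carries one extra row and column relative to the source image.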
    Ncv32u widthII = this->width + 1;
    Ncv32u heightII = this->height + 1;

    NCVMatrixAlloc<T_in> d_img(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_img.isMemAllocated(), false);
    NCVMatrixAlloc<T_in> h_img(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_img.isMemAllocated(), false);
    NCVMatrixAlloc<T_out> d_imgII(*this->allocatorGPU.get(), widthII, heightII);
    ncvAssertReturn(d_imgII.isMemAllocated(), false);
    NCVMatrixAlloc<T_out> h_imgII(*this->allocatorCPU.get(), widthII, heightII);
    ncvAssertReturn(h_imgII.isMemAllocated(), false);
    NCVMatrixAlloc<T_out> h_imgII_d(*this->allocatorCPU.get(), widthII, heightII);
    ncvAssertReturn(h_imgII_d.isMemAllocated(), false);

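    // The device scratch-buffer size depends on the input type (8-bit or 32-bit float).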
    Ncv32u bufSize;
    if (sizeof(T_in) == sizeof(Ncv8u))
    {
        ncvStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &bufSize, this->devProp);
        ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
    }
    else if (sizeof(T_in) == sizeof(Ncv32f))
    {
        ncvStat = nppiStIntegralGetSize_32f32f(NcvSize32u(this->width, this->height), &bufSize, this->devProp);
        ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
    }

    NCVVectorAlloc<Ncv8u> d_tmpBuf(*this->allocatorGPU.get(), bufSize);
    ncvAssertReturn(d_tmpBuf.isMemAllocated(), false);

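    // Skip the actual computation when the GPU allocator is only counting the
    // memory it would need.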
    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
    NCV_SKIP_COND_BEGIN

    ncvAssertReturn(this->src.fill(h_img), false);

    ncvStat = h_img.copySolid(d_img, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

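    // Device path: dispatch to the integral kernel matching the input type.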
    if (sizeof(T_in) == sizeof(Ncv8u))
    {
        ncvStat = nppiStIntegral_8u32u_C1R((Ncv8u *)d_img.ptr(), d_img.pitch(),
                                           (Ncv32u *)d_imgII.ptr(), d_imgII.pitch(),
                                           NcvSize32u(this->width, this->height),
                                           d_tmpBuf.ptr(), bufSize, this->devProp);
        ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    }
    else if (sizeof(T_in) == sizeof(Ncv32f))
    {
        ncvStat = nppiStIntegral_32f32f_C1R((Ncv32f *)d_img.ptr(), d_img.pitch(),
                                            (Ncv32f *)d_imgII.ptr(), d_imgII.pitch(),
                                            NcvSize32u(this->width, this->height),
                                            d_tmpBuf.ptr(), bufSize, this->devProp);
        ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
    }

    ncvStat = d_imgII.copySolid(h_imgII_d, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

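    // Host (reference) path: compute the same integral image on the CPU.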
    if (sizeof(T_in) == sizeof(Ncv8u))
    {
        ncvStat = nppiStIntegral_8u32u_C1R_host((Ncv8u *)h_img.ptr(), h_img.pitch(),
                                                (Ncv32u *)h_imgII.ptr(), h_imgII.pitch(),
                                                NcvSize32u(this->width, this->height));
        ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    }
    else if (sizeof(T_in) == sizeof(Ncv32f))
    {
        ncvStat = nppiStIntegral_32f32f_C1R_host((Ncv32f *)h_img.ptr(), h_img.pitch(),
                                                 (Ncv32f *)h_imgII.ptr(), h_imgII.pitch(),
                                                 NcvSize32u(this->width, this->height));
        ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
    }

    NCV_SKIP_COND_END

    // compare device and host results: bit-exact for 8-bit input, small absolute tolerance for 32-bit float
    bool bLoopVirgin = true;

    NCV_SKIP_COND_BEGIN
    for (Ncv32u i=0; bLoopVirgin && i < h_img.height() + 1; i++)
    {
        for (Ncv32u j=0; bLoopVirgin && j < h_img.width() + 1; j++)
        {
            if (sizeof(T_in) == sizeof(Ncv8u))
            {
                if (h_imgII.ptr()[h_imgII.stride()*i+j] != h_imgII_d.ptr()[h_imgII_d.stride()*i+j])
                {
                    bLoopVirgin = false;
                }
            }
            else if (sizeof(T_in) == sizeof(Ncv32f))
            {
                if (fabsf((float)h_imgII.ptr()[h_imgII.stride()*i+j] - (float)h_imgII_d.ptr()[h_imgII_d.stride()*i+j]) > 0.01f)
                {
                    bLoopVirgin = false;
                }
            }
            else
            {
                ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
            }
        }
    }
    NCV_SKIP_COND_END

    if (bLoopVirgin)
    {
        rcode = true;
    }

    return rcode;
}