Example #1
OCL_PERF_TEST_P(SuperResolution_BTVL1, BTVL1,
            Combine(Values(szSmall64, szSmall128),
                    Values(MatType(CV_8UC1), MatType(CV_8UC3))))
{
    Size_MatType_t params = GetParam();
    const Size size = get<0>(params);
    const int type = get<1>(params);

    Mat frame(size, type);
    UMat dst(1, 1, 0);
    declare.in(frame, WARMUP_RNG);

    const int scale = 2;
    const int iterations = 50;
    const int temporalAreaRadius = 1;

    Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
    Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();

    superRes->set("scale", scale);
    superRes->set("iterations", iterations);
    superRes->set("temporalAreaRadius", temporalAreaRadius);
    superRes->set("opticalFlow", opticalFlow);

    superRes->setInput(makePtr<OneFrameSource_CPU>(frame));

    // skip first frame
    superRes->nextFrame(dst);

    OCL_TEST_CYCLE_N(10) superRes->nextFrame(dst);

    SANITY_CHECK_NOTHING();
}
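Example #1 relies on two local stubs, OneFrameSource_CPU and ZeroOpticalFlow, which are defined elsewhere in the superres perf file and not shown above. Below is a minimal sketch of what they might look like, assuming the cv::superres::FrameSource and cv::superres::DenseOpticalFlowExt interfaces; the bodies are an approximation for illustration, not the upstream implementation.

#include <opencv2/core.hpp>
#include <opencv2/superres.hpp>
#include <opencv2/superres/optical_flow.hpp>

using namespace cv;
using namespace cv::superres;

// Frame source that hands out the same frame on every call (sketch).
class OneFrameSource_CPU : public FrameSource
{
public:
    explicit OneFrameSource_CPU(const Mat& frame) : frame_(frame) {}

    void nextFrame(OutputArray frame)
    {
        frame_.copyTo(frame);
    }

    void reset()
    {
    }

private:
    Mat frame_;
};

// Optical-flow stub that always reports zero motion, so the benchmark
// measures only the super-resolution iterations (sketch).
class ZeroOpticalFlow : public DenseOpticalFlowExt
{
public:
    void calc(InputArray frame0, InputArray, OutputArray flow1, OutputArray flow2)
    {
        const Size size = frame0.size();
        if (flow2.needed())
        {
            flow1.create(size, CV_32FC1); flow1.setTo(Scalar::all(0));
            flow2.create(size, CV_32FC1); flow2.setTo(Scalar::all(0));
        }
        else
        {
            flow1.create(size, CV_32FC2); flow1.setTo(Scalar::all(0));
        }
    }

    void collectGarbage()
    {
    }
};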
Example #2
PERF_TEST_P( EstimateAffine, EstimateAffine2D, ESTIMATE_PARAMS )
{
    AffineParams params = GetParam();
    const int n = get<0>(params);
    const double confidence = get<1>(params);
    const int method = get<2>(params);
    const size_t refining = get<3>(params);

    Mat aff(2, 3, CV_64F);
    cv::randu(aff, -2., 2.);

    // LMEDS can't handle more than 50% outliers (by design),
    // so give it a larger share of clean points; m is the number of inliers
    int m;
    if (method == LMEDS)
        m = 3*n/5;
    else
        m = 2*n/5;
    const float shift_outl = 15.f;
    const float noise_level = 20.f;

    Mat fpts(1, n, CV_32FC2);
    Mat tpts(1, n, CV_32FC2);

    randu(fpts, 0., 100.);
    transform(fpts, tpts, aff);

    /* adding noise to some points */
    Mat outliers = tpts.colRange(m, n);
    outliers.reshape(1) += shift_outl;

    Mat noise (outliers.size(), outliers.type());
    randu(noise, 0., noise_level);
    outliers += noise;

    Mat aff_est;
    vector<uchar> inliers (n);

    warmup(inliers, WARMUP_WRITE);
    warmup(fpts, WARMUP_READ);
    warmup(tpts, WARMUP_READ);

    TEST_CYCLE()
    {
        aff_est = estimateAffine2D(fpts, tpts, inliers, method, 3, 2000, confidence, refining);
    }

    // we already have accuracy tests
    SANITY_CHECK_NOTHING();
}
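The fixture type EstimateAffine and the ESTIMATE_PARAMS grid are defined elsewhere in the test file. Judging from the get<i>(params) accesses above, they would look roughly like the sketch below; the concrete values in the grid are illustrative, not the ones used upstream.

#include <opencv2/ts.hpp>
#include <opencv2/calib3d.hpp>

// <point count, confidence, method, refine iterations>
typedef std::tuple<int, double, int, size_t> AffineParams;
typedef perf::TestBaseWithParam<AffineParams> EstimateAffine;

// Illustrative parameter grid; the real ESTIMATE_PARAMS in perf_affine2d.cpp
// may enumerate different values.
#define ESTIMATE_PARAMS testing::Combine(                                    \
        testing::Values(100000, 5000, 100),               /* point count */  \
        testing::Values(0.99, 0.95),                      /* confidence */   \
        testing::Values((int)cv::RANSAC, (int)cv::LMEDS), /* method */       \
        testing::Values((size_t)10, (size_t)0))           /* refine iters */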
Example #3
OCL_PERF_TEST_P(BufferPoolFixture, BufferPool_UMatIntegral10, Bool())
{
    BufferPoolState s(cv::ocl::getOpenCLAllocator()->getBufferPoolController(), GetParam());

    Size sz(1920, 1080);

    OCL_TEST_CYCLE()
    {
        for (int i = 0; i < 10; i++)
        {
            UMat src(sz, CV_32FC1);
            UMat dst;
            integral(src, dst);
            dst.getMat(ACCESS_READ); // complete async operations
        }
    }

    SANITY_CHECK_NOTHING();
}
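BufferPoolState (used here and in Example #6) is a small RAII helper from the OpenCL buffer-pool perf file, not part of the public API; the Bool() test parameter decides whether the UMat buffer pool is active during the cycle. A plausible sketch, assuming the cv::BufferPoolController interface (getMaxReservedSize, setMaxReservedSize and freeAllReservedBuffers are real methods; the exact enable/disable logic is an assumption):

#include <opencv2/core.hpp>
#include <opencv2/core/bufferpool.hpp>

// Enables or disables the OpenCL UMat buffer pool for one test and restores
// the previous reserved-size limit on destruction (sketch).
struct BufferPoolState
{
    cv::BufferPoolController* controller_;
    size_t oldMaxReservedSize_;

    BufferPoolState(cv::BufferPoolController* c, bool enable)
        : controller_(c), oldMaxReservedSize_(c ? c->getMaxReservedSize() : 0)
    {
        if (!controller_)
            return;
        if (enable)
            controller_->freeAllReservedBuffers();   // start from an empty pool
        else
            controller_->setMaxReservedSize(0);      // effectively turn the pool off
    }

    ~BufferPoolState()
    {
        if (controller_)
            controller_->setMaxReservedSize(oldMaxReservedSize_);
    }
};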
Example #4
PERF_TEST_P( Size_MatType, AccumulateWeighted,
             testing::Combine(
                 testing::Values(::perf::szODD, ::perf::szQVGA, ::perf::szVGA, ::perf::sz1080p),
                 testing::Values(CV_32FC1)
             )
           )
{
    Size sz = get<0>(GetParam());
    int dstType = get<1>(GetParam());

    Mat src(sz, CV_8UC1);
    Mat dst(sz, dstType);

    declare.time(100);
    declare.in(src, WARMUP_RNG).out(dst);

    TEST_CYCLE() accumulateWeighted(src, dst, 0.314);

    SANITY_CHECK_NOTHING();
}
Example #5
PERF_TEST_P(Size_CvtMode32F, DISABLED_cvtColor_32f,
            testing::Combine(
                testing::Values(::perf::szODD, ::perf::szVGA, ::perf::sz1080p),
                CvtMode32F::all()
                )
            )
{
    Size sz = get<0>(GetParam());
    int _mode = get<1>(GetParam()), mode = _mode;
    ChPair ch = getConversionInfo(mode);
    mode %= COLOR_COLORCVT_MAX;
    Mat src(sz, CV_32FC(ch.scn));
    Mat dst(sz, CV_32FC(ch.scn));

    declare.time(100);
    declare.in(src, WARMUP_RNG).out(dst);

    int runs = sz.width <= 320 ? 100 : 5;
    TEST_CYCLE_MULTIRUN(runs) cvtColor(src, dst, mode, ch.dcn);

    SANITY_CHECK_NOTHING();
}
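CvtMode32F, ChPair and getConversionInfo come from the cvtColor perf file and are not shown here. From the way they are used above, ChPair simply carries the source/destination channel counts for a conversion code; a hedged sketch:

// Channel counts for one color-conversion code; getConversionInfo(mode) is
// assumed to look this pair up in a table keyed by the COLOR_* constant.
struct ChPair
{
    ChPair(int _scn, int _dcn) : scn(_scn), dcn(_dcn) {}
    int scn;  // source channels
    int dcn;  // destination channels
};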
Example #6
OCL_PERF_TEST_P(BufferPoolFixture, BufferPool_UMatCanny10, Bool())
{
    BufferPoolState s(cv::ocl::getOpenCLAllocator()->getBufferPoolController(), GetParam());

    Size sz(1920, 1080);

    int aperture = 3;
    bool useL2 = false;
    double thresh_low = 100;
    double thresh_high = 120;

    OCL_TEST_CYCLE()
    {
        for (int i = 0; i < 10; i++)
        {
            UMat src(sz, CV_8UC1);
            UMat dst;
            Canny(src, dst, thresh_low, thresh_high, aperture, useL2);
            dst.getMat(ACCESS_READ); // complete async operations
        }
    }

    SANITY_CHECK_NOTHING();
}
Example #7
File: perf_net.cpp  Project: Aspie96/opencv
    void processNet(std::string weights, std::string proto, std::string halide_scheduler,
                    const Mat& input, const std::string& outputLayer,
                    const std::string& framework)
    {
        if (backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
        {
#if defined(HAVE_OPENCL)
            if (!cv::ocl::useOpenCL())
#endif
            {
                throw cvtest::SkipTestException("OpenCL is not available/disabled in OpenCV");
            }
        }
        if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
            throw SkipTestException("Skip OpenCL target of Inference Engine backend");

        randu(input, 0.0f, 1.0f);

        weights = findDataFile(weights, false);
        if (!proto.empty())
            proto = findDataFile(proto, false);
        if (backend == DNN_BACKEND_HALIDE)
        {
            if (halide_scheduler == "disabled")
                throw cvtest::SkipTestException("Halide test is disabled");
            if (!halide_scheduler.empty())
                halide_scheduler = findDataFile(std::string("dnn/halide_scheduler_") + (target == DNN_TARGET_OPENCL ? "opencl_" : "") + halide_scheduler, true);
        }
        if (framework == "caffe")
        {
            net = cv::dnn::readNetFromCaffe(proto, weights);
        }
        else if (framework == "torch")
        {
            net = cv::dnn::readNetFromTorch(weights);
        }
        else if (framework == "tensorflow")
        {
            net = cv::dnn::readNetFromTensorflow(weights, proto);
        }
        else
            CV_Error(Error::StsNotImplemented, "Unknown framework " + framework);

        net.setInput(blobFromImage(input, 1.0, Size(), Scalar(), false));
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        if (backend == DNN_BACKEND_HALIDE)
        {
            net.setHalideScheduler(halide_scheduler);
        }

        MatShape netInputShape = shape(1, 3, input.rows, input.cols);
        size_t weightsMemory = 0, blobsMemory = 0;
        net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory);
        int64 flops = net.getFLOPS(netInputShape);
        CV_Assert(flops > 0);

        net.forward(outputLayer); // warmup

        std::cout << "Memory consumption:" << std::endl;
        std::cout << "    Weights(parameters): " << divUp(weightsMemory, 1u<<20) << " Mb" << std::endl;
        std::cout << "    Blobs: " << divUp(blobsMemory, 1u<<20) << " Mb" << std::endl;
        std::cout << "Calculation complexity: " << flops * 1e-9 << " GFlops" << std::endl;

        PERF_SAMPLE_BEGIN()
            net.forward();
        PERF_SAMPLE_END()

        SANITY_CHECK_NOTHING();
    }
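processNet is a shared helper of the DNN perf fixture rather than a test by itself; individual networks call it with their model files, input blob and output layer. A typical call site might look like the sketch below, assuming a fixture such as DNNTestNetwork that provides backend, target and net, and registered through the usual PERF_TEST_P_/INSTANTIATE boilerplate; the file names, input size and layer name are illustrative.

// Hypothetical call site; the real perf_net.cpp defines its own list of networks.
PERF_TEST_P_(DNNTestNetwork, AlexNet)
{
    processNet("dnn/bvlc_alexnet.caffemodel", "dnn/bvlc_alexnet.prototxt",
               "alexnet.yml", Mat(cv::Size(227, 227), CV_32FC3),
               "prob", "caffe");
}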
Example #8
PERF_TEST_P(Sz, DISABLED_GeneralizedHoughGuil, CUDA_TYPICAL_MAT_SIZES)
{
    declare.time(10);

    const cv::Size imageSize = GetParam();

    const cv::Mat templ = readImage("cv/shared/templ.png", cv::IMREAD_GRAYSCALE);
    ASSERT_FALSE(templ.empty());

    cv::Mat image(imageSize, CV_8UC1, cv::Scalar::all(0));
    templ.copyTo(image(cv::Rect(50, 50, templ.cols, templ.rows)));

    cv::RNG rng(123456789);
    const int objCount = rng.uniform(5, 15);
    for (int i = 0; i < objCount; ++i)
    {
        double scale = rng.uniform(0.7, 1.3);
        bool rotate = 1 == rng.uniform(0, 2);

        cv::Mat obj;
        cv::resize(templ, obj, cv::Size(), scale, scale);
        if (rotate)
            obj = obj.t();

        cv::Point pos;

        pos.x = rng.uniform(0, image.cols - obj.cols);
        pos.y = rng.uniform(0, image.rows - obj.rows);

        cv::Mat roi = image(cv::Rect(pos, obj.size()));
        cv::add(roi, obj, roi);
    }

    cv::Mat edges;
    cv::Canny(image, edges, 50, 100);

    cv::Mat dx, dy;
    cv::Sobel(image, dx, CV_32F, 1, 0);
    cv::Sobel(image, dy, CV_32F, 0, 1);

    if (PERF_RUN_CUDA())
    {
        cv::Ptr<cv::GeneralizedHoughGuil> alg = cv::cuda::createGeneralizedHoughGuil();
        alg->setMaxAngle(90.0);
        alg->setAngleStep(2.0);

        const cv::cuda::GpuMat d_edges(edges);
        const cv::cuda::GpuMat d_dx(dx);
        const cv::cuda::GpuMat d_dy(dy);
        cv::cuda::GpuMat positions;

        alg->setTemplate(cv::cuda::GpuMat(templ));

        TEST_CYCLE() alg->detect(d_edges, d_dx, d_dy, positions);
    }
    else
    {
        cv::Ptr<cv::GeneralizedHoughGuil> alg = cv::createGeneralizedHoughGuil();
        alg->setMaxAngle(90.0);
        alg->setAngleStep(2.0);

        cv::Mat positions;

        alg->setTemplate(templ);

        TEST_CYCLE() alg->detect(edges, dx, dy, positions);
    }

    // The algorithm is not stable yet.
    SANITY_CHECK_NOTHING();
}
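The Sz fixture and the CUDA_TYPICAL_MAT_SIZES grid come from the CUDA perf utilities in the ts module. Conceptually they reduce to something like the sketch below; the exact sizes enumerated by the upstream macro may differ.

#include <opencv2/ts.hpp>

// One-parameter perf fixture over cv::Size (the CUDA perf utilities generate
// this via their DEF_PARAM_TEST_1 macro).
typedef perf::TestBaseWithParam<cv::Size> Sz;

// Illustrative size grid; the upstream CUDA_TYPICAL_MAT_SIZES macro enumerates
// a similar set of common resolutions.
#define CUDA_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)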