Example #1
void cv::gpu::interpolateFrames(const GpuMat& frame0, const GpuMat& frame1, const GpuMat& fu, const GpuMat& fv, const GpuMat& bu, const GpuMat& bv,
                                float pos, GpuMat& newFrame, GpuMat& buf, Stream& s)
{
    CV_Assert(frame0.type() == CV_32FC1);
    CV_Assert(frame1.size() == frame0.size() && frame1.type() == frame0.type());
    CV_Assert(fu.size() == frame0.size() && fu.type() == frame0.type());
    CV_Assert(fv.size() == frame0.size() && fv.type() == frame0.type());
    CV_Assert(bu.size() == frame0.size() && bu.type() == frame0.type());
    CV_Assert(bv.size() == frame0.size() && bv.type() == frame0.type());

    newFrame.create(frame0.size(), frame0.type());

    buf.create(6 * frame0.rows, frame0.cols, CV_32FC1);
    buf.setTo(Scalar::all(0));

    // occlusion masks
    GpuMat occ0 = buf.rowRange(0 * frame0.rows, 1 * frame0.rows);
    GpuMat occ1 = buf.rowRange(1 * frame0.rows, 2 * frame0.rows);

    // interpolated forward flow
    GpuMat fui = buf.rowRange(2 * frame0.rows, 3 * frame0.rows);
    GpuMat fvi = buf.rowRange(3 * frame0.rows, 4 * frame0.rows);

    // interpolated backward flow
    GpuMat bui = buf.rowRange(4 * frame0.rows, 5 * frame0.rows);
    GpuMat bvi = buf.rowRange(5 * frame0.rows, 6 * frame0.rows);

    size_t step = frame0.step;

    CV_Assert(frame1.step == step && fu.step == step && fv.step == step && bu.step == step && bv.step == step && newFrame.step == step && buf.step == step);

    cudaStream_t stream = StreamAccessor::getStream(s);
    NppStStreamHandler h(stream);

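    // describe the frames, flow fields, output and scratch slices to the NPP staging API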
    NppStInterpolationState state;

    state.size         = NcvSize32u(frame0.cols, frame0.rows);
    state.nStep        = static_cast<Ncv32u>(step);
    state.pSrcFrame0   = const_cast<Ncv32f*>(frame0.ptr<Ncv32f>());
    state.pSrcFrame1   = const_cast<Ncv32f*>(frame1.ptr<Ncv32f>());
    state.pFU          = const_cast<Ncv32f*>(fu.ptr<Ncv32f>());
    state.pFV          = const_cast<Ncv32f*>(fv.ptr<Ncv32f>());
    state.pBU          = const_cast<Ncv32f*>(bu.ptr<Ncv32f>());
    state.pBV          = const_cast<Ncv32f*>(bv.ptr<Ncv32f>());
    state.pos          = pos;
    state.pNewFrame    = newFrame.ptr<Ncv32f>();
    state.ppBuffers[0] = occ0.ptr<Ncv32f>();
    state.ppBuffers[1] = occ1.ptr<Ncv32f>();
    state.ppBuffers[2] = fui.ptr<Ncv32f>();
    state.ppBuffers[3] = fvi.ptr<Ncv32f>();
    state.ppBuffers[4] = bui.ptr<Ncv32f>();
    state.ppBuffers[5] = bvi.ptr<Ncv32f>();

    ncvSafeCall( nppiStInterpolateFrames(&state) );

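    // the NPP call runs on the supplied stream; on the default stream, block until it finishes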
    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
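
A minimal usage sketch for the function above, assuming the OpenCV 2.x cv::gpu module and flow fields computed beforehand (e.g. with cv::gpu::BroxOpticalFlow, which produces the CV_32FC1 flow components expected here); frame0, frame1, fu, fv, bu and bv stand in for real inputs:

// frame0/frame1: CV_32FC1 frames; (fu, fv) forward and (bu, bv) backward flow,
// all of the same size and type as frame0.
cv::gpu::GpuMat newFrame, buf;
cv::gpu::interpolateFrames(frame0, frame1, fu, fv, bu, bv,
                           0.5f,           // pos: temporal position between the two frames
                           newFrame, buf); // buf is scratch memory, reusable across calls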
Example #2
void cv::gpu::ORB_GPU::mergeKeyPoints(GpuMat& keypoints)
{
    using namespace cv::gpu::device::orb;

    int nAllkeypoints = 0;

    for (int level = 0; level < nLevels_; ++level)
        nAllkeypoints += keyPointsCount_[level];

    if (nAllkeypoints == 0)
    {
        keypoints.release();
        return;
    }

    ensureSizeIsEnough(ROWS_COUNT, nAllkeypoints, CV_32FC1, keypoints);

    int offset = 0;

    for (int level = 0; level < nLevels_; ++level)
    {
        if (keyPointsCount_[level] == 0)
            continue;

        float sf = getScale(scaleFactor_, firstLevel_, level);

        GpuMat keyPointsRange = keypoints.colRange(offset, offset + keyPointsCount_[level]);

        float locScale = level != firstLevel_ ? sf : 1.0f;

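        // rows 0 and 1: x/y locations, rescaled back to level-0 image coordinates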
        mergeLocation_gpu(keyPointsPyr_[level].ptr<short2>(0), keyPointsRange.ptr<float>(0), keyPointsRange.ptr<float>(1), keyPointsCount_[level], locScale, 0);

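        // rows 2 and 3: response and angle, copied from the per-level buffer;
        // rows 4 and 5 below: octave index and patch size at this scale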
        GpuMat range = keyPointsRange.rowRange(2, 4);
        keyPointsPyr_[level](Range(1, 3), Range(0, keyPointsCount_[level])).copyTo(range);

        keyPointsRange.row(4).setTo(Scalar::all(level));
        keyPointsRange.row(5).setTo(Scalar::all(patchSize_ * sf));

        offset += keyPointsCount_[level];
    }
}
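
mergeKeyPoints is an internal step of ORB_GPU::operator(); user code normally sees only its merged result. A sketch of the typical call path, assuming the OpenCV 2.x API (d_image is a hypothetical CV_8UC1 GpuMat):

cv::gpu::ORB_GPU orb(500); // maximum number of keypoints
cv::gpu::GpuMat d_keypoints, d_descriptors;
orb(d_image, cv::gpu::GpuMat(), d_keypoints, d_descriptors);
// convert the merged 6-row keypoint matrix back into cv::KeyPoint form
std::vector<cv::KeyPoint> keypoints;
orb.downloadKeyPoints(d_keypoints, keypoints);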
Example #3
void cv::gpu::ORB_GPU::computeDescriptors(GpuMat& descriptors)
{
    using namespace cv::gpu::device::orb;

    int nAllkeypoints = 0;

    for (int level = 0; level < nLevels_; ++level)
        nAllkeypoints += keyPointsCount_[level];

    if (nAllkeypoints == 0)
    {
        descriptors.release();
        return;
    }

    ensureSizeIsEnough(nAllkeypoints, descriptorSize(), CV_8UC1, descriptors);

    int offset = 0;

    for (int level = 0; level < nLevels_; ++level)
    {
        if (keyPointsCount_[level] == 0)
            continue;

        GpuMat descRange = descriptors.rowRange(offset, offset + keyPointsCount_[level]);

        if (blurForDescriptor)
        {
            // preprocess the resized image
            ensureSizeIsEnough(imagePyr_[level].size(), imagePyr_[level].type(), buf_);
            blurFilter->apply(imagePyr_[level], buf_, Rect(0, 0, imagePyr_[level].cols, imagePyr_[level].rows));
        }

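        // compute binary descriptors for this level, sampling the blurred image when enabled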
        computeOrbDescriptor_gpu(blurForDescriptor ? buf_ : imagePyr_[level], keyPointsPyr_[level].ptr<short2>(0), keyPointsPyr_[level].ptr<float>(2),
            keyPointsCount_[level], pattern_.ptr<int>(0), pattern_.ptr<int>(1), descRange, descriptorSize(), WTA_K_, 0);

        offset += keyPointsCount_[level];
    }
}
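
The descriptors produced here can be matched on the GPU as well; a sketch assuming OpenCV 2.x's BFMatcher_GPU with Hamming distance (appropriate for binary ORB descriptors) and two hypothetical descriptor sets d_descriptors1 and d_descriptors2:

cv::gpu::BFMatcher_GPU matcher(cv::NORM_HAMMING);
std::vector<cv::DMatch> matches;
matcher.match(d_descriptors1, d_descriptors2, matches);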
Example #4
template <typename T>
static void csbp_operator(StereoConstantSpaceBP& rthis, GpuMat& mbuf, GpuMat& temp, GpuMat& out, const GpuMat& left, const GpuMat& right, GpuMat& disp, Stream& stream)
{
    CV_DbgAssert(0 < rthis.ndisp && 0 < rthis.iters && 0 < rthis.levels && 0 < rthis.nr_plane
        && left.rows == right.rows && left.cols == right.cols && left.type() == right.type());

    CV_Assert(rthis.levels <= 8 && (left.type() == CV_8UC1 || left.type() == CV_8UC3 || left.type() == CV_8UC4));

    const Scalar zero = Scalar::all(0);

    cudaStream_t cudaStream = StreamAccessor::getStream(stream);

    ////////////////////////////////////////////////////////////////////////////////////////////
    // Init

    int rows = left.rows;
    int cols = left.cols;

    rthis.levels = std::min(rthis.levels, int(log((double)rthis.ndisp) / log(2.0)));
    int levels = rthis.levels;

    // compute sizes
    AutoBuffer<int> buf(levels * 3);
    int* cols_pyr = buf;
    int* rows_pyr = cols_pyr + levels;
    int* nr_plane_pyr = rows_pyr + levels;

    cols_pyr[0]     = cols;
    rows_pyr[0]     = rows;
    nr_plane_pyr[0] = rthis.nr_plane;

    for (int i = 1; i < levels; i++)
    {
        cols_pyr[i]     = cols_pyr[i-1] / 2;
        rows_pyr[i]     = rows_pyr[i-1] / 2;
        nr_plane_pyr[i] = nr_plane_pyr[i-1] * 2;
    }

    GpuMat u[2], d[2], l[2], r[2], disp_selected_pyr[2], data_cost, data_cost_selected;

    // allocate buffers
    int buffers_count = 10; // (up + down + left + right + disp_selected_pyr) * 2
    buffers_count += 2; //  data_cost has twice as many rows as the other buffers, hence +2 rather than +1
    buffers_count += 1; //  data_cost_selected
    mbuf.create(rows * rthis.nr_plane * buffers_count, cols, DataType<T>::type);

    data_cost          = mbuf.rowRange(0, rows * rthis.nr_plane * 2);
    data_cost_selected = mbuf.rowRange(data_cost.rows, data_cost.rows + rows * rthis.nr_plane);

    for(int k = 0; k < 2; ++k) // in/out
    {
        GpuMat sub1 = mbuf.rowRange(data_cost.rows + data_cost_selected.rows, mbuf.rows);
        GpuMat sub2 = sub1.rowRange((k+0)*sub1.rows/2, (k+1)*sub1.rows/2);

        GpuMat *buf_ptrs[] = { &u[k], &d[k], &l[k], &r[k], &disp_selected_pyr[k] };
        for(int _r = 0; _r < 5; ++_r)
        {
            *buf_ptrs[_r] = sub2.rowRange(_r * sub2.rows/5, (_r+1) * sub2.rows/5);
            assert(buf_ptrs[_r]->cols == cols && buf_ptrs[_r]->rows == rows * rthis.nr_plane);
        }
    }

    size_t elem_step = mbuf.step / sizeof(T);

    Size temp_size = data_cost.size();
    if ((size_t)temp_size.area() < elem_step * rows_pyr[levels - 1] * rthis.ndisp)
        temp_size = Size(static_cast<int>(elem_step), rows_pyr[levels - 1] * rthis.ndisp);

    temp.create(temp_size, DataType<T>::type);

    ////////////////////////////////////////////////////////////////////////////
    // Compute

    load_constants(rthis.ndisp, rthis.max_data_term, rthis.data_weight, rthis.max_disc_term, rthis.disc_single_jump, rthis.min_disp_th, left, right, temp);

    if (stream)
    {
        stream.enqueueMemSet(l[0], zero);
        stream.enqueueMemSet(d[0], zero);
        stream.enqueueMemSet(r[0], zero);
        stream.enqueueMemSet(u[0], zero);

        stream.enqueueMemSet(l[1], zero);
        stream.enqueueMemSet(d[1], zero);
        stream.enqueueMemSet(r[1], zero);
        stream.enqueueMemSet(u[1], zero);

        stream.enqueueMemSet(data_cost, zero);
        stream.enqueueMemSet(data_cost_selected, zero);
    }
    else
    {
        l[0].setTo(zero);
        d[0].setTo(zero);
        r[0].setTo(zero);
        u[0].setTo(zero);

        l[1].setTo(zero);
        d[1].setTo(zero);
        r[1].setTo(zero);
        u[1].setTo(zero);

        data_cost.setTo(zero);
        data_cost_selected.setTo(zero);
    }

    int cur_idx = 0;

    for (int i = levels - 1; i >= 0; i--)
    {
        if (i == levels - 1)
        {
            init_data_cost(left.rows, left.cols, disp_selected_pyr[cur_idx].ptr<T>(), data_cost_selected.ptr<T>(),
                elem_step, rows_pyr[i], cols_pyr[i], i, nr_plane_pyr[i], rthis.ndisp, left.channels(), rthis.use_local_init_data_cost, cudaStream);
        }
        else
        {
            compute_data_cost(disp_selected_pyr[cur_idx].ptr<T>(), data_cost.ptr<T>(), elem_step,
                left.rows, left.cols, rows_pyr[i], cols_pyr[i], rows_pyr[i+1], i, nr_plane_pyr[i+1], left.channels(), cudaStream);

            int new_idx = (cur_idx + 1) & 1;

            init_message(u[new_idx].ptr<T>(), d[new_idx].ptr<T>(), l[new_idx].ptr<T>(), r[new_idx].ptr<T>(),
                         u[cur_idx].ptr<T>(), d[cur_idx].ptr<T>(), l[cur_idx].ptr<T>(), r[cur_idx].ptr<T>(),
                         disp_selected_pyr[new_idx].ptr<T>(), disp_selected_pyr[cur_idx].ptr<T>(),
                         data_cost_selected.ptr<T>(), data_cost.ptr<T>(), elem_step, rows_pyr[i],
                         cols_pyr[i], nr_plane_pyr[i], rows_pyr[i+1], cols_pyr[i+1], nr_plane_pyr[i+1], cudaStream);

            cur_idx = new_idx;
        }

        calc_all_iterations(u[cur_idx].ptr<T>(), d[cur_idx].ptr<T>(), l[cur_idx].ptr<T>(), r[cur_idx].ptr<T>(),
                            data_cost_selected.ptr<T>(), disp_selected_pyr[cur_idx].ptr<T>(), elem_step,
                            rows_pyr[i], cols_pyr[i], nr_plane_pyr[i], rthis.iters, cudaStream);
    }

    if (disp.empty())
        disp.create(rows, cols, CV_16S);

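    // render into a CV_16S buffer: reuse disp directly if it already has that type,
    // otherwise compute into out and convert afterwards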
    out = ((disp.type() == CV_16S) ? disp : (out.create(rows, cols, CV_16S), out));

    if (stream)
        stream.enqueueMemSet(out, zero);
    else
        out.setTo(zero);

    compute_disp(u[cur_idx].ptr<T>(), d[cur_idx].ptr<T>(), l[cur_idx].ptr<T>(), r[cur_idx].ptr<T>(),
                 data_cost_selected.ptr<T>(), disp_selected_pyr[cur_idx].ptr<T>(), elem_step, out, nr_plane_pyr[0], cudaStream);

    if (disp.type() != CV_16S)
    {
        if (stream)
            stream.enqueueConvert(out, disp, disp.type());
        else
            out.convertTo(disp, disp.type());
    }
}
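
csbp_operator is dispatched from StereoConstantSpaceBP::operator() according to msg_type; a sketch of the public entry point, assuming OpenCV 2.x and a hypothetical pair of rectified 8-bit host images left and right:

cv::gpu::StereoConstantSpaceBP csbp(128 /*ndisp*/, 8 /*iters*/, 4 /*levels*/, 4 /*nr_plane*/);
cv::gpu::GpuMat d_left(left), d_right(right), d_disp;
csbp(d_left, d_right, d_disp); // d_disp holds the disparity map, CV_16S by default
cv::Mat h_disp(d_disp);        // download the result to the host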