Example #1
void cv::gpu::ORB_GPU::mergeKeyPoints(GpuMat& keypoints)
{
    using namespace cv::gpu::device::orb;

    // total number of keypoints accumulated over all pyramid levels
    int nAllkeypoints = 0;

    for (int level = 0; level < nLevels_; ++level)
        nAllkeypoints += keyPointsCount_[level];

    if (nAllkeypoints == 0)
    {
        keypoints.release();
        return;
    }

    // one column per keypoint; the rows hold x, y, response, angle, octave and size
    ensureSizeIsEnough(ROWS_COUNT, nAllkeypoints, CV_32FC1, keypoints);

    int offset = 0;

    for (int level = 0; level < nLevels_; ++level)
    {
        if (keyPointsCount_[level] == 0)
            continue;

        float sf = getScale(scaleFactor_, firstLevel_, level);

        GpuMat keyPointsRange = keypoints.colRange(offset, offset + keyPointsCount_[level]);

        // coordinates of the first level are already in image scale
        float locScale = level != firstLevel_ ? sf : 1.0f;

        // scale the per-level x/y locations and write them into rows 0 and 1
        mergeLocation_gpu(keyPointsPyr_[level].ptr<short2>(0), keyPointsRange.ptr<float>(0), keyPointsRange.ptr<float>(1), keyPointsCount_[level], locScale, 0);

        // copy response and angle from the per-level buffer into rows 2 and 3
        GpuMat range = keyPointsRange.rowRange(2, 4);
        keyPointsPyr_[level](Range(1, 3), Range(0, keyPointsCount_[level])).copyTo(range);

        // octave index and patch size scaled to this level
        keyPointsRange.row(4).setTo(Scalar::all(level));
        keyPointsRange.row(5).setTo(Scalar::all(patchSize_ * sf));

        offset += keyPointsCount_[level];
    }
}
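mergeKeyPoints is an internal step of ORB_GPU: it packs the per-level keypoint buffers into a single ROWS_COUNT x N device matrix. A minimal sketch of the public API that triggers it, assuming the OpenCV 2.4 gpu module and a hypothetical input file name:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // hypothetical input image, loaded as grayscale
    cv::Mat img = cv::imread("scene.png", 0);

    cv::gpu::GpuMat d_img(img), d_keypoints, d_descriptors;

    cv::gpu::ORB_GPU orb(500);  // detect up to 500 features

    // detection + description; mergeKeyPoints() is called internally
    orb(d_img, cv::gpu::GpuMat(), d_keypoints, d_descriptors);

    std::vector<cv::KeyPoint> keypoints;
    orb.downloadKeyPoints(d_keypoints, keypoints);  // copy merged keypoints to the host

    return 0;
}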
Example #2
int cv::gpu::FAST_GPU::getKeyPoints(GpuMat& keypoints)
{
    using namespace cv::gpu::cudev::fast;

    if (count_ == 0)
        return 0;

    // two rows: keypoint locations (short2) and responses (float)
    ensureSizeIsEnough(ROWS_COUNT, count_, CV_32FC1, keypoints);

    // with non-max suppression only the local maxima are kept; the survivor count is returned
    if (nonmaxSupression)
        return nonmaxSupression_gpu(kpLoc_.ptr<short2>(), count_, score_, keypoints.ptr<short2>(LOCATION_ROW), keypoints.ptr<float>(RESPONSE_ROW));

    // otherwise copy the raw candidate locations and report a zero response for each
    GpuMat locRow(1, count_, kpLoc_.type(), keypoints.ptr(0));
    kpLoc_.colRange(0, count_).copyTo(locRow);
    keypoints.row(1).setTo(Scalar::all(0));

    return count_;
}
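A minimal sketch of how getKeyPoints is reached through the public FAST_GPU interface, assuming the OpenCV 2.4 gpu module and a hypothetical input file:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // hypothetical input image, loaded as grayscale
    cv::Mat img = cv::imread("scene.png", 0);

    cv::gpu::GpuMat d_img(img), d_keypoints;

    cv::gpu::FAST_GPU fast(20 /*threshold*/, true /*nonmaxSupression*/);
    fast(d_img, cv::gpu::GpuMat(), d_keypoints);  // getKeyPoints() fills d_keypoints

    std::vector<cv::KeyPoint> keypoints;
    fast.downloadKeypoints(d_keypoints, keypoints);

    return 0;
}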
Example #3
int cv::gpu::FAST_GPU::getKeyPoints(GpuMat& keypoints)
{
    using namespace cv::gpu::device::fast;

    // this variant of the detector relies on global atomics to compact the keypoint list
    if (!TargetArchs::builtWith(GLOBAL_ATOMICS) || !DeviceInfo().supports(GLOBAL_ATOMICS))
        CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");

    if (count_ == 0)
        return 0;

    ensureSizeIsEnough(ROWS_COUNT, count_, CV_32FC1, keypoints);

    if (nonmaxSupression)
        return nonmaxSupression_gpu(kpLoc_.ptr<short2>(), count_, score_, keypoints.ptr<short2>(LOCATION_ROW), keypoints.ptr<float>(RESPONSE_ROW));

    GpuMat locRow(1, count_, kpLoc_.type(), keypoints.ptr(0));
    kpLoc_.colRange(0, count_).copyTo(locRow);
    keypoints.row(1).setTo(Scalar::all(0));

    return count_;
}
void cv::gpu::GeneralizedHough_GPU::download(const GpuMat& d_positions, OutputArray h_positions_, OutputArray h_votes_)
{
    if (d_positions.empty())
    {
        h_positions_.release();
        if (h_votes_.needed())
            h_votes_.release();
        return;
    }

    // row 0 holds the positions (float4), row 1 the vote counts (int3)
    CV_Assert(d_positions.rows == 2 && d_positions.type() == CV_32FC4);

    // download the position row to the host
    h_positions_.create(1, d_positions.cols, CV_32FC4);
    Mat h_positions = h_positions_.getMat();
    d_positions.row(0).download(h_positions);

    if (h_votes_.needed())
    {
        // reinterpret the second row as int3 votes and download it as well
        h_votes_.create(1, d_positions.cols, CV_32SC3);
        Mat h_votes = h_votes_.getMat();
        GpuMat d_votes(1, d_positions.cols, CV_32SC3, const_cast<int3*>(d_positions.ptr<int3>(1)));
        d_votes.download(h_votes);
    }
}
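download is the host-side counterpart of GeneralizedHough_GPU::detect. A minimal sketch, assuming the OpenCV 2.4 gpu module, the GHT_POSITION variant, and hypothetical image files:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    // hypothetical template and search images, loaded as grayscale
    cv::Mat templ = cv::imread("templ.png", 0);
    cv::Mat image = cv::imread("image.png", 0);

    cv::Ptr<cv::gpu::GeneralizedHough_GPU> hough =
        cv::gpu::GeneralizedHough_GPU::create(cv::GHT_POSITION);

    hough->setTemplate(cv::gpu::GpuMat(templ));

    cv::gpu::GpuMat d_positions;
    hough->detect(cv::gpu::GpuMat(image), d_positions);

    // each entry is (x, y, scale, angle); the optional vote output is skipped here
    std::vector<cv::Vec4f> positions;
    hough->download(d_positions, positions);

    return 0;
}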