//! find keypoints and compute it's response if _bNonMaxSupression is true //! return count of detected keypoints //return the # of keypoints, stored in _uCount; //store Key point locations into _cvgmKeyPointLocation; //store the corner strength into _cvgmScore; int CFast::calcKeyPointsLocation(const cv::gpu::GpuMat& cvgmImg_, const cv::gpu::GpuMat& cvgmMask_) { //using namespace cv::gpu::device::fast; CV_Assert(cvgmImg_.type() == CV_8UC1); CV_Assert(cvgmMask_.empty() || (cvgmMask_.type() == CV_8UC1 && cvgmMask_.size() == cvgmImg_.size())); if (!cv::gpu::TargetArchs::builtWith(cv::gpu::GLOBAL_ATOMICS) || !cv::gpu::DeviceInfo().supports(cv::gpu::GLOBAL_ATOMICS)) CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics"); unsigned int uMaxKeypoints = static_cast<unsigned int>(_dKeyPointsRatio * cvgmImg_.size().area()); ensureSizeIsEnough(1, uMaxKeypoints, CV_16SC2, _cvgmKeyPointLocation); if (_bNonMaxSupression) { ensureSizeIsEnough(cvgmImg_.size(), CV_32SC1, _cvgmScore); _cvgmScore.setTo(cv::Scalar::all(0)); } _uCount = btl::device::fast::cudaCalcKeypoints(cvgmImg_, cvgmMask_, uMaxKeypoints, _nThreshold, _cvgmKeyPointLocation.ptr<short2>(), _bNonMaxSupression ? &_cvgmScore : NULL); _uCount = std::min(_uCount, uMaxKeypoints); return _uCount; }
//! Detect FAST keypoints on cvgmImage_ (restricted by cvgmMask_ when it is
//! non-empty) and download the result into *pvKeyPoints_.
//! @param cvgmImage_   8-bit single-channel device image; no-op when empty.
//! @param cvgmMask_    optional mask, same size as the image, or empty.
//! @param pvKeyPoints_ output vector of host keypoints; must not be NULL.
void CFast::operator ()(const cv::gpu::GpuMat& cvgmImage_, const cv::gpu::GpuMat& cvgmMask_, std::vector<cv::KeyPoint>* pvKeyPoints_)
{
	// Fix: the original dereferenced pvKeyPoints_ unconditionally via &*pvKeyPoints_,
	// which is undefined behavior for a NULL pointer; fail loudly instead.
	CV_Assert(pvKeyPoints_ != NULL);
	if (cvgmImage_.empty()) return;
	// Resolves to the GpuMat* overload (defined elsewhere in this class) that
	// fills _cvgmdKeyPoints on the device; then transfer the results to host.
	(*this)(cvgmImage_, cvgmMask_, &_cvgmdKeyPoints);
	downloadKeypoints(_cvgmdKeyPoints, pvKeyPoints_);
}
//! Copy device-side keypoint data to host memory and decode it into
//! cv::KeyPoint objects via convertKeypoints(). When the device matrix is
//! empty, *pvKeyPoints_ is left untouched.
void CFast::downloadKeypoints(const cv::gpu::GpuMat& cvgmKeyPoints_, std::vector<cv::KeyPoint>* pvKeyPoints_)
{
	if (cvgmKeyPoints_.empty()) return;
	// Explicit device-to-host transfer (equivalent to the cv::Mat(GpuMat) ctor).
	cv::Mat cvmHostKeyPoints;
	cvgmKeyPoints_.download(cvmHostKeyPoints);
	convertKeypoints(cvmHostKeyPoints, pvKeyPoints_);
}