//! Find keypoints and, if _bNonMaxSupression is true, compute their response.
//! Returns the number of detected keypoints (also stored in _uCount).
//! Keypoint locations are stored in _cvgmKeyPointLocation; corner strengths in _cvgmScore.
int CFast::calcKeyPointsLocation(const cv::gpu::GpuMat& cvgmImg_, const cv::gpu::GpuMat& cvgmMask_)
{
    //using namespace cv::gpu::device::fast;

    CV_Assert(cvgmImg_.type() == CV_8UC1);
    CV_Assert(cvgmMask_.empty() || (cvgmMask_.type() == CV_8UC1 && cvgmMask_.size() == cvgmImg_.size()));

    if (!cv::gpu::TargetArchs::builtWith(cv::gpu::GLOBAL_ATOMICS) || !cv::gpu::DeviceInfo().supports(cv::gpu::GLOBAL_ATOMICS))
        CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");

    // Upper bound on the number of keypoints, proportional to the image area.
    unsigned int uMaxKeypoints = static_cast<unsigned int>(_dKeyPointsRatio * cvgmImg_.size().area());

    ensureSizeIsEnough(1, uMaxKeypoints, CV_16SC2, _cvgmKeyPointLocation);

    if (_bNonMaxSupression)
    {
        ensureSizeIsEnough(cvgmImg_.size(), CV_32SC1, _cvgmScore);
        _cvgmScore.setTo(cv::Scalar::all(0));
    }

    // Launch the CUDA keypoint detector; the score image is only filled when
    // non-maximum suppression is requested.
    _uCount = btl::device::fast::cudaCalcKeypoints(cvgmImg_, cvgmMask_, uMaxKeypoints, _nThreshold,
                                                   _cvgmKeyPointLocation.ptr<short2>(),
                                                   _bNonMaxSupression ? &_cvgmScore : NULL);
    _uCount = std::min(_uCount, uMaxKeypoints);

    return _uCount;
}
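// --- Usage sketch (illustrative only, not part of the build) ----------------
// A minimal, hypothetical example of driving CFast::calcKeyPointsLocation from
// host code. The helper detectFastKeypoints() and the way the CFast instance is
// obtained are assumptions; only calcKeyPointsLocation itself comes from this file.
#if 0
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

void detectFastKeypoints(const cv::Mat& cvmGray, CFast& cFast)
{
    cv::gpu::GpuMat cvgmGray;
    cvgmGray.upload(cvmGray);          // move the 8-bit grayscale frame onto the GPU

    cv::gpu::GpuMat cvgmMask;          // empty mask: detect over the whole image
    int nKeyPoints = cFast.calcKeyPointsLocation(cvgmGray, cvgmMask);
    // _cvgmKeyPointLocation now holds nKeyPoints short2 pixel coordinates, and
    // _cvgmScore holds the corner response map when _bNonMaxSupression is set.
}
#endif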
void cv::gpu::MOG_GPU::operator()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float learningRate, Stream& stream)
{
    using namespace cv::gpu::cudev::mog;

    CV_Assert(frame.depth() == CV_8U);

    int ch = frame.channels();
    int work_ch = ch;

    // (Re)initialize the mixture model on the first frame, on a forced full relearn,
    // or when the frame geometry or channel count changes.
    if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
        initialize(frame.size(), frame.type());

    fgmask.create(frameSize_, CV_8UC1);

    ++nframes_;
    // A negative learning rate (or the very first frame) falls back to 1 / min(nframes, history).
    learningRate = learningRate >= 0.0f && nframes_ > 1 ? learningRate : 1.0f / std::min(nframes_, history);
    CV_Assert(learningRate >= 0.0f);

    mog_gpu(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_,
            varThreshold, learningRate, backgroundRatio, noiseSigma,
            StreamAccessor::getStream(stream));
}
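// --- Usage sketch (illustrative only, not part of the build) ----------------
// Typical call pattern for cv::gpu::MOG_GPU from the OpenCV 2.4 gpu module; the
// camera index, learning rate, and window name below are placeholders.
#if 0
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::VideoCapture cap(0);
    cv::gpu::MOG_GPU mog;

    cv::Mat frame, fgmask;
    cv::gpu::GpuMat d_frame, d_fgmask;
    while (cap.read(frame))
    {
        d_frame.upload(frame);
        mog(d_frame, d_fgmask, 0.01f); // a negative learningRate would fall back to 1/min(nframes, history)

        d_fgmask.download(fgmask);
        cv::imshow("foreground", fgmask);
        if (cv::waitKey(30) >= 0) break;
    }
    return 0;
}
#endif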