void buildPyramid(const cv::gpu::GpuMat& cvgmGray_)
{
    // Level 0 is the full-resolution grayscale image.
    cvgmGray_.copyTo(*_acvgmShrPtrPyrBWs[0]);

    // Each further level is downsampled by a factor of 2 (4 levels in total).
    for (int n = 0; n < 3; n++)
    {
        cv::gpu::resize(*_acvgmShrPtrPyrBWs[n], *_acvgmShrPtrPyrBWs[n + 1], cv::Size(0, 0), .5f, .5f);
    }
    return;
}
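// Context sketch, not from the original source: buildPyramid reads like a class member that
// writes into _acvgmShrPtrPyrBWs, an array of shared GpuMat pointers. The shared-pointer type,
// the four-level count, and the input file name below are assumptions made for illustration.
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

struct PyramidContext
{
    boost::shared_ptr<cv::gpu::GpuMat> _acvgmShrPtrPyrBWs[4]; // level 0 = full resolution (assumed)

    PyramidContext()
    {
        for (int n = 0; n < 4; ++n)
            _acvgmShrPtrPyrBWs[n].reset(new cv::gpu::GpuMat());
    }

    void buildPyramid(const cv::gpu::GpuMat& cvgmGray_)
    {
        cvgmGray_.copyTo(*_acvgmShrPtrPyrBWs[0]);
        for (int n = 0; n < 3; ++n)
            cv::gpu::resize(*_acvgmShrPtrPyrBWs[n], *_acvgmShrPtrPyrBWs[n + 1], cv::Size(0, 0), .5f, .5f);
    }
};

int main()
{
    cv::Mat gray = cv::imread("frame.png", CV_LOAD_IMAGE_GRAYSCALE); // hypothetical input image
    cv::gpu::GpuMat cvgmGray;
    cvgmGray.upload(gray);                 // host -> device

    PyramidContext ctx;
    ctx.buildPyramid(cvgmGray);            // fills levels 0..3, each half the size of the previous one
    return 0;
}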
static void processFrameGPU(cv::gpu::GpuMat &m)
{
    // Reusable intermediate buffers (static so they are not reallocated every frame).
    static cv::gpu::GpuMat aux;
    static cv::gpu::GpuMat aux2;
    static cv::gpu::GpuMat aux3;

    cv::gpu::cvtColor(m, aux, CV_BGR2GRAY);                 // BGR -> grayscale
    cv::gpu::GaussianBlur(aux, aux2, cv::Size(7, 7), 1.5);  // smooth before edge detection
    cv::gpu::Canny(aux2, aux3, 2, 15);                      // Canny edges (low/high thresholds)
    aux3.copyTo(m);                                         // write the edge map back into the input
}
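// Usage sketch (assumed driver code, not from the original source): runs processFrameGPU on
// every frame of a camera stream. The camera index, window name, and exit key are arbitrary.
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
    cv::VideoCapture cap(0);              // hypothetical camera source
    if (!cap.isOpened()) return -1;

    cv::Mat frame, edges;
    cv::gpu::GpuMat d_frame;

    for (;;)
    {
        cap >> frame;
        if (frame.empty()) break;

        d_frame.upload(frame);            // host -> device
        processFrameGPU(d_frame);         // grayscale + blur + Canny on the GPU
        d_frame.download(edges);          // device -> host

        cv::imshow("edges", edges);
        if (cv::waitKey(1) == 27) break;  // ESC quits
    }
    return 0;
}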
void cv::softcascade::SCascade::detect(InputArray _image, InputArray _rois, OutputArray _objects, cv::gpu::Stream& s) const
{
    CV_Assert(fields);

    // only color images and precomputed integrals are supported
    int type = _image.type();
    CV_Assert(type == CV_8UC3 || type == CV_32SC1 || (!_rois.empty()));

    const cv::gpu::GpuMat image = _image.getGpuMat();

    // Allocate the output buffer on first use (room for up to 4096 detections).
    if (_objects.empty()) _objects.create(1, 4096 * sizeof(Detection), CV_8UC1);

    cv::gpu::GpuMat rois = _rois.getGpuMat(), objects = _objects.getGpuMat();

    /// roi
    Fields& flds = *fields;
    int shr = flds.shrinkage;

    // Shrink the ROI mask by the cascade's shrinkage factor.
    flds.mask.create(rois.cols / shr, rois.rows / shr, rois.type());
    device::shrink(rois, flds.mask);
    //cv::gpu::transpose(flds.genRoiTmp, flds.mask, s);

    if (type == CV_8UC3)
    {
        // Color input: compute the feature channels and their integral images.
        flds.update(image.rows, image.cols, flds.shrinkage);

        if (flds.check((float)minScale, (float)maxScale, scales))
            flds.createLevels(image.rows, image.cols);

        flds.preprocessor->apply(image, flds.shrunk);
        integral(flds.shrunk, flds.hogluv, flds.integralBuffer, s);
    }
    else
    {
        // Precomputed integral input: copy it straight into the working buffer.
        if (s)
            s.enqueueCopy(image, flds.hogluv);
        else
            image.copyTo(flds.hogluv);
    }

    flds.detect(objects, s);

    // Run non-maximum suppression unless the rejection criterion is NO_REJECT.
    if ((flags & NMS_MASK) != NO_REJECT)
    {
        cv::gpu::GpuMat spr(objects, cv::Rect(0, 0, flds.suppressed.cols, flds.suppressed.rows));
        flds.suppress(objects, s);
        flds.suppressed.copyTo(spr);
    }
}
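// Caller sketch (assumptions, not from the original source): loads a trained soft cascade from
// an XML/YAML model file and runs the GPU detector on one BGR frame. The model file name, the
// module header path, and the full-frame CV_8UC1 ROI mask are illustrative guesses.
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <opencv2/softcascade/softcascade.hpp> // header name may differ by OpenCV version

int main()
{
    cv::softcascade::SCascade cascade;                            // default scale range and flags
    cv::FileStorage fs("softcascade.xml", cv::FileStorage::READ); // hypothetical model file
    if (!fs.isOpened() || !cascade.load(fs.getFirstTopLevelNode())) return -1;

    cv::Mat frame = cv::imread("frame.png");                      // hypothetical BGR input
    cv::gpu::GpuMat d_frame(frame);                               // upload to the device

    // Full-frame ROI mask; detect() shrinks it by the cascade's shrinkage factor.
    cv::gpu::GpuMat rois(frame.size(), CV_8UC1);
    rois.setTo(cv::Scalar::all(1));

    // objects is left empty so detect() allocates it; on return it holds packed Detection records.
    cv::gpu::GpuMat objects;
    cascade.detect(d_frame, rois, objects, cv::gpu::Stream::Null());
    return 0;
}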