// Bind a pitched 2D device matrix to a CUDA texture object.
// Records the matrix dimensions and creates a texture object over
// mat's device memory with the requested sampling parameters.
__host__ explicit Texture(const GlobPtrSz<T>& mat,
                          bool normalizedCoords = false,
                          cudaTextureFilterMode filterMode = cudaFilterModePoint,
                          cudaTextureAddressMode addressMode = cudaAddressModeClamp)
{
    // Texture objects are a compute-capability 3.0+ feature.
    CV_Assert( deviceSupports(FEATURE_SET_COMPUTE_30) );

    rows = mat.rows;
    cols = mat.cols;

    // Describe the backing storage: pitched 2D global memory.
    cudaResourceDesc resDesc;
    std::memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType                  = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr       = mat.data;
    resDesc.res.pitch2D.height       = mat.rows;
    resDesc.res.pitch2D.width        = mat.cols;
    resDesc.res.pitch2D.pitchInBytes = mat.step;
    resDesc.res.pitch2D.desc         = cudaCreateChannelDesc<T>();

    // Describe how the texture is sampled; the same addressing
    // mode is applied on every axis.
    cudaTextureDesc texDesc;
    std::memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0]   = addressMode;
    texDesc.addressMode[1]   = addressMode;
    texDesc.addressMode[2]   = addressMode;
    texDesc.filterMode       = filterMode;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = normalizedCoords;

    CV_CUDEV_SAFE_CALL( cudaCreateTextureObject(&this->texObj, &resDesc, &texDesc, 0) );
}
// Label connected components of an 8-bit mask into a 32-bit label image.
// Requires shared-memory atomics on the device; dispatches the actual
// labeling kernel on the given stream.
void cv::gpu::labelComponents(const GpuMat& mask, GpuMat& components, int flags, Stream& s)
{
    // Only 8-bit single-channel masks are supported.
    CV_Assert(!mask.empty() && mask.type() == CV_8U);

    if (!deviceSupports(SHARED_ATOMICS))
        CV_Error(CV_StsNotImplemented, "The device doesn't support shared atomics and communicative synchronization!");

    // One 32-bit label per mask pixel.
    components.create(mask.size(), CV_32SC1);

    device::ccl::labelComponents(mask, components, flags, StreamAccessor::getStream(s));
}
// Compute a BIN_COUNT-bin histogram of src into dst (a 1 x BIN_COUNT row),
// asynchronously on the given stream. Bins are zeroed before accumulation.
__host__ void gridHistogram_(const SrcPtr& src, GpuMat_<ResType>& dst, Stream& stream = Stream::Null())
{
    // The histogram kernel accumulates with shared-memory atomics.
    CV_Assert( deviceSupports(SHARED_ATOMICS) );

    const int rows = getRows(src);
    const int cols = getCols(src);

    // Allocate a single row of bins and clear it on the same stream
    // so the kernel below sees zeroed counters.
    dst.create(1, BIN_COUNT);
    dst.setTo(0, stream);

    grid_histogram_detail::histogram<BIN_COUNT, Policy>(
        shrinkPtr(src),
        dst[0],
        WithOutMask(),
        rows, cols,
        StreamAccessor::getStream(stream));
}