/**
 * @brief Compute derivative kernels for sizes different than 3
 * @param _kx Horizontal kernel values
 * @param _ky Vertical kernel values
 * @param dx Derivative order in X-direction (horizontal)
 * @param dy Derivative order in Y-direction (vertical)
 * @param scale Scale factor determining the kernel size
 */
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {

    int ksize = 3 + 2 * (scale - 1);

    // The standard Scharr kernel
    if (scale == 1) {
        getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F);
        return;
    }

    _kx.create(ksize, 1, CV_32F, -1, true);
    _ky.create(ksize, 1, CV_32F, -1, true);
    Mat kx = _kx.getMat();
    Mat ky = _ky.getMat();

    float w = 10.0f / 3.0f;
    float norm = 1.0f / (2.0f * scale * (w + 2.0f));

    for (int k = 0; k < 2; k++) {
        Mat* kernel = k == 0 ? &kx : &ky;
        int order = k == 0 ? dx : dy;
        std::vector<float> kerI(ksize, 0.0f);

        if (order == 0) {
            kerI[0] = norm;
            kerI[ksize / 2] = w * norm;
            kerI[ksize - 1] = norm;
        }
        else if (order == 1) {
            kerI[0] = -1;
            kerI[ksize / 2] = 0;
            kerI[ksize - 1] = 1;
        }

        Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
        temp.copyTo(*kernel);
    }
}
void stereo::stereoRectify(cv::InputArray _K1, cv::InputArray _K2, cv::InputArray _R, cv::InputArray _T,
                           cv::OutputArray _R1, cv::OutputArray _R2, cv::OutputArray _P1, cv::OutputArray _P2) {
    Mat K1 = _K1.getMat(), K2 = _K2.getMat(), R = _R.getMat(), T = _T.getMat();

    _R1.create(3, 3, CV_32F);
    _R2.create(3, 3, CV_32F);
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();
    _P1.create(3, 4, CV_32F);
    _P2.create(3, 4, CV_32F);
    Mat P1 = _P1.getMat();
    Mat P2 = _P2.getMat();

    if (K1.type() != CV_32F) K1.convertTo(K1, CV_32F);
    if (K2.type() != CV_32F) K2.convertTo(K2, CV_32F);
    if (R.type() != CV_32F) R.convertTo(R, CV_32F);
    if (T.type() != CV_32F) T.convertTo(T, CV_32F);
    if (T.rows != 3) T = T.t();

    // R and T are the transformation from the first to the second camera.
    // Get the transformation from the second to the first camera.
    Mat R_inv = R.t();
    Mat T_inv = -R.t() * T;

    // Build the rectifying rotation: e1 along the baseline, e2 orthogonal to
    // e1 and the optical axis, e3 completing the right-handed basis.
    Mat e1, e2, e3;
    e1 = T_inv.t() / norm(T_inv);
    e2 = (Mat_<float>(1, 3) << -T_inv.at<float>(1), T_inv.at<float>(0), 0.0);
    e2 = e2 / sqrt(e2.at<float>(0)*e2.at<float>(0) + e2.at<float>(1)*e2.at<float>(1));
    e3 = e1.cross(e2);
    e3 = e3 / norm(e3);

    e1.copyTo(R1.row(0));
    e2.copyTo(R1.row(1));
    e3.copyTo(R1.row(2));
    R2 = R_inv * R1;

    P1.setTo(Scalar(0));
    R1.copyTo(P1.colRange(0, 3));
    P1 = K1 * P1;

    P2.setTo(Scalar(0));
    R2.copyTo(P2.colRange(0, 3));
    P2 = K2 * P2;
}
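// Usage sketch (an assumption about how the outputs above are meant to be
// consumed, not part of the original code): R1/R2 rotate each camera into the
// rectified frame and P1/P2 are the new projection matrices, so they plug
// straight into cv::initUndistortRectifyMap. D1/D2 and imgSize are placeholders.
void rectifyPairExample(const cv::Mat& K1, const cv::Mat& D1,
                        const cv::Mat& K2, const cv::Mat& D2,
                        const cv::Mat& R, const cv::Mat& T, cv::Size imgSize) {
    cv::Mat R1, R2, P1, P2;
    stereo::stereoRectify(K1, K2, R, T, R1, R2, P1, P2);

    cv::Mat map11, map12, map21, map22;
    cv::initUndistortRectifyMap(K1, D1, R1, P1, imgSize, CV_32FC1, map11, map12);
    cv::initUndistortRectifyMap(K2, D2, R2, P2, imgSize, CV_32FC1, map21, map22);
    // the maps can now be fed to stereo::rectifyImage further below
}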
void BackgroundSubtractorMedian::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate) {
    framecount++;

    cv::Mat image = _image.getMat();
    if (image.channels() > 1) {
        cvtColor(image, image, CV_BGR2GRAY);
    }
    if (image.cols == 0 || image.rows == 0) {
        return;
    }

    _fgmask.create(image.size(), CV_8U);
    cv::Mat fgmask = _fgmask.getMat();

    if (!init) {
        init = true;
        bgmodel = cv::Mat(image.size(), CV_8U);
    }

    // Approximate median update: nudge each background pixel by +/-1 towards
    // the current frame, so the model converges to the temporal median.
    cv::Mat cmpArr = cv::Mat(image.size(), CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_GT);
    cv::bitwise_and(cmpArr, 1, cmpArr);
    cv::add(bgmodel, cmpArr, bgmodel);

    cmpArr = cv::Mat(image.size(), CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_LT);
    cv::bitwise_and(cmpArr, 1, cmpArr);
    cv::subtract(bgmodel, cmpArr, bgmodel);

    // Foreground = thresholded absolute difference, cleaned with a median blur.
    cv::absdiff(image, bgmodel, fgmask);
    cv::threshold(fgmask, fgmask, fg_threshold, 255, CV_THRESH_TOZERO);
    cv::medianBlur(fgmask, fgmask, median_filter_level);
}
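// Minimal usage sketch, assuming a default-constructible subtractor and any
// cv::VideoCapture source; note the learningRate argument is unused by this model:
void runMedianSubtractorExample(cv::VideoCapture& cap) {
    BackgroundSubtractorMedian bgsub;
    cv::Mat frame, fgmask;
    while (cap.read(frame)) {
        bgsub(frame, fgmask, 0 /* learningRate, ignored */);
        cv::imshow("foreground", fgmask);
        if (cv::waitKey(1) == 27) break; // ESC quits
    }
}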
static bool calcLut(cv::InputArray _src, cv::OutputArray _dst,
                    const int tilesX, const int tilesY, const cv::Size tileSize,
                    const int clipLimit, const float lutScale) {
    cv::ocl::Kernel k("calcLut", cv::ocl::imgproc::clahe_oclsrc);
    if (k.empty())
        return false;

    cv::UMat src = _src.getUMat();
    _dst.create(tilesX * tilesY, 256, CV_8UC1);
    cv::UMat dst = _dst.getUMat();

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { tilesX * localThreads[0], tilesY * localThreads[1], 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    idx = k.set(idx, clipLimit);
    k.set(idx, lutScale);

    return k.run(2, globalThreads, localThreads, false);
}
void illuminationChange(cv::InputArray _src, cv::InputArray _mask, cv::OutputArray _dst, float a, float b) {
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    float alpha = a;
    float beta = b;

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if (mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.illum_change(src, cs_mask, gray, blend, alpha, beta);
}
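// Usage sketch: applying the illumination change to a region selected by a
// binary (or 3-channel) mask. The file names are placeholders; the 0.2/0.4
// alpha/beta pair follows the range suggested in the OpenCV photo module docs.
void illuminationChangeExample() {
    cv::Mat src = cv::imread("source.png");
    cv::Mat mask = cv::imread("mask.png"); // white where the effect applies
    cv::Mat result;
    illuminationChange(src, mask, result, 0.2f, 0.4f);
    cv::imwrite("illum_changed.png", result);
}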
void EdgeDetector_<ParallelUtils::eGLSL>::getLatestEdgeMask(cv::OutputArray _oLastEdgeMask) {
    _oLastEdgeMask.create(m_oFrameSize,CV_8UC1);
    cv::Mat oLastEdgeMask = _oLastEdgeMask.getMat();
    if(!GLImageProcAlgo::m_bFetchingOutput)
        glAssert(GLImageProcAlgo::setOutputFetching(true))
    GLImageProcAlgo::fetchLastOutput(oLastEdgeMask);
}
void BackgroundSubtractorLOBSTER_<ParallelUtils::eGLSL>::getBackgroundDescriptorsImage(cv::OutputArray oBGDescImg) const {
    lvDbgExceptionWatch;
    CV_Assert(m_bInitialized);
    glAssert(m_bGLInitialized && !m_vnBGModelData.empty());
    CV_Assert(LBSP::DESC_SIZE==2);
    oBGDescImg.create(m_oFrameSize,CV_16UC(int(m_nImgChannels)));
    cv::Mat oOutputImg = oBGDescImg.getMatRef();
    // fetch the raw background model from the GPU-side storage buffer
    glBindBuffer(GL_SHADER_STORAGE_BUFFER,getSSBOId(BackgroundSubtractorLOBSTER_::eLOBSTERStorageBuffer_BGModelBinding));
    glGetBufferSubData(GL_SHADER_STORAGE_BUFFER,0,m_nBGModelSize*sizeof(uint),(void*)m_vnBGModelData.data());
    glErrorCheck;
    for(size_t nRowIdx=0; nRowIdx<(size_t)m_oFrameSize.height; ++nRowIdx) {
        const size_t nModelRowOffset = nRowIdx*m_nRowStepSize;
        const size_t nImgRowOffset = nRowIdx*oOutputImg.step.p[0];
        for(size_t nColIdx=0; nColIdx<(size_t)m_oFrameSize.width; ++nColIdx) {
            const size_t nModelColOffset = nColIdx*m_nColStepSize+nModelRowOffset;
            const size_t nImgColOffset = nColIdx*oOutputImg.step.p[1]+nImgRowOffset;
            std::array<float,4> afCurrPxSum = {0.0f,0.0f,0.0f,0.0f};
            // accumulate the descriptor samples of this pixel's model
            for(size_t nSampleIdx=0; nSampleIdx<m_nBGSamples; ++nSampleIdx) {
                const size_t nModelPxOffset_color = nSampleIdx*m_nSampleStepSize+nModelColOffset;
                const size_t nModelPxOffset_desc = nModelPxOffset_color+(m_nBGSamples*m_nSampleStepSize);
                for(size_t nChannelIdx=0; nChannelIdx<m_nImgChannels; ++nChannelIdx) {
                    const size_t nModelTotOffset = nChannelIdx+nModelPxOffset_desc;
                    afCurrPxSum[nChannelIdx] += m_vnBGModelData[nModelTotOffset];
                }
            }
            // write back the per-channel sample averages (with RGBA->BGRA channel swap)
            for(size_t nChannelIdx=0; nChannelIdx<m_nImgChannels; ++nChannelIdx) {
                const size_t nSampleChannelIdx = ((nChannelIdx==3||m_nImgChannels==1)?nChannelIdx:2-nChannelIdx);
                const size_t nImgTotOffset = nSampleChannelIdx*2+nImgColOffset;
                *(ushort*)(oOutputImg.data+nImgTotOffset) = (ushort)(afCurrPxSum[nChannelIdx]/m_nBGSamples);
            }
        }
    }
}
void colorChange(cv::InputArray _src, cv::InputArray _mask, cv::OutputArray _dst, float r, float g, float b) {
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    float red = r;
    float green = g;
    float blue = b;

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if (mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.local_color_change(src, cs_mask, gray, blend, red, green, blue);
}
static bool transform(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _lut,
                      const int tilesX, const int tilesY, const cv::Size & tileSize) {
    cv::ocl::Kernel k("transform", cv::ocl::imgproc::clahe_oclsrc);
    if (k.empty())
        return false;

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    cv::UMat src = _src.getUMat();
    _dst.create(src.size(), src.type());
    cv::UMat dst = _dst.getUMat();
    cv::UMat lut = _lut.getUMat();

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { (size_t)src.cols, (size_t)src.rows, 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(lut));
    idx = k.set(idx, src.cols);
    idx = k.set(idx, src.rows);
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    k.set(idx, tilesY);

    return k.run(2, globalThreads, localThreads, false);
}
void textureFlattening(cv::InputArray _src, cv::InputArray _mask, cv::OutputArray _dst,
                       double low_threshold, double high_threshold, int kernel_size) {
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if (mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.texture_flatten(src, cs_mask, gray, low_threshold, high_threshold, kernel_size, blend);
}
void stereo_disparity_normal(cv::InputArray left_image, cv::InputArray right_image, cv::OutputArray disp_,
                             int max_dis_level, int scale, float sigma) {
    cv::Mat imL = left_image.getMat();
    cv::Mat imR = right_image.getMat();

    CV_Assert(imL.size() == imR.size());
    CV_Assert(imL.type() == CV_8UC3 && imR.type() == CV_8UC3);

    cv::Size imageSize = imL.size();
    disp_.create(imageSize, CV_8U);
    cv::Mat disp = disp_.getMat();

    CDisparityHelper dispHelper;

    // step 1: cost initialization
    cv::Mat costVol = dispHelper.GetMatchingCost(imL, imR, max_dis_level);

    // step 2: cost aggregation
    CSegmentTree stree;
    CColorWeight cWeight(imL);
    stree.BuildSegmentTree(imL.size(), sigma, TAU, cWeight);
    stree.Filter(costVol, max_dis_level);

    // step 3: disparity computation
    cv::Mat disparity = dispHelper.GetDisparity_WTA((float*)costVol.data,
                                                    imageSize.width, imageSize.height, max_dis_level);

    MeanFilter(disparity, disparity, 3);
    disparity *= scale;
    disparity.copyTo(disp);
}
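// Usage sketch, assuming two rectified CV_8UC3 input views; the max_dis_level,
// scale, and sigma values here are illustrative, not tuned (64 levels * scale 4
// stays within the CV_8U output range):
void segmentTreeStereoExample(const cv::Mat& left, const cv::Mat& right) {
    cv::Mat disparity;
    stereo_disparity_normal(left, right, disparity,
                            64 /* max_dis_level */, 4 /* scale */, 0.1f /* sigma */);
    cv::imshow("disparity", disparity);
    cv::waitKey(0);
}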
bool OpenNI2Grabber::grabFrame(cv::OutputArray _color) {
    if (_color.kind() != cv::_InputArray::MAT)
        BOOST_THROW_EXCEPTION(GrabberException("Grabbing only into cv::Mat"));

    _color.create(p->color_image_resolution.height, p->color_image_resolution.width, CV_8UC3);
    cv::Mat color = _color.getMat();
    return p->grabFrame(color);
}
void UndistorterPTAM::undistort(const cv::Mat& image, cv::OutputArray result) const {
    if (!valid) {
        result.getMatRef() = image;
        return;
    }

    if (image.rows != in_height || image.cols != in_width) {
        printf("UndistorterPTAM: input image size differs from expected input size! Not undistorting.\n");
        result.getMatRef() = image;
        return;
    }

    if (in_height == out_height && in_width == out_width && inputCalibration[4] == 0) {
        // No transformation if neither distortion nor resize
        result.getMatRef() = image;
        return;
    }

    result.create(out_height, out_width, CV_8U);
    cv::Mat resultMat = result.getMatRef();
    assert(result.getMatRef().isContinuous());
    assert(image.isContinuous());
    uchar* data = resultMat.data;

    for (int idx = out_width*out_height-1; idx >= 0; idx--) {
        // get interpolation source coordinates
        float xx = remapX[idx];
        float yy = remapY[idx];

        if (xx < 0)
            data[idx] = 0;
        else {
            // split into integer and fractional parts
            int xxi = xx;
            int yyi = yy;
            xx -= xxi;
            yy -= yyi;
            float xxyy = xx*yy;

            // get array base pointer
            const uchar* src = (uchar*)image.data + xxi + yyi * in_width;

            // interpolate (bilinear)
            data[idx] = xxyy * src[1+in_width]
                      + (yy-xxyy) * src[in_width]
                      + (xx-xxyy) * src[1]
                      + (1-xx-yy+xxyy) * src[0];
        }
    }
}
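// Worked example of the bilinear weights used above: for a source position
// split into integer part (xxi, yyi) and fractional part (xx, yy), the four
// neighbours blend as
//   (1-xx)(1-yy)*p00 + xx(1-yy)*p10 + (1-xx)yy*p01 + xx*yy*p11,
// which expands to exactly the (1-xx-yy+xxyy), (xx-xxyy), (yy-xxyy), xxyy
// coefficients in the loop. A standalone sketch of the same computation,
// assuming an interior pixel of a single-channel 8-bit image:
static float bilinearSample(const cv::Mat& gray8u, float x, float y) {
    int xi = (int)x, yi = (int)y;
    float fx = x - xi, fy = y - yi;
    const uchar* p = gray8u.ptr<uchar>(yi) + xi;
    return (1 - fx) * (1 - fy) * p[0] + fx * (1 - fy) * p[1]
         + (1 - fx) * fy * p[gray8u.step] + fx * fy * p[gray8u.step + 1];
}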
void IBackgroundSubtractor_GLSL::getLatestForegroundMask(cv::OutputArray _oLastFGMask) {
    _oLastFGMask.create(m_oImgSize,CV_8UC1);
    cv::Mat oLastFGMask = _oLastFGMask.getMat();
    glAssert(GLImageProcAlgo::m_bFetchingOutput || GLImageProcAlgo::setOutputFetching(true))
    if(GLImageProcAlgo::m_nInternalFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastFGMask);
    else
        oLastFGMask = cv::Scalar_<uchar>(0);
}
void Eular2Rotation(const double pitch, const double roll, const double yaw, cv::OutputArray dest) {
    dest.create(3, 3, CV_64F);
    Mat a = Mat::eye(3, 3, CV_64F);
    a.copyTo(dest);

    rotYaw(dest, dest, yaw);
    rotPitch(dest, dest, pitch);
    rotPitch(dest, dest, roll); // note: the roll angle is also applied with rotPitch; a dedicated roll rotation may be intended here
}
void stereo::stereoMatching(cv::InputArray _recImage1, cv::InputArray _recImage2, cv::OutputArray _disparityMap,
                            int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2) {
    Mat img1 = _recImage1.getMat();
    Mat img2 = _recImage2.getMat();
    _disparityMap.create(img1.size(), CV_16S);
    Mat dis = _disparityMap.getMat();

    StereoSGBM matcher(minDisparity, numDisparities, SADWindowSize, P1, P2);
    matcher(img1, img2, dis);
    // SGBM outputs fixed-point disparities scaled by 16; convert to real units
    dis = dis / 16.0;
}
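// Usage sketch for the SGBM wrapper above; the parameter values follow the
// common SGBM heuristic (P1 = 8*cn*sad^2, P2 = 32*cn*sad^2) and are
// illustrative rather than tuned:
void stereoMatchingExample(const cv::Mat& rectLeft, const cv::Mat& rectRight) {
    const int sad = 5, cn = rectLeft.channels();
    cv::Mat disparity;
    stereo::stereoMatching(rectLeft, rectRight, disparity,
                           0 /* minDisparity */, 64 /* numDisparities */, sad,
                           8 * cn * sad * sad /* P1 */, 32 * cn * sad * sad /* P2 */);
    // disparity is CV_16S, already divided by 16 to real disparity units
}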
void IEdgeDetector_GLSL::getLatestEdgeMask(cv::OutputArray _oLastEdgeMask) {
    lvAssert_(GLImageProcAlgo::m_bGLInitialized,"algo must be initialized first");
    _oLastEdgeMask.create(GLImageProcAlgo::m_oFrameSize,CV_8UC1);
    cv::Mat oLastEdgeMask = _oLastEdgeMask.getMat();
    lvAssert_(GLImageProcAlgo::m_bFetchingOutput || GLImageProcAlgo::setOutputFetching(true),"algo not initialized with mat output support")
    if(GLImageProcAlgo::m_nInternalFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastEdgeMask);
    else
        oLastEdgeMask = cv::Scalar_<uchar>(0);
}
void warmify(cv::InputArray src, cv::OutputArray dst, uchar delta) {
    CV_Assert(src.type() == CV_8UC3);
    Mat imgSrc = src.getMat();
    CV_Assert(imgSrc.data);

    dst.create(src.size(), CV_8UC3);
    Mat imgDst = dst.getMat();

    // raise the green and red channels (BGR order) to warm the image
    imgDst = imgSrc + Scalar(0, delta, delta);
}
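// Usage sketch: since the function just offsets the G and R channels, larger
// deltas give a stronger warming tint; 30 is an arbitrary demo value and the
// output file name is a placeholder:
void warmifyExample(const cv::Mat& photo) {
    cv::Mat warmed;
    warmify(photo, warmed, 30);
    cv::imwrite("warmed.png", warmed);
}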
void BackgroundSubtractor_<ParallelUtils::eGLSL>::getLatestForegroundMask(cv::OutputArray _oLastFGMask) {
    _oLastFGMask.create(m_oImgSize,CV_8UC1);
    cv::Mat oLastFGMask = _oLastFGMask.getMat();
    if(!GLImageProcAlgo::m_bFetchingOutput)
        glAssert(GLImageProcAlgo::setOutputFetching(true))
    else if(m_nFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastFGMask);
    else
        oLastFGMask = cv::Scalar_<uchar>(0);
}
/**
 * @brief Compute Scharr derivative kernels for sizes different than 3
 * @param kx_ The derivative kernel in x-direction
 * @param ky_ The derivative kernel in y-direction
 * @param dx The derivative order in x-direction
 * @param dy The derivative order in y-direction
 * @param scale The scale factor determining the kernel size
 */
void compute_derivative_kernels(cv::OutputArray kx_, cv::OutputArray ky_,
                                const size_t& dx, const size_t& dy, const size_t& scale) {
    const int ksize = 3 + 2*(scale-1);

    // The usual Scharr kernel
    if (scale == 1) {
        getDerivKernels(kx_, ky_, dx, dy, 0, true, CV_32F);
        return;
    }

    kx_.create(ksize, 1, CV_32F, -1, true);
    ky_.create(ksize, 1, CV_32F, -1, true);
    Mat kx = kx_.getMat();
    Mat ky = ky_.getMat();

    float w = 10.0f/3.0f;
    float norm = 1.0f/(2.0f*scale*(w+2.0f));

    for (int k = 0; k < 2; k++) {
        Mat* kernel = k == 0 ? &kx : &ky;
        int order = k == 0 ? dx : dy;
        // size the zero-initialized buffer to the kernel instead of a fixed array
        std::vector<float> kerI(ksize, 0.0f);

        if (order == 0) {
            kerI[0] = norm;
            kerI[ksize/2] = w*norm;
            kerI[ksize-1] = norm;
        }
        else if (order == 1) {
            kerI[0] = -1;
            kerI[ksize/2] = 0;
            kerI[ksize-1] = 1;
        }

        Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
        temp.copyTo(*kernel);
    }
}
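// Usage sketch: the returned 1-D kernels are intended for a separable filter
// pass; this example computes a first-order x-derivative at scale 2 on a
// single-channel float image (the argument values are illustrative):
void derivativeKernelExample(const cv::Mat& gray32f) {
    cv::Mat kx, ky, dxImg;
    compute_derivative_kernels(kx, ky, 1 /* dx */, 0 /* dy */, 2 /* scale */);
    cv::sepFilter2D(gray32f, dxImg, CV_32F, kx, ky);
}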
static void seqToMat(const CvSeq* seq, cv::OutputArray _arr) {
    if (seq && seq->total > 0) {
        _arr.create(1, seq->total, seq->flags, -1, true);
        cv::Mat arr = _arr.getMat();
        cvCvtSeqToArray(seq, arr.data);
    }
    else
        _arr.release();
}
void IPPE::PoseSolver::solveCanonicalForm(cv::InputArray _canonicalObjPoints, cv::InputArray _normalizedInputPoints,
                                          cv::InputArray _H, cv::OutputArray _Ma, cv::OutputArray _Mb) {
    _Ma.create(4, 4, CV_64FC1);
    _Mb.create(4, 4, CV_64FC1);

    cv::Mat Ma = _Ma.getMat();
    cv::Mat Mb = _Mb.getMat();
    cv::Mat H = _H.getMat();

    // initialise poses:
    Ma.setTo(0);
    Ma.at<double>(3, 3) = 1;
    Mb.setTo(0);
    Mb.at<double>(3, 3) = 1;

    // compute the Jacobian J of the homography at (0,0):
    double j00, j01, j10, j11, v0, v1;
    j00 = H.at<double>(0, 0) - H.at<double>(2, 0) * H.at<double>(0, 2);
    j01 = H.at<double>(0, 1) - H.at<double>(2, 1) * H.at<double>(0, 2);
    j10 = H.at<double>(1, 0) - H.at<double>(2, 0) * H.at<double>(1, 2);
    j11 = H.at<double>(1, 1) - H.at<double>(2, 1) * H.at<double>(1, 2);

    // compute the transformation of (0,0) into the image:
    v0 = H.at<double>(0, 2);
    v1 = H.at<double>(1, 2);

    // compute the two rotation solutions:
    cv::Mat Ra = Ma.colRange(0, 3).rowRange(0, 3);
    cv::Mat Rb = Mb.colRange(0, 3).rowRange(0, 3);
    computeRotations(j00, j01, j10, j11, v0, v1, Ra, Rb);

    // for each rotation solution, compute the corresponding translation solution:
    cv::Mat ta = Ma.colRange(3, 4).rowRange(0, 3);
    cv::Mat tb = Mb.colRange(3, 4).rowRange(0, 3);
    computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Ra, ta);
    computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Rb, tb);
}
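// Usage note (a sketch, not part of the solver): the two candidates come back
// as 4x4 rigid transforms, so the rotation and translation blocks can be pulled
// out as below; the caller is then expected to rank the two candidates, e.g.
// by reprojection error (ranking code not shown here):
void splitPoseCandidate(const cv::Mat& M, cv::Mat& R, cv::Mat& t) {
    R = M(cv::Range(0, 3), cv::Range(0, 3)).clone(); // 3x3 rotation
    t = M(cv::Range(0, 3), cv::Range(3, 4)).clone(); // 3x1 translation
}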
void stereo::rectifyImage(cv::InputArray _inImage1, cv::InputArray _inImage2,
                          cv::InputArray _map11, cv::InputArray _map12,
                          cv::InputArray _map21, cv::InputArray _map22,
                          cv::OutputArray _recImage1, cv::OutputArray _recImage2) {
    Mat img1 = _inImage1.getMat();
    Mat img2 = _inImage2.getMat();
    Mat map11 = _map11.getMat();
    Mat map12 = _map12.getMat();
    Mat map21 = _map21.getMat();
    Mat map22 = _map22.getMat();

    _recImage1.create(map11.size(), img1.type());
    Mat recImg1 = _recImage1.getMat();
    _recImage2.create(map21.size(), img2.type());
    Mat recImg2 = _recImage2.getMat();

    remap(img1, recImg1, map11, map12, INTER_LINEAR);
    remap(img2, recImg2, map21, map22, INTER_LINEAR);
}
void StereoMatch::StereoMatching(cv::InputArray rec_image1, cv::InputArray rec_image2, cv::OutputArray disparity_map,
                                 int min_disparity, int num_disparities, int SAD_window_size, int P1, int P2) {
    cv::Mat img1 = rec_image1.getMat();
    cv::Mat img2 = rec_image2.getMat();
    disparity_map.create(img1.size(), CV_16S);
    cv::Mat dis = disparity_map.getMat();

    cv::StereoSGBM matcher(min_disparity, num_disparities, SAD_window_size, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;
}
void EdgeDetectorCanny::apply(cv::InputArray _oInputImage, cv::OutputArray _oEdgeMask) {
    cv::Mat oInputImg = _oInputImage.getMat();
    CV_Assert(!oInputImg.empty());
    CV_Assert(oInputImg.channels()==1 || oInputImg.channels()==3 || oInputImg.channels()==4);
    _oEdgeMask.create(oInputImg.size(),CV_8UC1);
    cv::Mat oEdgeMask = _oEdgeMask.getMat();
    oEdgeMask = cv::Scalar_<uchar>(0);
    cv::Mat oTempEdgeMask = oEdgeMask.clone();
    // sweep the full threshold range and accumulate the binary edge maps,
    // yielding a soft edge-confidence image instead of a single hard threshold
    for(size_t nCurrThreshold=0; nCurrThreshold<UCHAR_MAX; ++nCurrThreshold) {
        apply_threshold(oInputImg,oTempEdgeMask,double(nCurrThreshold));
        oEdgeMask += oTempEdgeMask/UCHAR_MAX;
    }
    cv::normalize(oEdgeMask,oEdgeMask,0,UCHAR_MAX,cv::NORM_MINMAX);
}
void MixtureOfGaussianCPU::operator()(cv::InputArray in, cv::OutputArray out, float learningRate) {
    cv::Mat frame = in.getMat();
    ++nframe;

    // use the caller's learning rate if given, otherwise fall back to
    // 1/min(nframe, history) during warm-up
    float alpha = learningRate >= 0 && nframe > 1
        ? learningRate
        : 1.0f / std::min(nframe, history);

    out.create(frame.size(), CV_8U);
    cv::Mat mask = out.getMat();

    calc_impl(frame.data, mask.data, bgmodel.ptr<MixtureData>(), alpha);
}
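// Usage sketch: passing a negative learningRate lets the model pick
// 1/min(nframe, history) automatically, which is the usual warm-up behaviour;
// the constructor arguments of MixtureOfGaussianCPU are assumed to be set up elsewhere:
void mogExample(cv::VideoCapture& cap, MixtureOfGaussianCPU& mog) {
    cv::Mat frame, mask;
    while (cap.read(frame)) {
        mog(frame, mask, -1.0f); // auto learning rate
        cv::imshow("fg", mask);
        if (cv::waitKey(1) == 27) break;
    }
}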
void Tools::ReduceRowByMost(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask) {
    cv::Mat src = _src.getMat();
    _dst.create(1, src.cols, CV_8UC1);
    cv::Mat dst = _dst.getMat();
    cv::Mat mask = _mask.getMat();

    if (src.depth() != CV_8U || mask.depth() != CV_8U) {
        throw "TYPE NOT SUPPORTED";
    }

    int i, j, addr;
    cv::Size size = src.size();
    uchar *srcd = src.data;
    uchar *dstd = dst.data;
    uchar *maskd = mask.data;

    std::map<uchar, int> m;
    std::map<uchar, int>::iterator p;
    uchar mostVal;
    uchar rVal;
    int r;

    // for each column, vote over the masked pixels with a +/-2 tolerance
    // window (wrapped modulo 180, as for hue values) and keep the most frequent value
    for (i = 0; i < size.width; ++i) {
        m.clear();
        mostVal = 0;
        for (j = 0; j < size.height; ++j) {
            addr = j*size.width + i;
            if (!maskd[addr])
                continue;
            for (r = -2; r <= 2; ++r) {
                // the +180 keeps the modulo result non-negative when srcd[addr]+r < 0
                rVal = (uchar)(((int)srcd[addr] + r + 180) % 180);
                if (m.find(rVal) != m.end())
                    m[rVal]++;
                else
                    m[rVal] = 1;
            }
        }
        if (m.size() == 0) {
            dstd[i] = 0;
            continue;
        }
        mostVal = m.begin()->first;
        for (p = m.begin(); p != m.end(); ++p) {
            if (p->second > m[mostVal])
                mostVal = p->first;
        }
        dstd[i] = mostVal;
    }
}
void Camera::undistortLUT(cv::InputArray source, cv::OutputArray dest) {
    cv::Mat src = source.getMat();
    dest.create(src.size(), src.type());
    cv::Mat dst = dest.getMat();

    // process the image in horizontal stripes to keep the remap working set small
    int stripeSize = std::min(std::max(1, (1 << 12) / std::max(camFrameWidth, 1)), camFrameHeight);
    for (int y = 0; y < src.rows; y += stripeSize) {
        int stripe = std::min(stripeSize, src.rows - y);
        cv::Mat map1Part = map1LUT[y];
        cv::Mat map2Part = map2LUT[y];
        cv::Mat destPart = dst.rowRange(y, y + stripe);
        cv::remap(src, destPart, map1Part, map2Part, cv::INTER_LINEAR, cv::BORDER_CONSTANT);
    }
}
void RadiometricResponse::directMap(cv::InputArray _E, cv::OutputArray _I) const {
    if (_E.empty()) {
        _I.clear();
        return;
    }
    auto E = _E.getMat();
    _I.create(_E.size(), CV_8UC3);
    auto I = _I.getMat();
#if CV_MAJOR_VERSION > 2
    E.forEach<cv::Vec3f>([&I, this](cv::Vec3f& v, const int* p) {
        I.at<cv::Vec3b>(p[0], p[1]) = inverseLUT(response_channels_, v);
    });
#else
    for (int i = 0; i < E.rows; i++)
        for (int j = 0; j < E.cols; j++)
            I.at<cv::Vec3b>(i, j) = inverseLUT(response_channels_, E.at<cv::Vec3f>(i, j));
#endif
}
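// Usage sketch: mapping a linear irradiance image (CV_32FC3) back to an 8-bit
// BGR image through the camera response; how the RadiometricResponse instance
// is constructed is assumed to be handled elsewhere in this codebase:
void directMapExample(const RadiometricResponse& response, const cv::Mat& irradiance32f) {
    cv::Mat image8u;
    response.directMap(irradiance32f, image8u);
    cv::imwrite("mapped.png", image8u);
}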
void FilterBase::apply(cv::InputArray _src, cv::OutputArray _dst, const int &ddepth) {
    int stype = _src.type();
    int dcn = _src.channels();
    int depth = CV_MAT_DEPTH(stype);
    if (0 <= ddepth)
        depth = ddepth;

    Mat src, dst;
    src = _src.getMat();
    Size sz = src.size();

    _dst.create(sz, CV_MAKETYPE(depth, dcn));
    dst = _dst.getMat();

    int imageWidth = src.cols;
    int imageHeight = src.rows;

    Mat srcChannels[3];
    split(src, srcChannels);

    int margineWidth = kernel.cols / 2;
    int margineHeight = kernel.rows / 2;
    double kernelElemCount = (double)(kernel.cols * kernel.rows);

    // note: output access assumes a 3-channel double destination (Vec3d)
    for (int ch = 0; ch < dcn; ++ch) {
        for (int y = 0; y < imageHeight; ++y) {
            Vec3d *ptr = dst.ptr<Vec3d>(y);
            for (int x = 0; x < imageWidth; ++x) {
                if (isEdge(x, y, imageWidth, imageHeight, margineWidth, margineHeight)) {
                    // pixels whose kernel window leaves the image get border handling
                    ptr[x][ch] = calcKernelOutputAtEdge(srcChannels[ch], kernel, x, y,
                                                        imageWidth, imageHeight, margineWidth, margineHeight);
                } else {
                    ptr[x][ch] = calcKernelOutput(srcChannels[ch], kernel, x, y,
                                                  margineWidth, margineHeight, kernelElemCount);
                }
            }
        }
    }
}