void coherenceEnhancingShockFilter(cv::InputArray src, cv::OutputArray dest_, const int sigma, const int str_sigma_, const double blend, const int iter)
{
    // Coherence-enhancing shock filter: sharpens across edges while preserving
    // coherent flow-like structures. Per iteration it estimates the dominant
    // orientation from the structure tensor, evaluates the second directional
    // derivative along it, and picks dilation (local max) where it is negative
    // and erosion (local min) elsewhere, then blends with the previous image.
    //
    // src        : input image (1 or 3 channels).
    // dest_      : output image, same size/type as src.
    // sigma      : aperture (ksize) for the Sobel derivatives — must be odd.
    // str_sigma_ : block size for the structure tensor, clamped to 31.
    // blend      : weight of the previous image vs. the shock-filtered one.
    // iter       : number of filter iterations.

    // Work on a private copy: plain getMat() aliases the caller's pixels, and
    // the in-place addWeighted below would silently mutate the input array.
    Mat dest = src.getMat().clone();
    const int str_sigma = min(31, str_sigma_);

    for (int i = 0; i < iter; i++)
    {
        Mat gray;
        if (src.channels() == 3) cvtColor(dest, gray, CV_BGR2GRAY);
        else gray = dest;

        // cornerEigenValsAndVecs accepts only CV_8U/CV_32F input; convert any
        // other depth (including CV_64F, which the previous version wrongly
        // passed through and which would assert at runtime) to CV_32F first.
        Mat eigen;
        if (gray.type() == CV_8U || gray.type() == CV_32F)
            cornerEigenValsAndVecs(gray, eigen, str_sigma, 3);
        else
        {
            Mat grayf;
            gray.convertTo(grayf, CV_32F);
            cornerEigenValsAndVecs(grayf, eigen, str_sigma, 3);
        }

        // eigen is 6-channel (l1, l2, x1, y1, x2, y2); channels 2/3 hold the
        // eigenvector of the dominant eigenvalue.
        vector<Mat> esplit(6);
        split(eigen, esplit);
        Mat x = esplit[2];
        Mat y = esplit[3];

        // Second derivatives of the (possibly grayscale-converted) image.
        Mat gxx, gyy, gxy;
        Sobel(gray, gxx, CV_32F, 2, 0, sigma);
        Sobel(gray, gyy, CV_32F, 0, 2, sigma);
        Sobel(gray, gxy, CV_32F, 1, 1, sigma);

        // Second directional derivative along the eigenvector:
        //   gvv = x^2*gxx + 2*x*y*gxy + y^2*gyy
        Mat gvv = x.mul(x).mul(gxx) + 2 * x.mul(y).mul(gxy) + y.mul(y).mul(gyy);

        // mask marks pixels where gvv < 0 -> use dilation there.
        Mat mask;
        compare(gvv, 0, mask, cv::CMP_LT);

        Mat di, ero;
        erode(dest, ero, Mat());
        dilate(dest, di, Mat());
        di.copyTo(ero, mask); // ero now holds the shock-filtered image

        // Blend previous iterate with the shock-filtered result.
        addWeighted(dest, blend, ero, 1.0 - blend, 0.0, dest);
    }
    dest.copyTo(dest_);
}
void unprojectPointsFisheye( cv::InputArray distorted, cv::OutputArray undistorted, cv::InputArray K, cv::InputArray D, cv::InputArray R, cv::InputArray P)
{
    // Undistorts fisheye image points and lifts them to unit-norm 3D rays.
    //
    // distorted   : Nx1/1xN array of CV_32FC2 or CV_64FC2 image points.
    // undistorted : output array of 3-channel points (unit rays), same depth.
    // K           : 3x3 camera matrix (CV_32F or CV_64F).
    // D           : 4 fisheye distortion coefficients (k1..k4).
    // R           : optional rectification rotation — 3x3 matrix or Rodrigues
    //               3-vector; identity when empty.
    // P           : optional new projection matrix (3x3 or 3x4); only its left
    //               3x3 block is used, pre-multiplied onto R.

    // will support only 2-channel data now for points
    CV_Assert(distorted.type() == CV_32FC2 || distorted.type() == CV_64FC2);
    undistorted.create(distorted.size(), CV_MAKETYPE(distorted.depth(), 3));

    CV_Assert(P.empty() || P.size() == cv::Size(3, 3) || P.size() == cv::Size(4, 3));
    CV_Assert(R.empty() || R.size() == cv::Size(3, 3) || R.total() * R.channels() == 3);
    CV_Assert(D.total() == 4 && K.size() == cv::Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F));

    // Pull focal lengths (f) and principal point (c) out of K at its depth.
    cv::Vec2d f, c;
    if (K.depth() == CV_32F)
    {
        cv::Matx33f camMat = K.getMat();
        f = cv::Vec2f(camMat(0, 0), camMat(1, 1));
        c = cv::Vec2f(camMat(0, 2), camMat(1, 2));
    }
    else
    {
        cv::Matx33d camMat = K.getMat();
        f = cv::Vec2d(camMat(0, 0), camMat(1, 1));
        c = cv::Vec2d(camMat(0, 2), camMat(1, 2));
    }

    // Distortion coefficients, widened to double if supplied as float.
    cv::Vec4d k = D.depth() == CV_32F ? (cv::Vec4d)*D.getMat().ptr<cv::Vec4f>(): *D.getMat().ptr<cv::Vec4d>();

    // Combined transform RR = P(3x3) * R; identity when both are empty.
    cv::Matx33d RR = cv::Matx33d::eye();
    if (!R.empty() && R.total() * R.channels() == 3)
    {
        // Rodrigues-vector form of R.
        cv::Vec3d rvec;
        R.getMat().convertTo(rvec, CV_64F);
        RR = cv::Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == cv::Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);

    if(!P.empty())
    {
        cv::Matx33d PP;
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
        RR = PP * RR;
    }

    // start undistorting
    // Both typed views alias the same buffers; sdepth selects which is valid.
    const cv::Vec2f* srcf = distorted.getMat().ptr<cv::Vec2f>();
    const cv::Vec2d* srcd = distorted.getMat().ptr<cv::Vec2d>();
    cv::Vec3f* dstf = undistorted.getMat().ptr<cv::Vec3f>();
    cv::Vec3d* dstd = undistorted.getMat().ptr<cv::Vec3d>();

    size_t n = distorted.total();
    int sdepth = distorted.depth();

    for(size_t i = 0; i < n; i++ )
    {
        cv::Vec2d pi = sdepth == CV_32F ? (cv::Vec2d)srcf[i] : srcd[i]; // image point
        cv::Vec2d pw((pi[0] - c[0])/f[0], (pi[1] - c[1])/f[1]);         // world point

        // theta_d is the distorted angle; invert the fisheye model
        //   theta_d = theta*(1 + k1*theta^2 + k2*theta^4 + k3*theta^6 + k4*theta^8)
        // by fixed-point iteration.
        // NOTE(review): 10 fixed iterations, no convergence/validity check —
        // presumably fine for points inside the calibrated FOV; confirm for
        // extreme inputs.
        double theta_d = sqrt(pw[0]*pw[0] + pw[1]*pw[1]);
        double theta = theta_d;
        if (theta_d > 1e-8)
        {
            // compensate distortion iteratively
            for(int j = 0; j < 10; j++ )
            {
                double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta6*theta2;
                theta = theta_d / (1 + k[0] * theta2 + k[1] * theta4 + k[2] * theta6 + k[3] * theta8);
            }
        }

        // Lift onto the unit sphere: radial part sin(theta), depth cos(theta).
        double z = std::cos(theta);
        double r = std::sin(theta);

        cv::Vec3d pu = cv::Vec3d(r*pw[0], r*pw[1], z); //undistorted point

        // reproject
        cv::Vec3d pr = RR * pu; // rotated point optionally multiplied by new camera matrix
        cv::Vec3d fi;           // final

        // Renormalize so the output is always a unit ray regardless of RR.
        normalize(pr, fi);

        if( sdepth == CV_32F )
            dstf[i] = fi;
        else
            dstd[i] = fi;
    }
}
bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr)
{
    // Submits one BGR frame to the Intel Media SDK encoder, or — when bgr is
    // empty — asks the encoder to drain its buffered frames.
    // Returns true iff one encoded bitstream chunk was written out; false on
    // invalid input, encode/sync errors, or when the encoder merely buffered
    // the frame (MFX_ERR_MORE_DATA).
    //
    // bgr : 2-D CV_8UC3 frame matching frameSize, or an empty array to flush.
    mfxStatus res;
    mfxFrameSurface1 *workSurface = 0;
    mfxSyncPoint sync;

    // Validate frame geometry/type up front; an empty array is the explicit
    // flush request and is allowed through with workSurface left NULL.
    if (!bgr.empty() && (bgr.dims() != 2 || bgr.type() != CV_8UC3 || bgr.size() != frameSize))
    {
        MSG(cerr << "MFX: invalid frame passed to encoder: "
                 << "dims/depth/cn=" << bgr.dims() << "/" << bgr.depth() << "/" << bgr.channels()
                 << ", size=" << bgr.size() << endl);
        return false;
    }
    if (!bgr.empty())
    {
        workSurface = pool->getFreeSurface();
        if (!workSurface)
        {
            // not enough surfaces
            MSG(cerr << "MFX: Failed to get free surface" << endl);
            return false;
        }
        const int rows = workSurface->Info.Height;
        const int cols = workSurface->Info.Width;

        // Wrap the surface's Y and interleaved UV planes as Mats (no copy;
        // Pitch is the surface stride) and convert the frame to NV12 in place.
        Mat Y(rows, cols, CV_8UC1, workSurface->Data.Y, workSurface->Data.Pitch);
        Mat UV(rows / 2, cols, CV_8UC1, workSurface->Data.UV, workSurface->Data.Pitch);
        to_nv12(bgr, Y, UV);
        // to_nv12 must not have reallocated the plane wrappers.
        CV_Assert(Y.ptr() == workSurface->Data.Y);
        CV_Assert(UV.ptr() == workSurface->Data.UV);
    }

    // Retry loop: only MFX_WRN_DEVICE_BUSY loops; every other status returns.
    while (true)
    {
        outSurface = 0;
        DBG(cout << "Calling with surface: " << workSurface << endl);
        // A NULL workSurface tells the encoder to drain buffered frames.
        res = encoder->EncodeFrameAsync(NULL, workSurface, &bs->stream, &sync);
        if (res == MFX_ERR_NONE)
        {
            res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify timeout
            if (res == MFX_ERR_NONE)
            {
                // ready to write
                if (!bs->write())
                {
                    MSG(cerr << "MFX: Failed to write bitstream" << endl);
                    return false;
                }
                else
                {
                    DBG(cout << "Write bitstream" << endl);
                    return true;
                }
            }
            else
            {
                MSG(cerr << "MFX: Sync error: " << res << endl);
                return false;
            }
        }
        else if (res == MFX_ERR_MORE_DATA)
        {
            // Frame accepted but buffered; encoder needs more input before it
            // can emit a chunk (normal at stream start and during flush).
            DBG(cout << "ERR_MORE_DATA" << endl);
            return false;
        }
        else if (res == MFX_WRN_DEVICE_BUSY)
        {
            DBG(cout << "Waiting for device" << endl);
            // NOTE(review): unit depends on which sleep() is in scope — POSIX
            // sleep() would stall a full second per retry; confirm intended.
            sleep(1);
            continue;
        }
        else
        {
            MSG(cerr << "MFX: Bad status: " << res << endl);
            return false;
        }
    }
}