Example #1
// Evaluates a Gaussian kernel with bandwidth SIGMA for all relative shifts between input images X and Y, which must both be MxN. They must also be periodic (i.e., pre-processed with a cosine window).
cv::Mat KCFTracker::gaussianCorrelation(cv::Mat x1, cv::Mat x2)
{
    using namespace FFTTools;
    cv::Mat c = cv::Mat( cv::Size(size_patch[1], size_patch[0]), CV_32F, cv::Scalar(0) );
    // HOG features
    if (_hogfeatures) {
        cv::Mat caux;
        cv::Mat x1aux;
        cv::Mat x2aux;
        for (int i = 0; i < size_patch[2]; i++) {
            x1aux = x1.row(i);   // Procedure to deal with the cv::Mat multichannel bug
            x1aux = x1aux.reshape(1, size_patch[0]);
            x2aux = x2.row(i).reshape(1, size_patch[0]);
            cv::mulSpectrums(fftd(x1aux), fftd(x2aux), caux, 0, true); 
            caux = fftd(caux, true);
            rearrange(caux);
            caux.convertTo(caux,CV_32F);
            c = c + real(caux);
        }
    }
    // Gray features
    else {
        cv::mulSpectrums(fftd(x1), fftd(x2), c, 0, true);
        c = fftd(c, true);
        rearrange(c);
        c = real(c);
    }
    cv::Mat d; 
    cv::max(( (cv::sum(x1.mul(x1))[0] + cv::sum(x2.mul(x2))[0])- 2. * c) / (size_patch[0]*size_patch[1]*size_patch[2]) , 0, d);

    cv::Mat k;
    cv::exp((-d / (sigma * sigma)), k);
    return k;
}
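In the KCF tracker this kernel feeds the ridge-regression training step. A minimal sketch of that use, assuming the _prob (desired Gaussian response), lambda, fftd and complexDivision members/helpers from the same codebase:

// Sketch: the kernel ridge regression solution in the Fourier domain.
cv::Mat k = gaussianCorrelation(x, x);                      // kernel autocorrelation
cv::Mat alphaf = complexDivision(_prob, fftd(k) + lambda);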
Example #2
// Ridgeness operator: F_vv.
//	REF [book] >> section 9.1.2 (p. 254) in "Digital and Medical Image Processing", 2005.
//	REF [book] >> Figure 9.10 & 9.11 (p. 260) in "Digital and Medical Image Processing", 2005.
cv::Mat ImageFilter::RidgenessOperator::operator()(const cv::Mat& img, const std::size_t apertureSize, const double sigma) const
{
	cv::Mat Fx, Fy, Fxx, Fyy, Fxy;
	ImageFilter::computeDerivativesOfImage(img, apertureSize, sigma, Fx, Fy, Fxx, Fyy, Fxy);

	// Compute Fvv.
	// REF [book] >> p. 255 ~ 256 in "Digital and Medical Image Processing", 2005.
	const cv::Mat Fx2(Fx.mul(Fx)), Fy2(Fy.mul(Fy));
	return (Fy2.mul(Fxx) - 2 * Fx.mul(Fy).mul(Fxy) + Fx2.mul(Fyy)) / (Fx2 + Fy2);
}
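Note that the denominator Fx2 + Fy2 vanishes on flat image regions, making the division 0/0 there. A guarded variant of the return statement (a sketch; the epsilon value is an arbitrary choice):

	// Guarded variant: a small epsilon keeps the ridgeness defined on flat patches.
	const double eps = 1e-12;
	return (Fy2.mul(Fxx) - 2 * Fx.mul(Fy).mul(Fxy) + Fx2.mul(Fyy)) / (Fx2 + Fy2 + eps);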
Example #3
static void magnitude(const cv::Mat& I_X, const cv::Mat& I_Y, cv::Mat& I_mag)
{
	CV_Assert(I_X.type() == CV_64F && I_X.type() == I_Y.type());

	I_mag = I_X.mul(I_X) + I_Y.mul(I_Y);
	int s = I_mag.rows * I_mag.cols;
	double* p = (double*)I_mag.data;
	for (int i = 0; i < s; ++i) {
		*p = cv::sqrt(*p);
		p++;
	}
}
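For floating-point inputs, OpenCV already provides this computation as a single call; an equivalent sketch:

	// cv::magnitude computes sqrt(I_X^2 + I_Y^2) element-wise and accepts
	// CV_32F or CV_64F inputs of matching size.
	cv::magnitude(I_X, I_Y, I_mag);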
Example #4
// Isophote curvature operator: -F_vv / F_w.
//	REF [book] >> Figure 9.12 (p. 261) in "Digital and Medical Image Processing", 2005.
cv::Mat ImageFilter::IsophoteCurvatureOperator::operator()(const cv::Mat& img, const std::size_t apertureSize, const double sigma) const
{
	cv::Mat Fx, Fy, Fxx, Fyy, Fxy;
	ImageFilter::computeDerivativesOfImage(img, apertureSize, sigma, Fx, Fy, Fxx, Fyy, Fxy);

	// Compute FvvFw.
	// REF [book] >> p. 255 ~ 256 in "Digital and Medical Image Processing", 2005.
	const cv::Mat Fx2(Fx.mul(Fx)), Fy2(Fy.mul(Fy));
	cv::Mat den;
	cv::pow(Fx2 + Fy2, 1.5, den);
	return -(Fy2.mul(Fxx) - 2 * Fx.mul(Fy).mul(Fxy) + Fx2.mul(Fyy)) / den;
}
Example #5
cv::Mat GuidedFilterMono::filterSingleChannel(const cv::Mat &p) const
{
    cv::Mat mean_p = boxfilter(p, r);
    cv::Mat mean_Ip = boxfilter(I.mul(p), r);
    cv::Mat cov_Ip = mean_Ip - mean_I.mul(mean_p); // this is the covariance of (I, p) in each local patch.

    cv::Mat a = cov_Ip / (var_I + eps); // Eqn. (5) in the paper;
    cv::Mat b = mean_p - a.mul(mean_I); // Eqn. (6) in the paper;

    cv::Mat mean_a = boxfilter(a, r);
    cv::Mat mean_b = boxfilter(b, r);

    return mean_a.mul(I) + mean_b;
}
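boxfilter here is the project's own helper. A minimal sketch of what it presumably computes (assumption: the normalized (2r+1)x(2r+1) window mean used in He et al.'s guided-filter reference code, not the project's actual implementation):

// Hypothetical boxfilter matching the usage above: normalized box mean
// with window radius r.
static cv::Mat boxfilter(const cv::Mat &src, int r)
{
    cv::Mat dst;
    cv::boxFilter(src, dst, -1, cv::Size(2 * r + 1, 2 * r + 1));
    return dst;
}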
Example #6
void computeWeights(const cv::Mat &imagePatch, const spatiogram &qTarget, const spatiogram &pCurrent,
                    const cv::Mat &w, cv::Mat &weights){

    CV_Assert(qTarget.bins==pCurrent.bins);
    cv::Mat sqco;
    cv::divide(qTarget.cd,pCurrent.cd,sqco);
    cv::sqrt(sqco, sqco);
    cv::Mat rel = w.mul( sqco );
    std::vector<cv::Mat> weightC;
    cv::Mat tc = cv::Mat::zeros(imagePatch.rows, imagePatch.cols, CV_64FC1);

    int n = 256/qTarget.bins;
    std::vector<double> bins;
    linspace(bins, 0, 256, n);
    int m = pCurrent.bins/imagePatch.channels();
    for (int l=0; l<imagePatch.channels(); l++){
        for (int j=0; j<m; j++){
            cv::Mat temp;
            binelements(imagePatch, bins, l, j, temp);
            tc = tc + (rel.at<double>(0,l*m+j))*temp;
        }
        weightC.push_back(qTarget.C*tc);
    }
    mat3min( weightC, weights );
    //weights=weightC[0];
}
Example #7
double Matrix::Frobenius(cv::Mat A) {
  CV_Assert(A.size != 0);
  double frobenius;
  cv::Mat square_A;
  square_A = A.mul(A);
  frobenius = sqrt(cv::sum(square_A)[0]);
  return frobenius;
}
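The same value is available directly from OpenCV; an equivalent one-liner:

  // cv::norm with NORM_L2 returns sqrt(sum(A.^2)) over all elements and
  // channels, i.e. the Frobenius norm computed above.
  double frobenius = cv::norm(A, cv::NORM_L2);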
Example #8
double Descriptor::bhattacharyya(cv::Mat k, cv::Mat q)
{
    cv::normalize(k, k, 1, 0, cv::NORM_L1);
    cv::normalize(q, q, 1, 0, cv::NORM_L1);
    
    cv::Mat temp = k.mul(q);
    sqrt(temp, temp);
    
    return (double)sqrt(1 - cv::sum(temp)[0]);
    // sqrt(1-sum(sqrt(k.*q)))
}
Example #9
// Compute a 3D object (shape/texture/expression) from weights, given a 3DMM basis (MU, PCs, EV). Output size is Vx3.
//       The input "weight" is a column vector of floats.
cv::Mat BaselFaceEstimator::coef2object(cv::Mat weight, cv::Mat MU, cv::Mat PCs, cv::Mat EV){
	int M = weight.rows;
	Mat tmpShape;
	if (M == 0) 
		tmpShape = MU.clone();
	else {
		Mat subPC = PCs(Rect(0,0,M,PCs.rows));
		Mat subEV = EV(Rect(0,0,1,M));
		tmpShape = MU + subPC * weight.mul(subEV);
	}
	return tmpShape.reshape(1,tmpShape.rows/3);
}
Example #10
// The Bhattacharyya distance between vector k and vector q.
float ReidDescriptor::bhattacharyya(cv::Mat k, cv::Mat q)
{
	cv::normalize(k, k, 1, 0, cv::NORM_L1);
	cv::normalize(q, q, 1, 0, cv::NORM_L1);

	//show the histograms
	//drawHist("hist1", k);
	//drawHist("hist2", q);

	cv::Mat temp = k.mul(q);
	sqrt(temp, temp);

	return (float)sqrt(1 - cv::sum(temp)[0]); // sqrt(1-sum(sqrt(k.*q)))
}
Example #11
void Metric::si_filter(cv::Mat& src, cv::Mat& si_filtered, cv::Mat& hfiltered,  cv::Mat& vfiltered, int len){
	
	// Build a len x len kernel whose row i is filled with filterMask[i].
	std::vector<float> filterMask(len);
	getFilterMask(len, filterMask.data());

	cv::Mat kernel(len, len, CV_32F);
	for (int i = 0; i < len; i++)
		for (int j = 0; j < len; j++)
			kernel.at<float>(i, j) = filterMask[i];
	cv::Mat tmp;

	cv::filter2D(src, hfiltered, -1, kernel);
	cv::filter2D(src, vfiltered, -1, kernel.t());

	cv::Mat h2 = hfiltered.mul(hfiltered);
	cv::Mat v2 = vfiltered.mul(vfiltered);

	cv::add(h2,v2, tmp); 
	cv::sqrt(tmp, si_filtered);	 
}
Example #12
//===========================================================================
void MPatch::Response(cv::Mat &im,cv::Mat &resp)
{
  assert((im.type() == CV_32F) && (resp.type() == CV_64F));
  assert((im.rows >= _h) && (im.cols >= _w));
  int h = im.rows - _h + 1, w = im.cols - _w + 1;
  if(resp.rows != h || resp.cols != w)resp.create(h,w,CV_64F);
  if(res_.rows != h || res_.cols != w)res_.create(h,w,CV_64F);
  if(_p.size() == 1){
    _p[0].Response(im,resp); sum2one(resp);
  }
  else{
    resp = cv::Scalar(1.0);
    for(int i = 0; i < (int)_p.size(); i++){
      _p[i].Response(im,res_); sum2one(res_); resp = resp.mul(res_);
    }
    sum2one(resp);
  }
  return;
}
Example #13
        void apply(const cv::Mat& A, cv::Mat &B)
        {
            Sobel(A, grad_x, ddepth, 1, 0, kernel, scale, delta, cv::BORDER_DEFAULT);
            Sobel(A, grad_y, ddepth, 0, 1, kernel, scale, delta, cv::BORDER_DEFAULT);

            Sobel(grad_x, d_xx, ddepth, 1, 0, kernel, scale, delta, cv::BORDER_DEFAULT);
            Sobel(grad_x, d_xy, ddepth, 0, 1, kernel, scale, delta, cv::BORDER_DEFAULT);
            Sobel(grad_y, d_yy, ddepth, 0, 1, kernel, scale, delta, cv::BORDER_DEFAULT);

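            // The following lines compute the closed-form eigenvalues of the
            // Hessian [d_xx d_xy; d_xy d_yy]:
            //   (d_xx + d_yy)/2 +- sqrt(((d_xx - d_yy)/2)^2 + d_xy^2)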
            diskr = (((d_xx - d_yy) / 2.0).mul(((d_xx - d_yy) / 2.0)) + d_xy.mul(d_xy));
            sqrt(diskr, root);
            largeC = (d_xx + d_yy) / 2.0 + root;
            smallC = (d_xx + d_yy) / 2.0 - root;

            switch (output)
            {
                case picDx:
                    normalize(grad_x, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picDy:
                    normalize(grad_y, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picDxx:
                    normalize(d_xx, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picDxy:
                    normalize(d_xy, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picDyy:
                    normalize(d_yy, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picEVSmall:
                    normalize(smallC, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picEVLarge:
                    normalize(largeC, B, 0, 1, cv::NORM_MINMAX);
                    break;
                case picDerivInput:
                default:
                    B = A;
                    break;
            }
        }
Example #14
void Triangulator::triangulateFromVp(cv::Mat &vp, cv::Mat &xyz){

    // Solve for xyzw using determinant tensor
    cv::Mat C = determinantTensor;
    std::vector<cv::Mat> xyzw(4);
    for(unsigned int i=0; i<4; i++){
//        xyzw[i].create(vp.size(), CV_32F);
        xyzw[i] = C.at<float>(cv::Vec4i(i,0,1,1)) - C.at<float>(cv::Vec4i(i,2,1,1))*uc - C.at<float>(cv::Vec4i(i,0,2,1))*vc -
                C.at<float>(cv::Vec4i(i,0,1,2))*vp + C.at<float>(cv::Vec4i(i,2,1,2))*vp.mul(uc) + C.at<float>(cv::Vec4i(i,0,2,2))*vp.mul(vc);
    }

    // Convert to non homogenous coordinates
    for(unsigned int i=0; i<3; i++)
        xyzw[i] /= xyzw[3];

    // Merge
    cv::merge(std::vector<cv::Mat>(xyzw.begin(), xyzw.begin()+3), xyz);

}
Example #15
/**
 * Interpolate D19 pixel values
 */
void CalibrationFilter::interpolate()
{
    // Gaussian interpolation mask
    float coeff[9] = {
        .707,  1.0, .707,
         1.0,  0.0,  1.0,
        .707,  1.0, .707 };
    const cv::Mat mask(3, 3, CV_32F, coeff);

    for(int i=1; i<mFrame32F_big.rows-1; ++i)
    {
        for(int j=1; j<mFrame32F_big.cols-1; ++j)
        {
            // Check if pixel is dead
            if(mAlive_big.at<unsigned char>(i, j) == 0)
            {
                // Dead pixel: build interpolation matrix
                cv::Mat neighbor( 3, 3, CV_32F );
                mAlive_big( cv::Range(i-1, i+2), cv::Range(j-1, j+2) ).convertTo( neighbor, CV_32F );

                //QLOG_DEBUG() << TAG << "neighbor sum" << cv::sum( neighbor )[0];

                // Compute interpolated value
                // 'M' is the same as 'mask' but with 0-coefficient over dead pixels
                const cv::Mat M = mask.mul( neighbor, 1.0 );

                // 'roi' Region of interest is the 3x3 region centered on the dead pixel
                const cv::Mat roi = mFrame32F_big( cv::Range(i-1, i+2), cv::Range(j-1, j+2) );
                double sum = cv::sum( M )[0];
                if(sum >= 1.0)
                {
                    mFrame32F_big.at<float>(i, j) = M.dot( roi ) / sum;
                }
                else
                {
                    mFrame32F_big.at<float>(i, j) = 0;
                }
            }
        }
    }
}
Example #16
File: MyLib.cpp Project: jgmao/MTC
double mylib::WeightedSum(const cv::Mat& A, const cv::Mat& W)
{
	CV_Assert(A.size() == W.size());
	double result = 0.0;
	cv::Scalar s;
	vector<Mat> Wv;
	Mat W3;
	for (int i=0; i< A.channels(); i++)
	{
		Wv.push_back(W);
	}
	cv::merge(Wv,W3);
	W3 = A.mul(W3);
	s = cv::sum(W3);
	for (int i=0; i<A.channels(); i++)
		result += s[i];
	return result / A.channels();


}
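For the single-channel case the merge and loop above reduce to a dot product; an equivalent sketch (assuming A and W are single-channel mats of the same type):

	// Single-channel equivalent: sum of element-wise products in one call.
	double ws = A.dot(W);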
Example #17
void GeneralTransform::Rejection(cv::Mat& diff, cv::Mat& filtered_diff, cv::Mat& query_cloud, cv::Mat& filtered_query_cloud, double threshold)
{
    if(threshold != 0 && diff.rows > 10)
    {
        cv::Mat dist, idx, tmp_diff;
        tmp_diff = diff.mul(diff);
        cv::reduce(tmp_diff, dist, 1, cv::REDUCE_SUM);
        cv::sqrt(dist, dist);
        cv::sortIdx(dist, idx, cv::SORT_EVERY_COLUMN + cv::SORT_ASCENDING);
        int count = (int)(dist.rows * (1 - threshold));
        /*while(count < dist.rows && dist.at<double>(idx.at<int>(count, 0), 0) < threshold)
            count++;*/
        // std::cout << "original: " << diff.rows << " filtered: " << count << std::endl;
        filtered_diff = cv::Mat::zeros(count, diff.cols, CV_64F);
        filtered_query_cloud = cv::Mat::zeros(count, query_cloud.cols, CV_64F);
        
        for(int i = 0; i < count; i++)      
        {
            // diff
            for(int m = 0; m < diff.cols; m++)
                filtered_diff.at<double>(i, m) = diff.at<double>(idx.at<int>(i, 0), m);
            // query_cloud      
            for(int n = 0; n < query_cloud.cols; n++)
                filtered_query_cloud.at<double>(i, n) = query_cloud.at<double>(idx.at<int>(i, 0), n);       
        }
    }
    else
    {
        filtered_diff = cv::Mat::zeros(diff.rows, diff.cols, CV_64F);
        diff.copyTo(filtered_diff);
        filtered_query_cloud = cv::Mat::zeros(query_cloud.rows, query_cloud.cols, CV_64F);
        query_cloud.copyTo(filtered_query_cloud);
    }


}
Example #18
/**
 * @brief This function computes the Perona and Malik conductivity coefficient g2
 * g2 = 1 / (1 + dL^2 / k^2)
 * @param Lx First order image derivative in X-direction (horizontal)
 * @param Ly First order image derivative in Y-direction (vertical)
 * @param dst Output image
 * @param k Contrast factor parameter
 */
void pm_g2(const cv::Mat &Lx, const cv::Mat& Ly, cv::Mat& dst, float k) {
    dst = 1./(1. + (Lx.mul(Lx) + Ly.mul(Ly))/(k*k));
}
Example #19
/**-----------------------------------------------------------*
 * @fn  DrawTransPinP
 * @brief   Draws a transparent foreground image over a background (picture-in-picture)
 * @param[out] img_dst
 * @param[in ] transImg Foreground image; must have an alpha channel (CV_8UC4)
 * @param[in ] baseImg  Background image; no alpha channel needed (CV_8UC3)
 *------------------------------------------------------------*/
void DrawTransPinP(cv::Mat &img_dst, const cv::Mat transImg, const cv::Mat baseImg, vector<cv::Point2f> tgtPt)
{
    cv::Mat img_rgb, img_aaa, img_1ma;
    vector<cv::Mat>planes_rgba, planes_rgb, planes_aaa, planes_1ma;
    int maxVal = pow(2, 8*baseImg.elemSize1())-1;
 
    // The foreground must be RGBA and the background RGB; both must have the same bit depth.
    if(transImg.data==NULL || baseImg.data==NULL || transImg.channels()<4 ||baseImg.channels()<3 || (transImg.elemSize1()!=baseImg.elemSize1()) )
    {
        img_dst = cv::Mat(100,100, CV_8UC3);
        img_dst = cv::Scalar::all(maxVal);
        return;
    }
 
    // If no destination coordinates are given, center the foreground on the background image.
    if(tgtPt.size()<4)
    {
        // Coordinates: display at the center of the background image
        int ltx = (baseImg.cols - transImg.cols)/2;
        int lty = (baseImg.rows - transImg.rows)/2;
        int ww  = transImg.cols;
        int hh  = transImg.rows;
 
        tgtPt.push_back(cv::Point2f(ltx   , lty));
        tgtPt.push_back(cv::Point2f(ltx+ww, lty));
        tgtPt.push_back(cv::Point2f(ltx+ww, lty+hh));
        tgtPt.push_back(cv::Point2f(ltx   , lty+hh));
    }
 
    // Build the perspective-transform matrix
    vector<cv::Point2f>srcPt;
    srcPt.push_back( cv::Point2f(0, 0) );
    srcPt.push_back( cv::Point2f(transImg.cols-1, 0) );
    srcPt.push_back( cv::Point2f(transImg.cols-1, transImg.rows-1) );
    srcPt.push_back( cv::Point2f(0, transImg.rows-1) );
    cv::Mat mat = cv::getPerspectiveTransform(srcPt, tgtPt);
 
    // Create an alpha-capable image with the same width and height as the output
    cv::Mat alpha0(baseImg.rows, baseImg.cols, transImg.type() );
    alpha0 = cv::Scalar::all(0);
    cv::warpPerspective(transImg, alpha0, mat,alpha0.size(), cv::INTER_CUBIC, cv::BORDER_TRANSPARENT);
 
    // Split into channels
    cv::split(alpha0, planes_rgba);
 
    // Convert the RGBA image to RGB
    planes_rgb.push_back(planes_rgba[0]);
    planes_rgb.push_back(planes_rgba[1]);
    planes_rgb.push_back(planes_rgba[2]);
    merge(planes_rgb, img_rgb);
 
    // Extract the alpha channel from the RGBA image
    planes_aaa.push_back(planes_rgba[3]);
    planes_aaa.push_back(planes_rgba[3]);
    planes_aaa.push_back(planes_rgba[3]);
    merge(planes_aaa, img_aaa);
 
    // Alpha channel for the background
    planes_1ma.push_back(maxVal-planes_rgba[3]);
    planes_1ma.push_back(maxVal-planes_rgba[3]);
    planes_1ma.push_back(maxVal-planes_rgba[3]);
    merge(planes_1ma, img_1ma);
 
    img_dst = img_rgb.mul(img_aaa, 1.0/(double)maxVal) + baseImg.mul(img_1ma, 1.0/(double)maxVal);
}
Example #20
/**
 * @brief This function computes the Perona and Malik conductivity coefficient g2
 * g2 = 1 / (1 + dL^2 / k^2)
 * @param Lx First order image derivative in X-direction (horizontal)
 * @param Ly First order image derivative in Y-direction (vertical)
 * @param dst Output image
 * @param k Contrast factor parameter
 */
void pm_g2(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) {
  dst = 1.0/(1.0+(Lx.mul(Lx)+Ly.mul(Ly))/(k*k));
}
Example #21
/**
 * @brief This function computes the Perona and Malik conductivity coefficient g1
 * g1 = exp(-|dL|^2/k^2)
 * @param Lx First order image derivative in X-direction (horizontal)
 * @param Ly First order image derivative in Y-direction (vertical)
 * @param dst Output image
 * @param k Contrast factor parameter
 */
void pm_g1(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) {
  exp(-(Lx.mul(Lx)+Ly.mul(Ly))/(k*k),dst);
}
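These conductivity functions drive Perona-Malik / KAZE-style nonlinear diffusion. A minimal sketch of producing their inputs, assuming a single-channel CV_32F image img (the presmoothing sigma and contrast factor below are arbitrary placeholders):

  cv::Mat L, Lx, Ly, g;
  cv::GaussianBlur(img, L, cv::Size(0, 0), 1.0);  // presmooth the image
  cv::Scharr(L, Lx, CV_32F, 1, 0);                // first derivative in x
  cv::Scharr(L, Ly, CV_32F, 0, 1);                // first derivative in y
  pm_g1(Lx, Ly, g, 0.01f);                        // conductivity image in (0, 1]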
Example #22
/**
 * @brief This function computes Charbonnier conductivity coefficient gc
 * gc = 1 / sqrt(1 + dL^2 / k^2)
 * @param Lx First order image derivative in X-direction (horizontal)
 * @param Ly First order image derivative in Y-direction (vertical)
 * @param dst Output image
 * @param k Contrast factor parameter
 * @note For more information check the following paper: J. Weickert
 * Applications of nonlinear diffusion in image processing and computer vision,
 * Proceedings of Algorithmy 2000
 */
void charbonnier_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) {
  Mat den;
  cv::sqrt(1.0+(Lx.mul(Lx)+Ly.mul(Ly))/(k*k),den);
  dst = 1.0/ den;
}
Example #23
/**
 * @brief This function computes Weickert conductivity coefficient gw
 * @param Lx First order image derivative in X-direction (horizontal)
 * @param Ly First order image derivative in Y-direction (vertical)
 * @param dst Output image
 * @param k Contrast factor parameter
 * @note For more information check the following paper: J. Weickert
 * Applications of nonlinear diffusion in image processing and computer vision,
 * Proceedings of Algorithmy 2000
 */
void weickert_diffusivity(const cv::Mat& Lx, const cv::Mat& Ly, cv::Mat& dst, const float& k) {
  Mat modg;
  pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg);
  cv::exp(-3.315/modg, dst);
  dst = 1.0 - dst;
}
Example #24
static cv::Mat dequantize(const cv::Mat& m, int step) {
	return m.mul(cv::Scalar::all(step));
}
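Since the factor is uniform across channels, the same dequantization can be written as a plain scalar product; an equivalent sketch:

// Equivalent formulation: per-element multiplication by a uniform scalar.
static cv::Mat dequantize(const cv::Mat& m, int step) {
	return m * static_cast<double>(step);
}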
Example #25
// [ref] undistort_images_using_formula() in ${CPP_RND_HOME}/test/machine_vision/opencv/opencv_image_undistortion.cpp
void KinectSensor::computeHomogeneousImageCoordinates(const cv::Size &imageSize, const cv::Mat &K, const cv::Mat &distCoeffs, cv::Mat &IC_homo, cv::Mat &IC_homo_undist)
{
	// [ref] "Joint Depth and Color Camera Calibration with Distortion Correction", D. Herrera C., J. Kannala, & J. Heikkila, TPAMI, 2012

	// homogeneous image coordinates: zero-based coordinates
	cv::Mat(3, imageSize.height * imageSize.width, CV_64FC1, cv::Scalar::all(1)).copyTo(IC_homo);
	//IC_homo = cv::Mat::ones(3, imageSize.height * imageSize.width, CV_64FC1);
	{
#if 0
		// 0 0 0 ...   0 1 1 1 ...   1 ... 639 639 639 ... 639
		// 0 1 2 ... 479 0 1 2 ... 479 ...   0   1   2 ... 479

		cv::Mat arr(1, imageSize.height, CV_64FC1);
		for (int i = 0; i < imageSize.height; ++i)
			arr.at<double>(0, i) = (double)i;

		for (int i = 0; i < imageSize.width; ++i)
		{
			IC_homo(cv::Range(0, 1), cv::Range(i * imageSize.height, (i + 1) * imageSize.height)).setTo(cv::Scalar::all(i));
			arr.copyTo(IC_homo(cv::Range(1, 2), cv::Range(i * imageSize.height, (i + 1) * imageSize.height)));
		}
#else
		// 0 1 2 ... 639 0 1 2 ... 639 ...   0   1   2 ... 639
		// 0 0 0 ...   0 1 1 1 ...   1 ... 479 479 479 ... 479

		cv::Mat arr(1, imageSize.width, CV_64FC1);
		for (int i = 0; i < imageSize.width; ++i)
			arr.at<double>(0, i) = (double)i;

		for (int i = 0; i < imageSize.height; ++i)
		{
			arr.copyTo(IC_homo(cv::Range(0, 1), cv::Range(i * imageSize.width, (i + 1) * imageSize.width)));
			IC_homo(cv::Range(1, 2), cv::Range(i * imageSize.width, (i + 1) * imageSize.width)).setTo(cv::Scalar::all(i));
		}
#endif
	}

	// homogeneous normalized camera coordinates
	const cv::Mat CC_norm(K.inv() * IC_homo);

	// apply distortion
	{
		//const cv::Mat xn(CC_norm(cv::Range(0,1), cv::Range::all()));
		const cv::Mat xn(CC_norm(cv::Range(0,1), cv::Range::all()) / CC_norm(cv::Range(2,3), cv::Range::all()));
		//const cv::Mat yn(CC_norm(cv::Range(1,2), cv::Range::all()));
		const cv::Mat yn(CC_norm(cv::Range(1,2), cv::Range::all()) / CC_norm(cv::Range(2,3), cv::Range::all()));

		const cv::Mat xn2(xn.mul(xn));
		const cv::Mat yn2(yn.mul(yn));
		const cv::Mat xnyn(xn.mul(yn));
		const cv::Mat r2(xn2 + yn2);
		const cv::Mat r4(r2.mul(r2));
		const cv::Mat r6(r4.mul(r2));

		const double &k1 = distCoeffs.at<double>(0);
		const double &k2 = distCoeffs.at<double>(1);
		const double &k3 = distCoeffs.at<double>(2);
		const double &k4 = distCoeffs.at<double>(3);
		const double &k5 = distCoeffs.at<double>(4);
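		// Note: despite the k1..k5 naming, this ordering matches OpenCV's
		// distCoeffs layout (k1, k2, p1, p2, k3): k3/k4 act as the tangential
		// terms below and k5 as the third radial term.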

		const cv::Mat xg(2.0 * k3 * xnyn + k4 * (r2 + 2.0 * xn2));
		const cv::Mat yg(k3 * (r2 + 2.0 * yn2) + 2.0 * k4 * xnyn);

		const cv::Mat coeff(1.0 + k1 * r2 + k2 * r4 + k5 * r6);
		cv::Mat xk(3, imageSize.height * imageSize.width, CV_64FC1, cv::Scalar::all(1));
		cv::Mat(coeff.mul(xn) + xg).copyTo(xk(cv::Range(0,1), cv::Range::all()));
		cv::Mat(coeff.mul(yn) + yg).copyTo(xk(cv::Range(1,2), cv::Range::all()));

		IC_homo_undist = K * xk;
	}
}
Example #26
cv::Mat GuidedFilterColor::filterSingleChannel(const cv::Mat &p) const
{
    cv::Mat mean_p = boxfilter(p, r);

    cv::Mat mean_Ip_r = boxfilter(Ichannels[0].mul(p), r);
    cv::Mat mean_Ip_g = boxfilter(Ichannels[1].mul(p), r);
    cv::Mat mean_Ip_b = boxfilter(Ichannels[2].mul(p), r);

    // covariance of (I, p) in each local patch.
    cv::Mat cov_Ip_r = mean_Ip_r - mean_I_r.mul(mean_p);
    cv::Mat cov_Ip_g = mean_Ip_g - mean_I_g.mul(mean_p);
    cv::Mat cov_Ip_b = mean_Ip_b - mean_I_b.mul(mean_p);

    cv::Mat a_r = invrr.mul(cov_Ip_r) + invrg.mul(cov_Ip_g) + invrb.mul(cov_Ip_b);
    cv::Mat a_g = invrg.mul(cov_Ip_r) + invgg.mul(cov_Ip_g) + invgb.mul(cov_Ip_b);
    cv::Mat a_b = invrb.mul(cov_Ip_r) + invgb.mul(cov_Ip_g) + invbb.mul(cov_Ip_b);

    cv::Mat b = mean_p - a_r.mul(mean_I_r) - a_g.mul(mean_I_g) - a_b.mul(mean_I_b); // Eqn. (15) in the paper;

    return (boxfilter(a_r, r).mul(Ichannels[0])
          + boxfilter(a_g, r).mul(Ichannels[1])
          + boxfilter(a_b, r).mul(Ichannels[2])
          + boxfilter(b, r));  // Eqn. (16) in the paper;
}
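For comparison, opencv_contrib provides a ready-made guided filter; a minimal usage sketch (requires the ximgproc module; guide image I, input p, radius r and regularization eps as in the class above):

#include <opencv2/ximgproc.hpp>

cv::Mat dst;
cv::ximgproc::guidedFilter(I, p, dst, r, eps);  // guide I, filtering input p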