Example #1
void Triangulator::triangulateFromUpVp(cv::Mat &up, cv::Mat &vp, cv::Mat &xyz){

    std::cerr << "WARNING! NOT FULLY IMPLEMENTED!" << std::endl;
    int N = up.rows * up.cols;

    cv::Mat projPointsCam(2, N, CV_32F);
    // uc, vc are assumed to be class members holding the camera pixel
    // coordinate maps (same size as up/vp); they are not parameters here
    uc.reshape(0,1).copyTo(projPointsCam.row(0));
    vc.reshape(0,1).copyTo(projPointsCam.row(1));

    cv::Mat projPointsProj(2, N, CV_32F);
    up.reshape(0,1).copyTo(projPointsProj.row(0));
    vp.reshape(0,1).copyTo(projPointsProj.row(1));

    cv::Mat Pc(3,4,CV_32F,cv::Scalar(0.0));
    cv::Mat(calibration.Kc).copyTo(Pc(cv::Range(0,3), cv::Range(0,3)));

    cv::Mat Pp(3,4,CV_32F), temp(3,4,CV_32F);
    cv::Mat(calibration.Rp).copyTo(temp(cv::Range(0,3), cv::Range(0,3)));
    cv::Mat(calibration.Tp).copyTo(temp(cv::Range(0,3), cv::Range(3,4)));
    Pp = cv::Mat(calibration.Kp) * temp;

    cv::Mat xyzw;
    cv::triangulatePoints(Pc, Pp, projPointsCam, projPointsProj, xyzw);

    xyz.create(3, N, CV_32F);
    for(int i=0; i<N; i++){
        xyz.at<float>(0,i) = xyzw.at<float>(0,i)/xyzw.at<float>(3,i);
        xyz.at<float>(1,i) = xyzw.at<float>(1,i)/xyzw.at<float>(3,i);
        xyz.at<float>(2,i) = xyzw.at<float>(2,i)/xyzw.at<float>(3,i);
    }

    xyz = xyz.t();
    xyz = xyz.reshape(3, up.rows);
}
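The per-point division by w above can also be done with cv::convertPointsFromHomogeneous; a minimal sketch, assuming xyzw is the 4xN CV_32F output of cv::triangulatePoints:

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

static cv::Mat normalizeHomogeneous(const cv::Mat &xyzw)
{
    cv::Mat xyz3;                                     // becomes N x 1, CV_32FC3
    cv::convertPointsFromHomogeneous(xyzw.t(), xyz3);
    return xyz3.reshape(1, xyzw.cols).t();            // 3 x N, CV_32F
}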
Example #2
//===========================================================================
void removeMean(cv::Mat &shape, double &tx, double &ty)
{
  cv::Mat avg;
  int n = shape.rows;
  shape = shape.reshape(0,2);
  cv::reduce(shape, avg, 1, CV_REDUCE_AVG);
  // .rl is assumed to be a project macro for element access (e.g. at<double>)
  shape.row(0) = shape.row(0) - avg.rl(0,0);
  shape.row(1) = shape.row(1) - avg.rl(1,0);

  shape = shape.reshape(0, n);

  tx = avg.rl(0,0);
  ty = avg.rl(1,0);
}
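The .rl accessor above is project-specific; a standalone sketch of the same centering step using only standard cv::Mat calls, assuming the shape vector stores all x coordinates followed by all y coordinates as CV_64F:

#include <opencv2/core.hpp>

static void removeMeanSketch(cv::Mat &shape, double &tx, double &ty)
{
  CV_Assert(shape.type() == CV_64FC1 && shape.cols == 1 && shape.rows % 2 == 0);
  int n = shape.rows;
  cv::Mat xy = shape.reshape(0, 2);        // row 0: x coords, row 1: y coords
  cv::Mat avg;
  cv::reduce(xy, avg, 1, cv::REDUCE_AVG);  // 2 x 1 column of per-row means
  tx = avg.at<double>(0, 0);
  ty = avg.at<double>(1, 0);
  xy.row(0) = xy.row(0) - tx;              // MatExpr assignment writes into the row in place
  xy.row(1) = xy.row(1) - ty;
  shape = xy.reshape(0, n);                // back to 2n x 1 (same data)
}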
Example #3
// Compute histogram and CDF for an image with mask
void Preprocessor::do1ChnHist(const cv::Mat &_i, const cv::Mat &mask, double* h, double* cdf)
{
    cv::Mat _t = _i.reshape(1,1);
    cv::Mat _tm;

    mask.copyTo(_tm);
    _tm = _tm.reshape(1,1);

    for(int p=0;p<_t.cols;p++)
    {
        if(_tm.at<uchar>(0,p) > 0)
        {
            uchar c = _t.at<uchar>(0,p);
            h[c] += 1.0;
        }
    }

    //normalize hist in place so the caller-provided buffer h is updated
    //(a plain "_tmp = _tmp / maxVal" would only rebind the header)
    cv::Mat _tmp(1,256,CV_64FC1,h);
    double minVal,maxVal;
    cv::minMaxLoc(_tmp,&minVal,&maxVal);
    _tmp /= maxVal;

    cdf[0] = h[0];
    for(int j=1;j<256;j++)
    {
        cdf[j] = cdf[j-1]+h[j];
    }

    //normalize CDF in place through a second header over the cdf buffer
    cv::Mat _cdfMat(1,256,CV_64FC1,cdf);
    cv::minMaxLoc(_cdfMat,&minVal,&maxVal);
    _cdfMat /= maxVal;
}
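do1ChnHist is typically used for histogram matching; a minimal sketch of turning two normalized CDFs into an 8-bit lookup table (hypothetical helper, not part of the class above):

#include <opencv2/core.hpp>

static cv::Mat buildMatchLut(const double srcCdf[256], const double refCdf[256])
{
    cv::Mat lut(1, 256, CV_8U);
    int last = 0;
    for (int i = 0; i < 256; ++i)
    {
        // smallest reference level whose CDF reaches the source CDF
        while (last < 255 && refCdf[last] < srcCdf[i])
            ++last;
        lut.at<uchar>(0, i) = static_cast<uchar>(last);
    }
    return lut;   // apply with cv::LUT(src, lut, dst)
}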
void DigitRecognizer::preProcessImage(const cv::Mat& inImage, cv::Mat& outImage)
{
    cv::Mat grayImage,blurredImage,thresholdImage,contourImage,regionOfInterest;
    std::vector<std::vector<cv::Point> > contours;

    if (inImage.channels()==3) {
        cv::cvtColor(inImage,grayImage , CV_BGR2GRAY);
    } else {
        inImage.copyTo(grayImage);
    }
    blurredImage = grayImage;
    //cv::GaussianBlur(grayImage, blurredImage, cv::Size(3, 3), 2, 2);
    cv::adaptiveThreshold(blurredImage, thresholdImage, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 3, 0);

    int rows = thresholdImage.rows;
    int cols = thresholdImage.cols;
    for (int r=0; r<rows; r++) {
        for (int c=0; c<cols; c++) {
            uchar p = thresholdImage.at<uchar>(r,c);
            if (p==0) continue;
            bool allZeros = true;

            for (int i=-1; i<=1; i++) {
                for (int j=-1; j<=1; j++) {
                    if (i==0 && j==0) continue;
                    if (allZeros && (r+i>-1) && (r+i<rows) && (c+j>-1) && (c+j<cols)
                            && (thresholdImage.at<uchar>(r+i,c+j)==p)) {
                        allZeros = false;
                    }
                }
            }
            if (allZeros == true) {
                thresholdImage.at<uchar>(r,c) = 0;
            }
        }
    }

    thresholdImage.copyTo(contourImage);
    cv::findContours(contourImage, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

    // guard against an empty contour list before indexing into it;
    // otherwise pick the contour with the most points as the digit candidate
    cv::Rect rec(0, 0, thresholdImage.cols, thresholdImage.rows);
    if (!contours.empty())
    {
        size_t idx = 0, maxPoints = 0;
        for (size_t i = 0; i < contours.size(); i++)
        {
            if (maxPoints < contours[i].size())
            {
                idx = i;
                maxPoints = contours[i].size();
            }
        }
        rec = cv::boundingRect(contours[idx]);
    }

    regionOfInterest = thresholdImage(rec);
    // sizex, sizey and sizeimage are assumed to be class members describing the
    // classifier's expected input dimensions
    cv::resize(regionOfInterest, outImage, cv::Size(sizex, sizey));
    outImage = outImage.reshape(0, sizeimage).t();
}
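A sketch of a hypothetical caller: the flattened row produced by preProcessImage is converted to a single CV_32F sample before being handed to an ml model (a cv::ml::KNearest is assumed here; the actual classifier is not shown):

#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>

static float classifyDigit(const cv::Ptr<cv::ml::KNearest> &knn, const cv::Mat &flatDigit)
{
    cv::Mat sample;
    flatDigit.reshape(1, 1).convertTo(sample, CV_32F);   // 1 x (sizex*sizey) row
    cv::Mat results;
    return knn->findNearest(sample, /*k=*/5, results);   // predicted label
}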
Example #5
void rotateFlowCloud(cv::Mat & out_flow, const cv::Mat & in_flow, const cv::Mat & transform_mat)
{
	Mat flow_reshaped;
	flow_reshaped = in_flow.reshape(1, in_flow.rows * in_flow.cols);

	//transform
	out_flow = transform_mat*flow_reshaped.t();
	out_flow.convertTo(out_flow, CV_32FC1);
}
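A sketch of a hypothetical usage: the transform is built with cv::Rodrigues so that its type matches the reshaped CV_32F flow, which the matrix product above requires:

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

static void rotateFlowExample(const cv::Mat &flow /* CV_32FC3 */, cv::Mat &rotated)
{
	cv::Mat rvec = (cv::Mat_<float>(3, 1) << 0.f, 0.f, (float)CV_PI / 2);
	cv::Mat R;
	cv::Rodrigues(rvec, R);            // 3x3, CV_32F because rvec is CV_32F
	rotateFlowCloud(rotated, flow, R); // result: 3 x (rows*cols), CV_32FC1
}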
Example #6
const cv::Mat descriptor::SIFTDescriptor::ComputeDescriptor(const cv::Mat& image, const cv::Mat& position)
{
  const PointArrayPtr points = MatHelper::matToPoints(position);
  const cv::Mat descriptor = computeDescriptorForPoints(image, points);
  const cv::Mat resultDescriptor = descriptor.reshape(0, 1).t();
  
  // push_back requires the element type to match the Mat type; OpenCV SIFT
  // descriptors are CV_32F, so append a float (clone first to avoid aliasing
  // resultDescriptor's buffer)
  const float kExtendedPart = 1.0f;
  cv::Mat extendedDescriptor = resultDescriptor.clone();
  extendedDescriptor.push_back(kExtendedPart);
  
  return extendedDescriptor;
}
Example #7
void cameraToWorld(const cv::Mat &R_in, const cv::Mat &T_in, const cv::Mat & in_points_ori,
                   cv::Mat &points_out) {
  cv::Mat_<float> R, T, in_points;
  R_in.convertTo(R, CV_32F);
  T_in.reshape(1, 1).convertTo(T, CV_32F);
  if (in_points_ori.empty() == false)
  {
    in_points_ori.reshape(1, in_points_ori.size().area()).convertTo(in_points, CV_32F);
    cv::Mat_<float> T_repeat;
    cv::repeat(T, in_points.rows, 1, T_repeat);

    // Apply the inverse translation/rotation
    cv::Mat points = (in_points - T_repeat) * R;
    // Reshape to the original size
    points_out = points.reshape(3, in_points_ori.rows);
  }
  else
  {
    points_out = cv::Mat();
  }
}
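A quick consistency sketch, assuming R and T follow x_cam = R * x_world + T (so the function maps camera-frame points back to world coordinates):

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

static void roundTripCheck()
{
  cv::Mat rvec = (cv::Mat_<float>(3, 1) << 0.1f, -0.2f, 0.3f), R;
  cv::Rodrigues(rvec, R);
  cv::Mat T  = (cv::Mat_<float>(3, 1) << 1.f, 2.f, 3.f);
  cv::Mat xw = (cv::Mat_<float>(3, 1) << 4.f, 5.f, 6.f);
  cv::Mat xc = R * xw + T;                             // world -> camera

  cv::Mat camPoints(1, 1, CV_32FC3, xc.ptr<float>());  // one 3-channel point
  cv::Mat back;
  cameraToWorld(R, T, camPoints, back);                // should recover (4, 5, 6)
}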
float sg::median( cv::Mat const & matrix )
{
  // Clone: cv::sort would otherwise reorder the caller's data in place through
  // the shared reshape header.
  cv::Mat vec = matrix.reshape( 0, 1 ).clone();
  cv::sort( vec, vec, cv::SORT_EVERY_ROW | cv::SORT_ASCENDING );

  int half = vec.cols / 2;
  float y = vec.at<float>( 0, half );

  if( ( 2 * half ) == vec.cols )
    y = ( vec.at<float>( 0, half - 1 ) + y ) / 2.0f;

  return y;
}
Example #9
static int countViolations(const cv::Mat& expected, const cv::Mat& actual, const cv::Mat& diff, double eps, double* max_violation = 0, double* max_allowed = 0)
{
    cv::Mat diff64f;
    diff.reshape(1).convertTo(diff64f, CV_64F);

    cv::Mat expected_abs = cv::abs(expected.reshape(1));
    cv::Mat actual_abs = cv::abs(actual.reshape(1));
    cv::Mat maximum, mask;
    cv::max(expected_abs, actual_abs, maximum);
    // scale = 1; the output depth flag is the fifth argument of cv::multiply
    cv::multiply(maximum, cv::Vec<double, 1>(eps), maximum, 1, CV_64F);
    cv::compare(diff64f, maximum, mask, cv::CMP_GT);

    int v = cv::countNonZero(mask);

    if (v > 0 && max_violation != 0 && max_allowed != 0)
    {
        int loc[10];
        cv::minMaxIdx(maximum, 0, max_allowed, 0, loc, mask);
        // minMaxIdx fills loc as (row, col) for a 2-D matrix
        *max_violation = diff64f.at<double>(loc[0], loc[1]);
    }

    return v;
}
Example #10
// -----------------------------------------------------------------------------
//
// Purpose and Method: 
// Inputs: 
// Outputs: 
// Dependencies:
// Restrictions and Caveats:
//
//    The motion params are always from template to current image and with this
//    method we have to use the inverse motion model.
// -----------------------------------------------------------------------------  
cv::Mat
Homography2D::transformCoordsToTemplate
  (
  cv::Mat coords,
  cv::Mat params   
  )
{
  cv::Mat H = params.reshape(1,3).t();
  cv::Mat invH      = H.inv().t();                                        

  cv::Mat inv_params = invH.reshape(1,9);
  
  return transformCoordsToImage(coords, inv_params);  
};
Example #11
static void randu(cv::Mat& m)
{
    const int bigValue = 0x00000FFF;
    if (m.depth() < CV_32F)
    {
        int minmax[] = {0, 256};
        cv::Mat mr = cv::Mat(m.rows, (int)(m.cols * m.elemSize()), CV_8U, m.ptr(), m.step[0]);
        cv::randu(mr, cv::Mat(1, 1, CV_32S, minmax), cv::Mat(1, 1, CV_32S, minmax + 1));
    }
    else if (m.depth() == CV_32F)
    {
        //float minmax[] = {-FLT_MAX, FLT_MAX};
        float minmax[] = {-bigValue, bigValue};
        cv::Mat mr = m.reshape(1);
        cv::randu(mr, cv::Mat(1, 1, CV_32F, minmax), cv::Mat(1, 1, CV_32F, minmax + 1));
    }
    else
    {
        //double minmax[] = {-DBL_MAX, DBL_MAX};
        double minmax[] = {-bigValue, bigValue};
        cv::Mat mr = m.reshape(1);
        cv::randu(mr, cv::Mat(1, 1, CV_64F, minmax), cv::Mat(1, 1, CV_64F, minmax + 1));
    }
}
Example #12
void getOffset(cv::Mat& src1,cv::Mat& src2,int offset[3])
{
	int ch=src1.channels();

	cv::Mat Vector1,Vector2;
	// reshape(3, ...) assumes 3-channel inputs (e.g. CV_8UC3); the loop below
	// iterates over ch channels, so ch is expected to be 3
	Vector1=src1.reshape(3,src1.rows*src1.cols);
	Vector2=src2.reshape(3,src2.rows*src2.cols);
	std::vector<cv::Mat> singleVector1,singleVector2;
	cv::split(Vector1,singleVector1);
	cv::split(Vector2,singleVector2);

	int iAve1[3],iAve2[3];

	for(int i=0;i<ch;i++)
	{
		cv::Mat cvAve1,cvAve2;
		cv::reduce(singleVector1[i],cvAve1,0,CV_REDUCE_AVG); 
		cv::reduce(singleVector2[i],cvAve2,0,CV_REDUCE_AVG); 

		iAve1[i]=cvAve1.at<uchar>(0,0);
		iAve2[i]=cvAve2.at<uchar>(0,0);
		offset[i]=iAve1[i]-iAve2[i];
	}
}
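A sketch of a hypothetical usage: apply the per-channel offsets to src2 so that its channel means match those of src1 (both assumed CV_8UC3):

#include <opencv2/core.hpp>

static cv::Mat alignMeans(cv::Mat &src1, cv::Mat &src2)
{
	int offset[3];
	getOffset(src1, src2, offset);
	cv::Mat aligned;
	cv::add(src2, cv::Scalar(offset[0], offset[1], offset[2]), aligned);  // saturating add
	return aligned;
}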
Example #13
	//transform coordinates in the Lie algebra basis to a matrix in the Lie group
	inline cv::Mat algebra2group(const cv::Mat &lieAlgebraCoords) const
	{
		Mat coords = lieAlgebraCoords.reshape(0, 1);
		assert( coords.cols == basis.size() );
		assert( coords.type() == CV_64FC1 );

		const int dim = 3;
		Mat lieAlgebraMat(dim, dim, CV_64F, Scalar(0));
		for (size_t i = 0; i < basis.size(); i++) {
			lieAlgebraMat += basis[i].vector2mat(coords.at<double> (0, i));
		}

		Mat lieGroupMat = matrixExp(lieAlgebraMat);
		return lieGroupMat;
	}
Example #14
void corruptSaltPepper(cv::Mat& img, const float ratio)
{
    std::srand((unsigned int)std::time(0));

    // img is assumed to be CV_32FC1 with values in [0, 1]
    cv::Mat imgv = img.reshape(0, 1);

    for (int i = 0; i < img.cols * img.rows; i++)
    {
        const float rnd = (std::rand() % 10000) / 10000.0f;

        if (rnd <= ratio / 2.0f) {
            imgv.at<float>(i) = 0;
        } else if (rnd <= ratio) {
            imgv.at<float>(i) = 1;
        }
    }
}
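A sketch of a hypothetical caller: the function indexes the image as float, so the input is converted to CV_32F in [0, 1] first:

#include <string>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>

static cv::Mat makeNoisySample(const std::string &path, float ratio)
{
    cv::Mat img = cv::imread(path, cv::IMREAD_GRAYSCALE);
    img.convertTo(img, CV_32F, 1.0 / 255.0);   // CV_32FC1 in [0, 1]
    corruptSaltPepper(img, ratio);
    return img;
}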
Example #15
cv::Mat EyeTrackProcessor::calcGradientMagnitude( const cv::Mat& im ) {
  
  // Convert the gradient image (assumed CV_64FC2: dx, dy per pixel) to a
  // vector of points
  cv::Mat grad;
  grad = im.reshape(1, im.rows*im.cols);
  cv::Mat gradMag(grad.rows, 1, CV_64FC1);
  std::vector<cv::Point2d> gradPts;
  for ( int i=0; i<grad.rows; i++) {
    gradPts.push_back(cv::Point2d(grad.at<double>(i,0), grad.at<double>(i,1)));
  }
  for ( size_t i=0; i<gradPts.size(); ++i ) {
    auto normVal = cv::norm(gradPts[i]);
    // gradPt *= (normVal == 0) ? 0 : 1/cv::norm(gradPt);
    gradMag.at<double>(i) = normVal;
  }
  gradMag = gradMag.reshape( 0, im.rows );
  return gradMag;
}
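A sketch showing one way the assumed CV_64FC2 gradient input could be produced, using cv::Sobel and cv::merge:

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

static cv::Mat makeGradientImage(const cv::Mat &gray)
{
  cv::Mat dx, dy, grad;
  cv::Sobel(gray, dx, CV_64F, 1, 0);
  cv::Sobel(gray, dy, CV_64F, 0, 1);
  std::vector<cv::Mat> channels = { dx, dy };
  cv::merge(channels, grad);              // CV_64FC2, (dx, dy) per pixel
  return grad;
}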
Example #16
template <typename T>
arma::fvec
computeHistogram(const cv::Mat& quantized, const arma::ivec3& levels, bool normalize) {
    BOOST_ASSERT(quantized.depth() == cv::DataType<T>::type);

    const cv::Size& sz = quantized.size();
    cv::Mat data = quantized.reshape(1, sz.width * sz.height);

    BOOST_ASSERT(cv::Size(3, sz.width * sz.height) == data.size());
    BOOST_ASSERT(1 == data.channels());

    const int& hue_bins = levels[0];
    const int& sat_bins = levels[1];
    const int& val_bins = levels[2];

    arma::icube colors(hue_bins, sat_bins, val_bins);
    colors.zeros();

    arma::ivec grays(val_bins + 1);
    grays.zeros();

    // for each HSV tuple increment the corresponding histogram bin
    for (int i = 0; i < data.rows; i++) {
        cv::Mat t = data.row(i);

        const T& h = t.at<T>(0);
        const T& s = t.at<T>(1);
        const T& v = t.at<T>(2);

        if (v == 1) { // black
            grays(0)++;
        }
        else if (s == 1 and v > 1) { // grays
            grays(v-1)++;
        }
        else {
            colors(h-1, s-2, v-2)++;
        }
    }

    colors.reshape(hue_bins * sat_bins * val_bins, 1, 1);
    arma::ivec values = arma::join_cols(arma::ivec(colors), grays);

    return arma::conv_to<arma::fvec>::from(values) /= arma::as_scalar(arma::sum(values));
}
Example #17
// -----------------------------------------------------------------------------
//
// Purpose and Method: 
// Inputs: 
// Outputs: 
// Dependencies:
// Restrictions and Caveats:
//
// -----------------------------------------------------------------------------  
bool
Homography2D::invalidParams
  (
  cv::Mat params
  )
{
  cv::Mat M;
  cv::Mat H = params.reshape(1,3).t();

  cv::SVD svd(H, cv::SVD::NO_UV);
  
  int rank = 0;
  for (int i=0; i<H.rows; i++)
  {
    if (svd.w.at<MAT_TYPE>(i,0)>1.E-6)
    {
      rank++;
    }
  }
  
  SHOW_VALUE(H);
  
  // Set up a reference square and check that its transformed corners stay consistent
  cv::Mat points = (cv::Mat_<MAT_TYPE>(4, 2) <<   0, 0,
                                                100, 0,
		                                100, 100,
		                                  0, 100);
  cv::Mat transformed_points;
  points.copyTo(transformed_points);
//   cv::perspectiveTransform(points, transformed_points, H);
  transformed_points = transformCoordsToImage(points, params);

  double detH = cv::determinant(H);
  
//   return (rank<3) || (!consistentPoints(points, transformed_points, 0, 1, 2)) 
//                   || (!consistentPoints(points, transformed_points, 1, 2, 3)) 
//                   || (!consistentPoints(points, transformed_points, 0, 2, 3)) 
//                   || (!consistentPoints(points, transformed_points, 0, 1, 3)); 
  return (rank<3) ||(detH < (1./10.)) || (detH > 10.) 
                  || (!consistentPoints(points, transformed_points, 0, 1, 2)) 
                  || (!consistentPoints(points, transformed_points, 1, 2, 3)) 
                  || (!consistentPoints(points, transformed_points, 0, 2, 3)) 
                  || (!consistentPoints(points, transformed_points, 0, 1, 3)); 
};
Example #18
bool
DetectorNCC::response(cv::Mat & im, cv::Mat & sh,
		      cv::Size wSize,
		      cv::Mat& visi)
{

  cv::Mat simTransfMat;
  CalcSimT(_refs, sh, simTransfMat);

  int n = sh.rows/2;
  cv::Mat shape = sh.reshape(0,2);

  prob_.resize(n);
  wmem_.resize(n);
  pmem_.resize(n);

#ifdef _OPENMP
#pragma omp parallel for
#endif
  for(int i=0; i<n; i++){
    // it/rl are assumed project macros (at<int> / at<real type>), with CV_REAL
    // the corresponding depth constant
    if(visi.it(i,0) ==0) continue;
    int w = wSize.width + _patch[i]._w - 1; 
    int h = wSize.height + _patch[i]._h - 1;
    simTransfMat.rl(0,2) = shape.rl(0,i);
    simTransfMat.rl(1,2) = shape.rl(1,i);
 
    if((w>wmem_[i].cols) || (h>wmem_[i].rows))
      wmem_[i].create(h,nextMultipleOf4(w),CV_32F);
    
    cv::Mat wimg = wmem_[i](cv::Rect(0,0,w,h));
    CvMat wimg_o = wimg,sim_o = simTransfMat; IplImage im_o = im;
    cvGetQuadrangleSubPix(&im_o,&wimg_o,&sim_o);
    if(wSize.height > pmem_[i].rows)
      pmem_[i].create(wSize.height, nextMultipleOf4(wSize.width), CV_REAL);
    
    prob_[i] = pmem_[i](cv::Rect(cv::Point(0,0),wSize));
    _patch[i].Response(wimg,prob_[i]);
  }

  return true;
}
Example #19
// -----------------------------------------------------------------------------
//
// Purpose and Method: 
// Inputs: 
// Outputs: 
// Dependencies:
// Restrictions and Caveats:
//
// -----------------------------------------------------------------------------  
cv::Mat
Homography2D::scaleInputImageResolution
  (
  cv::Mat params,
  double scale
  )
{
  cv::Mat newH;
  cv::Mat new_params = cv::Mat::eye(params.rows, 1, cv::DataType<MAT_TYPE>::type);
  cv::Mat H          = params.reshape(1,3).t();
  
  cv::Mat S    = cv::Mat::eye(3,3,cv::DataType<MAT_TYPE>::type);
  S.at<MAT_TYPE>(0,0) = scale;
  S.at<MAT_TYPE>(1,1) = scale;

  newH = S*H;
  newH = newH.t();
  newH.reshape(1,9).copyTo(new_params);
  
  return new_params;
}
Example #20
// -----------------------------------------------------------------------------
//
// Purpose and Method: 
// Inputs: 
// Outputs: 
// Dependencies:
// Restrictions and Caveats:
//
// -----------------------------------------------------------------------------  
cv::Mat
Homography2D::transformCoordsToImage
  (
  cv::Mat coords,
  cv::Mat params   
  )
{
  assert(coords.cols == 2); // We need two dimensional coordinates
  
  cv::Mat H                      = params.reshape(1, 3).t();
  cv::Mat homogeneous_coords     = cv::Mat::ones(coords.rows, 3, cv::DataType<MAT_TYPE>::type);
  cv::Mat homogeneous_coords_ref = homogeneous_coords(cv::Range::all(), cv::Range(0, 2));
  coords.copyTo(homogeneous_coords_ref);
  
  cv::Mat homogeneous_new_coords = (homogeneous_coords * H.t());
  
  // Divide by the third homogeneous coordinate to get the cartesian coordinates.
  for (int j=0; j<3; j++)
  {
    cv::Mat col     = homogeneous_new_coords.col(j).mul(1.0 / homogeneous_new_coords.col(2));
    cv::Mat col_new = homogeneous_new_coords.col(j);
    col.copyTo(col_new);
  }
    
  cv::Mat homogeneous_new_coords_ref = homogeneous_new_coords(cv::Range::all(), cv::Range(0, 2)); 
  cv::Mat new_coords;
  homogeneous_new_coords_ref.copyTo(new_coords);
  
#ifdef DEBUG
  // write Mat objects to the file
  cv::FileStorage fs("transformCoordsToImage.xml", cv::FileStorage::WRITE);
  fs << "H" << H;
  fs << "coords" << coords;
  fs << "new_coords" << new_coords;
  fs.release();
#endif
  
  return new_coords;  
};
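The homogeneous-divide loop above is equivalent to what cv::perspectiveTransform does internally; a minimal sketch, assuming MAT_TYPE is double and the same 9x1 parameter layout:

#include <opencv2/core.hpp>

static cv::Mat transformWithPerspective(const cv::Mat &coords /* N x 2, CV_64F */,
                                        const cv::Mat &params /* 9 x 1, CV_64F */)
{
  cv::Mat H = params.reshape(1, 3).t();         // same parameter layout as above
  cv::Mat dst;
  cv::perspectiveTransform(coords.reshape(2, coords.rows), dst, H);
  return dst.reshape(1, coords.rows);           // back to N x 2, single channel
}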
// -----------------------------------------------------------------------------
//
// Purpose and Method: 
// Inputs: 
// Outputs: 
// Dependencies:
// Restrictions and Caveats:
//
// -----------------------------------------------------------------------------  
cv::Mat
Homography2DFactorizedProblem::computeSigmaMatrix
  (
  cv::Mat& params
  )
{ 
  assert(params.rows == 9);
  assert(params.cols == 1);

  cv::Mat H            = params.reshape(1, 3).t();
  cv::Mat invH         = H.inv();
  
  cv::Mat Sigma     = cv::Mat::zeros(9, 9, cv::DataType<MAT_TYPE>::type);
  cv::Mat Sigma_roi = Sigma(cv::Range(0,3), cv::Range(0,3));
  invH.copyTo(Sigma_roi);
  Sigma_roi         = Sigma(cv::Range(3,6), cv::Range(3,6));
  invH.copyTo(Sigma_roi);
  Sigma_roi         = Sigma(cv::Range(6,9), cv::Range(6,9));
  invH.copyTo(Sigma_roi);
      
  return Sigma;
};
char tag_recognition::wordRecognition(cv::Mat &src)
{
    QTime t;
    t.start();
    //check input image
    if((src.cols == 1 && src.rows == 1) || (src.cols > 18 || src.rows > 20) || (float)src.rows/(float)src.cols < 1.0 )
    {
        //        qDebug() << "shit word" << src.cols << src.rows;
        return '!';
    }
    else
    {
//#ifdef DEBUG_TSAI
//        cv::imshow("src",src);
//#endif
        int leftPadding = round((src.rows-src.cols)/2.0);
        int rightPadding = floor((src.rows-src.cols)/2.0);

        cv::copyMakeBorder(src,src,0,0,leftPadding,rightPadding,cv::BORDER_CONSTANT,cv::Scalar(255));
        cv::resize(src,src,cv::Size(16,16));
        cv::equalizeHist(src,src);
        //        cv::imshow("padding",src);


        // convert to float before min-max normalization; normalizing the 8-bit
        // image in place would quantize the values to {0, 1}
        src = src.reshape(1,1);
        src.convertTo(src,CV_32FC1);
        cv::normalize(src,src,0,1,cv::NORM_MINMAX);


        //        SVMModel = cv::ml::StatModel::load<cv::ml::SVM>("svm_grid_search_opt.yaml");

        char result = SVMModel->predict(src);
        //        qDebug() << "result" << result;
        return result;
    }
    //        cv::waitKey(500);
}
Example #23
bool SimplestColorBalance(cv::Mat ori,float upperthresh,float lowerthresh){

	int totalarea=ori.rows*ori.cols;
	upperthresh=upperthresh*totalarea;
	lowerthresh=lowerthresh*totalarea;
	cv::Mat sorted_ori;
	cv::Mat reshapeOri;
	reshapeOri=ori.reshape(0,1);
	cv::sort(reshapeOri,sorted_ori,CV_SORT_ASCENDING  );

	// ori is assumed to be CV_32FC1; the threshold positions are truncated to int
	float Vmin = sorted_ori.at<float>((int)lowerthresh);
	float Vmax = sorted_ori.at<float>((int)((ori.rows*ori.cols-1)-upperthresh));
	for (int i=0; i<ori.rows; i++ ){
		for (int j=0; j<ori.cols; j++ ){
			if(ori.at<float>(i,j)<Vmin)ori.at<float>(i,j)=0;
			else if(ori.at<float>(i,j)>Vmax)ori.at<float>(i,j)=255;
			else ori.at<float>(i,j)= (ori.at<float>(i,j)-Vmin)*255./(Vmax-Vmin);

		}
	}

	return 1;

}
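A sketch of a hypothetical usage: the routine reads the image as float and stretches it in place, so a colour input is converted and processed per channel:

#include <vector>
#include <opencv2/core.hpp>

static cv::Mat balanceBgr(const cv::Mat &bgr8u)
{
	cv::Mat f;
	bgr8u.convertTo(f, CV_32F);
	std::vector<cv::Mat> ch;
	cv::split(f, ch);
	for (size_t i = 0; i < ch.size(); ++i)
		SimplestColorBalance(ch[i], 0.01f, 0.01f);   // clips the lowest/highest 1%
	cv::merge(ch, f);
	f.convertTo(f, CV_8U);
	return f;
}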
void Silhouette::findSimilarityTransformation(const cv::Mat &src, const cv::Mat &dst, Mat &transformationMatrix, int iterationsCount, float min2dScaleChange)
{
    CV_Assert(src.type() == CV_32FC2);
    CV_Assert(dst.type() == CV_32FC2);
    CV_Assert(!transformationMatrix.empty());

    //Ptr<cv::flann::IndexParams> flannIndexParams = new cv::flann::KDTreeIndexParams();
    Ptr<cv::flann::IndexParams> flannIndexParams = new cv::flann::LinearIndexParams();
    cv::flann::Index flannIndex(dst.reshape(1), *flannIndexParams);

    Mat srcTransformationMatrix = transformationMatrix.clone();

    for (int iter = 0; iter < iterationsCount; ++iter)
    {
        Mat transformedPoints;
        transform(src, transformedPoints, transformationMatrix);
        Mat srcTwoChannels = transformedPoints;
        Mat srcOneChannel = srcTwoChannels.reshape(1);

        Mat correspondingPoints = Mat(srcTwoChannels.size(), srcTwoChannels.type());
        for (int i = 0; i < src.rows; ++i)
        {
            vector<float> query = srcOneChannel.row(i);
            int knn = 1;
            vector<int> indices(knn);
            vector<float> dists(knn);
            flannIndex.knnSearch(query, indices, dists, knn, cv::flann::SearchParams());
            Mat row = correspondingPoints.row(i);
            dst.row(indices[0]).copyTo(row);
        }

        const bool isFullAffine = false;
        CV_Assert(srcTwoChannels.channels() == 2);
        CV_Assert(correspondingPoints.channels() == 2);
        CV_Assert(srcTwoChannels.type() == correspondingPoints.type());
        Mat currentTransformationMatrix = Mat::zeros(2, 3, CV_64FC1);
        //currentTransformationMatrix = estimateRigidTransform(srcTwoChannels, correspondingPoints, isFullAffine);

        CvMat matA = srcTwoChannels, matB = correspondingPoints, matM = currentTransformationMatrix;
        bool isTransformEstimated = cvEstimateRigidTransform(&matA, &matB, &matM, isFullAffine );
        if (!isTransformEstimated)
        {
            break;
        }

        Mat composedTransformation;
        composeAffineTransformations(transformationMatrix, currentTransformationMatrix, composedTransformation);
        transformationMatrix = composedTransformation;
    }

    float srcScale = estimateScale(src, srcTransformationMatrix);
    float finalScale = estimateScale(src, transformationMatrix);
    const float eps = 1e-06;
    if (srcScale > eps)
    {
        float scaleChange = finalScale / srcScale;
        if (scaleChange < min2dScaleChange)
        {
            transformationMatrix = srcTransformationMatrix;
        }
    }
}
Example #25
// -----------------------------------------------------------------------------
//
// Purpose and Method: 
// Inputs: 
// Outputs: 
// Dependencies:
// Restrictions and Caveats:
//
// -----------------------------------------------------------------------------  
cv::Mat
Homography2D::warpImage
  (
  cv::Mat image,
  cv::Mat params,
  cv::Mat template_coords,
  std::vector<int>& template_ctrl_points_indices
  )
{
  MAT_TYPE min_x, max_x, min_y, max_y;
  cv::Mat warped_image;

  cv::Mat M;
  cv::Mat H = params.reshape(1,3).t();
  
  // Find minimum x and minimum y in template coords
  cv::MatConstIterator_<MAT_TYPE> it;
  // note: for floating-point MAT_TYPE, numeric_limits::min() is the smallest
  // positive value, so use lowest() when initialising the maxima
  max_x = std::numeric_limits<MAT_TYPE>::lowest();
  max_y = std::numeric_limits<MAT_TYPE>::lowest();
  min_x = std::numeric_limits<MAT_TYPE>::max();
  min_y = std::numeric_limits<MAT_TYPE>::max();

  for (int i = 0; i < template_coords.rows; i++)
  {  
    MAT_TYPE x = template_coords.at<MAT_TYPE>(i,0);
    MAT_TYPE y = template_coords.at<MAT_TYPE>(i,1);
    
    if (x > max_x) max_x = x;
    if (x < min_x) min_x = x;
    if (y > max_y) max_y = y;
    if (y < min_y) min_y = y;
  }

  cv::Mat TR  = (cv::Mat_<MAT_TYPE>(3,3) << 1.,    0,    min_x, 
		                            0,    1.,    min_y, 
		                            0,     0,    1.);
  warped_image = cv::Mat::zeros(max_y-min_y+1, max_x-min_x+1, cv::DataType<uint8_t>::type);
  
//   if (scale < 0.000000001)
//   {
//     return warped_image;
//   }
  
  // TR is necessary because the Warpers do the warping taking pixel (0,0) as the
  // left- and top-most pixel of the template, so we have to move (0,0) to the
  // template's own coordinate origin.
  M = H*TR;

#ifdef DEBUG
  // write Mat objects to the file
  cv::FileStorage fs("template_coords_warpImage.xml", cv::FileStorage::WRITE);
  fs << "template_coords" << template_coords;
  fs << "M" << M;
  fs.release();
#endif  

  cv::warpPerspective(image, warped_image, M, 
		      cv::Size(warped_image.cols,  warped_image.rows), 
		      cv::INTER_AREA | cv::WARP_INVERSE_MAP);
  return warped_image;
};
void GeneralTransform::SetTransformationByElements(cv::Mat& transform, const cv::Mat& elements)
{
	int dim_transform = transform.rows;
	cv::Mat identity = cv::Mat::eye(dim_transform, dim_transform, CV_64F);
	transform.rowRange(0, dim_transform - 1) = identity.rowRange(0, dim_transform - 1) + elements.reshape(1, transform_dim_ - 1);
}
Example #27
void meshGrid(const cv::Mat &X_val, const cv::Mat &Y_val,
              cv::Mat &X_grid, cv::Mat &Y_grid)
{
  cv::repeat(X_val,Y_val.total(),1,X_grid);
  cv::repeat(Y_val.reshape(1,1).t(),1,X_val.total(),Y_grid);
}
void meshgrid_helper(const cv::Mat &xgv, const cv::Mat &ygv, cv::Mat &X, cv::Mat &Y) {
  cv::repeat(xgv.reshape(1,1), ygv.total(), 1, X);
  cv::repeat(ygv.reshape(1,1).t(), 1, xgv.total(), Y);
}
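A sketch of a hypothetical usage: build pixel-coordinate grids for a w x h image, analogous to MATLAB's meshgrid:

#include <opencv2/core.hpp>

static void pixelGrids(int w, int h, cv::Mat &X, cv::Mat &Y)
{
  cv::Mat xs(1, w, CV_32F), ys(1, h, CV_32F);
  for (int i = 0; i < w; ++i) xs.at<float>(i) = (float)i;
  for (int i = 0; i < h; ++i) ys.at<float>(i) = (float)i;
  meshgrid_helper(xs, ys, X, Y);   // X and Y become h x w, CV_32F
}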
void savePointCloud(const std::string file, cv::InputArray point_cloud, const bool is_binary, cv::InputArray color_image, const bool remove_miss_point)
{
	const cv::Mat points = point_cloud.getMat();
	const bool color_cloud = (!color_image.empty() && (point_cloud.size() == color_image.size()));
	
	if (points.empty())
		return;

	// the loops below read the cloud as cv::Vec3f, so a 3-channel float matrix
	// is required (reshape() returns a new header and does not modify points)
	CV_Assert(points.type() == CV_32FC3);

	if (color_cloud)
	{
		const cv::Mat im = color_image.getMat();
		pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);

		for (int row = 0; row < points.rows; ++row)
		{
			const auto ptr = points.ptr<cv::Vec3f>(row);
			const auto ptr_image = im.ptr<cv::Vec3b>(row);

			for (int col = 0; col < points.cols; ++col)
			{
				const auto val = ptr[col];
				const auto color = ptr_image[col];

				if (!remove_miss_point || (remove_miss_point && val[2] < 9999.9f))
				{
					pcl::PointXYZRGB p;
					// assuming the colour image is BGR (OpenCV default)
					p.x = val[0], p.y = val[1], p.z = val[2], p.b = color[0], p.g = color[1], p.r = color[2];
					cloud->push_back(p);
				}
			}
		}

		if (cloud->size() == 0)
			return;
		else if (is_binary)
			pcl::io::savePLYFileBinary(file, *cloud);
		else
			pcl::io::savePLYFileASCII(file, *cloud);
	}
	else
	{
		pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);

		for (int row = 0; row < points.rows; ++row)
		{
			const auto ptr = points.ptr<cv::Vec3f>(row);

			for (int col = 0; col < points.cols; ++col)
			{
				const auto val = ptr[col];

				if (!remove_miss_point || (remove_miss_point && val[2] < 9999.9f))
					cloud->push_back(pcl::PointXYZ(val[0], val[1], val[2]));
			}
		}

		if (cloud->size() == 0)
			return;
		else if (is_binary)
			pcl::io::savePLYFileBinary(file, *cloud);
		else
			pcl::io::savePLYFileASCII(file, *cloud);
	}
	return;
}