Example #1
bool gram_schmidt(Mat &src)
{
	Mat a(3, 1, CV_64FC1);
	Mat b(3, 1, CV_64FC1);
	Mat c(3, 1, CV_64FC1);

	src.col(0).copyTo(a);
	src.col(1).copyTo(b);
	src.col(2).copyTo(c);

	// Orthogonalize b against a, then c against both a and b
	b = b - ((a.dot(b)) / (a.dot(a)))*a;
	c = c - a.dot(c) / a.dot(a)*a - b.dot(c)/b.dot(b)*b;

	// Normalize the orthogonal basis vectors
	norm_vec(a);
	norm_vec(b);
	norm_vec(c);

	a.copyTo(src.col(0));
	b.copyTo(src.col(1));
	c.copyTo(src.col(2));

	return true;
}
	void findBlobs::sortTheDistFromThreePoint(const Mat& ip, const Mat& mc, vector<int>& idx)
	{
		idx.clear();
		int numOfCol = mc.cols;

		Mat ip1 = ip.col(0);
		Mat ip1Mat = ip1 * Mat::ones(1,numOfCol, ip.type());
		subtract(ip1Mat, mc, ip1Mat);
		pow(ip1Mat, 2, ip1Mat);

		Mat ip2 = ip.col(1);
		Mat ip2Mat = ip2 * Mat::ones(1,numOfCol, ip.type());
		subtract(ip2Mat, mc, ip2Mat);
		pow(ip2Mat, 2, ip2Mat);

		Mat ip3 = ip.col(2);
		Mat ip3Mat = ip3 * Mat::ones(1,numOfCol, ip.type());
		subtract(ip3Mat, mc, ip3Mat);
		pow(ip3Mat, 2, ip3Mat);

		std::vector<double> dist;
		dist.clear();
		for (int i = 0; i < numOfCol; i++)
		{
			Scalar s = sum(ip1Mat.col(i)) + sum(ip2Mat.col(i)) + sum(ip3Mat.col(i));
			dist.push_back(s[0]);
			idx.push_back(i);
		}
		sort(idx.begin(), idx.end(),
			[& dist](size_t i1, size_t i2) {return dist[i1] <  dist[i2];});
	}
Example #3
void sharpen(const Mat&image, Mat&result)
{
	// allocate the output image
	result.create(image.rows, image.cols, image.type());
	// process every row except the first and the last
	for (int i = 1; i < image.rows - 1; i++){
		const uchar* previous =
			image.ptr<const uchar>(i - 1);
		const uchar* current =
			image.ptr<const uchar>(i);
		const uchar* next =
			image.ptr<const uchar>(i + 1);
		// output row
		uchar* output = result.ptr<uchar>(i);
		for (int j = 1; j < image.cols - 1; j++){
			// saturate_cast clamps the result to the uchar range
			output[j] = cv::saturate_cast<uchar>(
				5 * current[j] - current[j - 1]
				- current[j + 1] - previous[j] - next[j]
				);
		}
	}
	// set the unprocessed border pixels to 0
	result.row(0).setTo(Scalar(0));
	result.row(result.rows - 1).setTo(Scalar(0));
	result.col(0).setTo(Scalar(0));
	result.col(result.cols - 1).setTo(Scalar(0));
}
Example #4
Mat computeConvolution(Mat &m, double sigma, bool highpass) {

    // TODO: only allow 1- or 3-channel input, e.g. CV_Assert(m.channels() == 1 || m.channels() == 3);
    Mat result;

    if (!m.empty()) {
        result = m.clone();
        // Store type to restore it when the convolution is computed
        int type = result.channels() == 1 ? CV_64F : CV_64FC3;
        // Convert the image to a 64F type, (one or three channels)
        result.convertTo(result, type);
        Mat kernel = myGetGaussianKernel1D(sigma, highpass);
        // This kernel is separable, apply convolution for rows and columns separately
        for (int i = 0; i < result.rows; i++) {
            Mat row = result.row(i);
            row = convolutionOperator1D(row, kernel, BORDER_REFLECT);
            row.copyTo(result.row(i));
        }
        for (int i = 0; i < result.cols; i++) {
            Mat col = result.col(i);
            col = convolutionOperator1D(col, kernel, BORDER_REFLECT);
            col.copyTo(result.col(i));
        }
        result.convertTo(result, m.type());
    }
    return result;
}
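// The loop above exploits the separability of the Gaussian kernel by filtering
// rows and columns with the same 1-D kernel. As a point of comparison only (the
// project's own helpers myGetGaussianKernel1D / convolutionOperator1D are not
// shown here), a minimal sketch of the same idea using OpenCV's built-in
// separable filtering; the kernel size derived from sigma is an assumption.
#include <cmath>
#include <opencv2/imgproc.hpp>

cv::Mat separableGaussian(const cv::Mat& src, double sigma)
{
    // An odd kernel size covering roughly +/- 3 sigma.
    int ksize = 2 * static_cast<int>(std::ceil(3 * sigma)) + 1;
    cv::Mat kernel = cv::getGaussianKernel(ksize, sigma, CV_64F);
    cv::Mat dst;
    // The same 1-D kernel is applied along rows (kernelX) and columns (kernelY).
    cv::sepFilter2D(src, dst, -1, kernel, kernel, cv::Point(-1, -1), 0, cv::BORDER_REFLECT);
    return dst;
}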
Example #5
/* traverse the image using its row step */
void sharpen2(const Mat &image, Mat &result) 
{
	result.create(image.size(), image.type()); // allocate if necessary

	int step= image.step1();
	const uchar* previous= image.data;		// ptr to previous row
	const uchar* current=  image.data+step; // ptr to current row
	const uchar* next= image.data+2*step;   // ptr to next row
	uchar *output= result.data+step;		// ptr to output row

	for (int j= 1; j<image.rows-1; j++) { // for each row (except first and last)
		for (int i=1; i<image.cols-1; i++) { // for each column (except first and last)

			output[i]= saturate_cast<uchar>(5*current[i]-current[i-1]-current[i+1]-previous[i]-next[i]); 
		}

		previous+= step;
		current+= step;
		next+= step;
		output+= step;
	}

	// Set the unprocessed pixels to 0
	result.row(0).setTo(cv::Scalar(0));
	result.row(result.rows-1).setTo(cv::Scalar(0));
	result.col(0).setTo(cv::Scalar(0));
	result.col(result.cols-1).setTo(cv::Scalar(0));
}
inline
void
op_fliplr::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_fliplr>& in)
  {
  arma_extra_debug_sigprint();
  
  typedef typename T1::elem_type eT;
  
  const unwrap<T1>  tmp(in.m);
  const Mat<eT> X = tmp.M;
  
  if(&out != &X)
    {
    out.copy_size(X);
    
    for(uword i=0; i<X.n_cols; ++i)
      {
      out.col(i) = X.col(X.n_cols-1 - i);
      }
    }
  else
    {
    const uword N = X.n_cols / 2;
    
    for(uword i=0; i<N; ++i)
      {
      out.swap_cols(i, X.n_cols-1 - i);
      }
    }
  }
//==============================================================================
Mat
shape_model::
procrustes(const Mat &X,
           const int itol,
           const float ftol)
{
    int N = X.cols,n = X.rows/2;
    
    //remove centre of mass
    Mat P = X.clone();
    for(int i = 0; i < N; i++){
        Mat p = P.col(i);
        float mx = 0,my = 0;
        for(int j = 0; j < n; j++){mx += p.fl(2*j); my += p.fl(2*j+1);}
        mx /= n; my /= n;
        for(int j = 0; j < n; j++){p.fl(2*j) -= mx; p.fl(2*j+1) -= my;}
    }
    //optimise scale and rotation
    Mat C_old;
    for(int iter = 0; iter < itol; iter++){
        Mat C = P*Mat::ones(N,1,CV_32F)/N;
        normalize(C,C);
        if(iter > 0){if(norm(C,C_old) < ftol)break;}
        C_old = C.clone();
        for(int i = 0; i < N; i++){
            Mat R = this->rot_scale_align(P.col(i),C);
            for(int j = 0; j < n; j++){
                float x = P.fl(2*j,i),y = P.fl(2*j+1,i);
                P.fl(2*j  ,i) = R.fl(0,0)*x + R.fl(0,1)*y;
                P.fl(2*j+1,i) = R.fl(1,0)*x + R.fl(1,1)*y;
            }
        }
    }
    return P;
}
Example #8
Mat Sharpen(Mat img)
{
    int i, j;
    Mat result;
    cvtColor(img, img, CV_BGR2GRAY);
    namedWindow("grayscale", 1);
    imshow("grayscale", img);
    waitKey(2000);
    result.create(img.size(), img.type());
    for (j = 1; j < img.rows - 1; j++) {
        uchar* previous = img.ptr<uchar>(j - 1);
        uchar* current = img.ptr<uchar>(j);
        uchar* next = img.ptr<uchar>(j + 1);
        uchar* output = result.ptr<uchar>(j);
        for (i = 1; i < img.cols - 1; i++)
        {
            // saturate_cast clamps negative results to 0 and values above 255 to 255
            output[i] = cv::saturate_cast<uchar>(5 * current[i] - current[i - 1] - current[i + 1] - previous[i] - next[i]);
        }
    }
    result.row(0).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
    return result;
}
// sharpen the image
void ncu::sharpen(cv::Mat& img_in, Mat& img_out)
{
	img_out.create(img_in.size(), img_in.type());

	// process the pixels inside the border; the outermost pixels are handled separately below
	for (int row = 1; row < img_in.rows - 1; row++)
	{
		// previous row
		const uchar* previous = img_in.ptr<const uchar>(row - 1);
		// current row being processed
		const uchar* current = img_in.ptr<const uchar>(row);
		// next row
		const uchar* next = img_in.ptr<const uchar>(row + 1);

		uchar *output = img_out.ptr<uchar>(row);
		int ch = img_in.channels();
		int starts = ch;
		int ends = (img_in.cols - 1) * ch;
		for (int col = starts; col < ends; col++)
		{
			// index by channel value rather than by pixel, so that every channel
			// of a multi-channel image is filtered
			output[col] = saturate_cast<uchar>(5 * current[col] - current[col - ch] - current[col + ch] - previous[col] - next[col]);
		}
	} // end of the row loop
	// handle the border: set the outermost pixels to 0
	img_out.row(0).setTo(Scalar::all(0));
	img_out.row(img_out.rows - 1).setTo(Scalar::all(0));
	img_out.col(0).setTo(Scalar::all(0));
	img_out.col(img_out.cols - 1).setTo(Scalar::all(0));
}
Example #10
// Draw bounding boxes on top of an image.
void	showboxes( Mat &img_color, const Mat &boxes )
{
	static const Scalar TopBoxColor = CV_RGB(255,0,0);
	static	const Scalar PartBoxColor = CV_RGB(0,0,255);

	int numfilters = int( boxes.cols / 4.f );
	for( int i=numfilters-1; i>=0; i-- ){
		Mat x1s = boxes.col( 4*i );
		Mat y1s = boxes.col( 4*i+1 );
		Mat x2s = boxes.col( 4*i+2 );
		Mat y2s = boxes.col( 4*i+3 );
		// draw each object
		for( int k=0; k<x1s.rows; k++ ){
			float	x1 = x1s.at<float>(k);
			float	y1 = y1s.at<float>(k);
			float	x2 = x2s.at<float>(k);
			float	y2 = y2s.at<float>(k);
			if( x1==0 && y1==0 && x2==0 && y2==0 )
				continue;
			Point2f		UL( x1, y1 );
			Point2f		BR( x2, y2 );
			if( i>0 )
				rectangle( img_color, UL, BR, PartBoxColor );
			else
				rectangle( img_color, UL, BR, TopBoxColor );
		}
	}

}
Example #11
void Sharpen(const Mat& myImage,Mat& Result)
{
    CV_Assert(myImage.depth() == CV_8U);  // accept only uchar images

    Result.create(myImage.size(),myImage.type());
    const int nChannels = myImage.channels();

    for(int j = 1 ; j < myImage.rows-1; ++j)
    {
        const uchar* previous = myImage.ptr<uchar>(j - 1);
        const uchar* current  = myImage.ptr<uchar>(j    );
        const uchar* next     = myImage.ptr<uchar>(j + 1);

        uchar* output = Result.ptr<uchar>(j);

        for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
        {
            output[i] = saturate_cast<uchar>(5*current[i]
                         -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
        }
    }

    Result.row(0).setTo(Scalar(0));
    Result.row(Result.rows-1).setTo(Scalar(0));
    Result.col(0).setTo(Scalar(0));
    Result.col(Result.cols-1).setTo(Scalar(0));
}
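// The pointer-based loops in these sharpening examples all apply the 3x3 mask
// [0 -1 0; -1 5 -1; 0 -1 0]. As a hedged sketch of the equivalent one-call
// version (an alternative, not the code the examples were written against),
// the same result can be obtained with cv::filter2D:
#include <opencv2/imgproc.hpp>

void sharpenWithFilter2D(const cv::Mat& src, cv::Mat& dst)
{
    // 5*center minus the four direct neighbours, as in the loops above.
    cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
                       0, -1,  0,
                      -1,  5, -1,
                       0, -1,  0);
    // filter2D saturates 8-bit results, like saturate_cast; unlike the loops,
    // it also filters the border pixels using the default border mode.
    cv::filter2D(src, dst, src.depth(), kernel);
}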
Example #12
Mat normalizePCCoeff(Mat pc, float scale, float* Cx, float* Cy, float* Cz, float* MinVal, float* MaxVal)
{
  double minVal=0, maxVal=0;

  Mat x,y,z, pcn;
  pc.col(0).copyTo(x);
  pc.col(1).copyTo(y);
  pc.col(2).copyTo(z);

  float cx = (float) cv::mean(x).val[0];
  float cy = (float) cv::mean(y).val[0];
  float cz = (float) cv::mean(z).val[0];

  cv::minMaxIdx(pc, &minVal, &maxVal);

  x=x-cx;
  y=y-cy;
  z=z-cz;
  pcn.create(pc.rows, 3, CV_32FC1);
  x.copyTo(pcn.col(0));
  y.copyTo(pcn.col(1));
  z.copyTo(pcn.col(2));

  cv::minMaxIdx(pcn, &minVal, &maxVal);
  pcn=(float)scale*(pcn)/((float)maxVal-(float)minVal);

  *MinVal=(float)minVal;
  *MaxVal=(float)maxVal;
  *Cx=(float)cx;
  *Cy=(float)cy;
  *Cz=(float)cz;

  return pcn;
}
Example #13
	void CGraph::getNodes(size_t start_node, size_t num_nodes, Mat& pots) const {
		if (!num_nodes) num_nodes = getNumNodes() - start_node;

		// Assertions
		DGM_ASSERT_MSG(start_node + num_nodes <= getNumNodes(), "The given ranges exceed the number of nodes(%zu)", getNumNodes());

		if (pots.empty() || pots.cols != m_nStates || pots.rows != num_nodes)
			pots = Mat(static_cast<int>(num_nodes), m_nStates, CV_32FC1);
		
		transpose(pots, pots);

#ifdef ENABLE_PPL
		int size = pots.cols;
		int rangeSize = size / (concurrency::GetProcessorCount() * 10);
		rangeSize = MAX(1, rangeSize);
		//printf("Processors: %d\n", concurrency::GetProcessorCount());
		concurrency::parallel_for(0, size, rangeSize, [start_node, size, rangeSize, &pots, this](int i) {
			Mat pot;
			for (int j = 0; (j  < rangeSize) && (i + j < size); j++)
				getNode(start_node + i + j, lvalue_cast(pots.col(i + j)));
		});
#else
		for (int n = 0; n < pots.cols; n++)
			getNode(start_node + n, lvalue_cast(pots.col(n)));
#endif
		transpose(pots, pots);
	}
Example #14
int main()
{
  BenchTimer t;
  int tries = 10;
  int rep = 400000;
  typedef Matrix3f Mat;
  typedef Vector3f Vec;
  Mat A = Mat::Random(3,3);
  A = A.adjoint() * A;

  SelfAdjointEigenSolver<Mat> eig(A);
  BENCH(t, tries, rep, eig.compute(A));
  std::cout << "Eigen:  " << t.best() << "s\n";

  Mat evecs;
  Vec evals;
  BENCH(t, tries, rep, eigen33(A,evecs,evals));
  std::cout << "Direct: " << t.best() << "s\n\n";

  std::cerr << "Eigenvalue/eigenvector diffs:\n";
  std::cerr << (evals - eig.eigenvalues()).transpose() << "\n";
  for(int k=0;k<3;++k)
    if(evecs.col(k).dot(eig.eigenvectors().col(k))<0)
      evecs.col(k) = -evecs.col(k);
  std::cerr << evecs - eig.eigenvectors() << "\n\n";
}
bool estimate_Rt_fromE(const Mat3 & K1, const Mat3 & K2,
  const Mat & x1, const Mat & x2,
  const Mat3 & E, const std::vector<size_t> & vec_inliers,
  Mat3 * R, Vec3 * t)
{
  // Accumulator to find the best solution
  std::vector<size_t> f(4, 0);

  std::vector<Mat3> Es; // Essential,
  std::vector<Mat3> Rs;  // Rotation matrix.
  std::vector<Vec3> ts;  // Translation vectors.

  Es.push_back(E);
  // Recover best rotation and translation from E.
  MotionFromEssential(E, &Rs, &ts);

  //-> Test the 4 solutions with all the points
  assert(Rs.size() == 4);
  assert(ts.size() == 4);

  Mat34 P1, P2;
  Mat3 R1 = Mat3::Identity();
  Vec3 t1 = Vec3::Zero();
  P_From_KRt(K1, R1, t1, &P1);

  for (unsigned int i = 0; i < 4; ++i)
  {
    const Mat3 &R2 = Rs[i];
    const Vec3 &t2 = ts[i];
    P_From_KRt(K2, R2, t2, &P2);
    Vec3 X;

    for (size_t k = 0; k < vec_inliers.size(); ++k)
    {
      const Vec2 & x1_ = x1.col(vec_inliers[k]),
        &x2_ = x2.col(vec_inliers[k]);
      TriangulateDLT(P1, x1_, P2, x2_, &X);
      // Test if point is front to the two cameras.
      if (Depth(R1, t1, X) > 0 && Depth(R2, t2, X) > 0)
      {
        ++f[i];
      }
    }
  }
  // Check the solution:
  const std::vector<size_t>::iterator iter = max_element(f.begin(), f.end());
  if (*iter == 0)
  {
    std::cerr << std::endl << "/!\\There is no right solution,"
      << " probably intermediate results are not correct or no points"
      << " in front of both cameras" << std::endl;
    return false;
  }
  const size_t index = std::distance(f.begin(), iter);
  (*R) = Rs[index];
  (*t) = ts[index];

  return true;
}
Example #16
void cv::sortMatrixColumnsByIndices(const Mat& src, const vector<int>& indices, Mat& dst) {
    dst.create(src.rows, src.cols, src.type());
    for(int idx = 0; idx < indices.size(); idx++) {
        Mat originalCol = src.col(indices[idx]);
        Mat sortedCol = dst.col(idx);
        originalCol.copyTo(sortedCol);
    }
}
Example #17
void cv::sortMatrixByColumn(const Mat& src, Mat& dst, vector<int> sorted_indices) {
	dst.create(src.rows, src.cols, src.type());
	for(int idx = 0; idx < sorted_indices.size(); idx++) {
		Mat originalCol = src.col(sorted_indices[idx]);
		Mat sortedCol = dst.col(idx);
		originalCol.copyTo(sortedCol);
	}
}
void expression_recognizer::getEigenPictures( Mat& a, Mat& b, Mat& c, Mat& d, Mat& e) {
	a = eigenface_model->getMat("mean");
	Mat W = eigenface_model->getMat("eigenvectors");
	b = W.col(0).clone();
	c = W.col(1).clone();
	d = W.col(2).clone();
	e = W.col(3).clone();
}
Example #19
File: Dip2.cpp Project: kziel1/dip
// checks basic properties of the filtering result
void Dip2::test_adaptiveFilter(void){

   Mat input = Mat::ones(9,9, CV_32FC1);
   input.at<float>(4,4) = 255;

   Mat output = adaptiveFilter(input, 5, 255);
   Mat output2 = adaptiveFilter(input, 5, -1);

   if ( (input.cols != output.cols) || (input.rows != output.rows) ){
      cout << "ERROR: Dip2::adaptiveFilter(): input.size != output.size --> Wrong border handling?" << endl;
      return;
   }
   if ( (sum(output.row(0) < 0).val[0] > 0) or
        (sum(output.row(0) > 255).val[0] > 0) or
        (sum(output.row(8) < 0).val[0] > 0) or
        (sum(output.row(8) > 255).val[0] > 0) or
        (sum(output.col(0) < 0).val[0] > 0) or
        (sum(output.col(0) > 255).val[0] > 0) or
        (sum(output.col(8) < 0).val[0] > 0) or
        (sum(output.col(8) > 255).val[0] > 0) ){
      cout << "ERROR: Dip2::adaptiveFilter(): Border of result contains too large/small values --> Wrong border handling?" << endl;
      return;
   }else{
      if ( (sum(output < 0).val[0] > 0) or
           (sum(output > 255).val[0] > 0) ){
         cout << "ERROR: Dip2::adaptiveFilter(): Result contains too large/small values!" << endl;
         return;
      }
   }
   float ref[9][9] = {{0, 0, 0, 0, 0, 0, 0, 0, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., 1, 0},
                      {0, 1, (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., 1, 0},
                      {0, 1, (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., 1, 0},
                      {0, 1, (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., 1, 0},
                      {0, 1, (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., (24+255)/25., 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 0, 0, 0, 0, 0, 0, 0, 0}};
   float ref2[9][9] = {{0, 0, 0, 0, 0, 0, 0, 0, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, 1, (8+255)/9., (8+255)/9., (8+255)/9., 1, 1, 0},
                      {0, 1, 1, (8+255)/9., (8+255)/9., (8+255)/9., 1, 1, 0},
                      {0, 1, 1, (8+255)/9., (8+255)/9., (8+255)/9., 1, 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 1, 1, 1, 1, 1, 1, 1, 0},
                      {0, 0, 0, 0, 0, 0, 0, 0, 0}};
   for(int y=1; y<8; y++){
      for(int x=1; x<8; x++){
         if ( (abs(output.at<float>(y,x) - ref[y][x]) > 0.0001) || (abs(output2.at<float>(y,x) - ref2[y][x]) > 0.0001) ){
            cout << "ERROR: Dip2::adaptiveFilter(): Result contains wrong values!" << endl;
            return;
         }
      }
   }
   cout << "Message: Dip2::adaptiveFilter() seems to be correct" << endl;
}
Example #20
static void apron(Mat v)
{
  int r = v.rows;
  int c = v.cols;
  v.row(0).setTo(0x22);    v.row(1).setTo(0x22);
  v.row(r-2).setTo(0x22);  v.row(r-1).setTo(0x22);
  v.col(0).setTo(0x22);    v.col(1).setTo(0x22);
  v.col(c-2).setTo(0x22);  v.col(c-1).setTo(0x22);
}
void GaussSeidelSolver::computeNormalDifferential(Mat normalVectors, int imageNumRow, Mat& xDifferentialMat, Mat& yDifferentialMat) {
	Mat zVector = normalVectors.col(0).clone();
	Mat xVector = normalVectors.col(1).clone();
	Mat yVector = normalVectors.col(2).clone();

	Mat zMat = zVector.reshape(0, imageNumRow);
	xDifferentialMat = xVector.reshape(0, imageNumRow) / zMat;
	yDifferentialMat = yVector.reshape(0, imageNumRow) / zMat;
}
Example #22
 void getGradient(Mat src, Mat& dst, int pos)
 {
     dst = Mat::zeros(src.size(), CV_32F);
     Mat a, b;
     Mat grad = src.colRange(1, src.cols) - src.colRange(0, src.cols - 1);
     grad.copyTo(dst.colRange(pos, src.cols + pos - 1));
     if(pos == 1) {
         src.col(0).copyTo(dst.col(0));
     }
 }
// Check whether homography transform M actually transforms
// given vectors x1 to x2. Used to check validness of a reconstructed
// homography matrix.
// TODO(sergey): Consider using this in all tests since possible homography
// matrix is not fixed to a single value and different-looking matrices
// might actually correspond to the same exact transform.
void CheckHomography2DTransform(const Mat3 &H,
                                const Mat &x1,
                                const Mat &x2) {
  for (int i = 0; i < x2.cols(); ++i) {
    Vec3 x2_expected = x2.col(i);
    Vec3 x2_observed = H * x1.col(i);
    x2_observed /= x2_observed(2);
    EXPECT_MATRIX_NEAR(x2_expected, x2_observed, 1e-8);
  }
}
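// A minimal usage sketch for the checker above, assuming libmv-style typedefs
// (Mat = Eigen::MatrixXd, Mat3 = Eigen::Matrix3d, Vec3 = Eigen::Vector3d): map a
// few homogeneous points through a known H and verify the result.
void ExampleHomographyCheck() {
  Mat3 H;
  H << 1.2, 0.0,  3.0,
       0.0, 0.8, -2.0,
       0.0, 0.0,  1.0;            // a simple affine homography
  Mat x1(3, 3);                   // three points in homogeneous coordinates, one per column
  x1 << 0, 1, 2,
        0, 1, 3,
        1, 1, 1;
  Mat x2(3, x1.cols());
  for (int i = 0; i < x1.cols(); ++i) {
    Vec3 p = H * x1.col(i);
    x2.col(i) = p / p(2);         // normalize so the third coordinate is 1
  }
  CheckHomography2DTransform(H, x1, x2);  // exact data, so the residual check should pass
}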
Example #24
inline
void
op_fliplr::apply_direct(Mat<eT>& out, const Mat<eT>& X)
  {
  arma_extra_debug_sigprint();
  
  const uword X_n_rows = X.n_rows;
  const uword X_n_cols = X.n_cols;
  
  const uword X_n_cols_m1 = X_n_cols - 1;
  
  if(&out != &X)
    {
    out.set_size(X_n_rows, X_n_cols);
    
    if(X_n_rows == 1)
      {
      const eT*   X_mem =   X.memptr();
            eT* out_mem = out.memptr();
      
      for(uword col=0; col < X_n_cols; ++col)
        {
        out_mem[X_n_cols_m1 - col] = X_mem[col];
        }
      }
    else
      {
      for(uword col=0; col < X_n_cols; ++col)
        {
        out.col(X_n_cols_m1 - col) = X.col(col);
        }
      }
    }
  else  // in-place operation
    {
    const uword N = X_n_cols / 2;
    
    if(X_n_rows == 1)
      {
      eT* out_mem = out.memptr();
      
      for(uword col=0; col < N; ++col)
        {
        std::swap(out_mem[X_n_cols_m1 - col], out_mem[col]);
        }
      }
    else
      {
      for(uword col=0; col < N; ++col)
        {
        out.swap_cols(X_n_cols_m1 - col, col);
        }
      }
    }
  }
Example #25
/**
 * Fits a plane to a 3D point cloud of data. The returned matrix is in the following format:
 * First column: plane normal
 * Next 4 columns: 4 corners of the plane that contains all the data
 */
Mat TableObjectDetector::fitPlane(const Mat depthWorld) {
    Mat P = getClosestPoints(depthWorld, 1.0);
    Mat Pmean(1, 3, CV_64F);
    Pmean.at<double>(0) = mean(P.col(0))[0];
    Pmean.at<double>(1) = mean(P.col(1))[0];
    Pmean.at<double>(2) = mean(P.col(2))[0];
        
    for (int i=0; i<P.rows; i++) {
        P.row(i) = P.row(i)-Pmean;
    }
    Mat PT; transpose(P, PT);
    Mat A = PT*P;
    // Find normal of LSQ plane
    SVD svd(A, SVD::FULL_UV);
    Mat norm; transpose(svd.vt.row(2), norm);
    
    double nx = norm.at<double>(0);
    double ny = norm.at<double>(1);
    double nz = norm.at<double>(2);   
    double rho = (Pmean.at<double>(0)*nx +
                  Pmean.at<double>(1)*ny +
                  Pmean.at<double>(2)*nz);

    // Put the normal in the plane matrix
    Mat plane(3, 5, CV_64F);
    norm.col(0).copyTo(plane.col(0));

    // Generate corners of the plane
    double xmin, ymin, xmax, ymax;
    xmin = -0.5;
    xmax = 0.5;
    ymin = -0.5;
    ymax = 0.1;
    
    plane.at<double>(0, 1) = xmin;
    plane.at<double>(0, 2) = xmax;
    plane.at<double>(0, 3) = xmax;
    plane.at<double>(0, 4) = xmin;
    plane.at<double>(1, 1) = ymin;
    plane.at<double>(1, 2) = ymin;
    plane.at<double>(1, 3) = ymax;
    plane.at<double>(1, 4) = ymax;
    double z1 = (rho-nx*xmin-ny*ymin)/nz;
    double z2 = (rho-nx*xmax-ny*ymin)/nz;
    double z3 = (rho-nx*xmax-ny*ymax)/nz;
    double z4 = (rho-nx*xmin-ny*ymax)/nz;
    plane.at<double>(2, 1) = z1;
    plane.at<double>(2, 2) = z2;
    plane.at<double>(2, 3) = z3;
    plane.at<double>(2, 4) = z4;

    return plane;
}
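// Usage sketch for the layout documented above (column 0 = plane normal,
// columns 1-4 = plane corners). The detector instance and depthWorld input are
// hypothetical stand-ins used only to illustrate reading the returned matrix.
void inspectPlane(TableObjectDetector& detector, const cv::Mat& depthWorld)
{
    cv::Mat plane = detector.fitPlane(depthWorld);

    cv::Mat normal = plane.col(0).t();              // 1x3 view of the plane normal
    std::cout << "normal: " << normal << std::endl;

    for (int c = 1; c <= 4; c++) {                  // the four corners spanning the data
        cv::Point3d corner(plane.at<double>(0, c),
                           plane.at<double>(1, c),
                           plane.at<double>(2, c));
        std::cout << "corner " << c << ": " << corner << std::endl;
    }
}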
Example #26
double VarRTM::PredictAUC(RTMC &m, Mat &z_bar) {
  VReal real,pre;
  for (int d = 0; d < held_out_net_.cols(); d++) {
    for (SpMatInIt it(held_out_net_, d); it; ++it) {
      double label = it.value();
      Vec pi = z_bar.col(d).cwiseProduct(z_bar.col(it.index()));
      double prob = Sigmoid(pi.dot(m.eta));
      real.push_back(label);
      pre.push_back(prob);
    }
  }
  return AUC(real,pre);
}
 IndMatchDecorator(const std::vector<IndMatch> & vec_matches,
   const Mat & leftFeat,
   const Mat & rightFeat)
   :_vec_matches(vec_matches)
 {
   for (size_t i = 0; i < vec_matches.size(); ++i) {
     const size_t I = vec_matches[i]._i;
     const size_t J = vec_matches[i]._j;
     _vecDecoredMatches.push_back(
       IndMatchDecoratorStruct(leftFeat.col(I)(0),leftFeat.col(I)(1),
       rightFeat.col(J)(0), rightFeat.col(J)(1), vec_matches[i]));
   }
 }
void	PM_type::bboxpred_input( const Mat &ds, const Mat &bs, Mat &A, Mat &x1, Mat &y1, Mat &x2, Mat &y2, Mat &w, Mat &h )
//
//% Construct input for the bbox predictor from detections
//% and filter bounding boxes.
//%   [A, x1, y1, x2, y2, w, h] = bboxpred_input(ds, bs)
//%
//%   If beta_x1 is a vector of learned regression coefficients for
//%   predicting the new location of the x1 component of a bounding box,
//%   the new x1 is predicted as:					   
//%
//%     dx1 = A*beta_x1;
//%     x1 = x1 + w*dx1;
//%
//%   Computing x2, y1, and y2 is similar.
//%
//% Return values
//%   A       Each row is a feature vector (predictor variables)
//%   x1      Original detection window coordinates
//%   y1        ...
//%   x2        ...
//%   y2        ...
//%   w       Widths of original detection windows
//%   h       Heights of original detection windows
//%
//% Arguments
//%   ds     Detection window coordinates
//%   bs     Coordinates for each filter placed in the detection
{
	// detection windows' coordinates
	x1 = ds.col(0);
	y1 = ds.col(1);
	x2 = ds.col(2);
	y2 = ds.col(3);
	// detection windows' widths and heights
	w = x2 - x1;
	h = y2 - y1;
	// detection windows' centers
	Mat	rx = x1 + w*0.5f;
	Mat	ry = y1 + h*0.5f;

	int		A_cols = (int)(bs.cols/4.f) * 2 + 1;
	A.create(bs.rows,A_cols,CV_32FC1);
	int		i = 0;
	for( int j=3; j<bs.cols; j+=4 ){
		// filters' centers
		Mat px = bs.col(j-3) + ( bs.col(j-1) - bs.col(j-3) )*0.5;
		Mat py = bs.col(j-2) + ( bs.col(j) - bs.col(j-2) )*0.5;
		A.col(i++) = ( px - rx ) / w;
		A.col(i++) = ( py - ry ) / h;
	}
	if( i!=A_cols-1 )
		throw runtime_error("");
	A.col(i) = Scalar(1);
}
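// Sketch of how these outputs feed the predictor described in the header
// comment (dx = A*beta, new coordinate = old coordinate + w*dx). The beta_*
// arguments are hypothetical column vectors of learned regression coefficients,
// one entry per column of A; they are not computed here.
void applyBBoxPrediction( const Mat &A, const Mat &x1, const Mat &y1, const Mat &x2, const Mat &y2,
	const Mat &w, const Mat &h,
	const Mat &beta_x1, const Mat &beta_y1, const Mat &beta_x2, const Mat &beta_y2,
	Mat &px1, Mat &py1, Mat &px2, Mat &py2 )
{
	// predicted offsets, one per detection window
	Mat dx1 = A * beta_x1, dy1 = A * beta_y1, dx2 = A * beta_x2, dy2 = A * beta_y2;
	// shift each coordinate by its offset scaled by the window width/height
	px1 = x1 + w.mul(dx1);
	py1 = y1 + h.mul(dy1);
	px2 = x2 + w.mul(dx2);
	py2 = y2 + h.mul(dy2);
}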
Example #29
void libfacerec::sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
    if(_indices.getMat().type() != CV_32SC1) {
    	string error_message = format("sortMatrixColumnsByIndices only works on integer indices! Expected: %d. Given: %d.", CV_32SC1, _indices.getMat().type());
    	CV_Error(CV_StsBadArg, error_message);
    }
    Mat src = _src.getMat();
    vector<int> indices = _indices.getMat();
    _dst.create(src.rows, src.cols, src.type());
    Mat dst = _dst.getMat();
    for(int idx = 0; idx < indices.size(); idx++) {
        Mat originalCol = src.col(indices[idx]);
        Mat sortedCol = dst.col(idx);
        originalCol.copyTo(sortedCol);
    }
}
// Check the properties of a fundamental matrix:
//
//   1. The determinant is 0 (rank deficient)
//   2. The epipolar constraint x2^T * F * x1 = 0 is satisfied to the given precision.
//
bool ExpectFundamentalProperties(const Mat3 &F,
                                 const Mat &ptsA,
                                 const Mat &ptsB,
                                 double precision) {
  bool bOk = true;
  bOk &= F.determinant() < precision;
  assert(ptsA.cols() == ptsB.cols());
  const Mat hptsA = ptsA.colwise().homogeneous();
  const Mat hptsB = ptsB.colwise().homogeneous();
  for (int i = 0; i < ptsA.cols(); ++i) {
    const double residual = hptsB.col(i).dot(F * hptsA.col(i));
    bOk &= residual < precision;
  }
  return bOk;
}
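// Minimal usage sketch, assuming libmv-style typedefs (Mat = Eigen::MatrixXd,
// Mat3 = Eigen::Matrix3d). A purely horizontal stereo shift corresponds to the
// fundamental matrix F = [t]_x with t = (1,0,0): it is rank deficient and the
// epipolar constraint holds for points displaced only along x.
void ExampleFundamentalCheck() {
  Mat3 F;
  F << 0, 0,  0,
       0, 0, -1,
       0, 1,  0;
  Mat ptsA(2, 3), ptsB(2, 3);
  ptsA << 0, 1, 2,
          0, 1, 3;
  ptsB = ptsA;
  ptsB.row(0).array() += 0.5;     // same horizontal disparity for every point
  const bool ok = ExpectFundamentalProperties(F, ptsA, ptsB, 1e-8);
  std::cout << (ok ? "F satisfies both properties" : "check failed") << std::endl;
}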