Code example #1
cv::Mat ConstraintsHelpers::compNewPos(cv::Mat lprevImu, cv::Mat lcurImu,
									cv::Mat lprevEnc, cv::Mat lcurEnc,
									cv::Mat lposOrigMapCenter,
									cv::Mat lmapCenterOrigGlobal,
									ros::NodeHandle &nh)
{
	Mat ret = Mat::eye(4, 4, CV_32FC1);
	if(!lposOrigMapCenter.empty() && !lmapCenterOrigGlobal.empty()){
		//cout << "compOrient(lcurImu) = " << compOrient(lcurImu) << endl;
		//cout << "lmapCenterGlobal = " << lmapCenterGlobal << endl;
		//cout << "Computing curPos" << endl;
		//cout << "encodersCur - encodersPrev = " << encodersCur - encodersPrev << endl;
		Mat trans = lmapCenterOrigGlobal.inv()*compTrans(compOrient(lprevImu), lcurEnc - lprevEnc, nh);
		//cout << "trans = " << trans << endl;
		//cout << "Computing curTrans" << endl;
		Mat curTrans = Mat(lposOrigMapCenter, Rect(3, 0, 1, 4)) + trans;
		//cout << "Computing curRot" << endl;

		Mat curRot = lmapCenterOrigGlobal.inv()*compOrient(lcurImu);
		//cout << "curRot = " << curRot << endl;

		curRot.copyTo(ret);
		curTrans.copyTo(Mat(ret, Rect(3, 0, 1, 4)));
	}
	return ret;
}
Code example #2
void CameraHandler::setIntrinsic(const cv::Mat &K)
{
  if (K.rows != 3 ||
      K.cols != 3) // K must be 3x3
    return;
  _cam_param.K = K;
  _cam_param.K_inv = K.inv();
}
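A minimal usage sketch (hypothetical intrinsic values; CameraHandler is assumed default-constructible):

// Hypothetical 3x3 pinhole intrinsics; any non-3x3 matrix is rejected by the guard above
CameraHandler handler;
cv::Mat K = (cv::Mat_<double>(3, 3) << 525.0,   0.0, 319.5,
                                         0.0, 525.0, 239.5,
                                         0.0,   0.0,   1.0);
handler.setIntrinsic(K); // stores K and caches K_inv = K.inv()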
Code example #3
File: Utils.cpp Project: 13983441921/opencv
// Computes the norm of the rotation error
double get_rotation_error(const cv::Mat &R_true, const cv::Mat &R)
{
  cv::Mat error_vec, error_mat;
  error_mat = R_true * R.inv();  // relative rotation; its axis-angle norm is the error angle
  cv::Rodrigues(error_mat, error_vec);

  return cv::norm(error_vec);
}
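A short usage sketch: build two rotations with cv::Rodrigues and measure the residual angle between them (values are illustrative):

cv::Mat rvec_true = (cv::Mat_<double>(3, 1) << 0.0, 0.0, CV_PI / 4.0);
cv::Mat rvec_est  = (cv::Mat_<double>(3, 1) << 0.0, 0.0, CV_PI / 4.0 + 0.01);
cv::Mat R_true, R_est;
cv::Rodrigues(rvec_true, R_true);
cv::Rodrigues(rvec_est, R_est);
double err = get_rotation_error(R_true, R_est); // ~0.01 rad: the angle of R_true * R_est^-1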
Code example #4
File: poseRT.cpp Project: WalkingMachine/sara_commun
PoseRT PoseRT::obj2cam(const cv::Mat &Rt_obj2cam)
{
  Mat projectiveMatrix_obj = getProjectiveMatrix();
  Mat projectiveMatrix_cam = Rt_obj2cam * projectiveMatrix_obj * Rt_obj2cam.inv(DECOMP_SVD);

  PoseRT pose_cam(projectiveMatrix_cam);
  return pose_cam;
}
Code example #5
File: homography.cpp Project: ipab-rad/netcam_stream
void calc_homography(Mat& image) {
    vector<Point2f> real_corners;
    calcChessboardCorners2D(BOARD_SIZE, SQUARE_SIZE, real_corners);

    vector<Point2f> pointBuf;
    bool found = findChessboardCorners(image, BOARD_SIZE, pointBuf,
                                       CALIB_CB_ADAPTIVE_THRESH |
                                       CALIB_CB_FAST_CHECK |
                                       CALIB_CB_FILTER_QUADS);
    // CALIB_CB_NORMALIZE_IMAGE

    // if (found)
    // {
    //     // May lead to drifting corners
    //     Mat viewGray;
    //     cvtColor(image, viewGray, CV_BGR2GRAY);
    //     cornerSubPix(viewGray, pointBuf, Size(11,11),
    //           Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));
    // }
    drawChessboardCorners(image, BOARD_SIZE, Mat(pointBuf), found);
    ROS_INFO("[CALC_HOMOGRAPHY] Found board: %d", found);

    if (found) {
        ROS_DEBUG_STREAM("Number of calc. points: " << pointBuf.size() << std::endl <<
                         pointBuf << std::endl);
        ROS_DEBUG_STREAM("Number of found points: " << real_corners.size() << std::endl
                         << real_corners << std::endl);

        H = cv::findHomography(pointBuf, real_corners);

        ROS_INFO_STREAM("[CALC_HOMOGRAPHY] H:\n" << H);

        // Save to file
        std::string path = ros::package::getPath("netcam_stream");
        FileStorage fs(path + "/calibration/homography" + to_str(CAMERA_ID) + ".yaml",
                       FileStorage::WRITE);
        fs << "H_" + to_str(CAMERA_ID) << H;
        fs.release();

        // Output to terminal in a formatted way:
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                std::cout << "H" << i << j << " " << H.at<double>(i, j) << std::endl;

        ROS_INFO("Matrix Inverse:");
        H_inv = H.inv();
        // Output to terminal in a formatted way:
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j)
                std::cout << "H" << i << j << " " << H_inv.at<double>(i, j) << std::endl;
    }

    imshow("Calc Board", image);
    waitKey(10);

}
Code example #6
////////////////////////////////////////////////////////////////////////////////////////////////////////
//compare covariance matrices (sum of squared log-eigenvalues of a.inv()*b)
float compareCov(cv::Mat a, cv::Mat b)
{
	cv::Mat eigen_values;
	cv::eigen(a.inv() * b, eigen_values); // note: cv::eigen assumes its input is symmetric
	cv::Mat ln;
	cv::Mat ln2;
	cv::log(eigen_values, ln);
	cv::multiply(ln,ln,ln2);
	cv::Scalar temp = cv::sum(ln2);
	return temp[0];
}
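A hedged usage sketch: since cv::eigen expects a symmetric input, this example uses scaled identity covariances, which keep a.inv() * b symmetric:

cv::Mat a = (cv::Mat_<float>(2, 2) << 2.f, 0.f, 0.f, 2.f);
cv::Mat b = (cv::Mat_<float>(2, 2) << 4.f, 0.f, 0.f, 4.f);
float d = compareCov(a, b); // a.inv()*b = 2*I, so d = 2 * ln(2)^2 ~= 0.96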
Code example #7
double Gauss::probability(const Vec3f &mean, const cv::Mat &covmat, Vec3f color){
    double mul = 0;
    Mat miu(1,3,CV_64FC1);
    Mat ans(1,1,CV_64FC1);
    miu.at<double>(0,0) = color[0] - mean[0];
    miu.at<double>(0,1) = color[1] - mean[1];
    miu.at<double>(0,2) = color[2] - mean[2];
    ans = miu * covmat.inv() * miu.t();
    mul = (-0.5)*ans.at<double>(0,0);
    return 1.0f/sqrt(determinant(covmat))*exp(mul); // note: omits the (2*pi)^(3/2) normalization factor
}
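A usage sketch under stated assumptions (Gauss is default-constructible; covmat must be an invertible 3x3 CV_64F matrix):

Gauss g;
cv::Vec3f mean(128.f, 128.f, 128.f);
cv::Mat cov = cv::Mat::eye(3, 3, CV_64FC1) * 25.0; // isotropic, sigma^2 = 25
double p = g.probability(mean, cov, cv::Vec3f(130.f, 126.f, 128.f));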
Code example #8
 double DARP::gaussian(const cv::Mat & x,
         const cv::Mat & mu,
         const cv::Mat & cov)
 {
     cv::Mat diff=x-mu;
     double det=determinant(cov);
     double aux=(0.5/M_PI)*(1.0/sqrt(det)); // 1/(2*pi*sqrt(det)): normalization for a 2D Gaussian
     
     cv::Mat exponent=-0.5*diff.t()*cov.inv()*diff;
     
     return aux*exp(exponent.at<double>(0));
 }
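The (0.5/M_PI) factor is the bivariate normalization 1/(2*pi*sqrt(det)), so a sketch (assuming a DARP instance named darp) passes 2x1 column vectors and a 2x2 covariance:

cv::Mat x   = (cv::Mat_<double>(2, 1) << 1.0, 2.0);
cv::Mat mu  = (cv::Mat_<double>(2, 1) << 0.0, 0.0);
cv::Mat cov = cv::Mat::eye(2, 2, CV_64FC1) * 4.0;
double p = darp.gaussian(x, mu, cov); // diff.t()*cov.inv()*diff is 1x1, read via at<double>(0)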
Code example #9
    unsigned long RenderPose(cv::Mat& image, cv::Mat& rot_3x3_CfromO, cv::Mat& trans_3x1_CfromO)
    {
        cv::Mat object_center(3, 1, CV_64FC1);
        double* p_object_center = object_center.ptr<double>(0);
        p_object_center[0] = 0;
        p_object_center[1] = 0;
        p_object_center[2] = 0;

        cv::Mat rot_inv = rot_3x3_CfromO.inv(); // inverse rotation (not used below)

        // Compute coordinate axis for visualization
        cv::Mat pt_axis(4, 3, CV_64FC1);
        double* p_pt_axis = pt_axis.ptr<double>(0);
        p_pt_axis[0] = 0 + p_object_center[0];
        p_pt_axis[1] = 0 + p_object_center[1];
        p_pt_axis[2] = 0 + p_object_center[2];
        p_pt_axis = pt_axis.ptr<double>(1);
        p_pt_axis[0] = 0.1 + p_object_center[0];
        p_pt_axis[1] = 0 + p_object_center[1];
        p_pt_axis[2] = 0 + p_object_center[2];
        p_pt_axis = pt_axis.ptr<double>(2);
        p_pt_axis[0] = 0 + p_object_center[0];
        p_pt_axis[1] = 0.1 + p_object_center[1];
        p_pt_axis[2] = 0 + p_object_center[2];
        p_pt_axis = pt_axis.ptr<double>(3);
        p_pt_axis[0] = 0 + p_object_center[0];
        p_pt_axis[1] = 0 + p_object_center[1];
        p_pt_axis[2] = 0.1 + p_object_center[2];

        // Transform data points
        std::vector<cv::Point> vec_2d(4, cv::Point());
        for (int i=0; i<4; i++)
        {
            cv::Mat vec_3d = pt_axis.row(i).clone();
            vec_3d = vec_3d.t();
            vec_3d = rot_3x3_CfromO*vec_3d;
            vec_3d += trans_3x1_CfromO;
            double* p_vec_3d = vec_3d.ptr<double>(0);

            ReprojectXYZ(p_vec_3d[0], p_vec_3d[1], p_vec_3d[2],
                         vec_2d[i].x , vec_2d[i].y);
        }

        // Render results
        int line_width = 1;
        cv::line(image, vec_2d[0], vec_2d[1], cv::Scalar(0, 0, 255), line_width);
        cv::line(image, vec_2d[0], vec_2d[2], cv::Scalar(0, 255, 0), line_width);
        cv::line(image, vec_2d[0], vec_2d[3], cv::Scalar(255, 0, 0), line_width);

        return ipa_Utils::RET_OK;
    }
Code example #10
File: poseRT.cpp Project: WalkingMachine/sara_commun
void PoseRT::computeDistance(const PoseRT &pose1, const PoseRT &pose2, double &rotationDistance, double &translationDistance, const cv::Mat &Rt_obj2cam)
{
  Mat Rt_diff_cam = pose1.getProjectiveMatrix() * pose2.getProjectiveMatrix().inv(DECOMP_SVD);
  Mat Rt_diff_obj = Rt_diff_cam;
  if (!Rt_obj2cam.empty())
  {
    Rt_diff_obj = Rt_obj2cam.inv(DECOMP_SVD) * Rt_diff_cam * Rt_obj2cam;
  }

  Mat rvec_diff_obj, tvec_diff_obj;
  getRvecTvec(Rt_diff_obj, rvec_diff_obj, tvec_diff_obj);
  rotationDistance = norm(rvec_diff_obj);
  translationDistance = norm(tvec_diff_obj);
}
Code example #11
File: ApplicationContext.cpp Project: yzbx/opencv-qt
ApplicationContext::ApplicationContext(const cv::Mat& mask, const cv::Mat& homography, float fps, float pixelToMeters, DrawingFlags drawingFlags, BlobTrackerAlgorithmParams algoParams, bool recordBGS, InputFrameProviderIface* frameProvider)
: mMask(mask)
, mHomography(homography)
, mInverseHomography(homography.inv())
, mFPS(fps)
, mSecondByFrame(1/fps)
, mMetersByPixel(1/pixelToMeters)
, mPixelToMeters(pixelToMeters)
, mTracker(nullptr)
, mObjectDatabaseManager(nullptr)
, mDrawingFlags(drawingFlags)
, mCurrentId(0)
, mTrackerParams(algoParams)
, mRecordingBGS(recordBGS)
, mFrameProvider(frameProvider)
, mBGSPath("bgs/")
{
	
}
Code example #12
cv::Mat applyS(cv::Mat H, double u1, cv::Mat thetaMatrix, cv::Mat p, bool inverse = false) {
//	std::cout << "\n" << "[applyS] Start" << "\n";
	p = p / p.at<double>(2, 0);

	cv::Mat transformedPoint = cv::Mat::zeros(2, 1, CV_64F);
	transformedPoint.at<double>(0, 0) = p.at<double>(0, 0);
	transformedPoint.at<double>(1, 0) = p.at<double>(1, 0);
	
	transformedPoint = thetaMatrix.inv() * transformedPoint;
//	std::cout << "\n" << "[applyS] Changing point coords" << "\n";
	
	cv::Mat leftyMatrix;
	double leftyMatrixTmp[2][2] = {
		{H.at<double>(1, 1), H.at<double>(0, 1)},
		{-H.at<double>(0, 1), H.at<double>(1, 1)}
	};
	
	cv::Mat(2, 2, CV_64F, &leftyMatrixTmp).copyTo(leftyMatrix);
	
	cv::Mat rightMatrix;
	double rightMatrixTmp[2][1] = {
		{ (H.at<double>(0, 0) - H.at<double>(1, 1)) * u1 + H.at<double>(0, 2) },
		{ (H.at<double>(1, 0) + H.at<double>(0, 1)) * u1 + H.at<double>(1, 2) }
	};
	cv::Mat(2, 1, CV_64F, &rightMatrixTmp).copyTo(rightMatrix);
	
	double multiplier = 1 / (1 + H.at<double>(2, 0) * u1);
	
	if(inverse) {
		leftyMatrix.at<double>(1,1) *= 0.7673;
		leftyMatrix = leftyMatrix.inv();
		rightMatrix = rightMatrix * -1;
		
		multiplier = (1 / multiplier);
	}
	
	
	cv::Mat output = multiplier * ((leftyMatrix * transformedPoint) + rightMatrix);
	output = thetaMatrix * output;
	
	return output;
}
Code example #13
File: main.cpp Project: andrewjchen/ethzasl_apriltag
int main( int argc, char **argv )
{
	if(argc<4) {
		usage(argc,argv);
		return 1;
	}
	is = helper::createImageSource(argv[1]);
	if(is.empty() || is->done()) {
		loglne("[main] createImageSource failed or no valid imagesource!");
		return -1;
	}
	is->pause(false);
	is->reportInfo();
	is->get(frame);
	imgW = frame.cols; imgH = frame.rows;
	videoFromWebcam = false;
	if( is->classname() == "ImageSource_Camera" ) {
		videoFromWebcam = true;
	}

	loglni("[main] loading K matrix from: "<<argv[2]);
	double K[9];
	std::ifstream kfile(argv[2]);
	for(int i=0; i<9; ++i) kfile >> K[i];
	tracker.loadK(K);
	loglni("[main] K matrix loaded:");
	loglni(helper::PrintMat<>(3,3,K));

	loglni("[main] load template image from: "<<argv[3]);
	tracker.loadTemplate(argv[3]);

	//////////////// TagDetector /////////////////////////////////////////
	int tagid = 0; //default tag16h5
	if(argc>5) tagid = atoi(argv[5]);
	tagFamily = TagFamilyFactory::create(tagid);
	if(tagFamily.empty()) {
		loglne("[main] create TagFamily fail!");
		return -1;
	}
	detector = new TagDetector(tagFamily);
	if(detector.empty()) {
		loglne("[main] create TagDetector fail!");
		return -1;
	}
	Mat temp = imread(argv[3]);
	if( findAprilTag(temp, 0, HI, true) ) {
		namedWindow("template");
		imshow("template", temp);
		iHI = HI.inv();
	} else {
		loglne("[main error] detector did not find any apriltag on template image!");
		return -1;
	}

	//////////////// OSG ////////////////////////////////////////////////
	osg::ref_ptr<osg::Group> root = new osg::Group;

	string scenefilename = (argc>4?argv[4]:("cow.osg"));
	osg::ref_ptr<osg::Node> cow = osgDB::readNodeFile(scenefilename);
	arscene = new helper::ARSceneRoot;
	helper::FixMat<3,double>::Type matK = helper::FixMat<3,double>::ConvertType(K);
	CV2CG::cv2cg(matK,0.01,500,imgW,imgH,*arscene);
	manipMat = new osg::MatrixTransform(osg::Matrix::identity());
	manipMat->addChild(cow);
	manipMat->getOrCreateStateSet()->setMode(GL_NORMALIZE, osg::StateAttribute::ON);
	arscene->addChild(manipMat);

	osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
	helper::cvmat2osgimage(frame,backgroundImage);
	arvideo = new helper::ARVideoBackground(backgroundImage);
	root->setUpdateCallback(new ARUpdateCallback);

	root->addChild(arvideo);
	root->addChild(arscene);

	viewer.setSceneData(root);
	viewer.addEventHandler(new osgViewer::StatsHandler);
	viewer.addEventHandler(new osgViewer::WindowSizeHandler);
	viewer.addEventHandler(new QuitHandler);

	//start tracking thread
	OpenThreads::Thread::Init();
	TrackThread* thr = new TrackThread;
	thr->start();

	viewer.run();

	delete thr;
	loglni("[main] DONE...exit!");
	return 0;
}
Code example #14
cv::Mat halfProjectiveWarp(cv::Mat H, cv::Mat image0, int u1, cv::Mat thetaMatrixTmp, bool left) {
	std::cout << "\n" << "[halfProjectiveWarp] Start" << "\n";
	cv::Mat thetaMatrix = cv::Mat::zeros(3, 3, CV_64F);
	thetaMatrix.at<double>(0, 0) = thetaMatrixTmp.at<double>(0, 0);
	thetaMatrix.at<double>(1, 0) = thetaMatrixTmp.at<double>(1, 0);
	thetaMatrix.at<double>(0, 1) = thetaMatrixTmp.at<double>(0, 1);
	thetaMatrix.at<double>(1, 1) = thetaMatrixTmp.at<double>(1, 1);
	
	thetaMatrix.at<double>(2, 0) = 0;
	thetaMatrix.at<double>(2, 1) = 0;
	thetaMatrix.at<double>(2, 2) = 1;
	thetaMatrix.at<double>(0, 2) = 0;
	thetaMatrix.at<double>(1, 2) = 0;
	
	cv::Size imageSize = image0.size();
	cv::Mat output;
	
	std::cout << "\n" << "[halfProjectiveWarp] Calculating bounds" << "\n";
	cv::Mat p00 = cv::Mat::zeros(3, 1, CV_64F);
	p00.at<double>(2, 0) = 1;
	p00 = H * p00;
	
	cv::Mat p01 = cv::Mat::zeros(3, 1, CV_64F);
	p01.at<double>(1, 0) = imageSize.height - 1;
	p01.at<double>(2, 0) = 1;
	p01 = H * p01;
	
	cv::Mat p10 = cv::Mat::zeros(3, 1, CV_64F);
	p10.at<double>(0, 0) = imageSize.width - 1;
	p10.at<double>(2, 0) = 1;
	p10 = applyS(H, u1, thetaMatrixTmp, p10);

	cv::Mat p11 = cv::Mat::zeros(3, 1, CV_64F);
	p11.at<double>(0, 0) = imageSize.width - 1;
	p11.at<double>(1, 0) = imageSize.height - 1;
	p11.at<double>(2, 0) = 1;
	p11 = applyS(H, u1, thetaMatrixTmp, p11);
	
	std::cout << "\n" <<  "[halfProjectiveWarp] Calculating new image size" << "\n";
	int height = std::max(p01.at<double>(1, 0), p11.at<double>(1, 0)) - std::min(p00.at<double>(1, 0), p10.at<double>(1, 0));
	int width = std::max(p10.at<double>(0, 0), p11.at<double>(0, 0)) - std::min(p00.at<double>(0, 0), p10.at<double>(0, 0));

	int minX = std::min(p00.at<double>(0, 0), p10.at<double>(0, 0));
	int maxX = std::max(p10.at<double>(0, 0), p11.at<double>(0, 0));
	
	int minY = std::min(p00.at<double>(1, 0), p10.at<double>(1, 0));
	int maxY = std::max(p01.at<double>(1, 0), p11.at<double>(1, 0));
	
	cv::Size newSize = cv::Size(width, height);
	std::cout << "\n" <<  newSize << "\n";
	output = cv::Mat::zeros(newSize, CV_8UC3);
	
	cv::Mat pointTmp = cv::Mat::zeros(2, 1, CV_64F);
	cv::Mat pointTmp3d = cv::Mat::zeros(3, 1, CV_64F);
	pointTmp3d.at<double>(2, 0) = 1;

	for (int x = 0; x < width; x++) {
		for (int y = 0; y < height; y++) {
			pointTmp.at<double>(0, 0) = x + minX;
			pointTmp.at<double>(1, 0) = y + minY;
			
			pointTmp = thetaMatrixTmp.inv() * pointTmp; // 2x2 inverse: the 3x3 thetaMatrix cannot multiply a 2x1 point
			
			pointTmp3d.at<double>(0, 0) = pointTmp.at<double>(0, 0);
			pointTmp3d.at<double>(1, 0) = pointTmp.at<double>(1, 0);

			cv::Mat correspondingPoint;
			if(pointTmp.at<double>(0, 0) < u1) {
				// Apply H
				correspondingPoint = H.inv() * pointTmp3d;
				correspondingPoint = correspondingPoint / correspondingPoint.at<double>(2, 0);
				correspondingPoint = thetaMatrix * correspondingPoint;
			}
			else {
				// Apply S inverse
				correspondingPoint = applyS(H, u1, thetaMatrixTmp, thetaMatrix * pointTmp3d, true);
			}
			
			if(correspondingPoint.at<double>(0, 0) > 0 && correspondingPoint.at<double>(0, 0) < image0.size().width &&
			   correspondingPoint.at<double>(1, 0) > 0 && correspondingPoint.at<double>(1, 0) < image0.size().height) {
				output.at<cv::Vec3b>(y, x) = image0.at<cv::Vec3b>(correspondingPoint.at<double>(1, 0), correspondingPoint.at<double>(0, 0));
			}
		}
	}
	
//	for (int x = 0; x < image0.size().width; x++) {
//		for (int y = 0; y < image0.size().height; y++) {
//			pointTmp.at<double>(0, 0) = x;
//			pointTmp.at<double>(1, 0) = y;
//			
//			pointTmp = thetaMatrix.inv() * pointTmp;
//			
//			pointTmp3d.at<double>(0, 0) = pointTmp.at<double>(0, 0);
//			pointTmp3d.at<double>(1, 0) = pointTmp.at<double>(1, 0);
//			
//			cv::Mat correspondingPoint = applyS(H, u1, thetaMatrixTmp, pointTmp3d);
//			correspondingPoint.at<double>(0, 0) += minX;
//			correspondingPoint.at<double>(1, 0) += minY;
//
//			cv::Mat correspondingPointUV = thetaMatrix.inv() * correspondingPoint;
//			
//			if(correspondingPointUV.at<double>(0, 0) >= u1) {
//				
//				if(correspondingPoint.at<double>(0, 0) > 0 && correspondingPoint.at<double>(1, 0) > 0) {
//					output.at<cv::Vec3b>(correspondingPoint.at<double>(1, 0), correspondingPoint.at<double>(0, 0)) = image0.at<cv::Vec3b>(y, x);
//				}
//			}
//		}
//	}
	
	return output;
}
Code example #15
File: lineslam.cpp Project: caomw/LineSLAM
void Node::extractLineDepth(const cv::Mat& depth_float, const cv::Mat& K, 
	double ratio_of_collinear_pts, double line_3d_len_thres_m, double depth_scaling)
	// extract the 3d info of a frame line, if available, from the depth image
	// input: depth, lines
	// output: lines with 3d info
{		
	int depth_CVMatDepth = depth_float.depth();
	for(int i=0; i<lines.size(); ++i)	{ // each line
		double len = cv::norm(lines[i].p - lines[i].q);		
		vector<cv::Point3d> pts3d;
		// iterate through a line
   		double numSmp = min(max(len/sysPara.line_sample_interval, (double)sysPara.line_sample_min_num), (double)sysPara.line_sample_max_num);  // smaller numSmp for fast speed, larger numSmp should be more accurate
		pts3d.reserve(numSmp);
		for(int j=0; j<=numSmp; ++j) {
			// use nearest neighbor to query depth value
			// assuming position (0,0) is the top-left corner of image, then the
			// top-left pixel's center would be (0.5,0.5)
			cv::Point2d pt = lines[i].p * (1-j/numSmp) + lines[i].q * (j/numSmp);
			if(pt.x<0 || pt.y<0 || pt.x >= depth_float.cols || pt.y >= depth_float.rows ) continue;
			int row, col; // nearest pixel for pt
			if((floor(pt.x) == pt.x) && (floor(pt.y) == pt.y)) {// boundary issue
				col = max(int(pt.x-1),0);
				row = max(int(pt.y-1),0);
			} else {
				col = int(pt.x);
				row = int(pt.y);
			}
			double zval = -1;
			double depval = -1;
			if(depth_CVMatDepth == CV_32F)
				depval = depth_float.at<float>(row,col);
			else if (depth_CVMatDepth == CV_64F)
				depval = depth_float.at<double>(row,col);
			else {
				cerr<<"Node::extractLineDepth: depth image matrix type is not float/double\n";
				exit(0);
			}
			if(depval >= EPS) { // valid depth reading
				zval = depval/depth_scaling; // in meter, z-value
			}

			// back-project the sample to a 3d point
			if (zval > 0 ) {
				cv::Point2d xy3d = mat2cvpt(K.inv()*cvpt2mat(pt))*zval;	
				pts3d.push_back(cv::Point3d(xy3d.x, xy3d.y, zval));
			}
		}
		//if (pts3d.size() < max(10.0,len*ratio_of_collinear_pts))
		if (pts3d.size() < max(5.0, numSmp *ratio_of_collinear_pts))
			continue;

		RandomLine3d tmpLine;		

#ifdef EXTRACTLINE_USE_MAHDIST
		vector<RandomPoint3d> rndpts3d;
		rndpts3d.reserve(pts3d.size());
		// compute uncertainty of 3d points
		for(int j=0; j<pts3d.size();++j) {
			rndpts3d.push_back(compPt3dCov(pts3d[j], K, asynch_time_diff_sec_));
		}
		// using ransac to extract a 3d line from 3d pts
		tmpLine = extract3dline_mahdist(rndpts3d);
		
#else
		tmpLine = extract3dline(pts3d);
#endif
		if(tmpLine.pts.size()/numSmp > ratio_of_collinear_pts	&&
			cv::norm(tmpLine.A - tmpLine.B) > line_3d_len_thres_m) {
				MLEstimateLine3d(tmpLine, sysPara.line3d_mle_iter_num);//expensive, postpone to after line matching
				lines[i].haveDepth = true;
				lines[i].line3d = tmpLine;
				
		}		
		lines[i].line3d.pts.clear();
	}	

}
Code example #16
File: essential_mat.cpp Project: caomw/LineSLAM
void essn_ransac (cv::Mat* pts1, cv::Mat* pts2, vector<cv::Mat>& bestEs, cv::Mat K, 
					vector<cv::Mat>& inlierMasks, int imsize, bool usePrior, cv::Mat t_prior)
	// running 5-point algorithm
	// re-estimation at the end
	// Input: point pairs normalized by camera matrix K
	// output: candidate essential matrices and their inlier masks

{
	bestEs.clear();
	inlierMasks.clear();
	int maxSize;
	int trialNum = 5;
	int maxIterNum = 500, maxIterNum0 = 500;
	double confidence = 0.999;
	double threshDist = 1;		//  in image units 

	vector<int> numInlier_E; // num of inliers for each E 
	vector<double> resds;
	for (int trial = 0; trial < trialNum;  ++trial) {		
		cv::Mat bestMss1, bestMss2;
		int N = pts1->cols;		
		cv::Mat Kpts1 = K*(*pts1);
		cv::Mat Kpts2 = K*(*pts2);
		vector<int> rnd;
		for (int i=0; i<N; ++i)	rnd.push_back(i);
		cv::Mat mss1(3,5,CV_64F), mss2(3,5,CV_64F);	
		vector<int> maxInlierSet;
		cv::Mat bestE;
		double best_resd;
		int iter = 0;

		while (iter < maxIterNum) {
			++iter;
			// --- choose minimal solution set: mss1<->mss2
			random_shuffle(rnd.begin(),rnd.end());
			for (int i=0; i<mss1.cols; ++i) {
				pts1->col(rnd[i]).copyTo(mss1.col(i));
				pts2->col(rnd[i]).copyTo(mss2.col(i));
				// TODO: guard against degenerate configurations
			}		
			// compute minimal solution by 5-point
			vector<cv::Mat> Es;   // multiple results from 5point alg.		
			fivepoint_stewnister(mss1, mss2, Es);

			// find consensus set
			vector<int> curInlierSet;
			for (int k=0; k < Es.size(); ++k) {
				curInlierSet.clear();
				bool consistent_with_prior = true;
				if(usePrior) { // use prior translation vector to filter
					cv::Mat Ra, Rb, t1;
					decEssential (&Es[k], &Ra, &Rb, &t1);
					double angleThresh = 40; // degree
					if (abs(t1.dot(t_prior)/cv::norm(t1)/cv::norm(t_prior)) < cos(angleThresh*PI/180))
						consistent_with_prior = false;
				}
				if(!consistent_with_prior)	continue;

				// check if already found
				bool existE = false;
				for(int kk=0; kk < bestEs.size(); ++ kk) {
					if(isEssnMatSimilar(bestEs[kk], Es[k])) {
						existE = true;
						break;
					}
				}
				if(existE) continue;
				double resd = 0;
				cv::Mat F = K.t().inv() * Es[k] * K.inv();
				for (int i=0; i<N; ++i) {				
					double dist_sq = fund_samperr(Kpts1.col(i), Kpts2.col(i),F);		
					if (dist_sq < threshDist * threshDist) {
						curInlierSet.push_back(i);
						resd += dist_sq;
					}
				}
				resd = resd/curInlierSet.size();
				if(curInlierSet.size() > maxInlierSet.size()) {
					maxInlierSet = curInlierSet;
					bestE = Es[k];
					best_resd = resd;
				}
			}
			// re-compute maximum iteration number: maxIterNum
			if(maxInlierSet.size() > 0) {
				maxIterNum = min(maxIterNum0,
					(int)abs(log(1-confidence)/log(1-pow(double(maxInlierSet.size())/N,5.0))));
			}
		}
		if (maxInlierSet.size()<mss1.cols) {
			continue;
		}
		
		cv::Mat tmpE = bestE.clone();	
		while (true) {		
			vector<cv::Mat> Es;
			vector<int> maxInlierSet_k;
			cv::Mat bestE_k;				
			// re-estimate by optimization
			cv::Mat inliers1(3,maxInlierSet.size(),CV_64F),
				inliers2(3,maxInlierSet.size(),CV_64F);
			for (int i=0; i<maxInlierSet.size(); ++i) {
				pts1->col(maxInlierSet[i]).copyTo(inliers1.col(i));
				pts2->col(maxInlierSet[i]).copyTo(inliers2.col(i));
			}				
		//	optimize_E_g2o(inliers1, inliers2, K, &tmpE);
			optimizeEmat (inliers1, inliers2, K, &tmpE);			
			vector<int> curInlierSet;
			double resd = 0;
			cv::Mat F = K.t().inv() * tmpE * K.inv();
			for (int i=0; i<N; ++i) {
				double dist_sq = fund_samperr(Kpts1.col(i), Kpts2.col(i), F);
				if (dist_sq < threshDist * threshDist) {
					curInlierSet.push_back(i);
					resd += dist_sq;
				}
			}
			resd = resd/curInlierSet.size();
			if(curInlierSet.size() > maxInlierSet.size())  {
				maxInlierSet = curInlierSet;
				bestE = tmpE;
				best_resd = resd;				
			} else
				break;
		}
		cv::Mat inliers1(3,maxInlierSet.size(),CV_64F),
			inliers2(3,maxInlierSet.size(),CV_64F);
		for (int i=0; i<maxInlierSet.size(); ++i) {
			pts1->col(maxInlierSet[i]).copyTo(inliers1.col(i));
			pts2->col(maxInlierSet[i]).copyTo(inliers2.col(i));
		}	
	
		cv::Mat inlierMask = cv::Mat::zeros(N,1,CV_8U);
		for (int i=0; i<maxInlierSet.size(); ++i)
			inlierMask.at<uchar>(maxInlierSet[i]) = 1; 

		if(bestEs.empty()) {
			bestEs.push_back(bestE);
			maxSize = maxInlierSet.size();
			inlierMasks.push_back(inlierMask);
			numInlier_E.push_back(maxInlierSet.size());
			resds.push_back(best_resd);
		} else {
			bool existE = false;
				for(int k=0; k < bestEs.size(); ++ k) {
					if(isEssnMatSimilar(bestEs[k], bestE)) {
						existE = true;
						break;
					}
				}
			if (!existE && maxInlierSet.size() > 0.9 * maxSize 
				&& abs((int)maxInlierSet.size()-maxSize) <= 15) {
				bestEs.push_back(bestE);
				maxSize = max((int)maxInlierSet.size(), maxSize);
				inlierMasks.push_back(inlierMask);
				numInlier_E.push_back(maxInlierSet.size());
				resds.push_back(best_resd);
			}
		}
		if(bestEs.size() >= 4) break;

		cv::Mat Ra, Rb, t;
		decEssential (&bestEs.back(), &Ra, &Rb, &t);
		if(rotateAngleDeg(Ra) < 3 && rotateAngleDeg(Rb) < 3) {
			trialNum = 2; 
		//	cout<<rotateAngleDeg(Ra)<<'\t'<<rotateAngleDeg(Rb)<<endl;
		}
	}

	for(int i=0; i<bestEs.size(); ++i) {
		cv::Mat Ra, Rb, t;
		decEssential (&bestEs[i], &Ra, &Rb, &t);
//		cout<<t;
//		cout<<numInlier_E[i]<<", res="<<resds[i]<<endl;
	}
}
Code example #17
File: rgbd_util.cpp Project: HuJianbing/rgbdtools
void buildRegisteredDepthImage(
    const cv::Mat& intr_rect_ir,
    const cv::Mat& intr_rect_rgb,
    const cv::Mat& ir2rgb,
    const cv::Mat& depth_img_rect,
    cv::Mat& depth_img_rect_reg)
{
    int w = depth_img_rect.cols;
    int h = depth_img_rect.rows;

    depth_img_rect_reg = cv::Mat::zeros(h, w, CV_16UC1);

    cv::Mat intr_rect_ir_inv = intr_rect_ir.inv();

    // Eigen intr_rect_rgb (3x3)
    Eigen::Matrix<double, 3, 3> intr_rect_rgb_eigen;
    for (int u = 0; u < 3; ++u)
        for (int v = 0; v < 3; ++v)
            intr_rect_rgb_eigen(v,u) =  intr_rect_rgb.at<double>(v, u);

    // Eigen ir2rgb_eigen (3x4)
    Eigen::Matrix<double, 3, 4> ir2rgb_eigen;
    for (int u = 0; u < 4; ++u)
        for (int v = 0; v < 3; ++v)
            ir2rgb_eigen(v,u) =  ir2rgb.at<double>(v, u);

    // Eigen intr_rect_ir_inv (4x4)
    Eigen::Matrix4d intr_rect_ir_inv_eigen;
    for (int v = 0; v < 3; ++v)
        for (int u = 0; u < 3; ++u)
            intr_rect_ir_inv_eigen(v,u) = intr_rect_ir_inv.at<double>(v,u);

    intr_rect_ir_inv_eigen(0, 3) = 0;
    intr_rect_ir_inv_eigen(1, 3) = 0;
    intr_rect_ir_inv_eigen(2, 3) = 0;
    intr_rect_ir_inv_eigen(3, 0) = 0;
    intr_rect_ir_inv_eigen(3, 1) = 0;
    intr_rect_ir_inv_eigen(3, 2) = 0;
    intr_rect_ir_inv_eigen(3, 3) = 1;

    // multiply into single (3x4) matrix
    Eigen::Matrix<double, 3, 4> H_eigen =
        intr_rect_rgb_eigen * (ir2rgb_eigen * intr_rect_ir_inv_eigen);

    // *** reproject

    Eigen::Vector3d p_rgb;
    Eigen::Vector4d p_depth;

    for (int v = 0; v < h; ++v)
        for (int u = 0; u < w; ++u)
        {
            uint16_t z = depth_img_rect.at<uint16_t>(v,u);

            if (z != 0)
            {
                p_depth(0,0) = u * z;
                p_depth(1,0) = v * z;
                p_depth(2,0) = z;
                p_depth(3,0) = 1.0;
                p_rgb = H_eigen * p_depth;

                double px = p_rgb(0,0);
                double py = p_rgb(1,0);
                double pz = p_rgb(2,0);

                int qu = (int)(px / pz);
                int qv = (int)(py / pz);

                // skip outside of image
                if (qu < 0 || qu >= w || qv < 0 || qv >= h) continue;

                uint16_t& val = depth_img_rect_reg.at<uint16_t>(qv, qu);

                // z buffering: keep the nearest depth
                if (val == 0 || val > pz) val = (uint16_t)pz;
            }
        }
}
Code example #18
File: Homo1.cpp Project: XavierCao/OpenCVtest
bool ParallaxErrorAnalysis_GradientPatch(const cv::Mat& img1,const shape& img1_shp, const cv::Mat& img2, const shape& img2_shp, const cv::Mat& img2_original,
        const cv::Mat& H, const cv::Size2i& img_size ,double& res_error)
{
    vector<cv::Point2f> overlap_points;


    for(int r = 0 ; r < img_size.height; r++)
    {
        for(int c = 0; c < img_size.width; c++)
        {
            shape t_img1_shape = img1_shp;
            shape t_img2_shape = img2_shp;
            cv::Point2f t_point(c, r);
            if(t_img1_shape.isInShape(c, r) && t_img2_shape.isInShape(c, r) )
            {
                // record these points
                overlap_points.push_back(t_point);
            }
        }
    }

    // find the corresponding points of the overlap points on img2_original
    cv::Mat Hn = H.inv();
    vector<cv::Point2f> correPoints_ori(overlap_points.size());
    cv::perspectiveTransform(overlap_points, correPoints_ori, Hn); // both point arguments must be vector<cv::Point2f> (float, not int)

    // check for out-of-bounds points
    /*ofstream Dout("2.txt",ios::out);
    for(int i = 0; i < correPoints_ori.size(); i++)
    {
    	if(correPoints_ori[i].x < 0 || correPoints_ori[i].x > img2_original.cols
    		|| correPoints_ori[i].y < 0 || correPoints_ori[i].y > img2_original.rows)
    	{

    		Dout<< correPoints_ori[i].x << " "<<correPoints_ori[i].y << endl;
    	}
    }
    Dout.clear();
    Dout.close();*/

    // compute the error
    res_error = 0;
    cv::Mat img_blend;
    vector<cv::Mat> imgs;
    vector<shape> shapes;
    imgs.push_back(img1);
    imgs.push_back(img2);
    shapes.push_back(img1_shp);
    shapes.push_back(img2_shp);
    blending_all(imgs, shapes, img_size, img_blend);

    // compute the gradient images of Iij and Ij
    cv::Mat img_blend_G;
    cv::Mat img2_original_G;
    gradientGray(img_blend, img_blend_G);
    gradientGray(img2_original, img2_original_G);

    // test: record which points actually differ
    vector<cv::Point2f> err_points_img2;
    vector<uchar> err_img2;
    // compute the error from the gradient images
    double res = 0;
    for(int i = 0; i < overlap_points.size(); i++)
    {
        uchar Gray1 = img_blend_G.at<uchar>(overlap_points[i]);
        uchar Gray2 = img2_original_G.at<uchar>(correPoints_ori[i]);
        int tr = abs(Gray1 - Gray2);
        //if(tr != 0) // many points differ by only 1, which is basically the same
        //{
        //	res += tr;
        //}
        if(tr >= 5) // many points differ by only 1, which is basically the same, so threshold small differences
        {
            res += tr;
            err_points_img2.push_back(correPoints_ori[i]);
            err_img2.push_back(tr);
        }
    }

    res_error = res;

    // test image:
    // create a new black image (sized like img2_original)
    IplImage* st_img = cvCreateImage(img2_original.size(), IPL_DEPTH_8U, 3);
    for(int i = 0; i < st_img->height; i++)
    {
        uchar *ptrImage = (uchar*)(st_img->imageData + i * st_img->widthStep);
        for (int j = 0; j < st_img->width; j++)
        {
            ptrImage[3 * j + 0]=0;
            ptrImage[3 * j + 1]=0;
            ptrImage[3 * j + 2]=0;
        }
    }
    cv::Mat test_img = st_img;
    for(int i = 0; i < err_points_img2.size(); i++)
    {
        test_img.at<Vec3b>(err_points_img2[i])[2] = err_img2[i];
    }
    imwrite("error_ori.jpg",img2_original);
    imwrite("error_test.jpg",test_img);
    //end test

    return true;
}
Code example #19
// Morph homography matrix
void MorphHomography(const cv::Mat& Hom, cv::Mat& MorphHom1, cv::Mat& MorphHom2, float blend_ratio)
{
	cv::Mat invHom = Hom.inv();
	MorphHom1 = cv::Mat::eye(3,3,CV_32FC1) * (1.0 - blend_ratio) + Hom * blend_ratio;
	MorphHom2 = cv::Mat::eye(3,3,CV_32FC1) * blend_ratio + invHom * (1.0 - blend_ratio);
}
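A short usage sketch: the cv::Mat::eye terms are CV_32FC1, so Hom must be CV_32FC1 as well; at blend_ratio = 0.5 both outputs sit halfway between the identity and the (inverse) homography:

cv::Mat Hom = (cv::Mat_<float>(3, 3) << 1.f, 0.f, 10.f,
                                        0.f, 1.f,  5.f,
                                        0.f, 0.f,  1.f); // pure translation, for illustration
cv::Mat H1, H2;
MorphHomography(Hom, H1, H2, 0.5f);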
Code example #20
		// callback for the complete message
		void complete_message_callback(const homog_track::HomogComplete& msg)
		{
			/********** Begin splitting up the incoming message *********/
			// getting boolean indicating the reference has been set
			reference_set = msg.reference_set;

			// if the reference is set then will break out the points
			if (reference_set)
			{
				// initialize temp scalar to zero
				temp_scalar = cv::Mat::zeros(1,1,CV_64F);
			
				// getting the current marker points
				circles_curr = msg.current_points;
				
				// getting the reference marker points
				circles_ref = msg.reference_points;
				
				// setting the current points to the point vector
				curr_red_p.x = circles_curr.red_circle.x;
				curr_green_p.x = circles_curr.green_circle.x;
				curr_cyan_p.x = circles_curr.cyan_circle.x;
				curr_purple_p.x = circles_curr.purple_circle.x;
				curr_red_p.y = circles_curr.red_circle.y;
				curr_green_p.y = circles_curr.green_circle.y;
				curr_cyan_p.y = circles_curr.cyan_circle.y;
				curr_purple_p.y = circles_curr.purple_circle.y;
				curr_points_p.push_back(curr_red_p);
				curr_points_p.push_back(curr_green_p);
				curr_points_p.push_back(curr_cyan_p);
				curr_points_p.push_back(curr_purple_p);

				// setting the current points to the matrix vector (this block is assumed,
				// mirroring the reference-point block below; the loop over curr_points_m
				// needs it), don't need to do the last one because it's already 1
				curr_red_m.at<double>(0,0) = curr_red_p.x;
				curr_red_m.at<double>(1,0) = curr_red_p.y;
				curr_green_m.at<double>(0,0) = curr_green_p.x;
				curr_green_m.at<double>(1,0) = curr_green_p.y;
				curr_cyan_m.at<double>(0,0) = curr_cyan_p.x;
				curr_cyan_m.at<double>(1,0) = curr_cyan_p.y;
				curr_purple_m.at<double>(0,0) = curr_purple_p.x;
				curr_purple_m.at<double>(1,0) = curr_purple_p.y;
				curr_points_m.push_back(curr_red_m);
				curr_points_m.push_back(curr_green_m);
				curr_points_m.push_back(curr_cyan_m);
				curr_points_m.push_back(curr_purple_m);
				// converting the points to be the projective coordinates
				for (int ii = 0; ii < curr_points_m.size(); ii++)
				{
					curr_points_m[ii] = K.inv(cv::DECOMP_LU)*curr_points_m[ii];
					std::cout << "currpoints at " << ii << " is: " << curr_points_m[ii] << std::endl;
					
				}
				
				// setting the reference points to the point vector
				ref_red_p.x = circles_ref.red_circle.x;
				ref_green_p.x = circles_ref.green_circle.x;
				ref_cyan_p.x = circles_ref.cyan_circle.x;
				ref_purple_p.x = circles_ref.purple_circle.x;
				ref_red_p.y = circles_ref.red_circle.y;
				ref_green_p.y = circles_ref.green_circle.y;
				ref_cyan_p.y = circles_ref.cyan_circle.y;
				ref_purple_p.y = circles_ref.purple_circle.y;
				ref_points_p.push_back(ref_red_p);
				ref_points_p.push_back(ref_green_p);
				ref_points_p.push_back(ref_cyan_p);
				ref_points_p.push_back(ref_purple_p);
				
				
				// setting the reference points to the matrix vector, don't need to do the last one because it's already 1
				ref_red_m.at<double>(0,0) = ref_red_p.x;
				ref_red_m.at<double>(1,0) = ref_red_p.y;
				ref_green_m.at<double>(0,0) = ref_green_p.x;
				ref_green_m.at<double>(1,0) = ref_green_p.y;
				ref_cyan_m.at<double>(0,0) = ref_cyan_p.x;
				ref_cyan_m.at<double>(1,0) = ref_cyan_p.y;
				ref_purple_m.at<double>(0,0) = ref_purple_p.x;
				ref_purple_m.at<double>(1,0) = ref_purple_p.y;
				ref_points_m.push_back(ref_red_m);
				ref_points_m.push_back(ref_green_m);
				ref_points_m.push_back(ref_cyan_m);
				ref_points_m.push_back(ref_purple_m);
				
				
				// converting the points to be the projective coordinates
				for (int ii = 0; ii < ref_points_m.size(); ii++)
				{
					ref_points_m[ii] = K.inv(cv::DECOMP_LU)*ref_points_m[ii];
					//std::cout << "refpoints at " << ii << " is: " << ref_points_m[ii] << std::endl;
				}
				
				// if any of the points have a -1 will skip over the homography
				if (curr_red_p.x != -1 && curr_green_p.x != -1 && curr_cyan_p.x != -1 && curr_purple_p.x != -1)
				{
					//std::cout << "hi" << std::endl;
					
					// finding the perspective homography
					G = cv::findHomography(curr_points_p,ref_points_p,0);
					//G = cv::findHomography(ref_points_p,ref_points_p,0);
					
					std::cout << "G: " << G << std::endl;
					
					// decomposing the homography into the four solutions
					// G and K are 3x3
					// R is 3x3
					// 3x1
					// 3x1
					// successful_decomp is the number of solutions found
					successful_decomp = cv::decomposeHomographyMat(G,K,R,T,n);
					
					std::cout << "successful_decomp: " << successful_decomp << std::endl;
					
					
					// if the decomp is successful will find the best matching
					if (successful_decomp > 0)
					{
						
						std::cout << std::endl << std::endl << " begin check for visibility" << std::endl;
						
						// finding the alphas
						alpha_red.data = 1/(G.at<double>(2,0)*ref_red_p.x + G.at<double>(2,1)*ref_red_p.y + 1);
						alpha_green.data = 1/(G.at<double>(2,0)*ref_green_p.x + G.at<double>(2,1)*ref_green_p.y + 1);
						alpha_cyan.data = 1/(G.at<double>(2,0)*ref_cyan_p.x + G.at<double>(2,1)*ref_cyan_p.y + 1);
						alpha_purple.data = 1/(G.at<double>(2,0)*ref_purple_p.x + G.at<double>(2,1)*ref_purple_p.y + 1);
						
						// finding the solutions that give the positive results
						for (int ii = 0; ii < successful_decomp; ii++)
						{
							
							std::cout << "solution set number " << ii << std::endl;
							
							// performing the operation transpose(m)*R*n to check if greater than 0 later
							// order operating on is red green cyan purple
							for (int jj = 0; jj < 4; jj++)
							{
								
								//std::cout << " T size: " << T[ii].size() << std::endl;
								//std::cout << " T type: " << T[ii].type() << std::endl;
								std::cout << " T value: " << T[ii] << std::endl;
								
								//std::cout << " temp scalar 1 " << std::endl;
								//std::cout << " temp scalar size: " << temp_scalar.size() << std::endl;
								//std::cout << " temp scalar type: " << temp_scalar.type() << std::endl;
								//std::cout << " temp scalar value " << temp_scalar <<std::endl;
								temp_scalar = curr_points_m[jj].t();
								
								//std::cout << " temp scalar 2 " << std::endl;
								//std::cout << " temp scalar size: " << temp_scalar.size() << std::endl;
								//std::cout << " temp scalar type: " << temp_scalar.type() << std::endl;
								//std::cout << " temp scalar value " << temp_scalar <<std::endl;
								
								//std::cout << " R size: " << R[ii].size() << std::endl;
								//std::cout << " R type: " << R[ii].type() << std::endl;
								//std::cout << " R value: " << R[ii] << std::endl;
								temp_scalar = temp_scalar*R[ii];
								
								//std::cout << " temp scalar 3 " << std::endl;
								//std::cout << " temp scalar size: " << temp_scalar.size() << std::endl;
								//std::cout << " temp scalar type: " << temp_scalar.type() << std::endl;
								//std::cout << " temp scalar value " << temp_scalar <<std::endl;
								
								//std::cout << " n size: " << n[ii].size() << std::endl;
								//std::cout << " n type: " << n[ii].type() << std::endl;
								std::cout << " n value: " << n[ii] << std::endl;
								temp_scalar = temp_scalar*n[ii];
								
								//std::cout << " temp scalar size: " << temp_scalar.size() << std::endl;
								//std::cout << " temp scalar type: " << temp_scalar.type() << std::endl;
								//std::cout << " temp scalar value " << temp_scalar <<std::endl;
								//std::cout << " temp scalar value at 0,0" << temp_scalar.at<double>(0,0) << std::endl;
								
								scalar_value_check.push_back(temp_scalar.at<double>(0,0));
								
								////std::cout << " scalar value check size: " << scalar_value_check.size() << std::endl;
								//std::cout << " \tthe value for the " << jj << " visibility check is: " << scalar_value_check[4*ii+jj] << std::endl;
								
							}
						}
						
						std::cout << " end check for visibility" << std::endl << std::endl;
						
						// resetting first solution found and second solution found
						first_solution_found = false;
						second_solution_found = false;
						fc_found = false;
						
						// getting the two solutions or only one if there are not two
						for (int ii = 0; ii < successful_decomp; ii++)
						{
							// getting the values onto the temporary vector
							// getting the start and end of the next solution
							temp_solution_start = scalar_value_check.begin() + 4*ii;
							temp_solution_end = scalar_value_check.begin() + 4*ii+4;
							temp_solution.assign(temp_solution_start,temp_solution_end);
							
							// checking if all the values are positive
							all_positive = true;
							current_temp_index = 0;
							while (all_positive && current_temp_index < 4)
							{
								if (temp_solution[current_temp_index] >= 0)
								{
									current_temp_index++;
								}
								else
								{
									all_positive = false;
								}
							}
							
							// if all the values were positive and a first solution has not been found will assign 
							// to first solution. if all positive and first solution has been found will assign
							// to second solution. if all positive is false then will not do anything
							if (all_positive && first_solution_found && !second_solution_found)
							{
								// setting it to indicate a solution has been found
								second_solution_found = true;
								
								// setting the rotation, translation, and normal to be the second set
								second_R = R[ii];
								second_T = T[ii];
								second_n = n[ii];
								
								// setting the projected values
								second_solution = temp_solution;
							}
							else if (all_positive && !first_solution_found)
							{
								// setting it to indicate a solution has been found
								first_solution_found = true;
								
								// setting the rotation, translation, and normal to be the first set
								first_R = R[ii];
								first_T = T[ii];
								first_n = n[ii];
								
								// setting the projected values
								first_solution = temp_solution;
							}
							
							// erasing all the values from the temp solution
							temp_solution.erase(temp_solution.begin(),temp_solution.end());
						}
						
						// erasing all the scalar values from the check
						scalar_value_check.erase(scalar_value_check.begin(),scalar_value_check.end());
					
					
						// displaying the first solution if it was found
						if (first_solution_found)
						{
							std::cout << std::endl << "first R: " << first_R << std::endl;
							std::cout << "first T: " << first_T << std::endl;
							std::cout << "first n: " << first_n << std::endl;
							for (double ii : first_solution)
							{
								std::cout << ii << " ";
							}
							std::cout << std::endl;
							
						}
						
						// displaying the second solution if it was found
						if (second_solution_found)
						{
							std::cout << std::endl << "second R: " << second_R << std::endl;
							std::cout << "second T: " << second_T << std::endl;
							std::cout << "second n: " << second_n << std::endl;
							for (double ii : second_solution)
							{
								std::cout << ii << " ";
							}
							std::cout << std::endl;
						}
						
						// because the reference is set to the exact pose where n should have only a z component, the correct
						// choice is the solution closest to n_ref = [0,0,1]^T, i.e. the one with the greatest dot product with n_ref
						if (first_solution_found && second_solution_found)
						{
							if (first_n.dot(n_ref) >= second_n.dot(n_ref))
							{
								R_fc = first_R;
								T_fc = first_T;
							}
							else
							{
								R_fc = second_R;
								T_fc = second_T;
							}
							fc_found = true;
						}
						else if(first_solution_found)
						{
							R_fc = first_R;
							T_fc = first_T;
							fc_found = true;
						}
						
						//if a solution was found will publish
						// need to convert to pose message so use
						if (fc_found)
						{
							// converting the rotation from a cv matrix to quaternion, first need it as a matrix3x3
							R_fc_tf[0][0] = R_fc.at<double>(0,0);
							R_fc_tf[0][1] = R_fc.at<double>(0,1);
							R_fc_tf[0][2] = R_fc.at<double>(0,2);
							R_fc_tf[1][0] = R_fc.at<double>(1,0);
							R_fc_tf[1][1] = R_fc.at<double>(1,1);
							R_fc_tf[1][2] = R_fc.at<double>(1,2);
							R_fc_tf[2][0] = R_fc.at<double>(2,0);
							R_fc_tf[2][1] = R_fc.at<double>(2,1);
							R_fc_tf[2][2] = R_fc.at<double>(2,2);
							std::cout << "Final R:\n" << R_fc << std::endl;
							
							// converting the translation to a vector 3 (T_fc is 3x1, so index by row)
							T_fc_tf.setX(T_fc.at<double>(0,0));
							T_fc_tf.setY(T_fc.at<double>(1,0));
							T_fc_tf.setZ(T_fc.at<double>(2,0));
							std::cout << "Final T :\n" << T_fc << std::endl;
							
							// getting the rotation as a quaternion
							R_fc_tf.getRotation(Q_fc_tf);
							
							std::cout << "current orientation:" << "\n\tx:\t" << Q_fc_tf.getX() 
																<< "\n\ty:\t" << Q_fc_tf.getY() 
																<< "\n\tz:\t" << Q_fc_tf.getZ() 
																<< "\n\tw:\t" << Q_fc_tf.getW() 
																<< std::endl;
				
							std::cout << "norm of quaternion:\t" << Q_fc_tf.length() << std::endl;
							
							// getting the negated version of the quaternion for the check
							Q_fc_tf_negated = tf::Quaternion(-Q_fc_tf.getX(),-Q_fc_tf.getY(),-Q_fc_tf.getZ(),-Q_fc_tf.getW());
							
							std::cout << "negated orientation:" << "\n\tx:\t" << Q_fc_tf_negated.getX() 
																<< "\n\ty:\t" << Q_fc_tf_negated.getY() 
																<< "\n\tz:\t" << Q_fc_tf_negated.getZ() 
																<< "\n\tw:\t" << Q_fc_tf_negated.getW() 
																<< std::endl;
																
							std::cout << "norm of negated quaternion:\t" << Q_fc_tf_negated.length() << std::endl;
							
							// showing the last orientation
							std::cout << "last orientation:" << "\n\tx:\t" << Q_fc_tf_last.getX() 
															 << "\n\ty:\t" << Q_fc_tf_last.getY() 
															 << "\n\tz:\t" << Q_fc_tf_last.getZ() 
															 << "\n\tw:\t" << Q_fc_tf_last.getW() 
															 << std::endl;
																
							std::cout << "norm of last quaternion:\t" << Q_fc_tf_last.length() << std::endl;
							
							// checking if the quaternion has flipped
							Q_norm_current_diff = std::sqrt(std::pow(Q_fc_tf.getX() - Q_fc_tf_last.getX(),2.0)
														  + std::pow(Q_fc_tf.getY() - Q_fc_tf_last.getY(),2.0) 
														  + std::pow(Q_fc_tf.getZ() - Q_fc_tf_last.getZ(),2.0) 
														  + std::pow(Q_fc_tf.getW() - Q_fc_tf_last.getW(),2.0));
							
							std::cout << "current difference:\t" << Q_norm_current_diff << std::endl;
							
							Q_norm_negated_diff = std::sqrt(std::pow(Q_fc_tf_negated.getX() - Q_fc_tf_last.getX(),2.0)
														  + std::pow(Q_fc_tf_negated.getY() - Q_fc_tf_last.getY(),2.0) 
														  + std::pow(Q_fc_tf_negated.getZ() - Q_fc_tf_last.getZ(),2.0) 
														  + std::pow(Q_fc_tf_negated.getW() - Q_fc_tf_last.getW(),2.0));
							
							std::cout << "negated difference:\t" << Q_norm_negated_diff << std::endl;
							
							if (Q_norm_current_diff > Q_norm_negated_diff)
							{
								Q_fc_tf = Q_fc_tf_negated;
							}
							
							// updating the last
							Q_fc_tf_last = Q_fc_tf;
							
							// converting the tf quaternion to a geometry message quaternion
							Q_fc_gm.x = Q_fc_tf.getX();
							Q_fc_gm.y = Q_fc_tf.getY();
							Q_fc_gm.z = Q_fc_tf.getZ();
							Q_fc_gm.w = Q_fc_tf.getW();
							
							// converting the tf vector3 to a point
							P_fc_gm.x = T_fc_tf.getX();
							P_fc_gm.y = T_fc_tf.getY();
							P_fc_gm.z = T_fc_tf.getZ();
							
							// setting the transform with the values
							fc_tf.setOrigin(T_fc_tf);
							fc_tf.setRotation(Q_fc_tf);
							tf_broad.sendTransform(tf::StampedTransform(fc_tf, msg.header.stamp,"f_star","f_current"));
							
							// setting the decomposed message
							pose_fc_gm.position = P_fc_gm;
							pose_fc_gm.orientation = Q_fc_gm;
							decomposed_msg.pose = pose_fc_gm;
							decomposed_msg.header.stamp = msg.header.stamp;
							decomposed_msg.header.frame_id = "current_frame_normalized";
							decomposed_msg.alpha_red = alpha_red;
							decomposed_msg.alpha_green = alpha_green;
							decomposed_msg.alpha_cyan = alpha_cyan;
							decomposed_msg.alpha_purple = alpha_purple;
							homog_decomp_pub.publish(decomposed_msg);
							
							std::cout << "complete message\n" << decomposed_msg << std::endl << std::endl;
							
							// publish the marker
							marker.pose = pose_fc_gm;
							marker_pub.publish(marker);
							
						}
					}
				}

				// erasing all the temporary points
				if (first_solution_found || second_solution_found)
				{
					// erasing all the point vectors and matrix vectors
					curr_points_p.erase(curr_points_p.begin(),curr_points_p.end());
					ref_points_p.erase(ref_points_p.begin(),ref_points_p.end());
					curr_points_m.erase(curr_points_m.begin(),curr_points_m.end());
					ref_points_m.erase(ref_points_m.begin(),ref_points_m.end());
				}
			}
			/********** End splitting up the incoming message *********/
			
		}
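The core step of this callback is cv::decomposeHomographyMat; a minimal sketch of just that call, with illustrative G and K values:

cv::Mat G = cv::Mat::eye(3, 3, CV_64FC1); // homography between current and reference view
cv::Mat K = (cv::Mat_<double>(3, 3) << 600, 0, 320, 0, 600, 240, 0, 0, 1);
std::vector<cv::Mat> R, T, n;
int solutions = cv::decomposeHomographyMat(G, K, R, T, n); // up to 4 candidate (R, t, n) sets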
Code example #21
// [ref] undistort_images_using_formula() in ${CPP_RND_HOME}/test/machine_vision/opencv/opencv_image_undistortion.cpp
void KinectSensor::computeHomogeneousImageCoordinates(const cv::Size &imageSize, const cv::Mat &K, const cv::Mat &distCoeffs, cv::Mat &IC_homo, cv::Mat &IC_homo_undist)
{
	// [ref] "Joint Depth and Color Camera Calibration with Distortion Correction", D. Herrera C., J. Kannala, & J. Heikkila, TPAMI, 2012

	// homogeneous image coordinates: zero-based coordinates
	cv::Mat(3, imageSize.height * imageSize.width, CV_64FC1, cv::Scalar::all(1)).copyTo(IC_homo);
	//IC_homo = cv::Mat::ones(3, imageSize.height * imageSize.width, CV_64FC1);
	{
#if 0
		// 0 0 0 ...   0 1 1 1 ...   1 ... 639 639 639 ... 639
		// 0 1 2 ... 479 0 1 2 ... 479 ...   0   1   2 ... 479

		cv::Mat arr(1, imageSize.height, CV_64FC1);
		for (int i = 0; i < imageSize.height; ++i)
			arr.at<double>(0, i) = (double)i;

		for (int i = 0; i < imageSize.width; ++i)
		{
			IC_homo(cv::Range(0, 1), cv::Range(i * imageSize.height, (i + 1) * imageSize.height)).setTo(cv::Scalar::all(i));
			arr.copyTo(IC_homo(cv::Range(1, 2), cv::Range(i * imageSize.height, (i + 1) * imageSize.height)));
		}
#else
		// 0 1 2 ... 639 0 1 2 ... 639 ...   0   1   2 ... 639
		// 0 0 0 ...   0 1 1 1 ...   1 ... 479 479 479 ... 479

		cv::Mat arr(1, imageSize.width, CV_64FC1);
		for (int i = 0; i < imageSize.width; ++i)
			arr.at<double>(0, i) = (double)i;

		for (int i = 0; i < imageSize.height; ++i)
		{
			arr.copyTo(IC_homo(cv::Range(0, 1), cv::Range(i * imageSize.width, (i + 1) * imageSize.width)));
			IC_homo(cv::Range(1, 2), cv::Range(i * imageSize.width, (i + 1) * imageSize.width)).setTo(cv::Scalar::all(i));
		}
#endif
	}

	// homogeneous normalized camera coordinates
	const cv::Mat CC_norm(K.inv() * IC_homo);

	// apply distortion
	{
		//const cv::Mat xn(CC_norm(cv::Range(0,1), cv::Range::all()));
		const cv::Mat xn(CC_norm(cv::Range(0,1), cv::Range::all()) / CC_norm(cv::Range(2,3), cv::Range::all()));
		//const cv::Mat yn(CC_norm(cv::Range(1,2), cv::Range::all()));
		const cv::Mat yn(CC_norm(cv::Range(1,2), cv::Range::all()) / CC_norm(cv::Range(2,3), cv::Range::all()));

		const cv::Mat xn2(xn.mul(xn));
		const cv::Mat yn2(yn.mul(yn));
		const cv::Mat xnyn(xn.mul(yn));
		const cv::Mat r2(xn2 + yn2);
		const cv::Mat r4(r2.mul(r2));
		const cv::Mat r6(r4.mul(r2));

		const double &k1 = distCoeffs.at<double>(0);
		const double &k2 = distCoeffs.at<double>(1);
		const double &k3 = distCoeffs.at<double>(2);
		const double &k4 = distCoeffs.at<double>(3);
		const double &k5 = distCoeffs.at<double>(4);

		const cv::Mat xg(2.0 * k3 * xnyn + k4 * (r2 + 2.0 * xn2));
		const cv::Mat yg(k3 * (r2 + 2.0 * yn2) + 2.0 * k4 * xnyn);

		const cv::Mat coeff(1.0 + k1 * r2 + k2 * r4 + k5 * r6);
		cv::Mat xk(3, imageSize.height * imageSize.width, CV_64FC1, cv::Scalar::all(1));
		cv::Mat(coeff.mul(xn) + xg).copyTo(xk(cv::Range(0,1), cv::Range::all()));
		cv::Mat(coeff.mul(yn) + yg).copyTo(xk(cv::Range(1,2), cv::Range::all()));

		IC_homo_undist = K * xk;
	}
}
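A usage sketch under assumptions (a KinectSensor instance named sensor; five distortion coefficients k1..k5 indexed as in the Herrera et al. model above):

cv::Size size(640, 480);
cv::Mat K = (cv::Mat_<double>(3, 3) << 525, 0, 319.5, 0, 525, 239.5, 0, 0, 1);
cv::Mat dist = (cv::Mat_<double>(5, 1) << 0.2, -0.5, 1e-3, 1e-3, 0.1);
cv::Mat IC_homo, IC_homo_undist; // each a 3 x (height*width) homogeneous pixel grid
sensor.computeHomogeneousImageCoordinates(size, K, dist, IC_homo, IC_homo_undist);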
Code example #22
// REF [function] >> rectify_kinect_images_from_IR_to_RGB_using_depth() in ${CPP_RND_HOME}/test/machine_vision/opencv/opencv_image_rectification.cpp
// IR (left) to RGB (right).
void KinectSensor::rectifyImagePairFromIRToRGBUsingDepth(
	const cv::Mat &input_image_left, const cv::Mat &input_image_right, cv::Mat &output_image_left, cv::Mat &output_image_right,
	const cv::Size &imageSize_left, const cv::Size &imageSize_right,
	const cv::Mat &K_left, const cv::Mat &K_right, const cv::Mat &R, const cv::Mat &T,
	const cv::Mat &IC_homo_left
) const
{
	// Homogeneous normalized camera coordinates (left).
	const cv::Mat CC_norm_left(K_left.inv() * IC_homo_left);

	// Camera coordinates (left).
	cv::Mat CC_left;
	{
		cv::Mat tmp;
#if 0
		// 0 0 0 ...   0 1 1 1 ...   1 ... 639 639 639 ... 639.
		// 0 1 2 ... 479 0 1 2 ... 479 ...   0   1   2 ... 479.

		((cv::Mat)input_image_left.t()).convertTo(tmp, CV_64FC1, 1.0, 0.0);
#else
		// 0 1 2 ... 639 0 1 2 ... 639 ...   0   1   2 ... 639.
		// 0 0 0 ...   0 1 1 1 ...   1 ... 479 479 479 ... 479.

		input_image_left.convertTo(tmp, CV_64FC1, 1.0, 0.0);
#endif
		cv::repeat(tmp.reshape(1, 1), 3, 1, CC_left);
		CC_left = CC_left.mul(CC_norm_left);
	}

	// Camera coordinates (right).
	cv::Mat CC_right;
#if 0
	cv::repeat(T, 1, imageSize_left.width*imageSize_left.height, CC_right);
	CC_right = R.t() * (CC_left - CC_right);
#else
	cv::repeat(T, 1, imageSize_left.width*imageSize_left.height, CC_right);
	CC_right = R * CC_left + CC_right;
#endif

	// Homogeneous normalized camera coordinates (right).
	cv::Mat CC_norm_right;
	cv::repeat(CC_right(cv::Range(2, 3), cv::Range::all()), 3, 1, CC_norm_right);
	CC_norm_right = CC_right / CC_norm_right;

	// Homogeneous image coordinates (right).
	const cv::Mat IC_homo_right(K_right * CC_norm_right);  // Zero-based coordinates.

	// The left image is mapped onto the right image.
	cv::Mat(input_image_right.size(), input_image_left.type(), cv::Scalar::all(0)).copyTo(output_image_left);
	//output_image_left = cv::Mat::zeros(input_image_right.size(), input_image_left.type());
#pragma omp parallel
//#pragma omp parallel shared(a, b, c, d, loop_count, chunk) private(i)
//#pragma omp parallel shared(a, b, c, d, loop_count, i, chunk)
	{
#pragma omp for
//#pragma omp for schedule(dynamic, 100) nowait
		for (int idx = 0; idx < imageSize_left.height*imageSize_left.width; ++idx)
		{
			const int &cc = (int)cvRound(IC_homo_right.at<double>(0,idx));
			const int &rr = (int)cvRound(IC_homo_right.at<double>(1,idx));
			if (0 <= cc && cc < imageSize_right.width && 0 <= rr && rr < imageSize_right.height)
				output_image_left.at<unsigned short>(rr, cc) = (unsigned short)cvRound(CC_left.at<double>(2, idx));
		}
	}

	input_image_right.copyTo(output_image_right);
}
Code example #23
void ConstraintsHelpers::processPointCloud(cv::Mat hokuyoData,
											cv::Mat& pointCloudOrigMapCenter,
											std::queue<PointsPacket>& pointsInfo,
											std::chrono::high_resolution_clock::time_point hokuyoTimestamp,
											std::chrono::high_resolution_clock::time_point curTimestamp,
											cv::Mat curPosOrigMapCenter,
											std::mutex& mtxPointCloud,
											cv::Mat cameraOrigLaser,
											cv::Mat cameraOrigImu,
											ros::NodeHandle &nh)
{
	Mat hokuyoCurPoints(6, hokuyoData.cols, CV_32FC1);
	int minLaserDist, pointCloudTimeout;

	nh.getParam("minLaserDist", minLaserDist);
	nh.getParam("pointCloudTimeout", pointCloudTimeout);

	//cout << "Copying hokuyo data" << endl;
	int countPoints = 0;
	for(int c = 0; c < hokuyoData.cols; c++){
		//cout << hokuyoData.at<int>(2, c) << endl;
		if(hokuyoData.at<int>(2, c) > minLaserDist){
			hokuyoCurPoints.at<float>(0, countPoints) = -hokuyoData.at<int>(1, c);
			hokuyoCurPoints.at<float>(1, countPoints) = 0.0;
			hokuyoCurPoints.at<float>(2, countPoints) = hokuyoData.at<int>(0, c);
			hokuyoCurPoints.at<float>(3, countPoints) = 1.0;
			hokuyoCurPoints.at<float>(4, countPoints) = hokuyoData.at<int>(2, c);
			hokuyoCurPoints.at<float>(5, countPoints) = hokuyoData.at<int>(3, c);
			countPoints++;
		}
	}
	hokuyoCurPoints = hokuyoCurPoints.colRange(0, countPoints);
//	cout << "hokuyoCurPoints.size() = " << hokuyoCurPoints.size() << endl;
//	cout << "hokuyoData.size() = " << hokuyoData.size() << endl;

	//cout << "Removing old points" << endl;
	//remove all points older than pointCloudTimeout ms
	int pointsSkipped = 0;
	if(pointsInfo.size() > 0){
		//cout << "dt = " << std::chrono::duration_cast<std::chrono::milliseconds>(curTimestamp - pointsQueue.front().timestamp).count() << endl;
		while(std::chrono::duration_cast<std::chrono::milliseconds>(curTimestamp - pointsInfo.front().timestamp).count() > pointCloudTimeout){
			pointsSkipped += pointsInfo.front().numPoints;
			pointsInfo.pop();
			if(pointsInfo.size() == 0){
				break;
			}
		}
	}

	std::unique_lock<std::mutex> lck(mtxPointCloud);
	//cout << "Moving pointCloudOrigMapCenter, pointsSkipped = " << pointsSkipped << endl;
	Mat tmpAllPoints(hokuyoCurPoints.rows, pointCloudOrigMapCenter.cols + hokuyoCurPoints.cols - pointsSkipped, CV_32FC1);
	if(!pointCloudOrigMapCenter.empty()){
		//cout << "copyTo" << endl;
		if(pointsSkipped < pointCloudOrigMapCenter.cols){
			pointCloudOrigMapCenter.colRange(pointsSkipped,
												pointCloudOrigMapCenter.cols).
							copyTo(tmpAllPoints.colRange(0, pointCloudOrigMapCenter.cols - pointsSkipped));
		}
	}
//	if(debugLevel >= 1){
//		cout << "countPoints = " << countPoints << ", hokuyoCurPoints.size = " << hokuyoCurPoints.size() << endl;
//	}
	if(countPoints > 0){

		//cout << "Moving to camera orig" << endl;
		hokuyoCurPoints.rowRange(0, 4) = cameraOrigLaser.inv()*hokuyoCurPoints.rowRange(0, 4);
		//cout << "Addding hokuyoCurPoints" << endl;
//		if(debugLevel >= 1){
//			cout << "Addding hokuyoCurPoints" << endl;
//		}
		Mat curPointCloudOrigMapCenter(hokuyoCurPoints.rows, hokuyoCurPoints.cols, CV_32FC1);
		Mat tmpCurPoints = curPosOrigMapCenter*cameraOrigImu*hokuyoCurPoints.rowRange(0, 4);
		tmpCurPoints.copyTo(curPointCloudOrigMapCenter.rowRange(0, 4));
		hokuyoCurPoints.rowRange(4, 6).copyTo(curPointCloudOrigMapCenter.rowRange(4, 6));
		//cout << hokuyoCurPointsGlobal.channels() << ", " << hokuyoAllPointsGlobal.channels() << endl;
//		cout << pointCloudOrigMapCenter.size() << " " << curPointCloudCameraMapCenter.size() << " " << tmpAllPoints.size() << endl;
//		if(debugLevel >= 1){
//			cout << "pointsSkipped = " << pointsSkipped << endl;
//		}
		curPointCloudOrigMapCenter.copyTo(tmpAllPoints.colRange(pointCloudOrigMapCenter.cols - pointsSkipped,
																	pointCloudOrigMapCenter.cols + hokuyoCurPoints.cols - pointsSkipped));
//		if(debugLevel >= 1){
//			cout << "curPointCloudCameraMapCenter copied" << endl;
//		}
		pointsInfo.push(PointsPacket(hokuyoTimestamp, hokuyoCurPoints.cols));

		pointCloudOrigMapCenter = tmpAllPoints;
	}
	lck.unlock();
}
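The heart of the function above is the coordinate-frame chain: laser points are first moved into the camera frame via cameraOrigLaser.inv(), then into the map-centered frame via the current pose. A minimal standalone sketch of just that chain (the helper name is illustrative, not part of the original code):

#include <opencv2/core.hpp>

// Sketch only: transform 4xN homogeneous laser points into the map-centered frame.
cv::Mat laserToMapCenter(const cv::Mat& laserPts4xN,         // 4xN, CV_32FC1, homogeneous
                         const cv::Mat& cameraOrigLaser,     // 4x4 laser-to-camera transform
                         const cv::Mat& cameraOrigImu,       // 4x4 camera-to-IMU transform
                         const cv::Mat& curPosOrigMapCenter) // 4x4 current pose
{
	cv::Mat inCamera = cameraOrigLaser.inv() * laserPts4xN; // laser frame -> camera frame
	return curPosOrigMapCenter * cameraOrigImu * inCamera;  // camera frame -> map center
}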
Code Example #24
cv::Vec3d rayPlaneIntersection(cv::Point2f uv, const cv::Mat& centroid, const cv::Mat& normal, const cv::Mat_<float>& Kinv)
{
    cv::Matx33d dKinv(Kinv);
    cv::Vec3d dNormal(normal);
    return rayPlaneIntersection(cv::Vec3d(uv.x, uv.y, 1), centroid.dot(normal), dNormal, dKinv);
}

const int W = 640;
const int H = 480;
int window_size = 5;
float focal_length = 525;
float cx = W / 2.f + 0.5f;
float cy = H / 2.f + 0.5f;

cv::Mat K = (cv::Mat_<double>(3, 3) << focal_length, 0, cx, 0, focal_length, cy, 0, 0, 1);
cv::Mat Kinv = K.inv();

static cv::RNG rng;
struct Plane
{

    cv::Vec3d n, p;
    double p_dot_n;
    Plane()
    {
        n[0] = rng.uniform(-0.5, 0.5);
        n[1] = rng.uniform(-0.5, 0.5);
        n[2] = -0.3; //rng.uniform(-1.f, 0.5f);
        n = n / cv::norm(n);
        set_d(rng.uniform(-2.0, 0.6));
    }
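The overload that the wrapper at the top of this example delegates to is not included in the snippet. A minimal sketch of the underlying math, assuming the plane is passed as n·X = d with d = centroid.dot(normal), as the call above suggests:

#include <opencv2/core.hpp>

// Sketch of the assumed 4-argument overload: intersect the pixel ray with the plane.
static cv::Vec3d rayPlaneIntersection(const cv::Vec3d& uv1, double d,
                                      const cv::Vec3d& n, const cv::Matx33d& Kinv)
{
    cv::Vec3d dir = Kinv * uv1; // back-projected ray direction through the pixel
    double t = d / n.dot(dir);  // solve n.(t*dir) = d for the ray parameter t
    return t * dir;             // intersection point in camera coordinates
}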
Code Example #25
File: PoseEdit.cpp Project: abardam/ghost-system
void onMouse(int e, int x, int y, int, void *){
	switch (e)
	{
	case cv::EVENT_LBUTTONDOWN:
		start.x = x;
		start.y = y;
		prev = start;
		mousedn = true;
		rmousedn = false;
		break;
	case cv::EVENT_LBUTTONUP:
		mousedn = false;
		rmousedn = false;
		break;
	case cv::EVENT_MOUSEMOVE:
		if(mousedn){
			cv::Point curr(x,y);
			angle_y += (curr.x - prev.x)/180.*CV_PI;
			angle_x += (curr.y - prev.y)/180.*CV_PI;
			prev = curr;
		}
		else if(rmousedn){
			if(!alljoints){
				if(cjoint != -1){
					cv::Vec3f ray = lerpPoint(x,y,boundingBoxLerp,lc);
					cv::Vec3f newPos = vectorProject(mat_to_vec3(trans * wcSkeletons[frame].points.col(cjoint)), ray);
					cv::Mat(trans.inv() * vec3_to_mat4(newPos)).copyTo(wcSkeletons[frame].points.col(cjoint));
					calculateSkeletonOffsetPoints(vidRecord, wcSkeletons, cylinderBody);
				}
			}else{
				
				if(cjoint != -1){
					cv::Vec3f ray = lerpPoint(x,y,boundingBoxLerp,lc);
					cv::Vec3f newPosProj = vectorProject(mat_to_vec3(trans * wcSkeletons[frame].points.col(cjoint)), ray);
					cv::Mat newPos = trans.inv() * vec3_to_mat4(newPosProj);
					
					cv::Mat oldPos = wcSkeletons[frame].points.col(cjoint);

					cv::Mat diff = newPos - oldPos;

					newPos.copyTo(wcSkeletons[frame].points.col(cjoint));

					for(int joint = 0; joint < NUMJOINTS; ++joint){
						if(joint != cjoint){

							cv::Mat newPosCopyTransform = wcSkeletons[frame].points.col(joint) + diff;

							newPosCopyTransform.copyTo(wcSkeletons[frame].points.col(joint));
						}
					}

					calculateSkeletonOffsetPoints(vidRecord, wcSkeletons, cylinderBody);
				}
			}
		}
		break;
	case cv::EVENT_RBUTTONDOWN:
		{
			mousedn = false;
			rmousedn = true;

			cv::Point curr(x,y);

			float cdist=10000;
			cjoint=-1;

			for(int joint=0;joint<NUMJOINTS;++joint)
			{
				cv::Vec3f jv = mat_to_vec3(trans * wcSkeletons[frame].points.col(joint));
				cv::Vec2f jv2 = mat4_to_vec2(getCameraMatrixScene() * vec3_to_mat4(jv));
				cv::Point pj(jv2(0), jv2(1));

				float dist = cv::norm(curr-pj);
				if(dist < cdist && dist < 10){
					cdist = dist;
					cjoint = joint;
				}
			}
			lastjoint = cjoint;
			break;
		}
	case cv::EVENT_RBUTTONUP:
		{
			mousedn = false;
			rmousedn = false;
			cjoint = -1;
			break;
		}
	default:
		break;
	}
}
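The handler above plugs into OpenCV's HighGUI event loop. A minimal wiring sketch (the window name and loop body are illustrative):

#include <opencv2/highgui.hpp>

int main()
{
    cv::namedWindow("PoseEdit");
    cv::setMouseCallback("PoseEdit", onMouse); // route mouse events to the handler above
    while (cv::waitKey(30) != 27) { /* redraw the scene here */ }
    return 0;
}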
Code Example #26
File: PPatchGenerator.cpp Project: CGAnderson/htld
/**
*@param img Blurred input image
*@param xMin,yMin,xMax,yMax Region of interest in the warped domain
*@param bbW,bbH Width and height of the output patch
*@param h 3x3 transformation (homography) matrix
*@param filledColor Value used for samples that fall outside img
*@param result Output buffer of bbW*bbH pixels, written row by row
**/
void warpImageROI(cv::Mat &img, 
				  double xMin,
				  double yMin,
				  double xMax,
				  double yMax,
				  int bbW,
				  int bbH,
				  cv::Mat &h,
				  unsigned char filledColor,
				  unsigned char *result) {

	double curx, cury, curz, wx, wy, wz, ox, oy, oz;
	int x, y;
	double xx, yy;
	int i;
	unsigned char *tmp;
	unsigned char *rowY, *rowYP1;

	cv::Mat invT = h.inv(cv::DECOMP_LU); // LU-based inverse; h must be square and non-singular (like Matlab's inv)
	double *invPtr = invT.ptr<double>();
	ox = *(invPtr + 2);
	oy = *(invPtr + invT.cols + 2);
	oz = *(invPtr + 2 * invT.cols + 2);

	yy = yMin;
	for(int j=0; j<bbH; j++) {
		/* calculate x, y for current row */
		curx = *(invPtr + 1) * yy + ox;
		cury = *(invPtr + invT.cols + 1) * yy + oy;
		curz = *(invPtr + 2 * invT.cols + 1) * yy + oz;
		xx   = xMin; 

		for(i=0; i<bbW; i++) {
			/* calculate x, y in current column */
			wx = *(invPtr)*xx + curx;
			wy = *(invPtr + invT.cols) * xx + cury;
			wz = *(invPtr + 2 * invT.cols) * xx + curz;
			wx /= wz; wy /= wz;
			
			x = (int)floor(wx);
			y = (int)floor(wy);

			if(x>=0 && y>=0) {
				wx -= x; wy -= y; // fractional parts in [0,1]
				if(x+1 == img.cols && wx == 1.0)
					x--;
				if(y+1 == img.rows && wy == 1.0)
					y--;
				if((x+1) < img.cols && (y+1) < img.rows) {
					//Different Pixels Nearby Have Different Contributions to the Result!...
					/* img[x,y]*(1-wx)*(1-wy) + 
						img[x+1,y]*wx*(1-wy) +
						img[x,y+1]*(1-wx)*wy + 
						img[x+1,y+1]*wx*wy 
						*/
					rowY   = img.ptr<unsigned char>(y);     // row pointers for fast pixel access
					rowYP1 = img.ptr<unsigned char>(y + 1);
					*result++ = cv::saturate_cast<unsigned char>(
						            (rowY[x]   * (1.0 - wx) + rowY[x + 1]   * wx) * (1.0 - wy) + 
									(rowYP1[x] * (1.0 - wx) + rowYP1[x + 1] * wx) * wy
								);
				} else 
					*result++ = filledColor;
			} else 
				*result++ = filledColor;
			xx = xx + 1;
		}//End of Inner-for-Loop...
		yy = yy + 1;
	}//End of Outermost-for-Loop...
}
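A hedged usage sketch: wrapping the routine so the raw output buffer becomes a cv::Mat patch (extractPatch and the mid-gray fill value are illustrative, not part of the original file):

#include <vector>
#include <opencv2/core.hpp>

cv::Mat extractPatch(cv::Mat& img, cv::Mat& h,
                     double xMin, double yMin, double xMax, double yMax,
                     int bbW, int bbH)
{
	std::vector<unsigned char> buf(bbW * bbH);
	// warp the ROI into the buffer, filling out-of-image samples with mid-gray
	warpImageROI(img, xMin, yMin, xMax, yMax, bbW, bbH, h, 128, buf.data());
	// the buffer is written row by row, so it maps directly onto a bbH x bbW Mat
	return cv::Mat(bbH, bbW, CV_8UC1, buf.data()).clone(); // clone so the Mat owns its pixels
}

For many inputs cv::warpPerspective with the cv::WARP_INVERSE_MAP flag produces a comparable patch, though the ROI offset and border handling here differ in detail.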
Code Example #27
File: essential_mat.cpp Project: caomw/LineSLAM
int essn_ransac (cv::Mat* pts1, cv::Mat* pts2, cv::Mat* E, cv::Mat K, 
	cv::Mat* inlierMask, int imsize)
	// runs the 5-point algorithm inside a RANSAC loop, with re-estimation at the end
	// Input: point pairs normalized by camera matrix K
	// Output: essential matrix E and inlier mask; returns 1 on success, 0 on failure
{
	cv::Mat bestMss1, bestMss2;
	int maxIterNum = 500, N = pts1->cols;
	double confidence = 0.995;
	double threshDist = 1; // inlier threshold in image units (alternative: 0.5*imsize/640.0)
	int iter = 0;
	vector<int> rnd;
	for (int i=0; i<N; ++i)
		rnd.push_back(i);
	cv::Mat mss1(3,5,CV_64F), mss2(3,5,CV_64F);
	vector<int> maxInlierSet;
	cv::Mat bestE;
	while (iter < maxIterNum) {
		++iter;
		// --- choose minimal solution set: mss1<->mss2
		random_shuffle(rnd.begin(),rnd.end());
		for (int i=0; i < mss1.cols; ++i) {
			pts1->col(rnd[i]).copyTo(mss1.col(i));
			pts2->col(rnd[i]).copyTo(mss2.col(i));
			// TODO: guard against degenerate configurations
		}		
		// compute minimal solution by 5-point
		vector<cv::Mat> Es;   // multiple results from 5point alg.
		fivepoint_stewnister(mss1, mss2, Es);
		// find consensus set
		vector<int> curInlierSet;

		for (int k=0; k < Es.size(); ++k) {
			curInlierSet.clear();
			cv::Mat F = K.t().inv() * Es[k] * K.inv();
			
			for (int i=0; i<N; ++i) {
				double dist = sqrt(fund_samperr(K*pts1->col(i), K*pts2->col(i),F));
				if (dist < threshDist)
					curInlierSet.push_back(i);
			}
			if(curInlierSet.size() > maxInlierSet.size()) {
				maxInlierSet = curInlierSet;
				bestE = Es[k];
			}
		}
		//re-compute maximum iteration number:maxIterNum
//		maxIterNum = abs(log(1-confidence)
//			/log(1-pow(double(maxInlierSet.size())/N,5.0)));
	}
	if (maxInlierSet.size() < mss1.cols) {
		cout<<"essn_ransac: Largest consensus set is too small for minimal estimation."
			<<endl;
		return 0;
	}

	cv::Mat tmpE = bestE.clone();
	while (true) {		
		vector<cv::Mat> Es;
		vector<int> maxInlierSet_k;
		cv::Mat bestE_k;		
		// re-estimate
		cv::Mat inliers1(3,maxInlierSet.size(),CV_64F),
			inliers2(3,maxInlierSet.size(),CV_64F);
		for (int i=0; i<maxInlierSet.size(); ++i) {
			pts1->col(maxInlierSet[i]).copyTo(inliers1.col(i));
			pts2->col(maxInlierSet[i]).copyTo(inliers2.col(i));
		}				
		//opt_essn_pts (inliers1, inliers2, &tmpE);
		optimizeEmat (inliers1, inliers2, K, &tmpE);
		vector<int> curInlierSet;
		for (int i=0; i<N; ++i) {
			double dist = sqrt(fund_samperr(K*pts1->col(i), K*pts2->col(i),
				K.t().inv() * tmpE * K.inv()));
			if (dist < threshDist)
				curInlierSet.push_back(i);
		}
		if(curInlierSet.size() > maxInlierSet.size())  {
			maxInlierSet = curInlierSet;
			bestE = tmpE;
		} else
			break;
	}

	cv::Mat inliers1(3,maxInlierSet.size(),CV_64F),
		inliers2(3,maxInlierSet.size(),CV_64F);
	for (int i=0; i<maxInlierSet.size(); ++i) {
		pts1->col(maxInlierSet[i]).copyTo(inliers1.col(i));
		pts2->col(maxInlierSet[i]).copyTo(inliers2.col(i));
	}	
	//	opt_essn_pts (inliers1, inliers2, &bestE);
	inlierMask->create(N,1,CV_8U);
	inlierMask->setTo(0); // zero the mask before marking inliers
	for (int i=0; i<maxInlierSet.size(); ++i)
		inlierMask->at<uchar>(maxInlierSet[i]) = 1; 
	*E = bestE;

	return 1;
}
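fund_samperr is not shown in this snippet, but since the code takes its square root as a pixel distance, it presumably returns the squared Sampson error under the fundamental matrix F = K^-T * E * K^-1 built above. A standard sketch of that quantity for homogeneous column vectors x1, x2 (an assumption about the helper, not its actual source):

#include <opencv2/core.hpp>

// Squared Sampson distance of the correspondence (x1, x2) under F (3x1 CV_64F vectors).
static double sampsonErr(const cv::Mat& x1, const cv::Mat& x2, const cv::Mat& F)
{
    cv::Mat Fx1  = F * x1;       // epipolar line of x1 in the second image
    cv::Mat Ftx2 = F.t() * x2;   // epipolar line of x2 in the first image
    double x2tFx1 = x2.dot(Fx1); // epipolar constraint residual
    double denom = Fx1.at<double>(0)*Fx1.at<double>(0)
                 + Fx1.at<double>(1)*Fx1.at<double>(1)
                 + Ftx2.at<double>(0)*Ftx2.at<double>(0)
                 + Ftx2.at<double>(1)*Ftx2.at<double>(1);
    return x2tFx1 * x2tFx1 / denom;
}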
Code Example #28
File: lineslam.cpp Project: caomw/LineSLAM
void Frame::extractLineDepth()
	// extract the 3d info of an frame line if availabe from the depth image
	// input: depth, lines
	// output: lines with 3d info
{
	double depth_scaling = 1;
	int n_3dln = 0;
    #pragma omp parallel for reduction(+:n_3dln) // reduction avoids a data race on the shared counter
	for(int i=0; i<lines.size(); ++i)	{ // each line
		double len = cv::norm(lines[i].p - lines[i].q);		
		vector<cv::Point3d> pts3d;
		// iterate through a line
		double numSmp = (double) min((int)len, 100); // number of line points sampled
		pts3d.reserve(numSmp);
		for(int j=0; j<=numSmp; ++j) {
			// use nearest neighbor to query depth value
			// assuming position (0,0) is the top-left corner of image, then the
			// top-left pixel's center would be (0.5,0.5)
			cv::Point2d pt = lines[i].p * (1-j/numSmp) + lines[i].q * (j/numSmp);
			if(pt.x<0 || pt.y<0 || pt.x >= depth.cols || pt.y >= depth.rows ) continue;
			int row, col; // nearest pixel for pt
			if((floor(pt.x) == pt.x) && (floor(pt.y) == pt.y)) {// boundary issue
				col = max(int(pt.x-1),0);
				row = max(int(pt.y-1),0);
			} else {
				col = int(pt.x);
				row = int(pt.y);
			}
			double zval = -1;
			if(depth.at<double>(row,col) >= EPS) { // depth info available
				zval = depth.at<double>(row,col)/depth_scaling; // z-value in meters
			}

			// keep points with valid depth
			if (zval > 0 ) {
				cv::Point2d xy3d = mat2cvpt(K.inv()*cvpt2mat(pt))*zval;	
				pts3d.push_back(cv::Point3d(xy3d.x, xy3d.y, zval));
			}
		}
		if (pts3d.size() < max(10.0,len*sysPara.ratio_of_collinear_pts))
			continue;

		RandomLine3d tmpLine;		

		vector<RandomPoint3d> rndpts3d;
		rndpts3d.reserve(pts3d.size());
		// compute uncertainty of 3d points
		for(int j=0; j<pts3d.size();++j) {
			rndpts3d.push_back(compPt3dCov(pts3d[j], K, 0));
		}
		// using ransac to extract a 3d line from 3d pts
		tmpLine = extract3dline_mahdist(rndpts3d);
	

		if(tmpLine.pts.size()/len > sysPara.ratio_of_collinear_pts	&&
			cv::norm(tmpLine.A - tmpLine.B) > sysPara.line3d_length_thresh) {
				MLEstimateLine3d (tmpLine, 100);
				vector<RandomPoint3d>().swap(tmpLine.pts);
				lines[i].haveDepth = true;
				lines[i].line3d = tmpLine;
				n_3dln++;
		}
	}	
 
}
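The 3D recovery inside the sampling loop is the standard pinhole back-projection X = z * K^-1 * (u, v, 1)^T; mat2cvpt and cvpt2mat are the project's converters between cv::Point and homogeneous cv::Mat. A standalone sketch of that step, assuming a 3x3 double-precision K:

#include <opencv2/core.hpp>

// Back-project pixel pt with depth z into camera coordinates.
static cv::Point3d backproject(const cv::Point2d& pt, double z, const cv::Mat& K)
{
	cv::Mat uv1 = (cv::Mat_<double>(3, 1) << pt.x, pt.y, 1.0);
	cv::Mat xyw = K.inv() * uv1; // normalized image coordinates (w stays 1)
	return cv::Point3d(xyw.at<double>(0) * z, xyw.at<double>(1) * z, z);
}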
Code Example #29
bool ImageTransformation::findHomography( const Keypoints& source, const Keypoints& result, const Matches& input, Matches& inliers, cv::Mat& homography)
{
    if (input.size() < 4)
        return false;
    
    const int pointsCount = input.size();
    const float reprojectionThreshold = 2;

    //Prepare src and dst points
    std::vector<cv::Point2f> srcPoints, dstPoints;    
    for (int i = 0; i < pointsCount; i++)
    {
        srcPoints.push_back(source[input[i].trainIdx].pt);
        dstPoints.push_back(result[input[i].queryIdx].pt);
    }
      
    // Find homography using RANSAC algorithm
    std::vector<unsigned char> status;
    homography = cv::findHomography(srcPoints, dstPoints, cv::RANSAC, reprojectionThreshold, status);
    
    // Warp dstPoints to srcPoints domain using inverted homography transformation
    std::vector<cv::Point2f> srcReprojected;
    cv::perspectiveTransform(dstPoints, srcReprojected, homography.inv());

    // Pass only matches with low reprojection error (less than reprojectionThreshold value in pixels)
    inliers.clear();
    for (int i = 0; i < pointsCount; i++)
    {
        cv::Point2f actual = srcPoints[i];
        cv::Point2f expect = srcReprojected[i];
        cv::Point2f v = actual - expect;
        float distanceSquared = v.dot(v);
        
        if (/*status[i] && */distanceSquared <= reprojectionThreshold * reprojectionThreshold)
        {
            inliers.push_back(input[i]);
        }
    }
    
    // Test for bad case
    if (inliers.size() < 4)
        return false;
    
    // Now use only good points to find refined homography:
    std::vector<cv::Point2f> refinedSrc, refinedDst;
    for (int i = 0; i < inliers.size(); i++)
    {
        refinedSrc.push_back(source[inliers[i].trainIdx].pt);
        refinedDst.push_back(result[inliers[i].queryIdx].pt);
    }
    
    // Use least squares method to find precise homography
    cv::Mat homography2 = cv::findHomography(refinedSrc, refinedDst, 0, reprojectionThreshold);

    // Reproject again:
    cv::perspectiveTransform(dstPoints, srcReprojected, homography2.inv());
    inliers.clear();

    for (int i = 0; i < pointsCount; i++)
    {
        cv::Point2f actual = srcPoints[i];
        cv::Point2f expect = srcReprojected[i];
        cv::Point2f v = actual - expect;
        float distanceSquared = v.dot(v);

        if (distanceSquared <= reprojectionThreshold * reprojectionThreshold)
        {
            inliers.push_back(input[i]);
        }
    }
 
    homography = homography2;
    return inliers.size() >= 4;
}
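A hypothetical usage sketch, assuming Keypoints and Matches alias std::vector<cv::KeyPoint> and std::vector<cv::DMatch> as the indexing above suggests (trainIdx indexes the source keypoints, queryIdx the result keypoints, which fixes the matcher's argument order):

#include <iostream>
#include <opencv2/features2d.hpp>

void matchAndEstimate(const cv::Mat& img1, const cv::Mat& img2)
{
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat d1, d2;
    orb->detectAndCompute(img1, cv::noArray(), kp1, d1);
    orb->detectAndCompute(img2, cv::noArray(), kp2, d2);

    std::vector<cv::DMatch> matches, inliers;
    cv::BFMatcher(cv::NORM_HAMMING, true).match(d2, d1, matches); // query = result, train = source

    cv::Mat H;
    if (ImageTransformation::findHomography(kp1, kp2, matches, inliers, H))
        std::cout << "inliers: " << inliers.size() << "\nH:\n" << H << std::endl;
}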
Code Example #30
File: Homo1.cpp Project: XavierCao/OpenCVtest
bool ParallaxErrorAnalysis_colorDiffPixels(const cv::Mat& img1,const shape& img1_shp, const cv::Mat& img2, const shape& img2_shp, const cv::Mat& img2_original,
        const cv::Mat& H, const cv::Size2i& img_size ,double& res_error)
{
    vector<cv::Point2f> overlap_points;


    for(int r = 0 ; r < img_size.height; r++)
    {
        for(int c = 0; c < img_size.width; c++)
        {
            shape t_img1_shape = img1_shp;
            shape t_img2_shape = img2_shp;
            cv::Point2f t_point(c, r);
            if(t_img1_shape.isInShape(c, r) && t_img2_shape.isInShape(c, r) )
            {
                // record the points that lie in both shapes (the overlap region)
                overlap_points.push_back(t_point);
            }
        }
    }

    // find the corresponding points of the overlap points in img2_original
    cv::Mat Hn = H.inv();
    vector<cv::Point2f> correPoints_ori(overlap_points.size());
    cv::perspectiveTransform(overlap_points, correPoints_ori, Hn); // both arguments must be vector<cv::Point2f> (float, not int)

    // check whether any corresponding points fall outside img2_original
    /*ofstream Dout("2.txt",ios::out);
    for(int i = 0; i < correPoints_ori.size(); i++)
    {
    	if(correPoints_ori[i].x < 0 || correPoints_ori[i].x > img2_original.cols
    		|| correPoints_ori[i].y < 0 || correPoints_ori[i].y > img2_original.rows)
    	{

    		Dout<< correPoints_ori[i].x << " "<<correPoints_ori[i].y << endl;
    	}
    }
    Dout.clear();
    Dout.close();*/

    // compute the error
    res_error = 0;
    cv::Mat img_blend;
    vector<cv::Mat> imgs;
    vector<shape> shapes;
    imgs.push_back(img1);
    imgs.push_back(img2);
    shapes.push_back(img1_shp);
    shapes.push_back(img2_shp);
    blending_all(imgs, shapes, img_size, img_blend);

    double res = 0;
    double Be = 0, Ge = 0, Re = 0;


    for(int i = 0; i < overlap_points.size(); i++)
    {
        Be += abs( img_blend.at<Vec3b>(overlap_points[i])[0] - img2_original.at<Vec3b>(correPoints_ori[i])[0]);
        Ge += abs( img_blend.at<Vec3b>(overlap_points[i])[1] - img2_original.at<Vec3b>(correPoints_ori[i])[1]);
        Re += abs( img_blend.at<Vec3b>(overlap_points[i])[2] - img2_original.at<Vec3b>(correPoints_ori[i])[2]);
    }

    res = Be + Ge + Re;
    res_error = res;


    return true;
}
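One caveat with the accumulation above: the raw sum grows with the overlap area, so scores from image pairs with different overlaps are not directly comparable. A hedged variant that normalizes by the number of overlap pixels (names are illustrative):

#include <cstdlib>
#include <vector>
#include <opencv2/core.hpp>

// Mean absolute BGR difference over precomputed overlap/corresponding point lists.
double meanColorDiff(const cv::Mat& blend, const cv::Mat& original,
                     const std::vector<cv::Point2f>& overlap,
                     const std::vector<cv::Point2f>& corresp)
{
    double sum = 0;
    for (size_t i = 0; i < overlap.size(); i++)
    {
        cv::Vec3i a = blend.at<cv::Vec3b>(cv::Point(overlap[i]));    // Vec3i avoids
        cv::Vec3i b = original.at<cv::Vec3b>(cv::Point(corresp[i])); // unsigned wrap-around
        sum += std::abs(a[0] - b[0]) + std::abs(a[1] - b[1]) + std::abs(a[2] - b[2]);
    }
    return overlap.empty() ? 0.0 : sum / overlap.size();
}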