template <typename PointT> void PCL2P3d(const pcl::PointCloud<PointT> &cloud, vector<cv::Point3f> &p3ds){

	// Copy the XYZ coordinates of a PCL cloud into an OpenCV point vector.
	size_t s = cloud.size();
	p3ds.resize(s);
	for (size_t i = 0; i < s; ++i) {
		p3ds[i].x = cloud[i].x;
		p3ds[i].y = cloud[i].y;
		p3ds[i].z = cloud[i].z;
	}

}
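// Minimal usage sketch (not from the original source): builds a two-point cloud
// and converts it. Assumes the snippet's "vector" is std::vector and that
// <pcl/point_types.h> and <opencv2/core.hpp> are included.
void demoPCL2P3d()
{
	pcl::PointCloud<pcl::PointXYZ> cloud;
	cloud.push_back(pcl::PointXYZ(0.0f, 0.0f, 1.0f));
	cloud.push_back(pcl::PointXYZ(0.1f, 0.2f, 1.5f));

	vector<cv::Point3f> p3ds;
	PCL2P3d(cloud, p3ds);	// p3ds now mirrors the cloud's XYZ coordinates
}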
int removeOutliers(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_in){
	std::cout << "*** Removing outliers from cloud... ***" << std::endl;
	int numberPoints = static_cast<int>(cloud_in->size());
	// Statistical outlier removal: a point is dropped when its mean distance to
	// its 50 nearest neighbors is more than 1 stddev above the global mean.
	pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
	sor.setInputCloud(cloud_in);
	sor.setMeanK (50);
	sor.setStddevMulThresh(1.0);
	sor.filter(*cloud_in);
	int removed = numberPoints - static_cast<int>(cloud_in->size());
	std::cout << "*** removeOutliers complete ***\nTotal outliers removed: " << removed << std::endl;
	return removed;
}
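// Usage sketch ("scene.pcd" is a placeholder file name, not from the original
// source; assumes <pcl/io/pcd_io.h>): the cloud is filtered in place.
void demoRemoveOutliers()
{
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	if (pcl::io::loadPCDFile<pcl::PointXYZ>("scene.pcd", *cloud) == 0)
	{
		int removed = removeOutliers(cloud);
		std::cout << removed << " points were classified as outliers" << std::endl;
	}
}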
 CalibrateKinectCheckerboard()
   : nh_("~"), it_(nh_), calibrated(false)
 {
   // Load parameters from the server.
   nh_.param<std::string>("fixed_frame", fixed_frame, "/base_link");
   nh_.param<std::string>("camera_frame", camera_frame, "/camera_link");
   nh_.param<std::string>("target_frame", target_frame, "/calibration_pattern");
   nh_.param<std::string>("tip_frame", tip_frame, "/gripper_link");
   
   nh_.param<int>("checkerboard_width", checkerboard_width, 6);
   nh_.param<int>("checkerboard_height", checkerboard_height, 7);
   nh_.param<double>("checkerboard_grid", checkerboard_grid, 0.027);
   
   // Set pattern detector sizes
   pattern_detector_.setPattern(cv::Size(checkerboard_width, checkerboard_height), checkerboard_grid, CHESSBOARD);
   
   transform_.translation().setZero();
   transform_.matrix().topLeftCorner<3, 3>() = Quaternionf().setIdentity().toRotationMatrix();
   
   // Create subscriptions
   info_sub_ = nh_.subscribe("/camera/rgb/camera_info", 1, &CalibrateKinectCheckerboard::infoCallback, this);
   
   // Create publishers
   pub_ = it_.advertise("calibration_pattern_out", 1);
   detector_pub_ = nh_.advertise<pcl::PointCloud<pcl::PointXYZ> >("detector_cloud", 1);
   physical_pub_ = nh_.advertise<pcl::PointCloud<pcl::PointXYZ> >("physical_points_cloud", 1);
   
   // Create ideal points
   ideal_points_.push_back( pcl::PointXYZ(0, 0, 0) );
   ideal_points_.push_back( pcl::PointXYZ((checkerboard_width-1)*checkerboard_grid, 0, 0) );
   ideal_points_.push_back( pcl::PointXYZ(0, (checkerboard_height-1)*checkerboard_grid, 0) );
   ideal_points_.push_back( pcl::PointXYZ((checkerboard_width-1)*checkerboard_grid, (checkerboard_height-1)*checkerboard_grid, 0) );
   
   // Create proper gripper tip point
   nh_.param<double>("gripper_tip_x", gripper_tip.point.x, 0.0);
   nh_.param<double>("gripper_tip_y", gripper_tip.point.y, 0.0);
   nh_.param<double>("gripper_tip_z", gripper_tip.point.z, 0.0);
   gripper_tip.header.frame_id = tip_frame;
   
   ROS_INFO("[calibrate] Initialized.");
 }
/* Check whether the closest match to each target descriptor is also the previously matched source descriptor (a mutual nearest-neighbor test) */
void experiment_correspondences (pcl::PointCloud<pcl::PFHSignature125>::Ptr &source_descriptors,
                              pcl::PointCloud<pcl::PFHSignature125>::Ptr &target_descriptors,
                              vector<int> &correct){
  vector<int> corr1(source_descriptors->size ());
  vector<int> corr2(target_descriptors->size ());
  correct.resize(source_descriptors->size());
  // Use a KdTree to search for the nearest matches in feature space
  pcl::KdTreeFLANN<pcl::PFHSignature125> descriptor_kdtree;
  descriptor_kdtree.setInputCloud (target_descriptors);

  // Find the index of the best target match for each source keypoint, and store it in corr1
  const int k = 1;
  std::vector<int> k_indices (k);
  std::vector<float> k_squared_distances (k);
  for (size_t i = 0; i < source_descriptors->size (); ++i)
  {
    descriptor_kdtree.nearestKSearch (*source_descriptors, i, k, k_indices, k_squared_distances);
    corr1[i] = k_indices[0];
  }

  // Use a KdTree to search for the nearest matches in feature space
  pcl::KdTreeFLANN<pcl::PFHSignature125> descriptor_kdtree2;
  descriptor_kdtree2.setInputCloud (source_descriptors);

  // Find the index of the best source match for each target keypoint, and store it in corr2
  std::vector<int> k_indices2 (k);
  std::vector<float> k_squared_distances2 (k);
  for (size_t i = 0; i < target_descriptors->size (); ++i)
  {
    descriptor_kdtree2.nearestKSearch (*target_descriptors, i, k, k_indices2, k_squared_distances2);
    corr2[i] = k_indices2[0];
  }
  int count = 0;
  for(size_t i=0; i<source_descriptors->size(); i++){
    // Mutual check: source i matched target corr1[i]; that target must match back to i.
    if(corr2[corr1[i]] != static_cast<int>(i)){ count++; correct[i]=0;}
    else{
      correct[i]=1;
    }
  }
  cout<<"Not matched correspondences: "<<count<<endl;
}
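// Driver sketch: assumes source_pfh/target_pfh were computed elsewhere (e.g.
// with pcl::PFHEstimation); correct[i] == 1 marks a mutual nearest-neighbor match.
void demoMutualCheck(pcl::PointCloud<pcl::PFHSignature125>::Ptr source_pfh,
                     pcl::PointCloud<pcl::PFHSignature125>::Ptr target_pfh)
{
  vector<int> correct;
  experiment_correspondences(source_pfh, target_pfh, correct);
  int good = 0;
  for (size_t i = 0; i < correct.size(); ++i)
    good += correct[i];
  cout << "Mutually consistent matches: " << good << endl;
}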
void mpcl_compute_normals(pcl::PointCloud<pcl::PointXYZ> &cloud,
                          int ksearch,
                          pcl::PointCloud<pcl::Normal> &out)
{
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ> ());
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;

    ne.setSearchMethod (tree);
    ne.setInputCloud (cloud.makeShared());
    ne.setKSearch (ksearch);
    ne.compute (out);
}
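// Usage sketch: k = 25 neighbors per normal (value chosen for illustration).
void demo_mpcl_compute_normals(pcl::PointCloud<pcl::PointXYZ> &cloud)
{
    pcl::PointCloud<pcl::Normal> normals;
    mpcl_compute_normals(cloud, 25, normals);
    // normals[i] now holds the estimated surface normal (and curvature) of cloud[i]
}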
bool StereoSensorProcessor::cleanPointCloud(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointCloud)
{
  pcl::PointCloud<pcl::PointXYZRGB> tempPointCloud;

  originalWidth_ = pointCloud->width;
  pcl::removeNaNFromPointCloud(*pointCloud, tempPointCloud, indices_);
  tempPointCloud.is_dense = true;
  pointCloud->swap(tempPointCloud);

  ROS_DEBUG("ElevationMap: cleanPointCloud() reduced point cloud to %i points.", static_cast<int>(pointCloud->size()));
  return true;
}
    pcl::PointCloud<PointT>::Ptr removeWallsCloud(pcl::PointCloud<PointT>::Ptr cloud_seg)
    {
        pcl::PointCloud<PointT>::Ptr cloud_plane (new pcl::PointCloud<PointT>);
        pcl::ModelCoefficients::Ptr coeff (new pcl::ModelCoefficients);
        pcl::PointIndices::Ptr inliers (new pcl::PointIndices);

        pcl::PointCloud<pcl::Normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::Normal>);
        pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT> ());
        pcl::NormalEstimation<PointT, pcl::Normal> ne;

        pcl::SACSegmentationFromNormals<PointT, pcl::Normal> seg;
        pcl::ExtractIndices<PointT> extract;

        // Estimate point normals
        ne.setSearchMethod (tree);
        ne.setKSearch (50);

        seg.setOptimizeCoefficients (true);
        seg.setModelType (pcl::SACMODEL_NORMAL_PLANE);
        seg.setMethodType (pcl::SAC_RANSAC);
        seg.setDistanceThreshold (seg_distance);
        seg.setNormalDistanceWeight (normal_distance_weight);
        seg.setMaxIterations (1000);

        int i = 0, nr_points = (int) cloud_seg->points.size ();
        // Keep segmenting while more than seg_percentage of the original cloud remains (at most 10 iterations)
        while (cloud_seg->points.size () > seg_percentage * nr_points && i < 10 && cloud_seg->points.size() > 0)
        {
            //seg.setInputCloud (cloud);
            ne.setInputCloud (cloud_seg);
            ne.compute (*cloud_normals);
            //seg.setInputCloud (cloud);
            seg.setInputCloud (cloud_seg);
            seg.setInputNormals (cloud_normals);
            seg.segment (*inliers, *coeff);
            if (inliers->indices.size () == 0)
            {
                break;
            }
            if(inliers->indices.size() < nr_points/20 || inliers->indices.size() < 10){
                i++;
                continue;
            }
            // Remove the planar inliers from the input cloud, keeping the remaining points
            extract.setInputCloud (cloud_seg);
            extract.setIndices (inliers);
            extract.setNegative (true);
            extract.filter (*cloud_plane);
            cloud_seg.swap (cloud_plane);
            i++;
        }
        return cloud_seg;
    }
int icp_alignment(pcl::PointCloud<pcl::PointXYZ>::ConstPtr prev_cloud,
		pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud_in,
		pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_out) {
	std::cout << "Performing ICP alignment..." << std::endl;
	pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
	std::cout << "prev_cloud size = " << prev_cloud->size() << std::endl;
	icp.setInputSource(prev_cloud);
	std::cout << "cloud_in size = " << cloud_in->size() << std::endl;
	icp.setInputTarget(cloud_in);
	icp.align(*cloud_out);
	std::cout << "cloud_out size = " << cloud_out->size() << std::endl;
	if (icp.hasConverged()) {
		std::cout << "has converged:" << icp.hasConverged() << " score: "
				<< icp.getFitnessScore() << std::endl;
		std::cout << icp.getFinalTransformation() << std::endl;
		return 0;
	} else {
		PCL_ERROR("\nERROR: ICP has not converged. \n");
		return 1;
	}
}
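// Driver sketch (file names are placeholders): aligns two consecutive frames
// and saves the source cloud transformed into the target's frame.
int demoIcpAlignment()
{
	pcl::PointCloud<pcl::PointXYZ>::Ptr prev(new pcl::PointCloud<pcl::PointXYZ>);
	pcl::PointCloud<pcl::PointXYZ>::Ptr curr(new pcl::PointCloud<pcl::PointXYZ>);
	pcl::PointCloud<pcl::PointXYZ>::Ptr aligned(new pcl::PointCloud<pcl::PointXYZ>);
	if (pcl::io::loadPCDFile("frame_000.pcd", *prev) < 0 ||
	    pcl::io::loadPCDFile("frame_001.pcd", *curr) < 0)
		return 1;
	if (icp_alignment(prev, curr, aligned) == 0)
		pcl::io::savePCDFileBinary("frame_000_aligned.pcd", *aligned);
	return 0;
}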
int Conversion::convert(const Eigen::MatrixXf & matrix, pcl::PointCloud<pcl::PointXYZ>::Ptr & cloud){
    // To start from an empty cloud, call cloud->clear() here; note that
    // pcl::PointCloud::empty() is only a query and does not erase points.
    for (int i = 0; i < matrix.rows(); i++){
        pcl::PointXYZ basic_point;
        basic_point.x = matrix(i, 0);
        basic_point.y = matrix(i, 1);
        basic_point.z = matrix(i, 2);
        cloud->push_back(basic_point);
    }
    return 1;
}
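// Usage sketch: each row of the Eigen matrix is one XYZ point. The Conversion
// instance is assumed to exist; only the convert() call is from the snippet.
void demoMatrixToCloud(Conversion &conv)
{
    Eigen::MatrixXf m(2, 3);
    m << 0.0f, 0.0f, 1.0f,
         0.5f, 0.5f, 2.0f;
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    conv.convert(m, cloud);   // cloud now holds two points
}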
void CloudNoizeFiltration(pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud, pcl::PointCloud<pcl::PointXYZ>::Ptr &filtered_cloud){
	// Create the filtering object
	pcl::PointCloud<pcl::PointXYZ>::Ptr filtered_cloud_tmp(new pcl::PointCloud<pcl::PointXYZ>);
	pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
	sor.setInputCloud(cloud);

	sor.setMeanK(10);
	sor.setStddevMulThresh(1.0);
	sor.setNegative(false);
	sor.filter(*filtered_cloud_tmp);
	filtered_cloud.swap(filtered_cloud_tmp);
}
 void
 SegmenterLight::computeNormals (pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud_in,
                            pcl::PointCloud<pcl::Normal>::Ptr &normals_out)
 {
   normals_out.reset (new pcl::PointCloud<pcl::Normal>);
   surface::ZAdaptiveNormals::Parameter za_param;
   za_param.adaptive = true;
   surface::ZAdaptiveNormals nor (za_param);
   nor.setInputCloud (cloud_in);
   nor.compute ();
   nor.getNormals (normals_out);
 }
void RunVisualization(const vector<cv::Point3d>& pointcloud,
					  const std::vector<cv::Vec3b>& pointcloud_RGB,
					  const Mat& img_1_orig, 
					  const Mat& img_2_orig,
					  const vector<KeyPoint>& correspImg1Pt) {
	cloud.reset(new pcl::PointCloud<pcl::PointXYZRGB>);
	orig_cloud.reset(new pcl::PointCloud<pcl::PointXYZRGB>);
	//  pcl::io::loadPCDFile ("output.pcd", *cloud);
    
	PopulatePCLPointCloud(pointcloud,pointcloud_RGB,img_1_orig,img_2_orig,correspImg1Pt);
//	SORFilter();
	copyPointCloud(*cloud,*orig_cloud);
//	FindNormalsMLS();
//	FindFloorPlaneRANSAC();
	
	//	pcl::PointCloud<pcl::PointXYZRGB>::Ptr final (new pcl::PointCloud<pcl::PointXYZRGB>);
	//	pcl::copyPointCloud<pcl::PointXYZRGB>(*cloud, inliers, *final);
	
    pcl::visualization::CloudViewer viewer("Cloud Viewer");
    
    //blocks until the cloud is actually rendered
    viewer.showCloud(orig_cloud,"orig");
	//	viewer.showCloud(final);
    
    //use the following functions to get access to the underlying more advanced/powerful
    //PCLVisualizer
    
    //This will only get called once
    viewer.runOnVisualizationThreadOnce (viewerOneOff);
    
    //This will get called once per visualization iteration
	viewer.runOnVisualizationThread (viewerThread);
    while (!viewer.wasStopped ())
    {
		//you can also do cool processing here
		//FIXME: Note that this is running in a separate thread from viewerPsycho
		//and you should guard against race conditions yourself...
		//		user_data++;
    }
}	
  void
  FeatureFactory::extractSurfaceFeatures (pcl::PointCloud<pcl::Normal>::Ptr pc2_normals, pcl::PointCloud<
      feature_factory::FeaturePoint<SURFACE_FEATURES_HISTOGRAM> > &surface_zen_features, pcl::PointCloud<
      feature_factory::FeaturePoint<SURFACE_FEATURES_HISTOGRAM> > &surface_azi_features)
  {
    cv::Mat normals_zen = cv::Mat (RANGE_IMAGE_HEIGHT, RANGE_IMAGE_WIDTH, CV_32F);
    cv::Mat normals_azi = cv::Mat (RANGE_IMAGE_HEIGHT, RANGE_IMAGE_WIDTH, CV_32F);

    for (uint16_t ri = 0; ri < RANGE_IMAGE_HEIGHT; ri++)
      for (uint16_t ci = 0; ci < RANGE_IMAGE_WIDTH; ci++)
      {
        double n_x = pc2_normals->points[ri * RANGE_IMAGE_WIDTH + ci].normal_x;
        double n_y = pc2_normals->points[ri * RANGE_IMAGE_WIDTH + ci].normal_y;
        double n_z = pc2_normals->points[ri * RANGE_IMAGE_WIDTH + ci].normal_z;

        // zenith: atan2 with a non-negative first argument always returns a value in [0, PI]
        normals_zen.at<float> (ri, ci) = atan2 (sqrt (n_x * n_x + n_y * n_y), n_z);

        // azimuth: atan2 may return values in [-PI, PI]; negatives are shifted into [0, 2*PI) below
        normals_azi.at<float> (ri, ci) = atan2 (n_y, n_x);
        if (normals_azi.at<float> (ri, ci) < 0)
          normals_azi.at<float> (ri, ci) += 2 * PI;
      }

    const uint16_t n_grids_per_row = RANGE_IMAGE_WIDTH / grid_edge_size;
    const uint16_t n_grids_per_col = RANGE_IMAGE_HEIGHT / grid_edge_size;
    const uint16_t n_grids = n_grids_per_col * n_grids_per_row;

    surface_azi_features.resize (n_grids);
    surface_zen_features.resize (n_grids);
    for (uint16_t gri = 0; gri < n_grids_per_col; gri++)
      for (uint16_t gci = 0; gci < n_grids_per_row; gci++)
      {
        cv::Rect roi = cv::Rect (gci * grid_edge_size, gri * grid_edge_size, grid_edge_size, grid_edge_size);
        surface_azi_features[gri * n_grids_per_row + gci]
            = feature_factory::FeaturePoint<SURFACE_FEATURES_HISTOGRAM>::calculate (normals_azi, roi, 0, 2 * PI);
        surface_zen_features[gri * n_grids_per_row + gci]
            = feature_factory::FeaturePoint<SURFACE_FEATURES_HISTOGRAM>::calculate (normals_zen, roi, 0, PI);
      }
  }
void Node::crop_cloud(pcl::PointCloud<pcl::PointXYZ> &pcl_cloud, int laser, ros::Time time)
{
//CROP CLOUD
  pcl::PointCloud<pcl::PointXYZ>::iterator i;

  // Look up the laser pose once; it does not change between points.
  tf::StampedTransform transform;
  listener_.lookupTransform ("/world", "/laser1", time, transform);

  for (i = pcl_cloud.begin(); i != pcl_cloud.end();)
  {
    bool remove_point = false;

    // Discard points outside the valid height band.
    if (i->z < 0 || i->z > max_z)
    {
      remove_point = true;
    }

    // Discard points beyond the maximum radius from the origin.
    if (sqrt(pow(i->x,2) + pow(i->y,2)) > max_radius)
    {
      remove_point = true;
    }

    // For laser 1, discard points more than 0.25 m from the laser itself.
    if (laser == 1 && sqrt(pow(transform.getOrigin().x() - i->x,2) + pow(transform.getOrigin().y() - i->y,2)) > 0.25)
    {
      remove_point = true;
    }

    if (remove_point)
    {
      i = pcl_cloud.erase(i);
    }
    else
    {
      i++;
    }
  }
//END CROP CLOUD
}
	/* ------------------------------------------------------------------------------------------ */
	void makeImage () {

		// Compute the maximum depth
		double maxDepth = -1, minDepth = 10000000;
		for (int h=0; h<point_cloud_ptr->height; h++) {
			for (int w=0; w<point_cloud_ptr->width; w++) {
				pcl::PointXYZRGBA point = point_cloud_ptr->at(w, h);
				if(point.z > maxDepth) maxDepth = point.z;
				if(point.z < minDepth) minDepth = point.z;
			}
		}

		// Create the image
		im = cv::Mat(point_cloud_ptr->height, 2*point_cloud_ptr->width, CV_8UC3);
		if (point_cloud_ptr->empty()) return;

		for (int h=0; h<im.rows; h++) {
			for (int w=0; w<point_cloud_ptr->width; w++) {
				pcl::PointXYZRGBA point = point_cloud_ptr->at(w, h);
				Eigen::Vector3i rgb = point.getRGBVector3i();
				im.at<cv::Vec3b>(h,w)[0] = rgb[2];
				im.at<cv::Vec3b>(h,w)[1] = rgb[1];
				im.at<cv::Vec3b>(h,w)[2] = rgb[0];
			}
			for (int w=0; w<point_cloud_ptr->width; w++) {
				pcl::PointXYZRGBA point = point_cloud_ptr->at(w, h);
				int val = (int) (255 * ((point.z-minDepth)/(maxDepth-minDepth)));
				im.at<cv::Vec3b>(h,w+point_cloud_ptr->width)[0] = val;
				im.at<cv::Vec3b>(h,w+point_cloud_ptr->width)[1] = val;
				im.at<cv::Vec3b>(h,w+point_cloud_ptr->width)[2] = val;
			}

		}

		// Convert the image to hsv for processing later
		cv::cvtColor(im, im_hsv, cv::COLOR_BGR2HSV);

		cv::imshow("image", im);
		cv::waitKey(1);
	}
void AdjustCloudNormal(pcl::PointCloud<PointT>::Ptr cloud, pcl::PointCloud<NormalT>::Ptr cloud_normals)
{
    pcl::PointCloud<myPointXYZ>::Ptr center(new pcl::PointCloud<myPointXYZ>());
    ComputeCentroid(cloud, center);
    
    myPointXYZ origin = center->at(0);
    std::cerr << origin.x << " " << origin.y << " " << origin.z << std::endl;
    pcl::transformPointCloud(*cloud, *cloud, Eigen::Vector3f(-origin.x, -origin.y, -origin.z), Eigen::Quaternion<float> (1,0,0,0));
    
    int num = cloud->points.size();
    float diffx, diffy, diffz, dist, theta;
    for( int j = 0; j < num ; j++ )
    {
        PointT temp = cloud->at(j);
        NormalT temp_normals = cloud_normals->at(j);
        diffx = temp.x;
        diffy = temp.y;
        diffz = temp.z;
        dist = sqrt( diffx*diffx + diffy*diffy + diffz*diffz );
		
        // Angle between the centered point vector and its normal; if it exceeds
        // PI/2 the normal points toward the centroid, so flip it below.
        theta = acos( (diffx*temp_normals.normal_x + diffy*temp_normals.normal_y + diffz*temp_normals.normal_z)/dist );
        if( theta > PI/2)
        {
            cloud_normals->at(j).normal_x = -cloud_normals->at(j).normal_x;
            cloud_normals->at(j).normal_y = -cloud_normals->at(j).normal_y;
            cloud_normals->at(j).normal_z = -cloud_normals->at(j).normal_z;
        }
    }
}
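// Side note (sketch): since dist > 0, the acos/threshold test above is
// equivalent to a sign check on the dot product, which avoids acos entirely.
// PointT/NormalT are the same typedefs the snippet above relies on.
inline bool normalPointsInward(const PointT &p, const NormalT &n)
{
    return (p.x * n.normal_x + p.y * n.normal_y + p.z * n.normal_z) < 0;
}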
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::sobelMagnitudeDirection (
    const pcl::PointCloud<PointInT> &input_x, 
    const pcl::PointCloud<PointInT> &input_y,
    pcl::PointCloud<PointOutT> &output)
{
  convolution_.setInputCloud (input_x.makeShared());
  pcl::PointCloud<pcl::PointXYZI>::Ptr kernel_x (new pcl::PointCloud<pcl::PointXYZI>);
  pcl::PointCloud<pcl::PointXYZI>::Ptr magnitude_x (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::SOBEL_X);
  kernel_.fetchKernel (*kernel_x);
  convolution_.setKernel (*kernel_x);
  convolution_.filter (*magnitude_x);

  convolution_.setInputCloud (input_y.makeShared());
  pcl::PointCloud<pcl::PointXYZI>::Ptr kernel_y (new pcl::PointCloud<pcl::PointXYZI>);
  pcl::PointCloud<pcl::PointXYZI>::Ptr magnitude_y (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::SOBEL_Y);
  kernel_.fetchKernel (*kernel_y);
  convolution_.setKernel (*kernel_y);
  convolution_.filter (*magnitude_y);

  const int height = input_x.height;
  const int width = input_x.width;

  output.resize (height * width);
  output.height = height;
  output.width = width;

  for (size_t i = 0; i < output.size (); ++i)
  {
    output[i].magnitude_x = (*magnitude_x)[i].intensity;
    output[i].magnitude_y = (*magnitude_y)[i].intensity;
    output[i].magnitude = 
      sqrtf ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity + 
             (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
    output[i].direction = 
      atan2f ((*magnitude_y)[i].intensity, (*magnitude_x)[i].intensity);
  }
}
	void DynModelExporter2::createMarkerForConvexHull(pcl::PointCloud<pcl::PointXYZ>& plane_cloud, pcl::ModelCoefficients::Ptr& plane_coefficients, visualization_msgs::Marker& marker)
	{
		// init marker
	    marker.type = visualization_msgs::Marker::TRIANGLE_LIST;
	    marker.action = visualization_msgs::Marker::ADD;

	    // project the points of the plane on the plane
	    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_projected (new pcl::PointCloud<pcl::PointXYZ> ());
	    pcl::ProjectInliers<pcl::PointXYZ> proj;
	    proj.setModelType (pcl::SACMODEL_PLANE);
	    proj.setInputCloud (plane_cloud.makeShared());
	    proj.setModelCoefficients (plane_coefficients);
	    proj.filter(*cloud_projected);

	    // create the convex hull in the plane
	    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_hull (new pcl::PointCloud<pcl::PointXYZ> ());
	    pcl::ConvexHull<pcl::PointXYZ > chull;
	    chull.setInputCloud (cloud_projected);
	    chull.reconstruct(*cloud_hull);

	    // work around known bug in ROS Diamondback perception_pcl: convex hull is centered around centroid of input cloud (fixed in pcl svn revision 443)
	    // thus: we shift the mean of cloud_hull to the mean of cloud_projected (fill dx, dy, dz and apply when creating the marker points)
	    Eigen::Vector4f meanPointCH, meanPointCP;
	    pcl::compute3DCentroid(*cloud_projected, meanPointCP);
	    pcl::compute3DCentroid(*cloud_hull, meanPointCH);
	    //float dx = 0;//meanPointCP[0]-meanPointCH[0];
	    //float dy = 0;//meanPointCP[1]-meanPointCH[1];
	    //float dz = 0;//meanPointCP[2]-meanPointCH[2];

	    // create the colored part of the plane: one triangle between each pair of neighboring points on the convex hull contour and the midpoint
	    marker.points.clear();
	    for (unsigned int j = 0; j < cloud_hull->points.size(); ++j)
	    {
	    	geometry_msgs::Point p;

	        p.x = cloud_hull->points[j].x; p.y = cloud_hull->points[j].y; p.z = cloud_hull->points[j].z;
	        marker.points.push_back( p );

	        p.x = cloud_hull->points[(j+1)%cloud_hull->points.size() ].x; p.y = cloud_hull->points[(j+1)%cloud_hull->points.size()].y; p.z = cloud_hull->points[(j+1)%cloud_hull->points.size()].z;
	        marker.points.push_back( p );

	        p.x = meanPointCP[0]; p.y = meanPointCP[1]; p.z = meanPointCP[2];
	        marker.points.push_back( p );

	    }

	    // scale of the marker
	    marker.scale.x = 1;
	    marker.scale.y = 1;
	    marker.scale.z = 1;

	}
  bool calibrate(const std::string frame_id)
  {
    physical_points_.clear();  // clear() resets the cloud; empty() is only a query
    physical_points_.header.frame_id = fixed_frame;
    cout << "Is the checkerboard correct? " << endl;
    cout << "Move edge of gripper to point 1 in image and press Enter. " << endl;
    cin.ignore();
    addPhysicalPoint();
    cout << "Move edge of gripper to point 2 in image and press Enter. " << endl;
    cin.ignore();
    addPhysicalPoint();
    cout << "Move edge of gripper to point 3 in image and press Enter. " << endl;
    cin.ignore();
    addPhysicalPoint();
    cout << "Move edge of gripper to point 4 in image and press Enter. " << endl;
    cin.ignore();
    addPhysicalPoint();
    
    Eigen::Matrix4f t;
    
    physical_pub_.publish(physical_points_);
    
    pcl::registration::TransformationEstimationSVD<pcl::PointXYZ, pcl::PointXYZ> svd_estimator;
    svd_estimator.estimateRigidTransformation( physical_points_, image_points_, t );

    // Output       
    tf::Transform transform = tfFromEigen(t), trans_full, camera_transform_unstamped;
    tf::StampedTransform camera_transform;
  
    cout << "Resulting transform (camera frame -> fixed frame): " << endl << t << endl << endl;
    
    try
    {
      tf_listener_.lookupTransform(frame_id, camera_frame, ros::Time(0), camera_transform);
    }
    catch (tf::TransformException& ex)
    {
      ROS_WARN("[calibrate] TF exception:\n%s", ex.what());
      return false;
    }

    camera_transform_unstamped = camera_transform;
    trans_full = camera_transform_unstamped.inverse()*transform;
    
    Eigen::Matrix4f t_full = EigenFromTF(trans_full);
    Eigen::Matrix4f t_full_inv = (Eigen::Transform<float,3,Affine>(t_full).inverse()).matrix();
    
    cout << "Resulting transform (fixed frame -> camera frame): " << endl << t_full << endl << endl;
    printStaticTransform(t_full_inv, fixed_frame, camera_frame);

    return true;
  }
template <typename PointT> inline unsigned int
pcl::computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
                              Eigen::Matrix3d &covariance_matrix)
{
  // create the buffer on the stack which is much faster than using cloud.points[indices[i]] and centroid as a buffer
  Eigen::Matrix<double, 1, 6, Eigen::RowMajor> accu = Eigen::Matrix<double, 1, 6, Eigen::RowMajor>::Zero ();

  unsigned int point_count;
  if (cloud.is_dense)
  {
    point_count = cloud.size ();
    // For each point in the cloud
    for (size_t i = 0; i < point_count; ++i)
    {
      accu [0] += cloud[i].x * cloud[i].x;
      accu [1] += cloud[i].x * cloud[i].y;
      accu [2] += cloud[i].x * cloud[i].z;
      accu [3] += cloud[i].y * cloud[i].y;
      accu [4] += cloud[i].y * cloud[i].z;
      accu [5] += cloud[i].z * cloud[i].z;
    }
  }
  else
  {
    point_count = 0;
    for (size_t i = 0; i < cloud.points.size (); ++i)
    {
      if (!isFinite (cloud[i]))
        continue;

      accu [0] += cloud[i].x * cloud[i].x;
      accu [1] += cloud[i].x * cloud[i].y;
      accu [2] += cloud[i].x * cloud[i].z;
      accu [3] += cloud[i].y * cloud[i].y;
      accu [4] += cloud[i].y * cloud[i].z;
      accu [5] += cloud[i].z * cloud[i].z;
      ++point_count;
    }
  }

  if (point_count != 0)
  {
    accu /= static_cast<double> (point_count);
    covariance_matrix.coeffRef (0) = accu [0];
    covariance_matrix.coeffRef (1) = covariance_matrix.coeffRef (3) = accu [1];
    covariance_matrix.coeffRef (2) = covariance_matrix.coeffRef (6) = accu [2];
    covariance_matrix.coeffRef (4) = accu [3];
    covariance_matrix.coeffRef (5) = covariance_matrix.coeffRef (7) = accu [4];
    covariance_matrix.coeffRef (8) = accu [5];
  }
  return (point_count);
}
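// Usage sketch (assumes <iostream>): note that this overload accumulates second
// moments about the origin, so demean the cloud first (e.g. with
// pcl::demeanPointCloud) if the covariance about the centroid is wanted.
void demoCovariance(const pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  Eigen::Matrix3d covariance;
  unsigned int n = pcl::computeCovarianceMatrix(cloud, covariance);
  if (n != 0)
    std::cout << "covariance from " << n << " finite points:\n" << covariance << std::endl;
}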
Eigen::Matrix4f Recognizer::aligner(pcl::CorrespondencesPtr corrs, pcl::PointCloud<PointType>::Ptr source_keypoints, pcl::PointCloud<PointType>::Ptr target_keypoints, float cg_thresh) {
    pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
    std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
    std::vector<pcl::Correspondences> clustered_corrs;
    Eigen::Matrix4f best_transform = Eigen::Matrix4f::Identity();
    float f_min = std::numeric_limits<float>::max();

    for(float cg_size = 0.01f; cg_size < 0.03; cg_size += 0.005f) {
        gc_clusterer.setGCSize(cg_size);
        gc_clusterer.setGCThreshold(cg_thresh);
        gc_clusterer.setInputCloud(source_keypoints);
        gc_clusterer.setSceneCloud(target_keypoints);
        gc_clusterer.setModelSceneCorrespondences(corrs);
        gc_clusterer.recognize(rototranslations, clustered_corrs);

        for(size_t i = 0; i < rototranslations.size(); ++i) {
            pcl::PointCloud<PointType>::Ptr rotated_source(new pcl::PointCloud<PointType>()), registered_source(new pcl::PointCloud<PointType>());
            pcl::transformPointCloud(*source_keypoints, *rotated_source, rototranslations[i]);

            pcl::IterativeClosestPoint<PointType,PointType> icp;
            icp.setInputSource(rotated_source);
            icp.setInputTarget(target_keypoints);
            icp.setTransformationEpsilon(1e-10);
            icp.setMaxCorrespondenceDistance(0.05);
            icp.setMaximumIterations(100);

            icp.setEuclideanFitnessEpsilon(0.05);
            icp.align(*registered_source);

            pcl::PointCloud<PointType>::Ptr scoring_biggest, scoring_smallest;
            //Select the biggest cloud
            if(target_keypoints->size() < registered_source->size()) {
                scoring_biggest = registered_source;
                scoring_smallest = target_keypoints;
            }
            else {
                scoring_biggest = target_keypoints;
                scoring_smallest = registered_source;
            }

            float score = this->getFitnessScore(scoring_smallest, scoring_biggest);

            if(score < f_min) {
                f_min = score;
                best_transform = icp.getFinalTransformation()*rototranslations[i];
            }
        }
    }

    this->last_score = f_min;
    return best_transform;
}
//////////////////////////////////////////////////////////////////////////////
// Assumes input, kernel and output images have 0's and 1's only
template<typename PointT> void
pcl::Morphology<PointT>::erosionBinary (pcl::PointCloud<PointT> &output)
{
  const int height = input_->height;
  const int width = input_->width;
  const int kernel_height = structuring_element_->height;
  const int kernel_width = structuring_element_->width;
  bool mismatch_flag;

  output.width = width;
  output.height = height;
  output.resize (width * height);

  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      // Operation done only at 1's
      if ((*input_)(j, i).intensity == 0)
      {
        output (j, i).intensity = 0;
        continue;
      }
      mismatch_flag = false;
      for (int k = 0; k < kernel_height; k++)
      {
        if (mismatch_flag)
          break;
        for (int l = 0; l < kernel_width; l++)
        {
          // We only check for 1's in the kernel
          if ((*structuring_element_)(l, k).intensity == 0)
            continue;
          if ((i + k - kernel_height / 2) < 0 || (i + k - kernel_height / 2) >= height || (j + l - kernel_width / 2) < 0 || (j + l - kernel_width / 2) >= width)
          {
            continue;
          }
          // If any kernel element fails to match the image,
          // the output pixel is 0, so move on to the next point.
          if ((*input_)(j + l - kernel_width / 2, i + k - kernel_height / 2).intensity != 1)
          {
            output (j, i).intensity = 0;
            mismatch_flag = true;
            break;
          }
        }
      }
      // Assign value according to mismatch flag
      output (j, i).intensity = (mismatch_flag) ? 0 : 1;
    }
  }
}
pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr CloudFactory::createColorCloud(const pcl::PointCloud<pcl::PointNormal>::Ptr &cloud_, uint8_t r_, uint8_t g_, uint8_t b_)
{
	pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr coloredCloud(new pcl::PointCloud<pcl::PointXYZRGBNormal>());
	coloredCloud->reserve(cloud_->size());

	for (size_t i = 0; i < cloud_->size(); i++)  // size(), not width, so organized clouds keep every row
	{
		pcl::PointNormal p = cloud_->points[i];
		coloredCloud->push_back(PointFactory::createPointXYZRGBNormal(p.x, p.y, p.z, p.normal_x, p.normal_y, p.normal_z, p.curvature, r_, g_, b_));
	}

	return coloredCloud;
}
void 
pcl::StereoMatching::getVisualMap (pcl::PointCloud<pcl::RGB>::Ptr vMap)
{

  if ( vMap->width != width_ || vMap->height != height_)
  {
    vMap->resize(width_*height_);
    vMap->width = width_;
    vMap->height = height_;
  }

  if ( vMap->is_dense)
    vMap->is_dense = false;

  pcl::RGB invalid_val;
  invalid_val.r = 0;
  invalid_val.g = 255;
  invalid_val.b = 0;

  float scale = 255.0f / (16.0f * static_cast<float> (max_disp_));

  for (int y = 0; y<height_; y++)
  {
    for (int x = 0; x<width_; x++)
    {
      if (disp_map_[y * width_+ x] <= 0)
      {
        vMap->at (x,y) = invalid_val;
      }
      else
      {
        unsigned char val = static_cast<unsigned char> (floor (scale*disp_map_[y * width_+ x]));
        vMap->at (x, y).r = val;
        vMap->at (x, y).g = val;
        vMap->at (x, y).b = val;
      }
    }
  }
}
pcl::PointCloud<PointT>::Ptr FilterBoundary(const pcl::PointCloud<PointT>::Ptr cloud)
{
    float threshold = 0.04;
    int BLen = 5;
    
    int w = cloud->width;
    int h = cloud->height;
    int num = cloud->size();
    cv::Mat depthmap = cv::Mat::ones(h+2*BLen, w+2*BLen, CV_32FC1)*-1000;
    
    for(int i = 0 ; i < num; i++ )
    {
        if( pcl_isfinite(cloud->at(i).z) == true )
        {
            int r_idx = i / w + BLen;
            int c_idx = i % w + BLen;
            depthmap.at<float>(r_idx, c_idx) = cloud->at(i).z;
        }
    }
    
    pcl::PointCloud<PointT>::Ptr cloud_f(new pcl::PointCloud<PointT>());
    for(int i=0 ; i<num; i++ ){
        
        int row = i / w + BLen;
        int col = i % w + BLen;
        if( pcl_isfinite(cloud->at(i).z) == true )
        {
        
            float zCur = depthmap.at<float>(row, col);

            if( fabs(depthmap.at<float>(row-BLen, col)-zCur) > threshold || fabs(depthmap.at<float>(row+BLen, col)-zCur) > threshold
                || fabs(zCur-depthmap.at<float>(row, col-BLen)) > threshold || fabs(zCur-depthmap.at<float>(row, col+BLen)) > threshold)
                        ;//Boundary->push_back(cloud_rgb->points[i]);
            else
                cloud_f->push_back(cloud->at(i));
        }
    }
    return cloud_f;
}
void saveObj(std::string outfile, pcl::PolygonMesh& mesh, pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointColors) {
  FILE *f = fopen(outfile.c_str(), "w");
  if (!f) return;  // bail out if the file cannot be opened

  for (size_t v = 0; v < pointColors->size(); v++) {
    pcl::PointXYZRGB p = (*pointColors)[v];
    fprintf(f, "v %f %f %f %f %f %f\n", p.x, p.y, p.z, p.r/255., p.g/255., p.b/255.);
  }
  // OBJ vertex indices are 1-based, hence the +1
  for (size_t t = 0; t < mesh.polygons.size(); t++) {
    pcl::Vertices triangle = mesh.polygons[t];
    fprintf(f, "f %u %u %u\n", triangle.vertices[0]+1, triangle.vertices[1]+1, triangle.vertices[2]+1);
  }
  fclose(f);
}
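// Usage sketch (PCL >= 1.7, assumes <pcl/conversions.h>): pulls the vertex
// colors back out of the mesh's own cloud; assumes that cloud carries RGB fields.
void demoSaveObj(pcl::PolygonMesh &mesh)
{
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr colors(new pcl::PointCloud<pcl::PointXYZRGB>);
  pcl::fromPCLPointCloud2(mesh.cloud, *colors);
  saveObj("mesh_out.obj", mesh, colors);
}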
// Find an axis-aligned bounding box around the object to grasp (pc)
// Outputs the min/max extents of the bounding box
// Inspired from https://github.com/unboundedrobotics/ubr1_preview/blob/master/ubr1_grasping/src/shape_extraction.cpp
BoundingBox findBoundingBox(const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr& pc){

    struct BoundingBox BB;

    // Sentinel extremes; any real point will tighten these bounds.
    double x_min =  9999.0;
    double x_max = -9999.0;
    double y_min =  9999.0;
    double y_max = -9999.0;
    double z_min =  9999.0;
    double z_max = -9999.0;
    for(int i = 0; i < pc->size(); i++){
        double pc_x = pc->at(i).x;
        double pc_y = pc->at(i).y;
        double pc_z = pc->at(i).z;

        if(pc_x < x_min) x_min = pc_x;
        if(pc_y < y_min) y_min = pc_y;
        if(pc_z < z_min) z_min = pc_z;

        if(pc_x > x_max) x_max = pc_x;
        if(pc_y > y_max) y_max = pc_y;
        if(pc_z > z_max) z_max = pc_z;
    }


    BB.x_min = x_min;
    BB.x_max = x_max;
    BB.y_min = y_min;
    BB.y_max = y_max;
    BB.z_min = z_min;
    BB.z_max = z_max;


    return BB;

}
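// Follow-up sketch: deriving a center and extents from the returned limits.
void demoBoundingBox(const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr& pc)
{
    BoundingBox BB = findBoundingBox(pc);
    double center_x = 0.5 * (BB.x_min + BB.x_max);
    double center_y = 0.5 * (BB.y_min + BB.y_max);
    double center_z = 0.5 * (BB.z_min + BB.z_max);
    double size_x = BB.x_max - BB.x_min;
    double size_y = BB.y_max - BB.y_min;
    double size_z = BB.z_max - BB.z_min;
    ROS_INFO("box center (%.3f, %.3f, %.3f), size (%.3f, %.3f, %.3f)",
             center_x, center_y, center_z, size_x, size_y, size_z);
}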
	void filterSpatialArea(const pcl::PointCloud<pcl::PointXY> &current_2d_scan, pcl::PointCloud<pcl::PointXY> &output)
	{
		pcl::PointCloud<pcl::PointXY> ret;
		for(unsigned int i = 0; i < current_2d_scan.size(); i++)
		{
            if( std::fabs(current_2d_scan[i].y) < OBJECT_Y_RANGE && current_2d_scan[i].x > OBJECT_MIN_X && current_2d_scan[i].x < OBJECT_MAX_X )
            {
            	ret.push_back( current_2d_scan[i] );
            }
		}
		// Return value
		output = ret;
	}
void SuperFrameParser::convertImageToPointCloud (const sensor_msgs::ImagePtr& depth_msg, const pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud)
{
    cloud->height = depth_msg->height;
    cloud->width = depth_msg->width;
    cloud->resize (cloud->height * cloud->width);
    // Use correct principal point from calibration
    float center_x = depth_info_->K[2]; // c_x
    float center_y = depth_info_->K[5]; // c_y

    // Combine unit conversion (raw depth is in millimetres) with scaling by focal length for computing (X,Y)
    float unit_scaling = 0.001f;
    float constant_x = unit_scaling / depth_info_->K[0]; // f_x
    float constant_y = unit_scaling / depth_info_->K[4]; // f_y
    float bad_point = std::numeric_limits<float>::quiet_NaN ();

    pcl::PointCloud<pcl::PointXYZ>::iterator pt_iter = cloud->begin ();
    const uint16_t* depth_row = reinterpret_cast<const uint16_t*> (&depth_msg->data[0]);
    int row_step = depth_msg->step / sizeof (uint16_t);
    for (int v = 0; v < (int)depth_msg->height; ++v, depth_row += row_step)
    {
        for (int u = 0; u < (int)depth_msg->width; ++u)
        {
            pcl::PointXYZ& pt = *pt_iter++;
            uint16_t depth = depth_row[u];

            // A raw depth of 0 denotes a missing point; map it to NaNs
            if (depth == 0)
            {
                pt.x = pt.y = pt.z = bad_point;
                continue;
            }

            // Fill in XYZ
            pt.x = (u - center_x) * depth * constant_x;
            pt.y = (v - center_y) * depth * constant_y;
            pt.z = depth * unit_scaling;
        }
    }
}
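// The back-projection above is the standard pinhole camera model: with raw
// depth in millimetres, z = depth * 0.001 and
//   x = (u - c_x) * z / f_x,   y = (v - c_y) * z / f_y,
// which is exactly what constant_x = unit_scaling / f_x folds into a single
// multiply per point.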
/** Assumptions: No 3d Info available yet */
void Node::projectTo3D(const cv::Mat& depth,
        std::vector<cv::KeyPoint>& feature_locations_2d,
        std::vector<Eigen::Vector4f>& feature_locations_3d,
        const pcl::PointCloud<pcl::PointXYZRGB>& point_cloud){ //TODO: Use this instead of member "pc" or remove argument

    std::clock_t starttime=std::clock();

    cv::Point2f p2d;


    if(feature_locations_3d.size()){
        ROS_INFO("There is already 3D Information in the FrameInfo, clearing it");
        feature_locations_3d.clear();
    }


    for(unsigned int i = 0; i < feature_locations_2d.size(); /*increment at end of loop*/){
        p2d = feature_locations_2d[i].pt;
        if (p2d.x >= depth.cols || p2d.x < 0 ||  p2d.y >= depth.rows || p2d.y < 0 ||
                std::isnan(p2d.x) || std::isnan(p2d.y)){
            ROS_WARN_STREAM("Ignoring invalid keypoint: " << p2d); //Does it happen at all? If not, remove this code block
            feature_locations_2d.erase(feature_locations_2d.begin()+i);
            continue;
        }

        float depth_val = depth.at<float>((int)p2d.y, (int)p2d.x);
        if(std::isnan(depth_val) )  {//No 3d pose would be available
            ROS_DEBUG_STREAM("No depth for: " << p2d);
            feature_locations_2d.erase(feature_locations_2d.begin()+i);
            continue;
        }

        pcl::PointXYZRGB p3d = point_cloud.at((int) p2d.x,(int) p2d.y);

        // Todo: should not happen
        if ( std::isnan(p3d.x) || std::isnan(p3d.y) || std::isnan(p3d.z)){
            // ROS_INFO("nan in pointcloud! %f, %f",p2d.x, p2d.y);
            feature_locations_2d.erase(feature_locations_2d.begin()+i);
            continue;
        }


        feature_locations_3d.push_back(Eigen::Vector4f(p3d.x, p3d.y, p3d.z, 1.0));
        i++; //Only increment if no element is removed from vector
    }



    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "function runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");

}