// TODO: add a way to specify the cost as a desired signed distance.
void RadialPlan::updateNodeCosts(const pcl::PointCloud<pcl::PointXYZ> & pointCloud, Side side, float d_desired, float d_safety) {
#ifdef SAVE_OUTPUT
    FILE *pc = fopen("pc","w");
#endif
    bool ignore_glare_point = false;
    size_t count_glare = 0;
    if (filter_glare) {
        for (unsigned int i=0;i<pointCloud.size();i++) {
            double r = hypot(pointCloud[i].x,pointCloud[i].y);
            if (r < r_glare) {
                count_glare += 1;
            }
        }
        ignore_glare_point = (count_glare>0) && (count_glare < 4);
        ROS_INFO("%d glare point: ignoring %d",(int)count_glare,(int)ignore_glare_point);
    }

    std::vector<float> r_max(n_j,NAN);
    nns.reset();
    unsigned int j = 0, n_useful = pointCloud.size();
    if (ignore_glare_point) {
        n_useful -= count_glare;
    }
    nns_cloud.resize(2, n_useful);
    for (unsigned int i=0;i<pointCloud.size();i++) {
        double r = hypot(pointCloud[i].x,pointCloud[i].y);
        if (ignore_glare_point && (r < r_glare)) {
            continue;
        }
        assert(j < n_useful);
        nns_cloud(0,j) = pointCloud[i].x;
        nns_cloud(1,j) = pointCloud[i].y;
        float alpha = round(atan2(pointCloud[i].y, pointCloud[i].x) * 2 * ang_range / angle_scale);
        int i_alpha = (int)alpha;
        if (abs(i_alpha) <= ang_range) {
            // Reuse the range r already computed above instead of shadowing it with a recomputed value.
            if (isnan(r_max[ang_range + i_alpha]) || (r < r_max[ang_range + i_alpha])) {
                r_max[ang_range + i_alpha] = r;
            }
        }
#ifdef SAVE_OUTPUT
        fprintf(pc,"%e %e %e\n",pointCloud[i].x,pointCloud[i].y,0.0);
#endif
        j += 1;
    }
#ifdef SAVE_OUTPUT
    fclose(pc);
#endif

#ifdef SAVE_OUTPUT
    FILE * bd = fopen("bd", "w");
    float dalpha = angle_scale / (4*ang_range);
    for (int j=0;j<(signed)n_j;j++) {
        float alpha = (j - ang_range)*angle_scale / (2*ang_range);
        fprintf(bd,"%e %e\n%e %e\n%e %e\n",
                r_max[j]*cos(alpha-dalpha), r_max[j]*sin(alpha-dalpha), 
                r_max[j]*cos(alpha), r_max[j]*sin(alpha), 
                r_max[j]*cos(alpha+dalpha), r_max[j]*sin(alpha+dalpha));
    }
    fclose(bd);
#endif


    boost::shared_ptr<NaboFilter> filter(new NaboFilter(nns_cloud, nns_query));
    // Create a kd-tree for nns_cloud; note that nns_cloud must stay valid during the lifetime of the kd-tree.
    nns.reset(NNSearchF::createKDTreeLinearHeap(nns_cloud));

    // Look for the nearest neighbour of each query point.
    // Exact search (epsilon = 0), no optional flags.
    nns->knn(nns_query, indices, dists2, 1, 0, 0);
    for (int j=0;j < (signed)n_j; j++) {
        for (unsigned int r = 0; r < n_r; r ++ ) {
            unsigned int ix = r*n_j + j;
            float d = sqrt(dists2(0,ix));
            if (isnan(r_max[j]) || (r * r_scale < r_max[j])) {
                if (d > d_safety) {
                    node_safety(r,j) = 0.0;
                } else {
                    // Adding obstacle repulsion
                    node_safety(r,j) = d_safety/(d+1e-10) - 1;
                }
            } else {
                // Behind the point cloud
                node_safety(r,j) = NAN;
            }
        }
    }

#ifdef SAVE_OUTPUT
    FILE * fp = fopen("nodes","w");
    printf("Node costs\n");
#endif
    for (unsigned int k=0;k<n_k;k++) {
#ifdef SIGNED_DISTANCE
        float beta = (k-conn_range) * angle_scale / (2*ang_range);
        nns->setFilter(filter);
        switch (side) {
            case LEFT:
                filter->setOrientationOffset(beta + M_PI/2.);
                break;
            case RIGHT:
                filter->setOrientationOffset(beta - M_PI/2.);
                break;
        }
        nns->knn(nns_query, indices, dists2, 1, 0, 0);
#endif
        for (int j=0;j < (signed)n_j; j++) {
#ifdef SAVE_OUTPUT
            float alpha = (j-ang_range) * angle_scale / (2*ang_range);
#endif
            for (unsigned int r = 0; r < n_r; r ++ ) {
                unsigned int ix = r*n_j + j;
                float d = sqrt(dists2(0,ix));
                // Compute the side of the closest point so that the
                // distance can be signed.
                float de = (d - d_desired) /* / (std::max(r,1u) * r_scale) */;
                // Open question: should this be divided by (std::max(r,1u) * r_scale)?
                node_cost(r,j,k) = (de * de); 
#ifdef SAVE_OUTPUT
                printf("%3d,%3d,%d -> %6.2f %6.2f -> %6.2f (r_max %6.2f d %6.2f)\n",r,j,k,alpha,r*r_scale,node_cost(r,j,k),r_max[j],d);
                fprintf(fp,"%e %e %e %e %e %e\n",r*r_scale*cos(alpha),r*r_scale*sin(alpha),r*r_scale,alpha,d,node_cost(r,j,conn_range));
#endif
            }
        }
    }
#ifdef SAVE_OUTPUT
    fclose(fp);
#endif

}
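// A minimal, self-contained sketch of the libnabo kNN pattern used above
// (NNSearchF::createKDTreeLinearHeap + knn); the include path, point counts and
// matrix sizes are illustrative assumptions, following libnabo's one-point-per-column convention.
#include "nabo/nabo.h"
#include <Eigen/Dense>

void naboKnnSketch() {
    // 100 2-D data points, one per column, as in nns_cloud above.
    Eigen::MatrixXf M = Eigen::MatrixXf::Random(2, 100);
    // 10 query points, one per column, as in nns_query above.
    Eigen::MatrixXf q = Eigen::MatrixXf::Random(2, 10);

    // M must stay valid (and unchanged) for the lifetime of the search structure.
    Nabo::NNSearchF *nns = Nabo::NNSearchF::createKDTreeLinearHeap(M);

    const int K = 1;
    Eigen::MatrixXi indices(K, q.cols());
    Eigen::MatrixXf dists2(K, q.cols());
    // Exact search (epsilon = 0), no optional flags; dists2 holds squared distances.
    nns->knn(q, indices, dists2, K, 0, 0);

    delete nns;
}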
Example #2
int main(int argc, char **argv)
{
    if(argc != 8){
      std::cout << "Usage: rosrun ndt_localizer local2global \"filename\" \"x\" \"y\" \"z\" \"roll\" \"pitch\" \"yaw\"" << std::endl;
      exit(1);
    }

    ros::init(argc, argv, "local2global");
    ros::NodeHandle n;

    std::string filename = argv[1];
    std::cout << filename << std::endl;

    if(pcl::io::loadPCDFile<pcl::PointXYZI> (filename, *additional_map_ptr) == -1){
      std::cout << "Couldn't read " << filename << "." << std::endl;
      return(-1);
    }
    std::cout << "Loaded " << additional_map_ptr->size() << " data points from " << filename << "." << std::endl;

    initial_x = std::stod(argv[2]);
    initial_y = std::stod(argv[3]);
    initial_z = std::stod(argv[4]);
    initial_roll = std::stod(argv[5]);
    initial_pitch = std::stod(argv[6]);
    initial_yaw = std::stod(argv[7]);

    std::cout << "initial_pose: "
	      << initial_x << " " << initial_y << " " << initial_z << " "
	      << initial_roll << " " << initial_pitch << " " << initial_yaw << std::endl;

    previous_pos.x = initial_x;
    previous_pos.y = initial_y;
    previous_pos.z = initial_z;
    previous_pos.roll = initial_roll;
    previous_pos.pitch = initial_pitch;
    previous_pos.yaw = initial_yaw;
    
    current_pos.x = initial_x;
    current_pos.y = initial_y;
    current_pos.z = initial_z;
    current_pos.roll = initial_roll;
    current_pos.pitch = initial_pitch;
    current_pos.yaw = initial_yaw;

    ndt_pose_pub = n.advertise<geometry_msgs::PoseStamped>("/current_pose", 1000);

    control_pose_pub = n.advertise<geometry_msgs::PoseStamped>("/control_pose", 1000);

    ndt_map_pub = n.advertise<sensor_msgs::PointCloud2>("/ndt_map", 1000, true);

    // subscribing parameter
    ros::Subscriber param_sub = n.subscribe("config/ndt", 10, param_callback);

    // subscribing gnss position
    ros::Subscriber gnss_sub = n.subscribe("gnss_pose", 10, gnss_callback);

    // subscribing map data (only once)
    ros::Subscriber map_sub = n.subscribe("points_map", 10, map_callback);

    // subscribing the velodyne data
    //    ros::Subscriber velodyne_sub = n.subscribe("points_raw", 1000, velodyne_callback);
    //    ros::Subscriber velodyne_sub = n.subscribe("cloud_pcd", 1000, velodyne_callback);

    ros::spin();

    return 0;
}
Example #3
/**
 * Remove from point_cloud_for_reduction every point that lies within distanceThreshold
 * (measured in the flattened z = 0 plane) of any point in removal_Cloud, using a kd-tree
 * radius search, and return the surviving points as a new cloud.
 *
 * @param point_cloud_for_reduction the cloud from which points are removed
 * @param removal_Cloud             the cloud whose points define the removal regions
 * @param distanceThreshold         the Euclidean removal radius (integer)
 * @return the reduced point cloud
 */
pcl::PointCloud<pcl::PointXYZRGB> Track::removeClosestDataCloudPoints(pcl::PointCloud<pcl::PointXYZRGB> point_cloud_for_reduction,pcl::PointCloud<pcl::PointXYZRGB> removal_Cloud, int distanceThreshold){

    //NOTE: the removal loop below has both a serial (disabled) and a parallel (cv::parallel_for_) version

        //NOTE: you cannot feed a KNN searcher clouds with 1 or fewer datapoints!

        //KDTREE SEARCH

        pcl::KdTreeFLANN<pcl::PointXYZRGB> kdtree;
        // Neighbors within radius search

        std::vector<int> pointIdxRadiusSearch;
        std::vector<float> pointRadiusSquaredDistance;

        double point_radius = distanceThreshold;

        PointCloud<pcl::PointXYZRGB> point_cloud_flattened;//Point cloud with extra Hue Dimension crushed to 0

        PointCloud<pcl::PointXYZRGB> point_cloud_for_return;
        point_cloud_for_return.reserve(point_cloud_for_reduction.size());


        if(point_cloud_for_reduction.size()>1){

            bool *marked= new bool[point_cloud_for_reduction.size()];
            memset(marked,false,sizeof(bool)*point_cloud_for_reduction.size());

            bool *parmarked= new bool[point_cloud_for_reduction.size()];
            memset(parmarked,false,sizeof(bool)*point_cloud_for_reduction.size());
            //Make a version of the original data cloud that is flattened to z=0 but with the same indice
            copyPointCloud( point_cloud_for_reduction,point_cloud_flattened);


            for(int q=0; q<point_cloud_flattened.size();q++){

                point_cloud_flattened.at(q).z=0;

            }

           pcl:: PointCloud<PointXYZRGB>::Ptr point_cloud_for_reduction_ptr (new pcl::PointCloud<PointXYZRGB> (point_cloud_flattened));

           // K nearest neighbor search with Radius

            kdtree.setInputCloud (point_cloud_for_reduction_ptr); //Needs to have more than 1 data pt or segfault




            double t = (double)getTickCount();


//The loop below has been parallelized and runs about 2x faster; the serial version is kept below, disabled, for reference
                /**
            // iterate over points in model and remove those points within a certain distance
            for (unsigned int c=0; c < removal_Cloud.size(); c++)
            {

                if(point_cloud_for_reduction.size()<2){
                    break;
                }


                pcl::PointXYZRGB searchPoint;

                searchPoint.x = removal_Cloud.points[c].x;
                searchPoint.y = removal_Cloud.points[c].y;
               //Need to take z as zero when using a flattened data point cloud
               searchPoint.z = 0;

                // qDebug() <<"Datapts before incremental remove"<< point_cloud_for_reduction.size();
                if ( kdtree.radiusSearch (searchPoint, point_radius, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0 )
                {
                    for (size_t i = 0; i < pointIdxRadiusSearch.size (); ++i){
                        if(point_cloud_for_reduction.size()>0) ///NOTE CHANGED FROM > 1

                        marked[pointIdxRadiusSearch[i]]=true;
    //                        point_cloud_for_reduction.erase(point_cloud_for_reduction.begin()+pointIdxRadiusSearch[i]);// point_cloud_for_reduction.points[ pointIdxRadiusSearch[i] ]
                    }
                }


            }

            //DESTROY ALL MARKED POINTS
            for(uint q=0; q< point_cloud_for_reduction.size(); q++){
                if(!marked[q]){
                    point_cloud_for_return.push_back(point_cloud_for_reduction.at(q));
    //                point_cloud_for_return.at(q) = point_cloud_for_reduction.at(q);

                }

            }
            t = ((double)getTickCount() - t)/getTickFrequency();
            cout << "::: time serial remove " << t << endl;
/**/

            //PARALLEL VERSION



            /// Timing
             t = (double)getTickCount();

                cv::parallel_for_(Range(0, removal_Cloud.size()),Remove_Parallel(&kdtree, removal_Cloud,point_cloud_for_reduction, point_radius, &parmarked) );



                //DESTROY ALL MARKED POINTS
                for(uint q=0; q< point_cloud_for_reduction.size(); q++){
                    if(!parmarked[q]){
                        point_cloud_for_return.push_back(point_cloud_for_reduction.at(q));
        //                point_cloud_for_return.at(q) = point_cloud_for_reduction.at(q);

                    }

                }

                t = ((double)getTickCount() - t)/getTickFrequency();
                cout << "::: time parallel remove " << t << endl;


            delete[] marked;
            delete[] parmarked;
        }

        //point_cloud_for_reduction.resize();
        return point_cloud_for_return;

    }
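// A minimal sketch of the pcl::KdTreeFLANN radius-search pattern used by removeClosestDataCloudPoints
// above; the example 0.5 radius and the empty marking loop are illustrative assumptions.
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/kdtree/kdtree_flann.h>

void radiusSearchSketch(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud,
                        const pcl::PointXYZRGB &searchPoint){

    // The kd-tree needs a cloud with more than one point (see the note above).
    if (cloud->size() < 2)
        return;

    pcl::KdTreeFLANN<pcl::PointXYZRGB> kdtree;
    kdtree.setInputCloud(cloud);

    std::vector<int> pointIdxRadiusSearch;
    std::vector<float> pointRadiusSquaredDistance;
    double point_radius = 0.5; // illustrative removal radius, in the cloud's units

    // Indices of all cloud points within point_radius of searchPoint, plus their squared distances.
    if (kdtree.radiusSearch(searchPoint, point_radius, pointIdxRadiusSearch, pointRadiusSquaredDistance) > 0){
        for (size_t i = 0; i < pointIdxRadiusSearch.size(); ++i){
            // Mark or skip cloud->points[pointIdxRadiusSearch[i]] here, as the function above does.
        }
    }
}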
Example #4
void PbMapMaker::detectPlanesCloud( pcl::PointCloud<PointT>::Ptr &pointCloudPtr_arg,
                                    Eigen::Matrix4f &poseKF,
                                    double distThreshold, double angleThreshold, double minInliersF)
{
  unsigned minInliers = minInliersF * pointCloudPtr_arg->size();

  #ifdef _VERBOSE
    cout << "detectPlanes in a cloud with " << pointCloudPtr_arg->size() << " points " << minInliers << " minInliers\n";
  #endif

  pcl::PointCloud<PointT>::Ptr pointCloudPtr_arg2(new pcl::PointCloud<PointT>);
  pcl::copyPointCloud(*pointCloudPtr_arg,*pointCloudPtr_arg2);

  pcl::PointCloud<pcl::PointXYZRGBA>::Ptr alignedCloudPtr(new pcl::PointCloud<pcl::PointXYZRGBA>);
  pcl::transformPointCloud(*pointCloudPtr_arg,*alignedCloudPtr,poseKF);

  { mrpt::synch::CCriticalSectionLocker csl(&CS_visualize);
    *mPbMap.globalMapPtr += *alignedCloudPtr;
  } // End CS

  // Downsample voxel map's point cloud
  static pcl::VoxelGrid<pcl::PointXYZRGBA> grid;
  grid.setLeafSize(0.02,0.02,0.02);
  pcl::PointCloud<pcl::PointXYZRGBA> globalMap;
  grid.setInputCloud (mPbMap.globalMapPtr);
  grid.filter (globalMap);
  mPbMap.globalMapPtr->clear();
  *mPbMap.globalMapPtr = globalMap;

  pcl::IntegralImageNormalEstimation<PointT, pcl::Normal> ne;
  ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);
  ne.setMaxDepthChangeFactor (0.02f); // For VGA: 0.02f, 10.0f
  ne.setNormalSmoothingSize (5.0f);
  ne.setDepthDependentSmoothing (true);

  pcl::OrganizedMultiPlaneSegmentation<PointT, pcl::Normal, pcl::Label> mps;
  mps.setMinInliers (minInliers);
  mps.setAngularThreshold (angleThreshold); // (0.017453 * 2.0) // 3 degrees
  mps.setDistanceThreshold (distThreshold); //2cm

  pcl::PointCloud<pcl::Normal>::Ptr normal_cloud (new pcl::PointCloud<pcl::Normal>);
  ne.setInputCloud (pointCloudPtr_arg2);
  ne.compute (*normal_cloud);

#ifdef _VERBOSE
  double plane_extract_start = pcl::getTime ();
#endif
  mps.setInputNormals (normal_cloud);
  mps.setInputCloud (pointCloudPtr_arg2);

  std::vector<pcl::PlanarRegion<PointT>, aligned_allocator<pcl::PlanarRegion<PointT> > > regions;
  std::vector<pcl::ModelCoefficients> model_coefficients;
  std::vector<pcl::PointIndices> inlier_indices;
  pcl::PointCloud<pcl::Label>::Ptr labels (new pcl::PointCloud<pcl::Label>);
  std::vector<pcl::PointIndices> label_indices;
  std::vector<pcl::PointIndices> boundary_indices;
  mps.segmentAndRefine (regions, model_coefficients, inlier_indices, labels, label_indices, boundary_indices);

  #ifdef _VERBOSE
    double plane_extract_end = pcl::getTime();
    std::cout << "Plane extraction took " << double (plane_extract_end - plane_extract_start) << std::endl;
//    std::cout << "Frame took " << double (plane_extract_end - normal_start) << std::endl;
    cout << regions.size() << " planes detected\n";
  #endif

  // Create a vector with the planes detected in this keyframe, and calculate their parameters (normal, center, point clouds, etc.)
  // in the global reference frame
  vector<Plane> detectedPlanes;
  for (size_t i = 0; i < regions.size (); i++)
  {
    Plane plane;

    Vector3f centroid = regions[i].getCentroid ();
    plane.v3center = compose(poseKF, centroid);
    plane.v3normal = poseKF.block(0,0,3,3) * Vector3f(model_coefficients[i].values[0], model_coefficients[i].values[1], model_coefficients[i].values[2]);
//  assert(plane.v3normal*plane.v3center.transpose() <= 0);
//    if(plane.v3normal*plane.v3center.transpose() <= 0)
//      plane.v3normal *= -1;

    // Extract the planar inliers from the input cloud
    pcl::ExtractIndices<pcl::PointXYZRGBA> extract;
    extract.setInputCloud (pointCloudPtr_arg2);
    extract.setIndices ( boost::make_shared<const pcl::PointIndices> (inlier_indices[i]) );
    extract.setNegative (false);
    extract.filter (*plane.planePointCloudPtr);    // Write the planar point cloud

    static pcl::VoxelGrid<pcl::PointXYZRGBA> plane_grid;
    plane_grid.setLeafSize(0.05,0.05,0.05);
    pcl::PointCloud<pcl::PointXYZRGBA> planeCloud;
    plane_grid.setInputCloud (plane.planePointCloudPtr);
    plane_grid.filter (planeCloud);
    plane.planePointCloudPtr->clear();
    pcl::transformPointCloud(planeCloud,*plane.planePointCloudPtr,poseKF);

    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr contourPtr(new pcl::PointCloud<pcl::PointXYZRGBA>);
    contourPtr->points = regions[i].getContour();
//    plane.contourPtr->points = regions[i].getContour();
//    pcl::transformPointCloud(*plane.contourPtr,*plane.polygonContourPtr,poseKF);
    pcl::transformPointCloud(*contourPtr,*plane.polygonContourPtr,poseKF);
    plane_grid.setInputCloud (plane.polygonContourPtr);
//    plane_grid.filter (*plane.contourPtr);
//    plane.calcConvexHull(plane.contourPtr);
    plane_grid.filter (*contourPtr);
    plane.calcConvexHull(contourPtr);
    plane.computeMassCenterAndArea();
    plane.areaVoxels= plane.planePointCloudPtr->size() * 0.0025;

    #ifdef _VERBOSE
      cout << "Area plane region " << plane.areaVoxels<< " of Chull " << plane.areaHull << " of polygon " << plane.compute2DPolygonalArea() << endl;
    #endif

    // Check whether this region corresponds to the same plane as a previous one (this situation may happen when there is a small discontinuity in the observation)
    bool isSamePlane = false;
    for (size_t j = 0; j < detectedPlanes.size(); j++)
      if( areSamePlane(detectedPlanes[j], plane, configPbMap.max_cos_normal, configPbMap.max_dist_center_plane, configPbMap.proximity_threshold) ) // The planes are merged if they are the same
      {
        isSamePlane = true;

        mergePlanes(detectedPlanes[j], plane);

        #ifdef _VERBOSE
          cout << "\tTwo regions support the same plane in the same KeyFrame\n";
        #endif

        break;
      }
    if(!isSamePlane)
      detectedPlanes.push_back(plane);
  }

  #ifdef _VERBOSE
    cout << detectedPlanes.size () << " Planes detected\n";
  #endif

  // Merge detected planes with previous ones if they are the same
  size_t numPrevPlanes = mPbMap.vPlanes.size();
//  set<unsigned> observedPlanes;
  observedPlanes.clear();
  for (size_t i = 0; i < detectedPlanes.size (); i++)
  {
    // Check similarity with previous planes detected
    bool isSamePlane = false;
    vector<Plane>::iterator itPlane = mPbMap.vPlanes.begin();
//  if(frameQueue.size() != 12)
    for(size_t j = 0; j < numPrevPlanes; j++, itPlane++) // numPrevPlanes
    {
      if( areSamePlane(mPbMap.vPlanes[j], detectedPlanes[i], configPbMap.max_cos_normal, configPbMap.max_dist_center_plane, configPbMap.proximity_threshold) ) // The planes are merged if they are the same
      {
//        if (j==2 && frameQueue.size() == 12)
//        {
//          cout << "Same plane\n";
//
////          ofstream pbm;
////          pbm.open("comparePlanes.txt");
//          {
//            cout << " ID " << mPbMap.vPlanes[j].id << " obs " << mPbMap.vPlanes[j].numObservations;
//            cout << " areaVoxels " << mPbMap.vPlanes[j].areaVoxels << " areaVoxels " << mPbMap.vPlanes[j].areaHull;
//            cout << " ratioXY " << mPbMap.vPlanes[j].elongation << " structure " << mPbMap.vPlanes[j].bFromStructure << " label " << mPbMap.vPlanes[j].label;
//            cout << "\n normal\n" << mPbMap.vPlanes[j].v3normal << "\n center\n" << mPbMap.vPlanes[j].v3center;
//            cout << "\n PpalComp\n" << mPbMap.vPlanes[j].v3PpalDir << "\n RGB\n" << mPbMap.vPlanes[j].v3colorNrgb;
//            cout << "\n Neighbors (" << mPbMap.vPlanes[j].neighborPlanes.size() << "): ";
//            for(map<unsigned,unsigned>::iterator it=mPbMap.vPlanes[j].neighborPlanes.begin(); it != mPbMap.vPlanes[j].neighborPlanes.end(); it++)
//              cout << it->first << " ";
//            cout << "\n CommonObservations: ";
//            for(map<unsigned,unsigned>::iterator it=mPbMap.vPlanes[j].neighborPlanes.begin(); it != mPbMap.vPlanes[j].neighborPlanes.end(); it++)
//              cout << it->second << " ";
//            cout << "\n ConvexHull (" << mPbMap.vPlanes[j].polygonContourPtr->size() << "): \n";
//            for(unsigned jj=0; jj < mPbMap.vPlanes[j].polygonContourPtr->size(); jj++)
//              cout << "\t" << mPbMap.vPlanes[j].polygonContourPtr->points[jj].x << " " << mPbMap.vPlanes[j].polygonContourPtr->points[jj].y << " " << mPbMap.vPlanes[j].polygonContourPtr->points[jj].z << endl;
//            cout << endl;
//          }
//          {
////            cout << " ID " << detectedPlanes[i].id << " obs " << detectedPlanes[i].numObservations;
////            cout << " areaVoxels " << detectedPlanes[i].areaVoxels << " areaVoxels " << detectedPlanes[i].areaHull;
////            cout << " ratioXY " << detectedPlanes[i].elongation << " structure " << detectedPlanes[i].bFromStructure << " label " << detectedPlanes[i].label;
//            cout << "\n normal\n" << detectedPlanes[i].v3normal << "\n center\n" << detectedPlanes[i].v3center;
////            cout << "\n PpalComp\n" << detectedPlanes[i].v3PpalDir << "\n RGB\n" << detectedPlanes[i].v3colorNrgb;
////            cout << "\n Neighbors (" << detectedPlanes[i].neighborPlanes.size() << "): ";
////            for(map<unsigned,unsigned>::iterator it=detectedPlanes[i].neighborPlanes.begin(); it != detectedPlanes[i].neighborPlanes.end(); it++)
////              cout << it->first << " ";
////            cout << "\n CommonObservations: ";
////            for(map<unsigned,unsigned>::iterator it=detectedPlanes[i].neighborPlanes.begin(); it != detectedPlanes[i].neighborPlanes.end(); it++)
////              cout << it->second << " ";
//            cout << "\n ConvexHull (" << detectedPlanes[i].polygonContourPtr->size() << "): \n";
//            for(unsigned jj=0; jj < detectedPlanes[i].polygonContourPtr->size(); jj++)
//              cout << "\t" << detectedPlanes[i].polygonContourPtr->points[jj].x << " " << detectedPlanes[i].polygonContourPtr->points[jj].y << " " << detectedPlanes[i].polygonContourPtr->points[jj].z << endl;
//            cout << endl;
//          }
////          pbm.close();
//        }

        isSamePlane = true;

        mergePlanes(mPbMap.vPlanes[j], detectedPlanes[i]);

        // Update proximity graph
        checkProximity(mPbMap.vPlanes[j], configPbMap.proximity_neighbor_planes); // Detect neighbors

        #ifdef _VERBOSE
          cout << "Previous plane " << mPbMap.vPlanes[j].id << " area " << mPbMap.vPlanes[j].areaVoxels<< " of polygon " << mPbMap.vPlanes[j].compute2DPolygonalArea() << endl;
        #endif

        if( observedPlanes.count(mPbMap.vPlanes[j].id) == 0 ) // If this plane has already been observed through a previous partial plane in this same keyframe, we must not count it twice in the observation count
        {
          mPbMap.vPlanes[j].numObservations++;

          // Update co-visibility graph
          for(set<unsigned>::iterator it = observedPlanes.begin(); it != observedPlanes.end(); it++)
            if(mPbMap.vPlanes[j].neighborPlanes.count(*it))
            {
              mPbMap.vPlanes[j].neighborPlanes[*it]++;
              mPbMap.vPlanes[*it].neighborPlanes[mPbMap.vPlanes[j].id]++; // j = mPbMap.vPlanes[j]
            }
            else
            {
              mPbMap.vPlanes[j].neighborPlanes[*it] = 1;
              mPbMap.vPlanes[*it].neighborPlanes[mPbMap.vPlanes[j].id] = 1;
            }

          observedPlanes.insert(mPbMap.vPlanes[j].id);
        }

        #ifdef _VERBOSE
          cout << "Same plane\n";
        #endif

        itPlane++;
        for(size_t k = j+1; k < numPrevPlanes; k++, itPlane++) // numPrevPlanes
          if( areSamePlane(mPbMap.vPlanes[j], mPbMap.vPlanes[k], configPbMap.max_cos_normal, configPbMap.max_dist_center_plane, configPbMap.proximity_threshold) ) // The planes are merged if they are the same
          {
            mergePlanes(mPbMap.vPlanes[j], mPbMap.vPlanes[k]);

            mPbMap.vPlanes[j].numObservations += mPbMap.vPlanes[k].numObservations;

            for(set<unsigned>::iterator it = mPbMap.vPlanes[k].nearbyPlanes.begin(); it != mPbMap.vPlanes[k].nearbyPlanes.end(); it++)
              mPbMap.vPlanes[*it].nearbyPlanes.erase(mPbMap.vPlanes[k].id);

            for(map<unsigned,unsigned>::iterator it = mPbMap.vPlanes[k].neighborPlanes.begin(); it != mPbMap.vPlanes[k].neighborPlanes.end(); it++)
              mPbMap.vPlanes[it->first].neighborPlanes.erase(mPbMap.vPlanes[k].id);

            // Update plane index
            for(size_t h = k+1; h < numPrevPlanes; h++)
              --mPbMap.vPlanes[h].id;

            for(size_t h = 0; h < numPrevPlanes; h++)
            {
              if(k==h)
                continue;

              // Erase through a post-incremented iterator so the loop iterator is never invalidated
              for(set<unsigned>::iterator it = mPbMap.vPlanes[h].nearbyPlanes.begin(); it != mPbMap.vPlanes[h].nearbyPlanes.end(); )
                if(*it > mPbMap.vPlanes[k].id)
                {
                  mPbMap.vPlanes[h].nearbyPlanes.insert(*it-1);
                  mPbMap.vPlanes[h].nearbyPlanes.erase(it++);
                }
                else
                  ++it;

              // Same erase-through-iterator pattern for the map, to avoid using an invalidated iterator
              for(map<unsigned,unsigned>::iterator it = mPbMap.vPlanes[h].neighborPlanes.begin(); it != mPbMap.vPlanes[h].neighborPlanes.end(); )
                if(it->first > mPbMap.vPlanes[k].id)
                {
                  mPbMap.vPlanes[h].neighborPlanes[it->first-1] = it->second;
                  mPbMap.vPlanes[h].neighborPlanes.erase(it++);
                }
                else
                  ++it;
            }

            mPbMap.vPlanes.erase(itPlane);
            --numPrevPlanes;

            #ifdef _VERBOSE
              cout << "MERGE TWO PREVIOUS PLANES WHEREBY THE INCORPORATION OF A NEW REGION \n";
            #endif
          }

        break;
      }
    }
    if(!isSamePlane)
    {
      detectedPlanes[i].id = mPbMap.vPlanes.size();
      detectedPlanes[i].numObservations = 1;
      detectedPlanes[i].bFullExtent = false;
      detectedPlanes[i].nFramesAreaIsStable = 0;
      if(configPbMap.makeClusters)
      {
        detectedPlanes[i].semanticGroup = clusterize->currentSemanticGroup;
        clusterize->groups[clusterize->currentSemanticGroup].push_back(detectedPlanes[i].id);
      }

      #ifdef _VERBOSE
        cout << "New plane " << detectedPlanes[i].id << " area " << detectedPlanes[i].areaVoxels<< " of polygon " << detectedPlanes[i].areaHull << endl;
      #endif

      // Update proximity graph
      checkProximity(detectedPlanes[i], configPbMap.proximity_neighbor_planes);  // Detect neighbors with max separation of 1.0 meters

      // Update co-visibility graph
      for(set<unsigned>::iterator it = observedPlanes.begin(); it != observedPlanes.end(); it++)
      {
        detectedPlanes[i].neighborPlanes[*it] = 1;
        mPbMap.vPlanes[*it].neighborPlanes[detectedPlanes[i].id] = 1;
      }

      observedPlanes.insert(detectedPlanes[i].id);

      mPbMap.vPlanes.push_back(detectedPlanes[i]);
    }
  }

  if(frameQueue.size() == 12)
   cout << "Same plane? " << areSamePlane(mPbMap.vPlanes[2], mPbMap.vPlanes[9], configPbMap.max_cos_normal, configPbMap.max_dist_center_plane, configPbMap.proximity_threshold) << endl;

  #ifdef _VERBOSE
    cout << "\n\tobservedPlanes: ";
    cout << observedPlanes.size () << " Planes observed\n";
    for(set<unsigned>::iterator it = observedPlanes.begin(); it != observedPlanes.end(); it++)
      cout << *it << " ";
    cout << endl;
  #endif

    // For all observed planes
    for(set<unsigned>::iterator it = observedPlanes.begin(); it != observedPlanes.end(); it++)
    {
      Plane &observedPlane = mPbMap.vPlanes[*it];

      // Calculate principal direction
      observedPlane.calcElongationAndPpalDir();

      // Update color
      observedPlane.calcMainColor();
//    cout << "Plane " << observedPlane.id << " color\n" << observedPlane.v3colorNrgb << endl;

      // Infer knowledge from the planes (e.g. do these planes represent the floor, walls, etc.)
      if(configPbMap.inferStructure)
        mpPlaneInferInfo->searchTheFloor(poseKF, observedPlane);

    } // End for observedPlanes

    // Search the floor plane
    if(mPbMap.FloorPlane != -1) // Verify that the observed planes centers are above the floor
    {
      #ifdef _VERBOSE
        cout << "Verify that the observed planes centers are above the floor\n";
      #endif

      for(set<unsigned>::reverse_iterator it = observedPlanes.rbegin(); it != observedPlanes.rend(); it++)
      {
        if(static_cast<int>(*it) == mPbMap.FloorPlane)
          continue;
        if( mPbMap.vPlanes[mPbMap.FloorPlane].v3normal.dot(mPbMap.vPlanes[*it].v3center - mPbMap.vPlanes[mPbMap.FloorPlane].v3center) < -0.1 )
        {
          if(mPbMap.vPlanes[mPbMap.FloorPlane].v3normal.dot(mPbMap.vPlanes[*it].v3normal) > 0.99) //(cos 8.1º = 0.99)
          {
            mPbMap.vPlanes[*it].label = "Floor";
            mPbMap.vPlanes[mPbMap.FloorPlane].label = "";
            mPbMap.FloorPlane = *it;
          }
          else
          {
//            assert(false);
            mPbMap.vPlanes[mPbMap.FloorPlane].label = "";
            mPbMap.FloorPlane = -1;
            break;
          }
        }
      }
    }

  if(configPbMap.detect_loopClosure)
    for(set<unsigned>::iterator it = observedPlanes.begin(); it != observedPlanes.end(); it++)
    {
      if(mpPbMapLocaliser->vQueueObservedPlanes.size() < 10)
        mpPbMapLocaliser->vQueueObservedPlanes.push_back(*it);
    }

    #ifdef _VERBOSE
      cout << "DetectedPlanesCloud finished\n";
    #endif
}
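// A condensed, hedged sketch of the plane-detection front end used by detectPlanesCloud above
// (integral-image normal estimation followed by organized multi-plane segmentation); the thresholds
// and the minimum-inlier count are illustrative values, not the ones configured by PbMapMaker.
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/features/integral_image_normal.h>
#include <pcl/segmentation/organized_multi_plane_segmentation.h>

void detectPlanesSketch(const pcl::PointCloud<pcl::PointXYZRGBA>::Ptr &organizedCloud)
{
  // Surface normals from the organized (image-like) cloud.
  pcl::IntegralImageNormalEstimation<pcl::PointXYZRGBA, pcl::Normal> ne;
  ne.setNormalEstimationMethod (ne.COVARIANCE_MATRIX);
  ne.setMaxDepthChangeFactor (0.02f);
  ne.setNormalSmoothingSize (5.0f);
  ne.setInputCloud (organizedCloud);

  pcl::PointCloud<pcl::Normal>::Ptr normal_cloud (new pcl::PointCloud<pcl::Normal>);
  ne.compute (*normal_cloud);

  // Segment planar regions from the organized cloud plus its normals.
  pcl::OrganizedMultiPlaneSegmentation<pcl::PointXYZRGBA, pcl::Normal, pcl::Label> mps;
  mps.setMinInliers (1000);                 // illustrative
  mps.setAngularThreshold (0.017453 * 2.0); // ~2 degrees, illustrative
  mps.setDistanceThreshold (0.02);          // 2 cm, illustrative
  mps.setInputNormals (normal_cloud);
  mps.setInputCloud (organizedCloud);

  std::vector<pcl::PlanarRegion<pcl::PointXYZRGBA>,
              Eigen::aligned_allocator<pcl::PlanarRegion<pcl::PointXYZRGBA> > > regions;
  mps.segmentAndRefine (regions);

  // Each region exposes a centroid, plane coefficients and a contour, as used above.
  for (size_t i = 0; i < regions.size (); i++)
  {
    Eigen::Vector3f centroid = regions[i].getCentroid ();
    Eigen::Vector4f model = regions[i].getCoefficients ();
    (void) centroid; (void) model;
  }
}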
Example #5
void reconfig(scene_processing::pcmergerConfig & config, uint32_t level) {
    conf = config;
    boost::recursive_mutex::scoped_lock lock(global_mutex);
 //   pcl::PointCloud<PointT>::Ptr prev_cloud_ptr(new pcl::PointCloud<PointT > ());
  //  pcl::PointCloud<PointT>::Ptr new_cloud_ptr(new pcl::PointCloud<PointT > ());


    if(conf.add_pc) {
       //conf.add_pc = false;
        doUpdate = true;
        *cloud_new_ptr = *cloud_mod_ptr;
        *cloud_merged_backup_ptr=*cloud_merged_ptr;
        if(Merged)
        {
            *cloud_merged_ptr += *cloud_new_ptr;
            Matrix4f globalTrans=computeTransformXYZYPR(config.x, config.y, config.z, config.yaw/180.0*PI, config.pitch/180.0*PI, config.roll/180.0*PI);
            writeMatrixToFile(globalTrans);

        }
        else
            *cloud_merged_ptr = *cloud_new_ptr;
        //*cloud_merged_ptr =*cloud_new_ptr;
        
        viewer.removePointCloud("new");
        if(Merged)
            viewer.removePointCloud("merged");
        Merged = true;
        pcl::toROSMsg(*cloud_merged_ptr, cloud_blobc_merged);
        color_handler_merged.reset(new pcl_visualization::PointCloudColorHandlerRGBField<sensor_msgs::PointCloud2 > (cloud_blobc_merged));
        viewer.addPointCloud(*cloud_merged_ptr, color_handler_merged, "merged", viewportOrig);
        ROS_INFO("displaying mergered pointcloud");

    }

    if(conf.setIT) {

        std::string transformsFileName = fn + ".transforms.txt";
        transformFile.open(transformsFileName.data());

        conf.setIT = false;
        doUpdate = true;
        InitialTransformConfig = conf;

        Matrix4f globalTrans = computeTransformXYZYPR(InitialTransformConfig.x, InitialTransformConfig.y, InitialTransformConfig.z, InitialTransformConfig.yaw/180.0*PI, InitialTransformConfig.pitch/180.0*PI, InitialTransformConfig.roll/180.0*PI);
        writeMatrixToFile(globalTrans);

        *cloud_new_ptr = *cloud_mod_ptr;
        ITpresent = true;
    }
    if(config.undo)
    {
        if(noMoreUndo)
        {
            conf.undo=false;
            doUpdate=true;
            return;
        }

        noMoreUndo=true;
        transformFile<<"endo"<<endl;
        *cloud_merged_ptr=*cloud_merged_backup_ptr;
        if(Merged)
            viewer.removePointCloud("merged");
        Merged = true;
        pcl::toROSMsg(*cloud_merged_ptr, cloud_blobc_merged);
        color_handler_merged.reset(new pcl_visualization::PointCloudColorHandlerRGBField<sensor_msgs::PointCloud2 > (cloud_blobc_merged));
        viewer.addPointCloud(*cloud_merged_ptr, color_handler_merged, "merged", viewportOrig);
        ROS_INFO("undo:displaying mergered pointcloud");
        conf.undo=false;
        doUpdate=true;
        if (pcl::io::loadPCDFile (std::string("tempAppend.pcd"), cloud_blobc_new) == -1)
        {
            ROS_ERROR ("Couldn't read file ");
            return;
        }
        //  ROS_INFO ("Loaded %d data points from %s with the following fields: %s", (int)(cloud_blob.width * cloud_blob.height), argv[1] ,pcl::getFieldsList (cloud_blob).c_str ());

        // Convert to the templated message type
        pcl::fromROSMsg (cloud_blobc_new, *cloud_new_ptr);
        //   pcl::PointCloud<PointT>::Ptr cloud_ptr (new pcl::PointCloud<PointT> (cloud));

            if(ITpresent){
                cout<<"inside IT"<<endl;
                transformXYZYPR<PointT>(*cloud_new_ptr, *cloud_mod_ptr, InitialTransformConfig.x, InitialTransformConfig.y, InitialTransformConfig.z, InitialTransformConfig.yaw/180.0*PI, InitialTransformConfig.pitch/180.0*PI, InitialTransformConfig.roll/180.0*PI);
                *cloud_new_ptr = *cloud_mod_ptr;
                conf.pitch=0;
                conf.yaw=0;
                conf.roll=0;
            }
            //ROS_INFO("PointCloud with %d data points and frame %s (%f) received.", (int) cloud_new_ptr->points.size(), cloud_new_ptr->header.frame_id.c_str(), cloud_new_ptr->header.stamp.toSec());
            viewer.removePointCloud("new");
           // pcl::toROSMsg<PointT>(*cloud_new_ptr, cloud_blobc_new);
            color_handler_new.reset(new pcl_visualization::PointCloudColorHandlerRGBField<sensor_msgs::PointCloud2 > (cloud_blobc_new));
            viewer.addPointCloud(*cloud_new_ptr, color_handler_new, "new", viewportOrig);
            ROS_INFO("undo:displaying new point cloud");
            conf.x=0;
            conf.y=0;
            conf.z=0;
            conf.yaw=0;
            conf.pitch=0;
            conf.roll=0;

        
    }
    if(conf.skip_pc || conf.add_pc)
    {
        if (conf.skip_pc)
        {
            conf.x = -1;
            conf.y = -0.3;
            conf.z = 0.78;
            conf.yaw = 0;
            conf.pitch = 30;
            conf.roll = 0;
        }
        else
        {
            conf.x = -0.1;
            conf.y = -0.8;
            conf.z = 0.019;
            conf.yaw = 175;
            conf.pitch = 0;
            conf.roll = 0;
        }

        noMoreUndo=false;
        conf.skip_pc = false;
        conf.add_pc =false;
        
        doUpdate = true;
        int count = 0;
        cloud_blob_prev = cloud_blob_new;
        cloud_blob_new = reader.getNextCloud();
        cout<<"header"<<cloud_blob_new->header<<endl;
//        cloud_blob_new->
        ros::M_string::iterator iter;
        //for(iter=cloud_blob_new->__connection_header->begin ();iter!=cloud_blob_new->__connection_header->end ();iter++)
         // cout<<iter->first<<","<<iter->second<<endl;
        
        
        while(count < skipNum && cloud_blob_prev != cloud_blob_new)
        {
            cloud_blob_prev = cloud_blob_new;
            cloud_blob_new = reader.getNextCloud();
            count ++;
        }
        if (cloud_blob_prev != cloud_blob_new) {
            pcl::fromROSMsg(*cloud_blob_new, *cloud_temp_ptr);
            cloud_temp_ptr->header;
            cout<<"numPoints="<<cloud_temp_ptr->size ()<<endl;
            appendCamIndexAndDistance (cloud_temp_ptr,cloud_new_ptr,globalFrameCount,VectorG(0,0,0));
            globalFrameCount++;
            cloud_new_ptr->width=1;
            cloud_new_ptr->height=cloud_new_ptr->size();
            writer.write (std::string("tempAppend.pcd"),*cloud_new_ptr,true);
            if (pcl::io::loadPCDFile (std::string("tempAppend.pcd"), cloud_blobc_new) == -1)
            {
                ROS_ERROR ("Couldn't read temp file ");
                return;
            }
            //  ROS_INFO ("Loaded %d data points from %s with the following fields: %s", (int)(cloud_blob.width * cloud_blob.height), argv[1] ,pcl::getFieldsList (cloud_blob).c_str ());

            // Convert to the templated message type
            pcl::fromROSMsg (cloud_blobc_new, *cloud_new_ptr);
            //   pcl::PointCloud<PointT>::Ptr cloud_ptr (new pcl::PointCloud<PointT> (cloud));
            
            if(ITpresent){
                cout<<"inside IT"<<endl;
                transformXYZYPR<PointT>(*cloud_new_ptr, *cloud_mod_ptr, InitialTransformConfig.x, InitialTransformConfig.y, InitialTransformConfig.z, InitialTransformConfig.yaw/180.0*PI, InitialTransformConfig.pitch/180.0*PI, InitialTransformConfig.roll/180.0*PI);
                *cloud_new_ptr = *cloud_mod_ptr;
             //   conf.pitch=0;
             //   conf.yaw=0;
             //   conf.roll=0;
            }
            //ROS_INFO("PointCloud with %d data points and frame %s (%f) received.", (int) cloud_new_ptr->points.size(), cloud_new_ptr->header.frame_id.c_str(), cloud_new_ptr->header.stamp.toSec());
            viewer.removePointCloud("new");
           // pcl::toROSMsg<PointT>(*cloud_new_ptr, cloud_blobc_new);
            color_handler_new.reset(new pcl_visualization::PointCloudColorHandlerRGBField<sensor_msgs::PointCloud2 > (cloud_blobc_new));
            viewer.addPointCloud(*cloud_new_ptr, color_handler_new, "new", viewportOrig);
            ROS_INFO("displaying new point cloud");
          /*  if(Merged){
                viewer.removePointCloud("merged");
                pcl::toROSMsg(*cloud_merged_ptr, cloud_blobc_merged);
                color_handler_merged.reset(new pcl_visualization::PointCloudColorHandlerRGBField<sensor_msgs::PointCloud2 > (cloud_blobc_merged));
                viewer.addPointCloud(*cloud_merged_ptr, color_handler_merged, "merged", viewportOrig);
            }*/
        }else {
            ROS_INFO("Finised reading all pointclouds! Select done to save.");
        }
    }
    
    if(conf.set_skip){
        conf.set_skip = false;
        doUpdate = true;
        skipNum = (conf.skipNum);
    }
    if(conf.update)
    {
        conf.update = false;
        doUpdate = true;
        updateUI();
    }
}
void Main_process() {
    //    pcl::VoxelGrid<PointXYZRGB> VG_humanSampling;

    //    double clustering_voxel_size = 0.003;
    //    VG_humanSampling.setLeafSize (clustering_voxel_size, clustering_voxel_size, clustering_voxel_size);
    //    VG_humanSampling.setDownsampleAllData (false);

          person_cluster->clear();
          cv::Vec2b point;

        // for(int i = 0; i<imageG.rows; ++i)
        //     for(int j = 0; j<imageG.cols; ++j )
        //     {
        //         point = imageG.at<cv::Vec2b>(i,j);
        //       if((point[0] == 0)&&(point[1] == 0)&&(point[2] == 0))
        //           //&&(point[1] != 0)&&(point[2] != 0))
        //         {
        //             person_cluster->points.push_back(global_cloud->at(j,i));
        //         }
        //     }

      pcl::PointCloud<PointXYZRGB>::Ptr cloud_ptr (new pcl::PointCloud<PointXYZRGB>);
              pcl::PCDReader reader;
			  reader.read<pcl::PointXYZRGB> ("/home/shaghayegh/catkin_ws/src/mythesis_body/01.pcd", *cloud_ptr);
			  	// reader.read<pcl::PointXYZRGB> ("/home/shaghayegh/catkin_ws/src/mythesis_body/src/model_uniform/third_shot.pcd", *person_cluster);
              // pcl::io::savePCDFile("/home/shaghayegh/catkin_ws/pcdmamuli.pcd",*person_cluster);
              // pcl::io::savePCDFileASCII("/home/shaghayegh/catkin_ws/pcdascii.pcd",*person_cluster);
              // pcl::io::savePCDFileBinary("/home/shaghayegh/catkin_ws/pcdBinary.pcd",*person_cluster);

    //           for (size_t i = 0; i < person_cluster->points.size (); ++i)
    // std::cout << "    " << person_cluster->points[i].x
    //           << " "    << person_cluster->points[i].y
    //           << " "    << person_cluster->points[i].z << std::endl;
			  	cout<<"model_size "<<cloud_ptr->points.size()<<endl;
                boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer1 (new pcl::visualization::PCLVisualizer ("3D Viewerman"));
                pcl::visualization::PointCloudColorHandlerRGBField<pcl::PointXYZRGB> rgb(cloud_ptr);
                viewer1->addPointCloud<pcl::PointXYZRGB> (cloud_ptr, rgb, "sample cloud");
                while (!viewer1->wasStopped ()) {
                    viewer1->spinOnce (1);
                    boost::this_thread::sleep (boost::posix_time::microseconds (100));
                }
                viewer1.reset();

    
      pcl::PointCloud<PointXYZRGB>::Ptr cloud_ptr2 (new pcl::PointCloud<PointXYZRGB>);

      *person_cluster = *cloud_ptr->makeShared();
      cout <<cloud_ptr->isOrganized()<<" organized "<<endl;
      cout<<cloud_ptr->header << " header "<<endl;
      waitKey(0);

      if(cloud_ptr->size()>0)
      {

          // std::vector<int> indices;
          // pcl::removeNaNFromPointCloud(*cloud_ptr, *cloud_ptr2, indices);
          // std::cout << "size: " << cloud_ptr2->points.size () << std::endl;//          pcl::removeNaNFromPointCloud()
        type_of_keypoint(cloud_ptr);
      }
    	ROS_INFO(" type of keypoint ");
                stringstream s2;
                string root_path_model = ros::package::getPath("mythesis_body")+"/src/model_uniform/";
                string endaddress = root_path_model; //ros::Pakeage::getPath(pick_and_place)+"src/Data/";
                s2 << endaddress << "third" << "_shot.pcd";
                pcl::io::savePCDFileBinary("/home/shaghayegh/catkin_ws/src/mythesis_body/01_uniform.pcd", *model_keypoints);
       cout << "*************keypoint size :  " << model_keypoints->size() << endl ;



      float descr_rad_(0.5);
      // calculate SHOT descriptor
//        NormalEstimator norm;
//        model_normals = norm.get_normals(cloud_ptr);//?
//        pcl::io::savePCDFileASCII("/home/shaghayegh/catkin_ws/src/mythesis_body/01_shot_normalR9.pcd", *model_normals);


      pcl::NormalEstimation<pcl::PointXYZRGB, pcl::Normal> ne;
      ne.setInputCloud (cloud_ptr);
      pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree_normal (new pcl::search::KdTree<pcl::PointXYZRGB> ());
      ne.setSearchMethod (tree_normal);
//      pcl::PointCloud<pcl::Normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::Normal>);
      ne.setRadiusSearch (9);
      ne.compute (*model_normals);
                pcl::io::savePCDFileASCII("/home/shaghayegh/catkin_ws/src/mythesis_body/01_shot_normalR9.pcd", *model_normals);

      //         if(cloud_ptr->size()>0)
      // {
      //     std::vector<int> indices;
      //     pcl::removeNaNFromPointCloud(*person_cluster, *cloud_ptr2, indices);
      //     std::cout << "size: " << cloud_ptr2->points.size () << std::endl;//          pcl::removeNaNFromPointCloud()
      // }

     
        //calculate descriptor
        pcl::SHOTColorEstimationOMP<pcl::PointXYZRGB, PointNormal, pcl::SHOT1344> est;
        est.setInputCloud(model_keypoints);
        est.setSearchSurface(person_cluster); // Is this correct? Should the whole surface be given as the search surface, or only the person points?
        est.setInputNormals(model_normals);

        pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGB>);
        est.setSearchMethod(tree);
        est.setRadiusSearch (descr_rad_);
        est.compute (*model_descriptors);

        //****************************************************************
               cout << "*************key_points size :  " << model_keypoints->size() << endl ;

       cout << "*************descriptor size :  " << model_descriptors->size() << endl ;
      //   temp_cluster.keypoint_size = model_keypoints->points.size();
      //   temp_cluster.descriptors = model_descriptors->makeShared();
      //   temp_cluster.objectName = "hichi";
      //   scene_clusters.push_back(temp_cluster);

      // save descriptor

                stringstream s3;
                string root_path_model_descriptor = ros::package::getPath("mythesis_body")+"/src/model_uniform/";
                string endaddress_descriptor = root_path_model_descriptor; //ros::Pakeage::getPath(pick_and_place)+"src/Data/";
                s3 << endaddress_descriptor << "descriptor second" << "_shot.pcd";

                pcl::io::savePCDFileASCII("/home/shaghayegh/catkin_ws/src/mythesis_body/01_shot_ShotnormalR9.pcd", *model_descriptors);


}
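// A minimal, hedged sketch of the normal-estimation + SHOT-color-descriptor step that Main_process
// performs above; the pcl::Normal type, the search radii and the function/variable names here are
// illustrative assumptions rather than the exact globals used in that routine.
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/features/normal_3d.h>
#include <pcl/features/shot_omp.h>

void shotDescriptorSketch(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr &surface,
                          const pcl::PointCloud<pcl::PointXYZRGB>::Ptr &keypoints)
{
    // Normals on the full search surface.
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimation<pcl::PointXYZRGB, pcl::Normal> ne;
    ne.setInputCloud(surface);
    ne.setRadiusSearch(0.03); // illustrative radius, in the cloud's units
    ne.compute(*normals);

    // SHOT color descriptors computed at the keypoints, using the full cloud as the search surface.
    pcl::PointCloud<pcl::SHOT1344>::Ptr descriptors(new pcl::PointCloud<pcl::SHOT1344>);
    pcl::SHOTColorEstimationOMP<pcl::PointXYZRGB, pcl::Normal, pcl::SHOT1344> est;
    est.setInputCloud(keypoints);
    est.setSearchSurface(surface);
    est.setInputNormals(normals);
    est.setRadiusSearch(0.5); // descriptor support radius, illustrative
    est.compute(*descriptors);
}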
Example #7
template <typename PointT> inline unsigned
pcl::computeCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
                              const Eigen::Vector4f &centroid,
                              Eigen::Matrix3f &covariance_matrix)
{
  if (cloud.points.empty ())
    return (0);

  // Initialize to 0
  covariance_matrix.setZero ();

  unsigned point_count;
  // If the data is dense, we don't need to check for NaN
  if (cloud.is_dense)
  {
    point_count = cloud.size ();
    // For each point in the cloud
    for (size_t i = 0; i < point_count; ++i)
    {
      Eigen::Vector4f pt = cloud [i].getVector4fMap () - centroid;

      covariance_matrix (1, 1) += pt.y () * pt.y ();
      covariance_matrix (1, 2) += pt.y () * pt.z ();

      covariance_matrix (2, 2) += pt.z () * pt.z ();

      pt *= pt.x ();
      covariance_matrix (0, 0) += pt.x ();
      covariance_matrix (0, 1) += pt.y ();
      covariance_matrix (0, 2) += pt.z ();
    }
  }
  // NaN or Inf values could exist => check for them
  else
  {
    point_count = 0;
    // For each point in the cloud
    for (size_t i = 0; i < cloud.size (); ++i)
    {
      // Check if the point is invalid
      if (!isFinite (cloud [i]))
        continue;

      Eigen::Vector4f pt = cloud [i].getVector4fMap () - centroid;

      covariance_matrix (1, 1) += pt.y () * pt.y ();
      covariance_matrix (1, 2) += pt.y () * pt.z ();

      covariance_matrix (2, 2) += pt.z () * pt.z ();

      pt *= pt.x ();
      covariance_matrix (0, 0) += pt.x ();
      covariance_matrix (0, 1) += pt.y ();
      covariance_matrix (0, 2) += pt.z ();
      ++point_count;
    }
  }
  covariance_matrix (1, 0) = covariance_matrix (0, 1);
  covariance_matrix (2, 0) = covariance_matrix (0, 2);
  covariance_matrix (2, 1) = covariance_matrix (1, 2);

  return (point_count);
}
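// A short usage sketch for computeCovarianceMatrix as defined above: the centroid is obtained first
// (here with pcl::compute3DCentroid from pcl/common/centroid.h), and the function then accumulates the
// unnormalized scatter matrix sum_i (p_i - c)(p_i - c)^T over the finite points and returns how many
// points were used. Normalization is left to the caller.
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/centroid.h>

void covarianceSketch (const pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  Eigen::Vector4f centroid;
  pcl::compute3DCentroid (cloud, centroid);

  Eigen::Matrix3f covariance;
  unsigned valid = pcl::computeCovarianceMatrix (cloud, centroid, covariance);

  // Divide by the number of valid points if a covariance (rather than a scatter matrix) is needed;
  // pcl::computeCovarianceMatrixNormalized does this in one call.
  if (valid > 0)
    covariance /= static_cast<float> (valid);
}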
bool StereoSensorProcessor::cleanPointCloud(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointCloud)
{
  pcl::PointCloud<pcl::PointXYZRGB> tempPointCloud;

  originalWidth_ = pointCloud->width;
  pcl::removeNaNFromPointCloud(*pointCloud, tempPointCloud, indices_);
  tempPointCloud.is_dense = true;
  pointCloud->swap(tempPointCloud);

  ROS_DEBUG("ElevationMap: cleanPointCloud() reduced point cloud to %i points.", static_cast<int>(pointCloud->size()));
  return true;
}
Example #9
void
pcl::gpu::people::PeopleDetector::shs5 (const pcl::PointCloud<PointT> &cloud,
                                        const std::vector<int>& indices,
                                        unsigned char *mask)
{
  pcl::device::Intr intr (fx_, fy_, cx_, cy_);
  intr.setDefaultPPIfIncorrect (cloud.width, cloud.height);

  const float *hue = &hue_host_.points[0];
  double squared_radius = CLUST_TOL_SHS * CLUST_TOL_SHS;

  std::vector<std::vector<int> > storage (100);

  // Process all points in the indices vector
  int total = static_cast<int> (indices.size ());
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int k = 0; k < total; ++k)
  {
    int i = indices[k];
    if (mask[i])
      continue;

    mask[i] = 255;

    int id = 0;
#ifdef _OPENMP
    id = omp_get_thread_num();
#endif
    std::vector<int>& seed_queue = storage[id];
    seed_queue.clear ();
    seed_queue.reserve (cloud.size ());
    int sq_idx = 0;
    seed_queue.push_back (i);

    PointT p = cloud.points[i];
    float h = hue[i];

    while (sq_idx < (int) seed_queue.size ())
    {
      int index = seed_queue[sq_idx];
      const PointT& q = cloud.points[index];

      if (!pcl::isFinite (q))
      {
        // Skip invalid points but still advance the queue index, otherwise the loop would spin on the same entry
        sq_idx++;
        continue;
      }

      // search window                  
      int left, right, top, bottom;
      getProjectedRadiusSearchBox (cloud.height, cloud.width, intr, q, squared_radius, left, right, top, bottom);

      int yEnd = (bottom + 1) * cloud.width + right + 1;
      int idx = top * cloud.width + left;
      int skip = cloud.width - right + left - 1;
      int xEnd = idx - left + right + 1;

      for (; xEnd < yEnd; idx += 2 * skip, xEnd += 2 * cloud.width)
        for (; idx < xEnd; idx += 2)
        {
          if (mask[idx])
            continue;

          if (sqnorm (cloud.points[idx], q) <= squared_radius)
          {
            float h_l = hue[idx];

            if (fabs (h_l - h) < DELTA_HUE_SHS)
            {
              seed_queue.push_back (idx);
              mask[idx] = 255;
            }
          }
        }

      sq_idx++;
    }
  }
}
Example #10
template <typename PointT> void
pcl::io::pointCloudTovtkPolyData (const pcl::PointCloud<PointT>& cloud, vtkPolyData* const pdata)
{
  // Get a list of all the fields available
  std::vector<pcl::PCLPointField> fields;
  pcl::for_each_type<typename pcl::traits::fieldList<PointT>::type>(pcl::detail::FieldAdder<PointT>(fields));

  // Coordinates (always must have coordinates)
  vtkIdType nr_points = cloud.points.size ();
  vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New ();
  points->SetNumberOfPoints (nr_points);
  // Get a pointer to the beginning of the data array
  float *data = (static_cast<vtkFloatArray*> (points->GetData ()))->GetPointer (0);

  // Set the points
  if (cloud.is_dense)
  {
    for (vtkIdType i = 0; i < nr_points; ++i)
      memcpy (&data[i * 3], &cloud[i].x, 12);    // sizeof (float) * 3
  }
  else
  {
    vtkIdType j = 0;    // true point index
    for (vtkIdType i = 0; i < nr_points; ++i)
    {
      // Check if the point is invalid
      if (!std::isfinite (cloud[i].x) ||
          !std::isfinite (cloud[i].y) ||
          !std::isfinite (cloud[i].z))
        continue;

      memcpy (&data[j * 3], &cloud[i].x, 12);    // sizeof (float) * 3
      j++;
    }
    nr_points = j;
    points->SetNumberOfPoints (nr_points);
  }

  // Create a temporary PolyData and add the points to it
  vtkSmartPointer<vtkPolyData> temp_polydata = vtkSmartPointer<vtkPolyData>::New ();
  temp_polydata->SetPoints (points);

  // Check if Normals are present
  int normal_x_idx = -1, normal_y_idx = -1, normal_z_idx = -1;
  for (size_t d = 0; d < fields.size (); ++d)
  {
    if (fields[d].name == "normal_x")
      normal_x_idx = fields[d].offset;
    else if (fields[d].name == "normal_y")
      normal_y_idx = fields[d].offset;
    else if (fields[d].name == "normal_z")
      normal_z_idx = fields[d].offset;
  }
  if (normal_x_idx != -1 && normal_y_idx != -1 && normal_z_idx != -1)
  {
    vtkSmartPointer<vtkFloatArray> normals = vtkSmartPointer<vtkFloatArray>::New ();
    normals->SetNumberOfComponents (3); //3d normals (ie x,y,z)
    normals->SetNumberOfTuples (cloud.size ());
    normals->SetName ("Normals");

    for (size_t i = 0; i < cloud.size (); ++i)
    {
      float normal[3];
      pcl::getFieldValue<PointT, float> (cloud[i], normal_x_idx, normal[0]);
      pcl::getFieldValue<PointT, float> (cloud[i], normal_y_idx, normal[1]);
      pcl::getFieldValue<PointT, float> (cloud[i], normal_z_idx, normal[2]);
      normals->SetTupleValue (i, normal);
    }
    temp_polydata->GetPointData ()->SetNormals (normals);
  }

  // Colors (optional)
  int rgb_idx = -1;
  for (size_t d = 0; d < fields.size (); ++d)
  {
    if (fields[d].name == "rgb" || fields[d].name == "rgba")
    {
      rgb_idx = fields[d].offset;
      break;
    }
  }
  if (rgb_idx != -1)
  {
    vtkSmartPointer<vtkUnsignedCharArray> colors = vtkSmartPointer<vtkUnsignedCharArray>::New ();
    colors->SetNumberOfComponents (3);
    colors->SetNumberOfTuples (cloud.size ());
    colors->SetName ("RGB");

    for (size_t i = 0; i < cloud.size (); ++i)
    {
      unsigned char color[3];
      pcl::RGB rgb;
      pcl::getFieldValue<PointT, uint32_t> (cloud[i], rgb_idx, rgb.rgba);
      color[0] = rgb.r; color[1] = rgb.g; color[2] = rgb.b;
      colors->SetTupleValue (i, color);
    }
    temp_polydata->GetPointData ()->SetScalars (colors);
  }

  // Add 0D topology to every point
  vtkSmartPointer<vtkVertexGlyphFilter> vertex_glyph_filter = vtkSmartPointer<vtkVertexGlyphFilter>::New ();
  vertex_glyph_filter->SetInputData (temp_polydata);
  vertex_glyph_filter->Update ();

  pdata->DeepCopy (vertex_glyph_filter->GetOutput ());
}
/**
 * Gets the similarity between a cylinder and a given point cloud as a value between 0.0 and 1.0,
 * computed as the ratio of cylinder-model inliers to the total number of input points.
 * @param input_cloud_ptr the input cloud
 * @param coefficients the fitted cylinder coefficients (filled by the RANSAC segmentation)
 * @return likelihood in [0.0, 1.0]
 */
double PointCloudDetection::getCylinderLikelihood(
        const pcl::PointCloud<PointRGBT>::Ptr input_cloud_ptr, pcl::ModelCoefficients& coefficients) {


    //The KD tree for the segmentation
    pcl::search::KdTree<PointRGBT>::Ptr tree(new pcl::search::KdTree<PointRGBT>());
    //Structure for normal estimation
    pcl::NormalEstimation<PointRGBT, pcl::Normal> ne; //Normal estimation

    //for the Ransac using Cylinder normals
    pcl::SACSegmentationFromNormals<PointRGBT, pcl::Normal> seg; // the ransac filter

    //the structure to store the cloud normals
    pcl::PointCloud<pcl::Normal> cloud_normals; // the cloud normals

    //for extraction
    pcl::ExtractIndices<PointRGBT> extract; //for point extraction from the filter
    pcl::ExtractIndices<pcl::Normal> extract_normals;

    //all points within a cylinder
    pcl::PointCloud<PointRGBT> cloud_out;

    //the cylinder inliers found by the segmentation
    pcl::PointIndices inliers;


  // Estimate point normals
    ne.setSearchMethod(tree);
    ne.setInputCloud(input_cloud_ptr);
    ne.setKSearch(50);
    ne.compute(cloud_normals);

    // Create the segmentation object for the planar model and set all the parameters
    seg.setOptimizeCoefficients(true);
    seg.setModelType(pcl::SACMODEL_CYLINDER);
    seg.setMethodType(pcl::SAC_RANSAC);

    seg.setNormalDistanceWeight(ransac_normal_dist_weight_);
    seg.setMaxIterations(ransac_iterations_);
    seg.setDistanceThreshold(ransac_dist_threshold_);
    seg.setRadiusLimits(ransac_min_radius_, ransac_max_radius_);

    seg.setInputCloud(input_cloud_ptr);
    seg.setInputNormals(boost::make_shared<pcl::PointCloud<pcl::Normal> >(cloud_normals));
    // Obtain the cylinder inliers and coefficients
    seg.segment(inliers, coefficients);

    if (debug_) PCL_WARN("Cylinder RanSac. Found %d inliers in a cloud of %d points\n",(int) inliers.indices.size(), (int) input_cloud_ptr->size());

    // Extract the inliers from the input cloud

    if (inliers.indices.size() > 0) {
        extract.setInputCloud(input_cloud_ptr);
        extract.setIndices(boost::make_shared<const pcl::PointIndices>(inliers));

        extract_normals.setInputCloud(boost::make_shared<pcl::PointCloud<pcl::Normal> >(cloud_normals));
        extract_normals.setIndices(boost::make_shared<const pcl::PointIndices>(inliers));

        extract.setNegative(false);
        extract.filter(cloud_out);

        return static_cast<double>(cloud_out.points.size())/input_cloud_ptr->size();

    } else {
        return 0.0;
    }
}
	inline void
	remove_cluster_2d(	const image_geometry::PinholeCameraModel &camera_model,
								const pcl::PointCloud<PointT> &in,
								pcl::PointCloud<PointT> &out,
								int rows,
								int cols,
								int k_neighbors = 4,
								int border = 25)
	{
		std::vector<int> points2d_indices;
		pcl::PointCloud<pcl::PointXY> points2d;

#ifdef DEBUG
		std::cout << "in points: "<< in.size() << "\n";
#endif

		// Project points into image space
		for(unsigned int i=0; i < in.points.size();  ++i){
			const PointT* pt = &in.points.at(i);
			if( pt->z > 1) { // min distance from camera 1m

				cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));

				if( between<int>(0+border, point_image.x, cols-border )
					&& between<int>( 0+border, point_image.y, rows-border )
				)
				{
					pcl::PointXY p_image;
					p_image.x = point_image.x;
					p_image.y = point_image.y;

					points2d.push_back(p_image);
					points2d_indices.push_back(i);
				}
			}
		}

#ifdef DEBUG
		std::cout << "projected 2d points: "<< points2d.size() << " indices: " << points2d_indices.size() << "\n";
#endif

		pcl::search::KdTree<pcl::PointXY> tree_n;
		tree_n.setInputCloud(points2d.makeShared());
		tree_n.setEpsilon(0.1);

		for(unsigned int i=0; i < points2d.size(); ++i){
			std::vector<int> k_indices;
			std::vector<float> square_distance;

			//tree_n.nearestKSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
			// Note: k_neighbors is passed here as the search radius (in pixels) of radiusSearch
			tree_n.radiusSearch(points2d.at(i), k_neighbors, k_indices, square_distance);

			if(k_indices.empty()) continue; // Don't add points without neighbors

			look_up_indices(points2d_indices, k_indices);

			float edginess = 0;
			if(is_edge_z(in, points2d_indices.at(i), k_indices, square_distance, edginess, 0.1)){
				out.push_back(in.points.at(points2d_indices.at(i)));
				out.at(out.size()-1).intensity = edginess;
			}
		}

#ifdef DEBUG
		std::cout << "out 2d points: "<< out.size() << "\n";
#endif
	}
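	// remove_cluster_2d() above relies on a between<T>() range check (plus look_up_indices()
	// and is_edge_z()) that are defined elsewhere. A minimal sketch of one plausible
	// between<T>() -- inclusive lower bound, exclusive upper bound -- is given here for
	// reference; the project's actual helper may differ.
	template <typename T>
	inline bool between(T lower, T value, T upper)
	{
		return (value >= lower) && (value < upper);
	}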
void union_find(pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr cloud, int K, std::vector<int>& inliers){

	if (cloud->size()==0){
		return;
	}
	if(K==0){
		ROS_WARN("parameter K for K nearest neighbor is 0, aborting");
		return;
	}
	std::vector<union_find_node*> forest;
	std::vector<union_find_node*> model(cloud->size(),NULL);//init to NULL
	std::vector<int> knnindices(K); //stores indices of the last K nearest neighbors search
	std::vector<float> knnSqDistances(K); //stores squared distances of the last KNN search
	int knncount=0;

	//KDTree
	pcl::KdTreeFLANN<pcl::PointXYZRGBA> kdtree;

	kdtree.setInputCloud (cloud);

	for (int i=0;i<cloud->size();i++){
		//if point is not yet visited
		if(model[i]==NULL){
			//find K nearest neighbors of the current point
			knncount = kdtree.nearestKSearch(cloud->at(i),K,knnindices,knnSqDistances);
			//union them with the current point
			for (int result_i=0;result_i<knncount;result_i++){
				//create the point in the model if it does not exist yet
				if (model[knnindices[result_i]]==NULL){
					model[knnindices[result_i]] = new union_find_node;
					makeSet(model[knnindices[result_i]]);
				}
				//actually do the union
				union_nodes(model[i],model[knnindices[result_i]]);
			}
			//add the root to the forest (checking it is not already there)
			addToForest(forest,find(model[i]));
		}
	}

	//now we have to find the biggest tree
	if(forest.size()!=0){
		size_t max_size=0, biggest_tree_i=0;
		for (size_t i=0;i<forest.size();i++){
			if( forest[i]->children_n > max_size){
				max_size = forest[i]->children_n;
				biggest_tree_i = i;
			}
		}

		union_find_node* the_root = forest[biggest_tree_i];

		//we add to the inliers all the points that have the_root as root
		for (int i=0; i < cloud->size(); i++){
			if(find(model[i])==the_root){
				inliers.push_back(i);
			}
		}
		for (int i=0;i<model.size();i++)
			delete model[i];
	}
}
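// union_find() above assumes a union_find_node type and the makeSet()/find()/union_nodes()/
// addToForest() helpers, none of which appear in this excerpt. The sketch below is one
// plausible implementation (union by size, path compression, children_n tracking subtree
// size, addToForest() skipping roots already present); the project's real definitions may
// differ. Requires <vector>, <algorithm> and <utility>.
struct union_find_node {
	union_find_node* parent;
	size_t children_n;
};

inline void makeSet(union_find_node* n){
	n->parent = n;
	n->children_n = 1;
}

inline union_find_node* find(union_find_node* n){
	if (n->parent != n)
		n->parent = find(n->parent); // path compression
	return n->parent;
}

inline void union_nodes(union_find_node* a, union_find_node* b){
	union_find_node* ra = find(a);
	union_find_node* rb = find(b);
	if (ra == rb) return;
	if (ra->children_n < rb->children_n) std::swap(ra, rb); // union by size
	rb->parent = ra;
	ra->children_n += rb->children_n;
}

inline void addToForest(std::vector<union_find_node*>& forest, union_find_node* root){
	if (std::find(forest.begin(), forest.end(), root) == forest.end())
		forest.push_back(root);
}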
void getRotations(const pcl::PointCloud<NormalType>::Ptr &cloud_normals,
                  const std::string &outName, Eigen::Vector3d &M1,
                  Eigen::Vector3d &M2, Eigen::Vector3d &M3) {

  if (!FLAGS_redo) {
    std::ifstream in(outName, std::ios::in | std::ios::binary);
    if (in.is_open()) {
      std::vector<Eigen::Matrix3d> R(NUM_ROTS);
      std::vector<Eigen::Vector3d> M(3);
      for (auto &r : R)
        in.read(reinterpret_cast<char *>(r.data()), sizeof(Eigen::Matrix3d));

      for (auto &m : M)
        in.read(reinterpret_cast<char *>(m.data()), sizeof(double) * 3);

      M1 = M[0];
      M2 = M[1];
      M3 = M[2];

      in.close();

      return;
    }
  }

  // NB: Convert pcl to eigen so linear algebra is easier
  std::vector<Eigen::Vector3d> normals;
  normals.reserve(cloud_normals->size());
  for (auto &n : *cloud_normals)
    normals.emplace_back(n.normal_x, n.normal_y, n.normal_z);

  if (!FLAGS_quietMode)
    std::cout << "N size: " << normals.size() << std::endl;

  std::vector<Eigen::Vector3d> M(3);
  satoshiRansacManhattan1(normals, M[0]);
  if (!FLAGS_quietMode) {
    std::cout << "D1: " << M[0] << std::endl << std::endl;
  }

  // NB: Select normals that are perpendicular to the first
  // dominant direction
  std::vector<Eigen::Vector3d> N2;
  for (auto &n : normals)
    if (std::asin(n.cross(M[0]).norm()) > PI / 2.0 - 0.02)
      N2.push_back(n);

  if (!FLAGS_quietMode)
    std::cout << "N2 size: " << N2.size() << std::endl;

  satoshiRansacManhattan2(N2, M[0], M[1], M[2]);

  if (!FLAGS_quietMode) {
    std::cout << "D2: " << M[1] << std::endl << std::endl;
    std::cout << "D3: " << M[2] << std::endl << std::endl;
  }

  if (std::abs(M[0][2]) > 0.5) {
    M1 = M[0];
    M2 = M[1];
  } else if (std::abs(M[1][2]) > 0.5) {
    M1 = M[1];
    M2 = M[0];
  } else {
    M1 = M[2];
    M2 = M[0];
  }

  if (M1[2] < 0)
    M1 *= -1.0;

  M3 = M1.cross(M2);

  M[0] = M1;
  M[1] = M2;
  M[2] = M3;

  std::vector<Eigen::Matrix3d> R(4);

  getMajorAngles(M1, M2, M3, R);

  if (!FLAGS_quietMode) {
    for (auto &r : R)
      std::cout << r << std::endl << std::endl;
  }

  if (FLAGS_save) {
    std::ofstream binaryWriter(outName, std::ios::out | std::ios::binary);
    for (auto &r : R)
      binaryWriter.write(reinterpret_cast<const char *>(r.data()),
                         sizeof(Eigen::Matrix3d));

    for (auto &m : M)
      binaryWriter.write(reinterpret_cast<const char *>(m.data()),
                         sizeof(double) * 3);

    binaryWriter.close();
  }
}
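// getMajorAngles() is not shown in this excerpt. For reference only, one common convention
// for turning the three dominant directions into a rotation that expresses points in the
// Manhattan frame is sketched below (rows are the unit directions, ordered so the frame is
// right-handed given M3 = M1 x M2); the project's NUM_ROTS rotations are likely built
// differently. Assumes Eigen is already included, as in the code above.
inline Eigen::Matrix3d manhattanFrameRotation(const Eigen::Vector3d &M1,
                                              const Eigen::Vector3d &M2,
                                              const Eigen::Vector3d &M3) {
  Eigen::Matrix3d R;
  R.row(0) = M2.normalized();
  R.row(1) = M3.normalized();
  R.row(2) = M1.normalized();
  return R; // R * p gives p's coordinates along (M2, M3, M1)
}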
double
dist_bounding_box(carmen_point_t particle_pose, pcl::PointCloud<pcl::PointXYZ> &point_cloud, object_geometry_t model_geometry,
		object_geometry_t obj_geometry, carmen_vector_3D_t car_global_position, double x_pose, double y_pose)
{
	double sum = 0.0;
	long unsigned int pcl_size = point_cloud.size();
	carmen_position_t new_pt; //(x,y)
	double width_normalizer = norm_factor_y/model_geometry.width;
	double length_normalizer = norm_factor_x/model_geometry.length;
	double height_normalizer = norm_factor_x/model_geometry.height;

	pcl::PointCloud<pcl::PointXYZ>::iterator end = point_cloud.points.end();
	/*** BOX POSITIONING ***/

//	#pragma omp parallel for reduction(+ : sum)
//	for (pcl::PointCloud<pcl::PointXYZ>::iterator it = point_cloud.points.begin(); it < end; ++it)
//	{
//		new_pt = transf2d_bounding_box(car_global_position.x + it->x - particle_pose.x, car_global_position.y + it->y - particle_pose.y, -particle_pose.theta);
//		sum += dist_btw_point_and_box(new_pt, width_normalizer, length_normalizer);
//	}

	#pragma omp parallel for reduction(+ : sum)
	for (long unsigned int i = 0; i < pcl_size; i++)
	{
		new_pt = transf2d_bounding_box(car_global_position.x + point_cloud.points[i].x - particle_pose.x, car_global_position.y + point_cloud.points[i].y - particle_pose.y, -particle_pose.theta);
		sum += dist_btw_point_and_box(new_pt, width_normalizer, length_normalizer);
	}

	double penalty = 0.0;
	// Centroid's coordinates already global
	new_pt = transf2d_bounding_box(x_pose - particle_pose.x, y_pose - particle_pose.y, -particle_pose.theta);
	if (new_pt.x > 0.5*model_geometry.length || new_pt.y > 0.5*model_geometry.width)
	{
		new_pt.x *= length_normalizer;//4.5;
		new_pt.y *= width_normalizer;//2.1;
		penalty = sqrt(new_pt.x*new_pt.x + new_pt.y*new_pt.y);//new_pt.x + new_pt.y;//
	}

	/*** BOX DIMENSIONING ***/
	// Penalize based on the differences between dimensions of point cloud and model box
	double diff_length = (model_geometry.length - obj_geometry.length)*length_normalizer;
	double diff_width = (model_geometry.width - obj_geometry.width)*width_normalizer;
	double diff_height = (model_geometry.height - obj_geometry.height)*height_normalizer;
	double object_diagonal_xy = sqrt(obj_geometry.length*length_normalizer + obj_geometry.width*width_normalizer);
	double model_diagonal_xy = sqrt(model_geometry.length*length_normalizer + model_geometry.width*width_normalizer);
	double diff_diagonal = (model_diagonal_xy - object_diagonal_xy);
//	penalty += 2*diff_diagonal + diff_height;
	penalty += diff_length*diff_length + diff_width*diff_width + diff_height*diff_height + diff_diagonal*diff_diagonal;

	/*
	// here we penalize according to the object's viewpoint relative to the car.
	double local_x = x_pose - car_global_position.x;
	double local_y = y_pose - car_global_position.y;
	double local_theta = atan2(local_y,local_x);

	if((local_theta < M_PI/36 && local_theta > -M_PI/36) || (local_theta > 35*M_PI/36 && local_theta < -35*M_PI/36))
	{
		//case 1
		diff_width = (model_geometry.width - obj_geometry.length)*width_normalizer;
		penalty += diff_width*diff_width + diff_height*diff_height;
	} else if( (local_theta > M_PI/36 && local_theta < 3*M_PI/8) || (local_theta > 5*M_PI/8 && local_theta < 35*M_PI/36) ||
			(local_theta < -M_PI/36 && local_theta > -3*M_PI/8) || (local_theta < -5*M_PI/8 && local_theta > -35*M_PI/36))
	{
		//case 2
		penalty += diff_length*diff_length + diff_width*diff_width + diff_height*diff_height + diff_diagonal*diff_diagonal;
	} else if( (local_theta > 3*M_PI/8 && local_theta < 5*M_PI/8) || (local_theta < -3*M_PI/8 && local_theta > -5*M_PI/8) )
	{
		//case 3
		penalty += diff_length*diff_length + diff_height*diff_height;
	}
	*/

	// Avoid division by zero
	if (pcl_size != 0)
		return sum/pcl_size + 0.2*penalty; //return normalized distance with penalty

	/* Else return very big distance */
	return 999999.0;
}
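// transf2d_bounding_box() and dist_btw_point_and_box() are defined elsewhere. Judging from
// how it is called above (pre-translated offsets plus -particle_pose.theta), the transform
// is presumably a plain 2-D rotation into the particle frame; a purely illustrative sketch
// under that assumption (the real helper may differ, name is hypothetical):
carmen_position_t transf2d_bounding_box_sketch(double dx, double dy, double theta)
{
	carmen_position_t p;
	p.x = dx * cos(theta) - dy * sin(theta);
	p.y = dx * sin(theta) + dy * cos(theta);
	return p;
}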
// METHOD THAT RECEIVES POINT CLOUDS (OPEN MP)
std::vector<cluster> poseEstimationSV::poseEstimationCore_openmp(pcl::PointCloud<pcl::PointXYZ>::ConstPtr cloud)
{
Tic();
	std::vector <std::vector < pose > > bestPosesAux;
	bestPosesAux.resize(omp_get_num_procs());

	//int bestPoseAlpha;
	//int bestPosePoint;
	//int bestPoseVotes;
	
	Eigen::Vector3f scenePoint;
	Eigen::Vector3f sceneNormal;


	pcl::PointIndices normals_nan_indices;
	pcl::ExtractIndices<pcl::PointNormal> nan_extract;

	float alpha;
	unsigned int alphaBin,index;
	// Iterators
	//unsigned int sr; // scene reference point
	pcl::PointCloud<pcl::PointNormal>::iterator si;	// scene paired point
	std::vector<pointPairSV>::iterator sameFeatureIt; // same key on hash table
	std::vector<boost::shared_ptr<pose> >::iterator bestPosesIt;

	Eigen::Vector4f feature;
	Eigen::Vector3f _pointTwoTransformed;
	std::cout<< "\tCloud size: " << cloud->size() << endl;
	//////////////////////////////////////////////
	// Downsample point cloud using a voxelgrid //
	//////////////////////////////////////////////

	pcl::PointCloud<pcl::PointXYZ>::Ptr cloudDownsampled(new pcl::PointCloud<pcl::PointXYZ> ());
  	// Create the filtering object
  	pcl::VoxelGrid<pcl::PointXYZ> sor;
  	sor.setInputCloud (cloud);
  	sor.setLeafSize (model->distanceStep,model->distanceStep,model->distanceStep);
  	sor.filter (*cloudDownsampled);
	std::cout<< "\tCloud size after downsampling: " << cloudDownsampled->size() << endl;

	// Compute point cloud normals (using cloud before downsampling information)
	std::cout<< "\tCompute normals... ";
	cloudNormals=model->computeSceneNormals(cloudDownsampled);
	std::cout<< "Done" << endl;

	/*boost::shared_ptr<pcl_visualization::PCLVisualizer> viewer2 = objectModel::viewportsVis(cloudFilteredNormals);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/

	/*boost::shared_ptr<pcl_visualization::PCLVisualizer> viewer2 = objectModel::viewportsVis(model->modelCloud);

  	while (!viewer2->wasStopped ())
  	{
   		viewer2->spinOnce (100);
    		boost::this_thread::sleep (boost::posix_time::microseconds (100000));
  	}*/
	//////////////////////////////////////////////////////////////////////////////
	// Filter again to remove spurious NaN normals (and their associated points) //
	//////////////////////////////////////////////////////////////////////////////

	for (unsigned int i = 0; i < cloudNormals->points.size(); ++i) 
	{
		if (isnan(cloudNormals->points[i].normal[0]) || isnan(cloudNormals->points[i].normal[1]) || isnan(cloudNormals->points[i].normal[2]))
		{
	   		normals_nan_indices.indices.push_back(i);
		}
	}

	nan_extract.setInputCloud(cloudNormals);
	nan_extract.setIndices(boost::make_shared<pcl::PointIndices> (normals_nan_indices));
	nan_extract.setNegative(true);
	nan_extract.filter(*cloudWithNormalsDownSampled);
	std::cout<< "\tCloud size after removing NaN normals: " << cloudWithNormalsDownSampled->size() << endl;


	/////////////////////////////////////////////
	// Extract reference points from the scene //
	/////////////////////////////////////////////

	//pcl::RandomSample< pcl::PointCloud<pcl::PointNormal> > randomSampler;
	//randomSampler.setInputCloud(cloudWithNormalsDownSampled);
	// Create the filtering object
	int numberOfPoints=(int) (cloudWithNormalsDownSampled->size () * referencePointsPercentage);
	int totalPoints=(int) (cloudWithNormalsDownSampled->size ());
	std::cout << "\tUniformly sampling a set of " << numberOfPoints << " points (" << referencePointsPercentage*100 <<  "%)... ";
	referencePointsIndices->indices.clear();
	extractReferencePointsUniform(referencePointsPercentage,totalPoints);
	std::cout << "Done" << std::endl;
	//std::cout << referencePointsIndices->indices.size() << std::endl;

	//////////////
	// Voting   //
	//////////////

	std::cout<< "\tVoting... ";

	omp_set_num_threads(omp_get_num_procs());
	//omp_set_num_threads(1);
	//int iteration=0;

        bestPoses.clear();
	#pragma omp parallel for private(alpha,alphaBin,alphaScene,sameFeatureIt,index,feature,si,_pointTwoTransformed) //reduction(+:iteration)  //nowait
	for(unsigned int sr=0; sr < referencePointsIndices->indices.size(); ++sr)
	{
	
		//++iteration;
		//std::cout << "iteration: " << iteration << " thread:" << omp_get_thread_num() << std::endl;
		//printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
		scenePoint=cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].getVector3fMap();
		sceneNormal=cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].getNormalVector3fMap();

		// Get transformation from scene frame to global frame
		Eigen::Vector3f cross=sceneNormal.cross (Eigen::Vector3f::UnitX ()). normalized();

		Eigen::Affine3f rotationSceneToGlobal;
		if(isnan(cross[0]))
		{
			rotationSceneToGlobal=Eigen::AngleAxisf(0.0,Eigen::Vector3f::UnitX ());
		}
		else
			rotationSceneToGlobal=Eigen::AngleAxisf(acosf (sceneNormal.dot (Eigen::Vector3f::UnitX ())),cross);

		Eigen::Affine3f transformSceneToGlobal = Eigen::Translation3f ( rotationSceneToGlobal* ((-1)*scenePoint)) * rotationSceneToGlobal;

		//////////////////////
		// Choose best pose //
		//////////////////////

		// Reset pose accumulator
		for(std::vector<std::vector<int> >::iterator accumulatorIt=accumulatorParallelAux[omp_get_thread_num()].begin();accumulatorIt < accumulatorParallelAux[omp_get_thread_num()].end(); ++accumulatorIt)
		{
			std::fill(accumulatorIt->begin(),accumulatorIt->end(),0); 
		}
		

		//std::cout << std::endl;
		for(si=cloudWithNormalsDownSampled->begin(); si < cloudWithNormalsDownSampled->end();++si)
		{
			// if same point, skip point pair
			if( (cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].x==si->x) && (cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].y==si->y) && (cloudWithNormalsDownSampled->points[referencePointsIndices->indices[sr]].z==si->z))
			{
				//std::cout << si->x << " " << si->y << " " << si->z << std::endl;
				continue;
			}	

			// Compute PPF
			pointPairSV PPF=pointPairSV(cloudWithNormalsDownSampled->points[sr],*si, transformSceneToGlobal);

			// Compute index
			index=PPF.getHash(*si,model->distanceStepInverted);

			// If distance between point pairs is bigger than the maximum for this model, skip point pair
			if(index>pointPairSV::maxHash)
			{
				//std::cout << "DEBUG" << std::endl;
				continue;
			}

			// If there is no similar point pair features in the model, skip point pair and avoid computing the alpha
			if(model->hashTable[index].size()==0)
				continue; 

			for(sameFeatureIt=model->hashTable[index].begin(); sameFeatureIt<model->hashTable[index].end(); ++sameFeatureIt)
			{
				// Vote on the reference point and angle (and object)
				alpha=sameFeatureIt->alpha-PPF.alpha; // alpha values between [-2*PI, 2*PI]

				// wrap alpha into [-PI, PI] (ANGLE_MAX = 2*PI)
				if(alpha<(-PI))
					alpha=ANGLE_MAX+alpha;
				else if(alpha>(PI))
					alpha=alpha-ANGLE_MAX;
				//std::cout << "alpha after: " << alpha*RAD_TO_DEG << std::endl;
				//std::cout << "alpha after2: " << (alpha+PI)*RAD_TO_DEG << std::endl;
				alphaBin=static_cast<unsigned int> ( round((alpha+PI)*pointPair::angleStepInverted) ); // division is slower than multiplication
				//std::cout << "angle1: " << alphaBin << std::endl;
           			/*alphaBin = static_cast<unsigned int> (floor (alpha) + floor (PI *poseAngleStepInverted));
				std::cout << "angle2: " << alphaBin << std::endl;*/
				//alphaBin=static_cast<unsigned int> ( floor(alpha*poseAngleStepInverted) + floor(PI*poseAngleStepInverted) );
				if(alphaBin>=pointPair::angleBins)
				{	
					alphaBin=0;
					//ROS_INFO("naoooo");
					//exit(1);
				}

//#pragma omp critical
//{std::cout << index <<" "<<sameFeatureIt->id << " " << alphaBin << " " << omp_get_thread_num() << " " << accumulatorParallelAux[omp_get_thread_num()][sameFeatureIt->id][alphaBin] << std::endl;}

				accumulatorParallelAux[omp_get_thread_num()][sameFeatureIt->id][alphaBin]+=sameFeatureIt->weight;
			}
		}
		//ROS_INFO("DISTANCE:%f DISTANCE SQUARED:%f", model->maxModelDist, model->maxModel

		// Choose best pose (highest peak on the accumulator[peak with more votes])

		int bestPoseAlpha=0;
		int bestPosePoint=0;
		int bestPoseVotes=0;

		for(size_t p=0; p < model->modelCloud->size(); ++p)
		{
			for(unsigned int a=0; a < pointPair::angleBins; ++a)
			{
				if(accumulatorParallelAux[omp_get_thread_num()][p][a]>bestPoseVotes)
				{
					bestPoseVotes=accumulatorParallelAux[omp_get_thread_num()][p][a];
					bestPosePoint=p;
					bestPoseAlpha=a;
				}
			}
		}

		// A candidate pose was found
		if(bestPoseVotes!=0)
		{
			// Compute and store transformation from model to scene
			//boost::shared_ptr<pose> bestPose(new pose( bestPoseVotes,model->modelToScene(model->modelCloud->points[bestPosePoint],transformSceneToGlobal,static_cast<float>(bestPoseAlpha)*pointPair::angleStep-PI) ));

			bestPosesAux[omp_get_thread_num()].push_back(pose( bestPoseVotes,model->modelToScene(bestPosePoint,transformSceneToGlobal,static_cast<float>(bestPoseAlpha)*pointPair::angleStep-PI) ));
			//bestPoses.push_back(bestPose);

			//std::cout << bestPosesAux[omp_get_thread_num()].size() <<" " <<omp_get_thread_num()<< std::endl;
		}
		else 
		{
			continue;
		}

		// Choose poses whose votes are a percentage above a given threshold of the best pose
		accumulatorParallelAux[omp_get_thread_num()][bestPosePoint][bestPoseAlpha]=0; 	// This is more efficient than having an if condition to verify if we are considering the best pose again
		for(size_t p=0; p < model->modelCloud->size(); ++p)
		{
			for(unsigned int a=0; a < pointPair::angleBins; ++a)
			{
				if(accumulatorParallelAux[omp_get_thread_num()][p][a]>=accumulatorPeakThreshold*bestPoseVotes)
				{
					// Compute and store transformation from model to scene
					//boost::shared_ptr<pose> bestPose(new pose( accumulatorParallelAux[omp_get_thread_num()][p][a],model->modelToScene(model->modelCloud->points[p],transformSceneToGlobal,static_cast<float>(a)*pointPair::angleStep-PI ) ));


					//bestPoses.push_back(bestPose);
					// Store the (p, a) candidate itself with its own vote count, as the
					// commented-out line above suggests; the original pushed the best pose again here.
					bestPosesAux[omp_get_thread_num()].push_back(pose( accumulatorParallelAux[omp_get_thread_num()][p][a],model->modelToScene(p,transformSceneToGlobal,static_cast<float>(a)*pointPair::angleStep-PI) ));
					//std::cout << bestPosesAux[omp_get_thread_num()].size() <<" " <<omp_get_thread_num()<< std::endl;
				}
			}
		}
	}

	std::cout << "Done" << std::endl;


	for(int i=0; i<omp_get_num_procs(); ++i)
	{
		for(unsigned int j=0; j<bestPosesAux[i].size(); ++j)
			bestPoses.push_back(bestPosesAux[i][j]);
	}
	std::cout << "\thypothesis number: " << bestPoses.size() << std::endl << std::endl;

	if(bestPoses.size()==0)
	{
		clusters.clear();
		return clusters;
	}

	
	//////////////////////
	// Compute clusters //
	//////////////////////
Tac();
	std::cout << "\tCompute clusters... ";
Tic();
	clusters=poseClustering(bestPoses);
Tac();
	std::cout << "Done" << std::endl;

	return clusters;
}
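// The voting loop above wraps each angle difference into [-PI, PI] and then discretizes it
// into one of pointPair::angleBins bins. Distilled into a standalone helper for clarity
// (this is a sketch that mirrors the in-line code, not a function of the project; PI and
// ANGLE_MAX are the same constants used above, with ANGLE_MAX == 2*PI):
inline unsigned int alphaToBin(float alpha, float angleStepInverted, unsigned int angleBins)
{
	// wrap to [-PI, PI]
	if (alpha < -PI)
		alpha += ANGLE_MAX;
	else if (alpha > PI)
		alpha -= ANGLE_MAX;
	// shift to [0, 2*PI] and discretize (multiplication instead of division, as above)
	unsigned int bin = static_cast<unsigned int>(round((alpha + PI) * angleStepInverted));
	return (bin >= angleBins) ? 0u : bin;
}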
/*!
 * @brief PointCloudMethod::extractPlane(): detects the dominant plane in the input cloud and extracts it.
 * @param pcl::PointCloud<pcl::PointXYZRGB>::Ptr inputPointCloud
 * @return pcl::PointCloud<pcl::PointXYZRGB>::Ptr outputPointCloud
 */
pcl::PointCloud<pcl::PointXYZRGB>::Ptr PointCloudMethod::extractPlane(pcl::PointCloud<pcl::PointXYZRGB>::Ptr &inputPointCloud, bool optimize, double threshold, bool negative)
{
	cout << "before Extract Plane => " << inputPointCloud->size() << endl;

	pcl::PointCloud<pcl::PointXYZRGB>::Ptr filtered(new pcl::PointCloud<pcl::PointXYZRGB>());
	pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients);
	pcl::PointIndices::Ptr inliers(new pcl::PointIndices);

	// Create the segmentation object
	pcl::SACSegmentation<pcl::PointXYZRGB> seg;

	// Optional
	seg.setOptimizeCoefficients(optimize);

	//Mandatory
	seg.setModelType(pcl::SACMODEL_PLANE);
	seg.setMethodType(pcl::SAC_RANSAC);
	seg.setDistanceThreshold(threshold);

	seg.setInputCloud(inputPointCloud->makeShared());
	seg.segment(*inliers, *coefficients);


	//pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_f(new pcl::PointCloud<pcl::PointXYZRGB>());
	//int i = 0, nr_points = (int)inputPointCloud->points.size();
	//while (inputPointCloud->points.size() > 0.3*nr_points)
	//{
	//	seg.setInputCloud(inputPointCloud);
	//	seg.segment(*inliers, *coefficients);
	//	if (inliers->indices.size() == 0)
	//	{
	//		cout << "Could not estimate a planar model for the given dataset." << endl;
	//		break;
	//	}
	//	pcl::ExtractIndices<pcl::PointXYZRGB> extract;
	//	extract.setInputCloud(inputPointCloud);
	//	extract.setIndices(inliers);
	//	extract.setNegative(false);
	//	extract.filter(*filtered);
	//	extract.setNegative(true);
	//	extract.filter(*cloud_f);
	//	*inputPointCloud = *cloud_f;
	//}

	//pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZRGB>);
	//tree->setInputCloud(filtered);
	//vector<pcl::PointIndices> cluster_indices;
	//pcl::EuclideanClusterExtraction<pcl::PointXYZRGB> ec;
	//ec.setClusterTolerance(0.02); //cm
	//ec.setMinClusterSize(100);
	//ec.setMaxClusterSize(25000);
	//ec.setSearchMethod(tree);
	//ec.setInputCloud(filtered);
	//ec.extract(cluster_indices);

	//int j = 0;
	//for (vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin(); it != cluster_indices.end(); ++it)
	//{
	//	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_cluster(new pcl::PointCloud<pcl::PointXYZRGB>);
	//	for (vector<int>::const_iterator pit = it->indices.begin(); pit != it->indices.end(); ++pit)
	//	{
	//		cloud_cluster->points.push_back(filtered->points[*pit]);
	//	}
	//	cloud_cluster->width = cloud_cluster->points.size();
	//	cloud_cluster->height = 1;
	//	cloud_cluster->is_dense = true;
	//	filtered = cloud_cluster;
	//	j++;
	//}

	/*for (size_t i = 0; i < inliers->indices.size(); ++i){
		inputPointCloud->points[inliers->indices[i]].r = 255;
		inputPointCloud->points[inliers->indices[i]].g = 255;
		inputPointCloud->points[inliers->indices[i]].b = 255;
		}*/

	pcl::ExtractIndices<pcl::PointXYZRGB> extract;
	extract.setInputCloud(inputPointCloud);
	extract.setIndices(inliers);
	extract.setNegative(negative);
	extract.filter(*filtered);

	cout << "after Extract Plane => " << filtered->size() << endl;
	return filtered;
}
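// A minimal usage sketch for extractPlane() above. The wrapper below is illustrative only
// (the PointCloudMethod instance and the 0.01 m threshold are assumptions, not taken from
// the original code); negative = true keeps the points that are NOT on the detected plane.
pcl::PointCloud<pcl::PointXYZRGB>::Ptr removeDominantPlane(PointCloudMethod &method,
		pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud)
{
	return method.extractPlane(cloud, /*optimize=*/true, /*threshold=*/0.01, /*negative=*/true);
}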
cv::Mat PointCloudImageCreator::projectPointCloudToImagePlane(
    const pcl::PointCloud<PointT>::Ptr cloud,
    const jsk_recognition_msgs::ClusterPointIndices::ConstPtr indices,
    const sensor_msgs::CameraInfo::ConstPtr &camera_info,
    cv::Mat &mask) {
    if (cloud->empty()) {
       ROS_ERROR("INPUT CLOUD EMPTY");
       return cv::Mat();
    }
    // Note: only the rows referenced by the cluster indices below are written;
    // points that belong to no cluster keep uninitialized coordinates here.
    cv::Mat objectPoints = cv::Mat(static_cast<int>(cloud->size()), 3, CV_32F);

    cv::RNG rng(12345);
    std::vector<cv::Vec3b> colors;
    std::vector<int> labels(cloud->size());
    for (int j = 0; j < indices->cluster_indices.size(); j++) {
       std::vector<int> point_indices = indices->cluster_indices[j].indices;
       for (auto it = point_indices.begin(); it != point_indices.end(); it++) {
          int i = *it;
          objectPoints.at<float>(i, 0) = cloud->points[i].x;
          objectPoints.at<float>(i, 1) = cloud->points[i].y;
          objectPoints.at<float>(i, 2) = cloud->points[i].z;
          labels[i] = j;
       }
       colors.push_back(cv::Vec3b(rng.uniform(0, 255),
                                  rng.uniform(0, 255),
                                  rng.uniform(0, 255)));
    }

    float K[9];
    float R[9];
    for (int i = 0; i < 9; i++) {
       K[i] = camera_info->K[i];
       R[i] = camera_info->R[i];
    }
    cv::Mat cameraMatrix = cv::Mat(3, 3, CV_32F, K);
    cv::Mat rotationMatrix = cv::Mat(3, 3, CV_32F, R);
    float tvec[3];
    tvec[0] = camera_info->P[3];
    tvec[1] = camera_info->P[7];
    tvec[2] = camera_info->P[11];
    cv::Mat translationMatrix = cv::Mat(3, 1, CV_32F, tvec);

    float D[5];
    for (int i = 0; i < 5; i++) {
       D[i] = camera_info->D[i];
    }
    cv::Mat distortionModel = cv::Mat(5, 1, CV_32F, D);
    cv::Mat rvec;
    cv::Rodrigues(rotationMatrix, rvec);
    
    std::vector<cv::Point2f> imagePoints;
    cv::projectPoints(objectPoints, rvec, translationMatrix,
                      cameraMatrix, distortionModel, imagePoints);
    cv::Scalar color = cv::Scalar(0, 0, 0);
    cv::Mat image = cv::Mat(
       camera_info->height, camera_info->width, CV_8UC3, color);
    mask = cv::Mat::zeros(
       camera_info->height, camera_info->width, CV_32F);

    for (int i = 0; i < static_cast<int>(imagePoints.size()); i++) {
       // Skip NaN projections before the float -> int conversion, and keep indices
       // strictly inside the image to avoid out-of-bounds access at x == cols / y == rows.
       if (std::isnan(imagePoints[i].x) || std::isnan(imagePoints[i].y)) {
          continue;
       }
       int x = static_cast<int>(imagePoints[i].x);
       int y = static_cast<int>(imagePoints[i].y);
       if ((x >= 0 && x < image.cols) &&
           (y >= 0 && y < image.rows)) {

          image.at<cv::Vec3b>(y, x)[2] = labels[i] + 1;
          image.at<cv::Vec3b>(y, x)[1] = labels[i] + 1;
          image.at<cv::Vec3b>(y, x)[0] = labels[i] + 1;
          
          mask.at<float>(y, x) = 255.0f;
       }
    }
    return image;
}
void TerrainClassifierNode::publishDebugData() const
{
  sensor_msgs::PointCloud2 cloud_point_msg;

  if (cloud_input_pub.getNumSubscribers() > 0)
  {
    pcl::toROSMsg(*(terrain_classifier.getInputCloud()), cloud_point_msg);
    cloud_point_msg.header.stamp = ros::Time::now();
    cloud_point_msg.header.frame_id = terrain_classifier.getFrameId();
    cloud_input_pub.publish(cloud_point_msg);
  }

  if (cloud_points_processed_pub.getNumSubscribers() > 0)
  {
    pcl::toROSMsg(*(terrain_classifier.getCloudProcessed()), cloud_point_msg);
    cloud_point_msg.header.stamp = ros::Time::now();
    cloud_point_msg.header.frame_id = terrain_classifier.getFrameId();
    cloud_points_processed_pub.publish(cloud_point_msg);
  }

  if (cloud_points_processed_low_res_pub.getNumSubscribers() > 0)
  {
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>());
    terrain_classifier.getCloudProcessedLowRes(cloud);
    pcl::toROSMsg(*cloud, cloud_point_msg);
    cloud_point_msg.header.stamp = ros::Time::now();
    cloud_point_msg.header.frame_id = terrain_classifier.getFrameId();
    cloud_points_processed_low_res_pub.publish(cloud_point_msg);
  }

  if (cloud_normals_pub.getNumSubscribers() > 0)
  {
    // convert normals to PoseArray and publish them
    const pcl::PointCloud<pcl::PointNormal>::ConstPtr cloud_points_with_normals = terrain_classifier.getPointsWithsNormals();

    geometry_msgs::PoseArray pose_array;
    pose_array.header.frame_id = terrain_classifier.getFrameId();
    pose_array.header.stamp = ros::Time::now();

    geometry_msgs::Pose pose_msg;
    for (size_t i = 0; i < cloud_points_with_normals->size(); i++)
    {
      const pcl::PointNormal& p_n = cloud_points_with_normals->at(i);

      pose_msg.position.x = p_n.x;
      pose_msg.position.y = p_n.y;
      pose_msg.position.z = p_n.z;

      geometry_msgs::Vector3 n;
      n.x = p_n.normal_x;
      n.y = p_n.normal_y;
      n.z = p_n.normal_z;

      double r, p;
      vigir_footstep_planning::normalToRP(n, 0.0, r, p);
      p -= M_PI_2;
      pose_msg.orientation = tf::createQuaternionMsgFromRollPitchYaw(r, p, 0.0);

      pose_array.poses.push_back(pose_msg);
    }

    cloud_normals_pub.publish(pose_array);
  }

  if ((cloud_gradients_pub.getNumSubscribers() > 0) && (terrain_classifier.getGradients()))
  {
    pcl::toROSMsg(*(terrain_classifier.getGradients()), cloud_point_msg);
    cloud_point_msg.header.stamp = ros::Time::now();
    cloud_point_msg.header.frame_id = terrain_classifier.getFrameId();
    cloud_gradients_pub.publish(cloud_point_msg);
  }

  // publish height grid map
  if (height_grid_map_pub.getNumSubscribers() > 0)
    height_grid_map_pub.publish(terrain_classifier.getHeightGridMapRescaled());
}
Example #20
void ConvexDecomp(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud, const Eigen::MatrixXf& dirs, float thresh,
                  /*optional outputs: */ std::vector<IntVec>* indices, std::vector< IntVec >* hull_indices)
{

  int k_neighbs = 5;
  pcl::KdTreeFLANN<pcl::PointXYZ>::Ptr tree(new pcl::KdTreeFLANN<pcl::PointXYZ>(true));
  tree->setEpsilon(0);
  tree->setInputCloud(cloud);
  int n_pts = cloud->size();
  int n_dirs = dirs.rows();
  
  
  
  
  DEBUG_PRINT("npts, ndirs %i %i\n", n_pts, n_dirs);
  
  MatrixXf dirs4(n_dirs, 4);
  dirs4.leftCols(3) = dirs;
  dirs4.col(3).setZero();
  MatrixXf pt2supports = Map< const Matrix<float, Dynamic, Dynamic, RowMajor > >(reinterpret_cast<const float*>(cloud->points.data()), n_pts, 4) * dirs4.transpose();
  
  
  const int UNLABELED = -1;
  IntVec pt2label(n_pts, UNLABELED);
  
  IntSet alldirs;
  for(int i = 0; i < n_dirs; ++i) alldirs.insert(i);
  
  int i_seed = 0;
  int i_label = 0;
  
  // each loop cycle, add a new cluster
  while(true)
  {
  
  
  
  
    // find first unlabeled point
    while(true)
    {
      if(i_seed == n_pts) return;
      if(pt2label[i_seed] == UNLABELED) break;
      ++i_seed;
    }
    
    pt2label[i_seed] = i_label;
    map<int, IntSet> pt2dirs;
    pt2dirs[i_seed] = alldirs;
    vector<SupInfo> dir2supinfo(n_dirs);
    for(int i_dir = 0; i_dir < n_dirs; ++i_dir)
    {
      float seedsup = pt2supports(i_seed, i_dir);
      dir2supinfo[i_dir].inds.push_back(i_seed);
      dir2supinfo[i_dir].sups.push_back(seedsup);
      dir2supinfo[i_dir].best = seedsup;
      
    }
    
    
    DEBUG_PRINT("seed: %i\n", i_seed);
    
    IntSet exclude_frontier;
    exclude_frontier.insert(i_seed);
    queue<int> frontier;
    
    BOOST_FOREACH(const int & i_nb,  getNeighbors(*tree, i_seed, k_neighbs, 2 * thresh))
    {
      if(pt2label[i_nb] == UNLABELED && exclude_frontier.find(i_nb) == exclude_frontier.end())
      {
        DEBUG_PRINT("adding %i to frontier\n", i_nb);
        frontier.push(i_nb);
        exclude_frontier.insert(i_nb);
      }
    }
    
    
    
    
    
    while(!frontier.empty())
    {
    
#if 0 // for serious debugging
      vector<int> clu;
      BOOST_FOREACH(Int2IntSet::value_type & pt_dir, pt2dirs)
      {
        clu.push_back(pt_dir.first);
      }
      MatrixXd sup_pd(clu.size(), n_dirs);
      for(int i = 0; i < clu.size(); ++i)
      {
        for(int i_dir = 0; i_dir < n_dirs; ++i_dir)
        {
          sup_pd(i, i_dir) = pt2supports(clu[i], i_dir);
        }
      }
      for(int i_dir = 0; i_dir < n_dirs; ++i_dir)
      {
        IntSet nearext;
        for(int i = 0; i < clu.size(); ++i)
        {
          if(sup_pd.col(i_dir).maxCoeff() - sup_pd(i, i_dir) < thresh)
          {
            nearext.insert(clu[i]);
          }
        }
        assert(toSet(dir2supinfo[i_dir].inds) == nearext);
      }
      printf("ok!\n");
#endif
      
      
      
      
      int i_cur = frontier.front();
      frontier.pop();
//      printf("cur: %i\n", i_cur);
      DEBUG_PRINT("pt2dirs %s", Str(pt2dirs).c_str());
      
      bool reject = false;
      
      Int2Int pt2decrement;
      for(int i_dir = 0; i_dir < n_dirs; ++i_dir)
      {
        float cursup = pt2supports(i_cur, i_dir);
        SupInfo& si = dir2supinfo[i_dir];
        if(cursup > si.best)
        {
          for(int i = 0; i < si.inds.size(); ++i)
          {
            float sup = si.sups[i];
            int i_pt = si.inds[i];
            if(cursup - sup > thresh)
            {
              pt2decrement[i_pt] = pt2decrement[i_pt] + 1;
              DEBUG_PRINT("decrementing %i (dir %i)\n", i_pt, i_dir);
            }
          }
        }
      }
      
      DEBUG_PRINT("pt2dec: %s", Str(pt2decrement).c_str());
      
      BOOST_FOREACH(const Int2Int::value_type & pt_dec, pt2decrement)
      {
        if(pt_dec.second == pt2dirs[pt_dec.first].size())
        {
          reject = true;
          break;
        }
      }
      
      DEBUG_PRINT("reject? %i\n", reject);
      if(!reject)
      {
        pt2label[i_cur] = i_label;
        pt2dirs[i_cur] = IntSet();
        for(int i_dir = 0; i_dir < n_dirs; ++i_dir)
        {
          float cursup = pt2supports(i_cur, i_dir);
          if(cursup > dir2supinfo[i_dir].best - thresh) pt2dirs[i_cur].insert(i_dir);
        }
        
        
        for(int i_dir = 0; i_dir < n_dirs; ++i_dir)
        {
          float cursup = pt2supports(i_cur, i_dir);
          SupInfo& si = dir2supinfo[i_dir];
          if(cursup > si.best)
          {
          
            IntVec filtinds;
            FloatVec filtsups;
            for(int i = 0; i < si.inds.size(); ++i)
            {
              float sup = si.sups[i];
              int i_pt = si.inds[i];
              if(cursup - sup > thresh)
              {
                pt2dirs[i_pt].erase(i_dir);
              }
              else
              {
                filtinds.push_back(i_pt);
                filtsups.push_back(sup);
              }
            }
            si.inds = filtinds;
            si.sups = filtsups;
            si.inds.push_back(i_cur);
            si.sups.push_back(cursup);
            si.best = cursup;
          }
          else if(cursup > si.best - thresh)
          {
            si.inds.push_back(i_cur);
            si.sups.push_back(cursup);
          }
        }
        BOOST_FOREACH(const int & i_nb,  getNeighbors(*tree, i_cur, k_neighbs, 2 * thresh))
        {
          if(pt2label[i_nb] == UNLABELED && exclude_frontier.find(i_nb) == exclude_frontier.end())
          {
            DEBUG_PRINT("adding %i to frontier\n", i_nb);
            frontier.push(i_nb);
            exclude_frontier.insert(i_nb);
          }
        }
        
      } // if !reject
      else
      {
      }
      
    } // while frontier nonempty
    
    if(indices != NULL)
Example #21
template <typename PointT> inline unsigned int
pcl::computeMeanAndCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
                                     Eigen::Matrix3d &covariance_matrix,
                                     Eigen::Vector4d &centroid)
{
  // create the buffer on the stack which is much faster than using cloud.points[indices[i]] and centroid as a buffer
  Eigen::Matrix<double, 1, 9, Eigen::RowMajor> accu = Eigen::Matrix<double, 1, 9, Eigen::RowMajor>::Zero ();
  unsigned int point_count;
  if (cloud.is_dense)
  {
    point_count = cloud.size ();
    // For each point in the cloud
    for (size_t i = 0; i < point_count; ++i)
    {
      accu [0] += cloud[i].x * cloud[i].x;
      accu [1] += cloud[i].x * cloud[i].y;
      accu [2] += cloud[i].x * cloud[i].z;
      accu [3] += cloud[i].y * cloud[i].y;
      accu [4] += cloud[i].y * cloud[i].z;
      accu [5] += cloud[i].z * cloud[i].z;
      accu [6] += cloud[i].x;
      accu [7] += cloud[i].y;
      accu [8] += cloud[i].z;
    }
  }
  else
  {
    point_count = 0;
    for (size_t i = 0; i < cloud.points.size (); ++i)
    {
      if (!isFinite (cloud[i]))
        continue;

      accu [0] += cloud[i].x * cloud[i].x;
      accu [1] += cloud[i].x * cloud[i].y;
      accu [2] += cloud[i].x * cloud[i].z;
      accu [3] += cloud[i].y * cloud[i].y;
      accu [4] += cloud[i].y * cloud[i].z;
      accu [5] += cloud[i].z * cloud[i].z;
      accu [6] += cloud[i].x;
      accu [7] += cloud[i].y;
      accu [8] += cloud[i].z;
      ++point_count;
    }
  }

  if (point_count != 0)
  {
    accu /= (double) point_count;
    centroid.head<3> () = accu.tail<3> ();
    centroid[3] = 0;
    covariance_matrix.coeffRef (0) = accu [0] - accu [6] * accu [6];
    covariance_matrix.coeffRef (1) = accu [1] - accu [6] * accu [7];
    covariance_matrix.coeffRef (2) = accu [2] - accu [6] * accu [8];
    covariance_matrix.coeffRef (4) = accu [3] - accu [7] * accu [7];
    covariance_matrix.coeffRef (5) = accu [4] - accu [7] * accu [8];
    covariance_matrix.coeffRef (8) = accu [5] - accu [8] * accu [8];
    covariance_matrix.coeffRef (3) = covariance_matrix.coeff (1);
    covariance_matrix.coeffRef (6) = covariance_matrix.coeff (2);
    covariance_matrix.coeffRef (7) = covariance_matrix.coeff (5);
  }
  return (point_count);
}
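// Usage sketch for the overload defined above: a single pass over the cloud yields both the
// centroid and the covariance (formed as E[p p^T] - mu mu^T from the nine accumulated sums).
// Illustrative only; assumes <iostream> and a populated cloud.
void printMeanAndCovariance (const pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  Eigen::Matrix3d covariance;
  Eigen::Vector4d centroid;
  unsigned int n = pcl::computeMeanAndCovarianceMatrix (cloud, covariance, centroid);
  if (n > 0)
    std::cout << "centroid: " << centroid.head<3> ().transpose ()
              << "\ncovariance:\n" << covariance << std::endl;
}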
bool LaserSensorProcessor::cleanPointCloud(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointCloud)
{
  pcl::PointCloud<pcl::PointXYZRGB> tempPointCloud;
  std::vector<int> indices;
  pcl::removeNaNFromPointCloud(*pointCloud, tempPointCloud, indices);
  tempPointCloud.is_dense = true;
  pointCloud->swap(tempPointCloud);
  ROS_DEBUG("ElevationMap: cleanPointCloud() reduced point cloud to %i points.", static_cast<int>(pointCloud->size()));
  return true;
}
Example #23
bool getDangerState(const pcl::PointCloud<pcl::PointXYZ>::Ptr& cloud) {
  return cloud->size() > 0;
}
Example #24
void PassThrough::applyFilterIndices (std::vector<int> &indices, pcl::PointCloud<PointXYZSIFT>::Ptr input, std::string filter_field_name, float min, float max, bool negative)
{
    CLOG(LTRACE) << "applyFilterIndices() " << filter_field_name;
    pcl::IndicesPtr indices_(new vector<int>);
    for(int i = 0; i < input->size(); i++) {
        indices_->push_back(i);
    }

    // The arrays to be used
    indices.resize (indices_->size ());
    int oii = 0; // oii = output indices iterator, rii = removed indices iterator
    // Has a field name been specified?

    if (filter_field_name.empty ())
    {
        // Only filter for non-finite entries then
        for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
        {
            // Non-finite entries are always passed to removed indices
            if (!pcl_isfinite (input->points[(*indices_)[iii]].x) ||
                    !pcl_isfinite (input->points[(*indices_)[iii]].y) ||
                    !pcl_isfinite (input->points[(*indices_)[iii]].z))
            {
                continue;
            }
            indices[oii++] = (*indices_)[iii];
        }
    }
    else
    {
        // Attempt to get the field name's index
        std::vector<pcl::PCLPointField> fields;
        int distance_idx = pcl::getFieldIndex (*input, filter_field_name, fields);
        if (distance_idx == -1)
        {
            CLOG(LWARNING) << "applyFilterIndices Unable to find field name in point type.";
            indices.clear ();
            return;
        }
        // Filter for non-finite entries and the specified field limits
        for (int iii = 0; iii < static_cast<int> (indices_->size ()); ++iii) // iii = input indices iterator
        {
            // Non-finite entries are always passed to removed indices
            if (!pcl_isfinite (input->points[(*indices_)[iii]].x) ||
                    !pcl_isfinite (input->points[(*indices_)[iii]].y) ||
                    !pcl_isfinite (input->points[(*indices_)[iii]].z))
            {
                continue;
            }
            // Get the field's value
            const uint8_t* pt_data = reinterpret_cast<const uint8_t*> (&input->points[(*indices_)[iii]]);
            float field_value = 0;
            memcpy (&field_value, pt_data + fields[distance_idx].offset, sizeof (float));
            // Remove NAN/INF/-INF values. We expect passthrough to output clean valid data.
            if (!pcl_isfinite (field_value))
            {
                continue;
            }
            // Outside of the field limits are passed to removed indices
            if (!negative && (field_value < min || field_value > max))
            {
                continue;
            }
            // Inside of the field limits are passed to removed indices if negative was set
            if (negative && field_value >= min && field_value <= max)
            {
                continue;
            }
            // Otherwise it was a normal point for output (inlier)
            indices[oii++] = (*indices_)[iii];
        }
    }
    // Resize the output arrays
    indices.resize (oii);
}
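// The field-based branch above finds the filter field by name (pcl::getFieldIndex) and then
// reads its value through the field's byte offset with memcpy. The same technique on a plain
// pcl::PointXYZ cloud, as a small standalone sketch for a float field (function name is
// hypothetical; requires <cstring> and <limits>):
inline float readFieldByName (const pcl::PointCloud<pcl::PointXYZ> &cloud, const std::string &field_name)
{
    std::vector<pcl::PCLPointField> fields;
    int idx = pcl::getFieldIndex (cloud, field_name, fields);
    if (idx == -1 || cloud.empty ())
        return std::numeric_limits<float>::quiet_NaN ();
    // Read the field of the first point through its byte offset
    const uint8_t* pt_data = reinterpret_cast<const uint8_t*> (&cloud.points[0]);
    float value = 0;
    memcpy (&value, pt_data + fields[idx].offset, sizeof (float));
    return value;
}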
Example #25
//static void velodyne_callback(const pcl::PointCloud<velodyne_pointcloud::PointXYZIR>::ConstPtr& input)
static void map_callback(const sensor_msgs::PointCloud2::ConstPtr& input)
{

  if (map_loaded == 0) {
    std::cout << "Loading map data... ";
    map.header.frame_id = "/pointcloud_map_frame";
    
    // Convert the data type(from sensor_msgs to pcl).
    pcl::fromROSMsg(*input, map);
    
    pcl::PointCloud<pcl::PointXYZI>::Ptr map_ptr(new pcl::PointCloud<pcl::PointXYZI>(map));
    // Setting point cloud to be aligned to.
    ndt.setInputTarget(map_ptr);
	
    // Setting NDT parameters to default values
    ndt.setMaximumIterations(iter);
    ndt.setResolution(ndt_res);
    ndt.setStepSize(step_size);
    ndt.setTransformationEpsilon(trans_eps);
    
    map_loaded = 1;
    std::cout << "Map Loaded." << std::endl;
  }



    if (map_loaded == 1 && init_pos_set == 1) {
        callback_start = ros::Time::now();

        static tf::TransformBroadcaster br;
        tf::Transform transform;
        tf::Quaternion q;

        tf::Quaternion q_control;

        // 1 scan
	/*
        pcl::PointCloud<pcl::PointXYZI> scan;
        pcl::PointXYZI p;
        scan.header = input->header;
        scan.header.frame_id = "velodyne_scan_frame";
	*/

        ros::Time scan_time;
        scan_time.sec = additional_map.header.stamp / 1000000.0;
        scan_time.nsec = (additional_map.header.stamp - scan_time.sec * 1000000.0) * 1000.0;

        /*
         std::cout << "scan.header.stamp: " << scan.header.stamp << std::endl;
         std::cout << "scan_time: " << scan_time << std::endl;
         std::cout << "scan_time.sec: " << scan_time.sec << std::endl;
         std::cout << "scan_time.nsec: " << scan_time.nsec << std::endl;
         */

        t1_start = ros::Time::now();
	/*
        for (pcl::PointCloud<velodyne_pointcloud::PointXYZIR>::const_iterator item = input->begin(); item != input->end(); item++) {
            p.x = (double) item->x;
            p.y = (double) item->y;
            p.z = (double) item->z;

            scan.points.push_back(p);
        }
	*/
	//	pcl::fromROSMsg(*input, scan);
        t1_end = ros::Time::now();
        d1 = t1_end - t1_start;

        Eigen::Matrix4f t(Eigen::Matrix4f::Identity());

	//        pcl::PointCloud<pcl::PointXYZI>::Ptr scan_ptr(new pcl::PointCloud<pcl::PointXYZI>(scan));
        pcl::PointCloud<pcl::PointXYZI>::Ptr filtered_additional_map_ptr(new pcl::PointCloud<pcl::PointXYZI>);

        // Downsampling the velodyne scan using VoxelGrid filter
        t2_start = ros::Time::now();
        pcl::VoxelGrid<pcl::PointXYZI> voxel_grid_filter;
        voxel_grid_filter.setLeafSize(voxel_leaf_size, voxel_leaf_size, voxel_leaf_size);
        voxel_grid_filter.setInputCloud(additional_map_ptr);
        voxel_grid_filter.filter(*filtered_additional_map_ptr);
        t2_end = ros::Time::now();
        d2 = t2_end - t2_start;

        // Setting point cloud to be aligned.
        ndt.setInputSource(filtered_additional_map_ptr);

        // Guess the initial gross estimation of the transformation
        t3_start = ros::Time::now();
        tf::Matrix3x3 init_rotation;

        guess_pos.x = previous_pos.x + offset_x;
        guess_pos.y = previous_pos.y + offset_y;
        guess_pos.z = previous_pos.z + offset_z;
        guess_pos.roll = previous_pos.roll;
        guess_pos.pitch = previous_pos.pitch;
        guess_pos.yaw = previous_pos.yaw + offset_yaw;

        Eigen::AngleAxisf init_rotation_x(guess_pos.roll, Eigen::Vector3f::UnitX());
        Eigen::AngleAxisf init_rotation_y(guess_pos.pitch, Eigen::Vector3f::UnitY());
        Eigen::AngleAxisf init_rotation_z(guess_pos.yaw, Eigen::Vector3f::UnitZ());

        Eigen::Translation3f init_translation(guess_pos.x, guess_pos.y, guess_pos.z);

        Eigen::Matrix4f init_guess = (init_translation * init_rotation_z * init_rotation_y * init_rotation_x).matrix();

        t3_end = ros::Time::now();
        d3 = t3_end - t3_start;

        t4_start = ros::Time::now();
        pcl::PointCloud<pcl::PointXYZI>::Ptr output_cloud(new pcl::PointCloud<pcl::PointXYZI>);
        ndt.align(*output_cloud, init_guess);

        t = ndt.getFinalTransformation();
	pcl::PointCloud<pcl::PointXYZI>::Ptr transformed_additional_map_ptr (new pcl::PointCloud<pcl::PointXYZI>());
	transformed_additional_map_ptr->header.frame_id = "/map";
	pcl::transformPointCloud(*additional_map_ptr, *transformed_additional_map_ptr, t);
	sensor_msgs::PointCloud2::Ptr msg_ptr(new sensor_msgs::PointCloud2);

	pcl::toROSMsg(*transformed_additional_map_ptr, *msg_ptr);
	msg_ptr->header.frame_id = "/map";
	ndt_map_pub.publish(*msg_ptr);

	// Writing Point Cloud data to PCD file
	pcl::io::savePCDFileASCII("global_map.pcd", *transformed_additional_map_ptr);
	std::cout << "Saved " << transformed_additional_map_ptr->points.size() << " data points to global_map.pcd." << std::endl;

	pcl::PointCloud<pcl::PointXYZRGB> output;
	output.width = transformed_additional_map_ptr->width;
	output.height = transformed_additional_map_ptr->height;
	output.points.resize(output.width * output.height);

	for(size_t i = 0; i < output.points.size(); i++){
	  output.points[i].x = transformed_additional_map_ptr->points[i].x;
	  output.points[i].y = transformed_additional_map_ptr->points[i].y;
	  output.points[i].z = transformed_additional_map_ptr->points[i].z;
	  output.points[i].rgb = 255 << 8;
	}

	pcl::io::savePCDFileASCII("global_map_rgb.pcd", output);
	std::cout << "Saved " << output.points.size() << " data points to global_map_rgb.pcd." << std::endl;

        t4_end = ros::Time::now();
        d4 = t4_end - t4_start;

        t5_start = ros::Time::now();
        /*
         tf::Vector3 origin;
         origin.setValue(static_cast<double>(t(0,3)), static_cast<double>(t(1,3)), static_cast<double>(t(2,3)));
         */

        tf::Matrix3x3 tf3d;

        tf3d.setValue(static_cast<double>(t(0, 0)), static_cast<double>(t(0, 1)), static_cast<double>(t(0, 2)), static_cast<double>(t(1, 0)), static_cast<double>(t(1, 1)), static_cast<double>(t(1, 2)), static_cast<double>(t(2, 0)), static_cast<double>(t(2, 1)), static_cast<double>(t(2, 2)));

        // Update current_pos.
        current_pos.x = t(0, 3);
        current_pos.y = t(1, 3);
        current_pos.z = t(2, 3);
        tf3d.getRPY(current_pos.roll, current_pos.pitch, current_pos.yaw, 1);

	// control_pose
	current_pos_control.roll = current_pos.roll;
	current_pos_control.pitch = current_pos.pitch;
	current_pos_control.yaw = current_pos.yaw - angle / 180.0 * M_PI;
	double theta = current_pos_control.yaw;
	current_pos_control.x = cos(theta) * (-control_shift_x) + sin(theta) * (-control_shift_y) + current_pos.x;
	current_pos_control.y = -sin(theta) * (-control_shift_x) + cos(theta) * (-control_shift_y) + current_pos.y;
	current_pos_control.z = current_pos.z - control_shift_z;

        // transform "/velodyne" to "/map"
#if 0
        transform.setOrigin(tf::Vector3(current_pos.x, current_pos.y, current_pos.z));
        q.setRPY(current_pos.roll, current_pos.pitch, current_pos.yaw);
        transform.setRotation(q);
#else
	//
	// FIXME:
	// We corrected the angle of "/velodyne" so that pure_pursuit
	// can read this frame for the control.
	// However, this is not what we want because the scan of Velodyne
	// looks unmatched for the 3-D map on Rviz.
	// What we really want is to make another TF transforming "/velodyne"
	// to a new "/ndt_points" frame and modify pure_pursuit to
	// read this frame instead of "/velodyne".
	// Otherwise, can pure_pursuit just use "/ndt_frame"?
	//
        transform.setOrigin(tf::Vector3(current_pos_control.x, current_pos_control.y, current_pos_control.z));
        q.setRPY(current_pos_control.roll, current_pos_control.pitch, current_pos_control.yaw);
        transform.setRotation(q);
#endif

	q_control.setRPY(current_pos_control.roll, current_pos_control.pitch, current_pos_control.yaw);

        /*
         std::cout << "ros::Time::now(): " << ros::Time::now() << std::endl;
         std::cout << "ros::Time::now().sec: " << ros::Time::now().sec << std::endl;
         std::cout << "ros::Time::now().nsec: " << ros::Time::now().nsec << std::endl;
         */

	//        br.sendTransform(tf::StampedTransform(transform, scan_time, "map", "velodyne"));

        static tf::TransformBroadcaster pose_broadcaster;
        tf::Transform pose_transform;
        tf::Quaternion pose_q;

/*        pose_transform.setOrigin(tf::Vector3(0, 0, 0));
        pose_q.setRPY(0, 0, 0);
        pose_transform.setRotation(pose_q);
        pose_broadcaster.sendTransform(tf::StampedTransform(pose_transform, scan_time, "map", "ndt_frame"));
*/
        // publish the position
       // ndt_pose_msg.header.frame_id = "/ndt_frame";
        ndt_pose_msg.header.frame_id = "/map";
        ndt_pose_msg.header.stamp = scan_time;
        ndt_pose_msg.pose.position.x = current_pos.x;
        ndt_pose_msg.pose.position.y = current_pos.y;
        ndt_pose_msg.pose.position.z = current_pos.z;
        ndt_pose_msg.pose.orientation.x = q.x();
        ndt_pose_msg.pose.orientation.y = q.y();
        ndt_pose_msg.pose.orientation.z = q.z();
        ndt_pose_msg.pose.orientation.w = q.w();

        static tf::TransformBroadcaster pose_broadcaster_control;
        tf::Transform pose_transform_control;
        tf::Quaternion pose_q_control;

     /*   pose_transform_control.setOrigin(tf::Vector3(0, 0, 0));
        pose_q_control.setRPY(0, 0, 0);
        pose_transform_control.setRotation(pose_q_control);
        pose_broadcaster_control.sendTransform(tf::StampedTransform(pose_transform_control, scan_time, "map", "ndt_frame"));
*/
        // publish the position
     //   control_pose_msg.header.frame_id = "/ndt_frame";
        control_pose_msg.header.frame_id = "/map";
        control_pose_msg.header.stamp = scan_time;
        control_pose_msg.pose.position.x = current_pos_control.x;
        control_pose_msg.pose.position.y = current_pos_control.y;
        control_pose_msg.pose.position.z = current_pos_control.z;
        control_pose_msg.pose.orientation.x = q_control.x();
        control_pose_msg.pose.orientation.y = q_control.y();
        control_pose_msg.pose.orientation.z = q_control.z();
        control_pose_msg.pose.orientation.w = q_control.w();

        /*
         std::cout << "ros::Time::now(): " << ros::Time::now() << std::endl;
         std::cout << "ros::Time::now().sec: " << ros::Time::now().sec << std::endl;
         std::cout << "ros::Time::now().nsec: " << ros::Time::now().nsec << std::endl;
         */

        ndt_pose_pub.publish(ndt_pose_msg);
        control_pose_pub.publish(control_pose_msg);

        t5_end = ros::Time::now();
        d5 = t5_end - t5_start;

#ifdef OUTPUT
        // Writing position to position_log.txt
        std::ofstream ofs("position_log.txt", std::ios::app);
        if (!ofs) {
            std::cerr << "Could not open 'position_log.txt'." << std::endl;
            exit(1);
        }
        ofs << current_pos.x << " " << current_pos.y << " " << current_pos.z << " " << current_pos.roll << " " << current_pos.pitch << " " << current_pos.yaw << std::endl;
#endif

        // Calculate the offset (curren_pos - previous_pos)
        offset_x = current_pos.x - previous_pos.x;
        offset_y = current_pos.y - previous_pos.y;
        offset_z = current_pos.z - previous_pos.z;
        offset_yaw = current_pos.yaw - previous_pos.yaw;

        // Update position and posture. current_pos -> previous_pos
        previous_pos.x = current_pos.x;
        previous_pos.y = current_pos.y;
        previous_pos.z = current_pos.z;
        previous_pos.roll = current_pos.roll;
        previous_pos.pitch = current_pos.pitch;
        previous_pos.yaw = current_pos.yaw;

        callback_end = ros::Time::now();
        d_callback = callback_end - callback_start;

        std::cout << "-----------------------------------------------------------------" << std::endl;
        std::cout << "Sequence number: " << input->header.seq << std::endl;
        std::cout << "Number of scan points: " << additional_map_ptr->size() << " points." << std::endl;
        std::cout << "Number of filtered scan points: " << filtered_additional_map_ptr->size() << " points." << std::endl;
        std::cout << "NDT has converged: " << ndt.hasConverged() << std::endl;
        std::cout << "Fitness score: " << ndt.getFitnessScore() << std::endl;
        std::cout << "Number of iteration: " << ndt.getFinalNumIteration() << std::endl;
        std::cout << "(x,y,z,roll,pitch,yaw):" << std::endl;
        std::cout << "(" << current_pos.x << ", " << current_pos.y << ", " << current_pos.z << ", " << current_pos.roll << ", " << current_pos.pitch << ", " << current_pos.yaw << ")" << std::endl;
        std::cout << "Transformation Matrix:" << std::endl;
        std::cout << t << std::endl;
#ifdef VIEW_TIME
        std::cout << "Duration of velodyne_callback: " << d_callback.toSec() << " secs." << std::endl;
        std::cout << "Adding scan points: " << d1.toSec() << " secs." << std::endl;
        std::cout << "VoxelGrid Filter: " << d2.toSec() << " secs." << std::endl;
        std::cout << "Guessing the initial gross estimation: " << d3.toSec() << " secs." << std::endl;
        std::cout << "NDT: " << d4.toSec() << " secs." << std::endl;
        std::cout << "tf: " << d5.toSec() << " secs." << std::endl;
#endif
        std::cout << "-----------------------------------------------------------------" << std::endl;
    }
}
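// The callback above reduces to a short NDT registration recipe. Distilled here as a
// minimal sketch with hypothetical local names; the parameters mirror ndt_res, step_size,
// trans_eps and iter used above, but the numeric values below are placeholders.
Eigen::Matrix4f alignWithNDT(const pcl::PointCloud<pcl::PointXYZI>::Ptr &source,
                             const pcl::PointCloud<pcl::PointXYZI>::Ptr &target,
                             const Eigen::Matrix4f &init_guess)
{
  pcl::NormalDistributionsTransform<pcl::PointXYZI, pcl::PointXYZI> ndt;
  ndt.setResolution(1.0);              // ndt_res
  ndt.setStepSize(0.1);                // step_size
  ndt.setTransformationEpsilon(0.01);  // trans_eps
  ndt.setMaximumIterations(30);        // iter
  ndt.setInputTarget(target);          // cloud to align against (the map)
  ndt.setInputSource(source);          // cloud to be aligned (e.g. the filtered chunk)
  pcl::PointCloud<pcl::PointXYZI> aligned;
  ndt.align(aligned, init_guess);      // aligned = source transformed by the NDT estimate
  return ndt.getFinalTransformation();
}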
Example #26
	inline void
	hit_same_point(	const image_geometry::PinholeCameraModel &camera_model,
								const pcl::PointCloud<PointT> &in,
								pcl::PointCloud<PointT> &out,
								int rows,
								int cols,
								float z_threshold = 0.3)
	{
		std::vector<std::vector <std::vector<PointT> > > hit( cols, std::vector< std::vector<PointT> >(rows, std::vector<PointT>()));

		// Project points into image space
		for(unsigned int i=0; i < in.points.size();  ++i){
			const PointT* pt = &in.points.at(i);
			if( pt->z > 1) { // min distance from camera 1m

				cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));

				if( between<int>(0, point_image.x, cols )
					&& between<int>( 0, point_image.y, rows )
				)
				{
					// Sort all points into image
					{
						hit[point_image.x][point_image.y].push_back(in.points.at(i));
					}

				}
			}
		}
		assert(out.empty());
		for(int x = 0; x < hit.size(); ++x ){
			for(int y = 0; y < hit[0].size(); ++y){
				if(hit[x][y].size()>1){
					PointT min_z = hit[x][y][0];
					float max_z = min_z.z;
					for(int p = 1; p < hit[x][y].size(); ++p){
					// find min and max z
						max_z = MAX(max_z, hit[x][y][p].z);
#ifdef DEBUG
						std::cout << hit[x][y].size() << "\t";
#endif

						if(hit[x][y][p].z < min_z.z)
						{
							min_z = hit[x][y][p];
						}
					}
#ifdef DEBUG
					std::cout << min_z.z << "\t" << max_z << "\t";
#endif
					if(max_z - min_z.z > z_threshold){
#ifdef DEBUG
						std::cout << min_z << std::endl;
#endif
						out.push_back(min_z);
					}
				}
			}
		}
#ifdef DEBUG
		std::cout << "hit_same_point in: "<< in.size()  << "\t out: " << out.size() << "\n";
#endif
	}
Example #27
/**
 * Given a vector of data points and a vector of model
 * points, use ICP to register the model to the data.  Add registration transformation
 * to transforms vector and registration transformation from
 * the track's birth frame to absoluteTransforms vector.
 */
pcl::PointCloud<pcl::PointXYZRGB> Track::updatePosition(pcl::PointCloud<pcl::PointXYZRGB> dataPTS_cloud,vector<Model> modelPTS_clouds,
                                             int modelTODataThreshold, int separateThreshold, int matchDThresh,
                                             int ICP_ITERATIONS, double ICP_TRANSEPSILON, double ICP_EUCLIDEANDIST, double ICP_MAXFIT)
{

    matchDistanceThreshold=matchDThresh;
    nukeDistanceThreshold = separateThreshold;

    icp_maxIter=ICP_ITERATIONS;
    icp_transformationEpsilon=ICP_TRANSEPSILON;
    icp_euclideanDistance=ICP_EUCLIDEANDIST;

    icp_maxfitness = ICP_MAXFIT;




    pcl::PointCloud<pcl::PointXYZRGB> modelToProcess_cloud;

    Eigen::Matrix4f T;

    T.setIdentity();


//Figure out the identity of the model for this region
    if (frameIndex == birthFrameIndex)
    {

        isBirthFrame=true; // Do extra work aligning the objects since this is the very first frame
        //Determine what model this creature should use
        if(modelPTS_clouds.size()<2){// If there is only one model, just use that one
            modelIndex = 0;
        }
        else{
        modelIndex= identify(dataPTS_cloud,modelPTS_clouds);
        modelRotated = modelPTS_clouds[modelIndex].rotated;
}

    }
    else
    {
        isBirthFrame=false;


    }


    // modelTODataThreshold is given in percent of the model size
    double birthAssociationsThreshold = modelPTS_clouds[modelIndex].cloud.size() * .01 * modelTODataThreshold;
    double healthyAssociationsThreshold = birthAssociationsThreshold * .4; // Still healthy with 40 percent of a whole model

    modelToProcess_cloud = modelPTS_clouds[modelIndex].cloud;

    // Strip the 3D data: project to 2D (x,y) and copy back, discarding z and colour
    PointCloud<PointXY> dataPTS_cloud2D;
    copyPointCloud(dataPTS_cloud,dataPTS_cloud2D);
    PointCloud<PointXY> modelPTS_cloud2D;
    copyPointCloud(modelToProcess_cloud,modelPTS_cloud2D);

    PointCloud<PointXYZRGB> dataPTS_cloudStripped;
    copyPointCloud(dataPTS_cloud2D,dataPTS_cloudStripped);

    PointCloud<PointXYZRGB> modelPTS_cloudStripped;
    copyPointCloud(modelPTS_cloud2D,modelPTS_cloudStripped);


    //leave open for other possible ICP methods
    bool doPCL_3DICP=true;
    double fitnessin;

    //PCL implementation of ICP
    if(doPCL_3DICP){
        if (dataPTS_cloud.size() > 1) // TODO: the math throws an error if there are not enough data points
        {
            // Find the transformation from the origin that best matches the model to the target
            T = calcTransformPCLRGB(dataPTS_cloud, modelToProcess_cloud, &fitnessin); // Use 3D color

            // Apply the transformation
            pcl::transformPointCloud(modelToProcess_cloud, modelToProcess_cloud, T);
//            pcl::transformPointCloud(modelPTS_cloudStripped, modelPTS_cloudStripped, T);
        }
    }
    else{
        // Use the 2D stripped model and data and align them
        T = calcTransformPCLRGB(dataPTS_cloudStripped, modelPTS_cloudStripped, &fitnessin); // Just 2D
        pcl::transformPointCloud(modelToProcess_cloud, modelToProcess_cloud, T);
    }


    int totalpointsBeforeRemoval=dataPTS_cloud.size();
    int totalRemovedPoints = 0;

    /** Remove old data points associated with the model:
      * delete the points under where the model was placed. The number of points removed in this
      * process gives us a metric for how healthy the track is.
      **/
    pcl::PointCloud<pcl::PointXYZRGB> dataPTSreduced_cloud;

    // removeClosestDataCloudPoints strips the 3D data and removes points in the proximity of where our model has lined up
    pcl::copyPointCloud(removeClosestDataCloudPoints(dataPTS_cloud, modelToProcess_cloud, nukeDistanceThreshold), dataPTSreduced_cloud);


    totalRemovedPoints = totalpointsBeforeRemoval - dataPTSreduced_cloud.size();


    qDebug()<<"Removed Points from Track  "<<totalRemovedPoints;

    /// For debugging we can visualize the point cloud
    /**
    pcl::PointCloud<PointXYZRGB>::Ptr dataPTS_cloud_ptr (new pcl::PointCloud<PointXYZRGB> (dataPTS_cloud));
    //  copyPointCloud(modelPTS_cloud,)
    transformPointCloud(modelPTS_clouds[modelIndex].cloud,modelPTS_clouds[modelIndex].cloud,T);

    pcl::PointCloud<PointXYZRGB>::Ptr model_cloud_ptrTempTrans (new pcl::PointCloud<PointXYZRGB> (modelPTS_clouds[modelIndex].cloud));

    pcl::visualization::CloudViewer viewer("Simple Cloud Viewer");
    viewer.showCloud(dataPTS_cloud_ptr);

    int sw=0;
    while (!viewer.wasStopped())
    {
        if(sw==0){
            viewer.showCloud(model_cloud_ptrTempTrans);
            sw=1;
        }
        else{
            viewer.showCloud(dataPTS_cloud_ptr);
            sw=0;
        }
    }
    /**/



    //////
    ///Determine Health of latest part of track
    ////////

    // Tracks that were just born are handled here //TODO: this special case may be unnecessary
    if (frameIndex == birthFrameIndex)
    {
        absoluteTransforms.push_back(T);

        // Check the number of data points associated with this track; if it is greater than
        // the birth threshold then set the birth flag to true.
        // If this check never passes, the track is never born.
        if ( totalRemovedPoints > birthAssociationsThreshold ) // TODO: may need a new check for the birth threshold
        {
            isBirthable=true;
        }
    }
    // Tracks that are older than one frame are handled here
    else
    {
        // Check the number of data points associated with this track; if it is less than
        // the zombie/death threshold, then copy the previous transform and add this frame
        // index to the zombie frames.

        // Is the track unhealthy? If so, make it a zombie
        if ( totalRemovedPoints < healthyAssociationsThreshold )
        {
            absoluteTransforms.push_back(T);

            zombieIndicesIntoAbsTransforms.push_back(absoluteTransforms.size()-1);
            numberOfContinuousZombieFrames++;
        }
        else // Not a zombie frame, keep using new transforms
        {
            // add to transforms
            absoluteTransforms.push_back(T);
            numberOfContinuousZombieFrames = 0;
        } // not zombie frame
    } // Not first frame



    // increment frame counter
    frameIndex++;

    return dataPTSreduced_cloud;

}
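calcTransformPCLRGB itself is not part of this excerpt. As a rough sketch of what such a registration step usually looks like with stock PCL, and assuming the parameters handed to updatePosition map onto IterativeClosestPoint roughly as their names suggest, one might write the following; it is an illustration, not the project's actual implementation.

#include <pcl/point_types.h>
#include <pcl/registration/icp.h>

// Sketch only: register a model cloud onto a data cloud with PCL ICP.
// The parameter-to-setter mapping is an assumption based on the arguments
// of Track::updatePosition.
Eigen::Matrix4f registerModelToData(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr &data,
                                    const pcl::PointCloud<pcl::PointXYZRGB>::Ptr &model,
                                    int maxIterations, double transEpsilon,
                                    double euclideanFitnessEps, double *fitness)
{
    pcl::IterativeClosestPoint<pcl::PointXYZRGB, pcl::PointXYZRGB> icp;
    icp.setInputSource(model);                  // the model is moved onto the data
    icp.setInputTarget(data);
    icp.setMaximumIterations(maxIterations);
    icp.setTransformationEpsilon(transEpsilon);
    icp.setEuclideanFitnessEpsilon(euclideanFitnessEps);

    pcl::PointCloud<pcl::PointXYZRGB> aligned;  // model expressed in the data frame
    icp.align(aligned);

    if (fitness)
        *fitness = icp.getFitnessScore();       // mean squared correspondence distance
    return icp.getFinalTransformation();
}

The returned matrix plays the role of T above, and the fitness value is what a check against icp_maxfitness would consume.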
	inline void
	filter_depth_projection(	const image_geometry::PinholeCameraModel &camera_model,
								const pcl::PointCloud<PointT> &in,
								pcl::PointCloud<PointT> &out,
								int rows,
								int cols,
								int k_neighbors = 2,
								float threshold = 0.3)
	{
		std::vector<std::vector<bool> > hit( cols, std::vector<bool>(rows));
		std::vector<int> points2d_indices;
		pcl::PointCloud<pcl::PointXY> points2d;
		pcl::PointCloud<PointT> z_filtred;

#ifdef DEBUG
		std::cout << "in points: "<< in.size() << " width: " << cols << " height: " << rows << "\n";
#endif

		project2d::Points2d<PointT> point_map;
		point_map.init(camera_model, in, rows, cols);
		point_map.get_points(z_filtred, 25);

		// Project points into image space
		for(unsigned int i=0; i < z_filtred.size();  ++i){
			const PointT* pt = &z_filtred.points.at(i);
			//if( pt->z > 1) // minimum-distance-from-camera check (1 m) is currently disabled
			{

				cv::Point2i point_image = camera_model.project3dToPixel(cv::Point3d(pt->x, pt->y, pt->z));

				if( between<int>(0, point_image.x, cols )
					&& between<int>( 0, point_image.y, rows )
				)
				{
					// Point already at this position?
					if(!hit[point_image.x][point_image.y]){
						hit[point_image.x][point_image.y] = true;

						pcl::PointXY p_image;
						p_image.x = point_image.x;
						p_image.y = point_image.y;

						points2d.push_back(p_image);
						points2d_indices.push_back(i);
					}
					else{
#ifdef DEBUG
						std::cout << "[" << point_image.x << "][" << point_image.y << "] already inserted " << pt << "\n";
#endif

					}

				}
			}
		}

#ifdef DEBUG
		std::cout << "Z_filtred: " << z_filtred.size() << " projected 2d points: "<< points2d.size() << " indices: " << points2d_indices.size() << "\n";
#endif

		pcl::search::KdTree<pcl::PointXY> tree_n;
		tree_n.setInputCloud(points2d.makeShared());
		tree_n.setEpsilon(0.5);

		for(unsigned int i=0; i < points2d.size(); ++i){
			std::vector<int> k_indices;
			std::vector<float> square_distance;

			//tree_n.nearestKSearch(points2d.at(i), k_neighbors, k_indices, square_distance);
			// Note: k_neighbors is passed as the search radius (in pixels) to radiusSearch here
			tree_n.radiusSearch(points2d.at(i), k_neighbors, k_indices, square_distance);

			look_up_indices(points2d_indices, k_indices);

			float distance_value;
			if(is_edge_threshold(z_filtred, points2d_indices.at(i), k_indices, square_distance, distance_value, threshold)){
				out.push_back(z_filtred.points.at(points2d_indices.at(i)));
				out.at(out.size()-1).intensity = sqrt(distance_value);
			}
		}

#ifdef DEBUG
		std::cout << "out 2d points: "<< out.size() << "\n";
#endif
	}
pcl::PointCloud<briskDepth> BDMatch(pcl::PointCloud<briskDepth> a, pcl::PointCloud<briskDepth> b)
{
    std::cout << "The count is: " << count << std::endl;
    pcl::PointCloud<briskDepth> pclMatch;
    try
    {
        cv::Mat descriptorsA;
        cv::Mat descriptorsB;
        for(int i =0; i < a.size(); i++)
        {
            descriptorsA.push_back(a[i].descriptor);
        }

        for(int i =0; i < b.size(); i++)
        {
            descriptorsB.push_back(b[i].descriptor);
        }

        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector< cv::DMatch > matches;

        matcher.match( descriptorsA, descriptorsB, matches );

        double max_dist = 0; double min_dist = 1000;

        StdDeviation sd;
        std::vector<double> temp(descriptorsA.rows); // avoid a non-standard variable-length array

        for (int i =0; i < descriptorsA.rows;i++)
        {
            double dist = matches[i].distance;
            if(max_dist<dist) max_dist = dist;
            if(min_dist>dist) min_dist = dist;
            //std::cout << dist << "\t";
            temp[i] = dist;
        }

        //std::cout << std::endl;
       // std::cout << " Brisk max dist " << max_dist << std::endl;
       // std::cout << " Brisk mins dist " << min_dist << std::endl;

        sd.SetValues(temp.data(), descriptorsA.rows);

        double mean = sd.CalculateMean();
        double variance = sd.CalculateVariane();
        double samplevariance = sd.CalculateSampleVariane();
        double sampledevi = sd.GetSampleStandardDeviation();
        double devi = sd.GetStandardDeviation();

        std::cout << "Brisk\t" << descriptorsA.rows << "\t"
                << mean << "\t"
                << variance << "\t"
                << samplevariance << "\t"
                << devi << "\t"
                << sampledevi << "\n";

        std::vector< cv::DMatch > good_matches;

        for (int i=0;i<descriptorsA.rows;i++)
        {
            //if( matches[i].distance<10)
            if( matches[i].distance<max_dist/2)
            {
                good_matches.push_back(matches[i]);
                pclMatch.push_back(a[i]);
            }
        }

        cv::Mat img_matches;
        cv::drawMatches( brisk_lastImg, brisk_lastKeypoints, brisk_currentImg, brisk_currentKeypoints,
                           good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
                           std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

       // cv::imshow("Brisk Matches", img_matches);
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;

        for( int i = 0; i < good_matches.size(); i++ )
        {
          //-- Get the keypoints from the good matches
          obj.push_back( brisk_lastKeypoints[ good_matches[i].queryIdx ].pt );
          scene.push_back( brisk_currentKeypoints[ good_matches[i].trainIdx ].pt );
        }

        cv::Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector<cv::Point2f> obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( brisk_lastImg.cols, 0 );
        obj_corners[2] = cvPoint( brisk_lastImg.cols, brisk_lastImg.rows ); obj_corners[3] = cvPoint( 0, brisk_lastImg.rows );
        std::vector<cv::Point2f> scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);

        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        line( img_matches, scene_corners[0] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[1] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[2] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[2] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[3] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[3] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[0] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );

        //-- Show detected matches
        cv::imshow( "Good brisk Matches & Object detection", img_matches );
        cv::waitKey(50);
       // std::cout << good_matches.size() << " Brisk features matched from, " << a.size() << ", " << b.size() << " sets." << std::endl;
    }
    catch (const std::exception &exc)
    {
        // catch anything thrown within try block that derives from std::exception
        std::cerr << exc.what();
    }
    return pclMatch;
}
Example #30
template <typename PointT, typename Scalar> inline unsigned int
pcl::computeMeanAndCovarianceMatrix (const pcl::PointCloud<PointT> &cloud,
                                     Eigen::Matrix<Scalar, 3, 3> &covariance_matrix,
                                     Eigen::Matrix<Scalar, 4, 1> &centroid)
{
  // Create the accumulator on the stack, which is much faster than repeatedly indexing the cloud and using the centroid as a buffer
  Eigen::Matrix<Scalar, 1, 9, Eigen::RowMajor> accu = Eigen::Matrix<Scalar, 1, 9, Eigen::RowMajor>::Zero ();
  size_t point_count;
  if (cloud.is_dense)
  {
    point_count = cloud.size ();
    // For each point in the cloud
    for (size_t i = 0; i < point_count; ++i)
    {
      accu [0] += cloud[i].x * cloud[i].x;
      accu [1] += cloud[i].x * cloud[i].y;
      accu [2] += cloud[i].x * cloud[i].z;
      accu [3] += cloud[i].y * cloud[i].y; // 4
      accu [4] += cloud[i].y * cloud[i].z; // 5
      accu [5] += cloud[i].z * cloud[i].z; // 8
      accu [6] += cloud[i].x;
      accu [7] += cloud[i].y;
      accu [8] += cloud[i].z;
    }
  }
  else
  {
    point_count = 0;
    for (size_t i = 0; i < cloud.size (); ++i)
    {
      if (!isFinite (cloud[i]))
        continue;

      accu [0] += cloud[i].x * cloud[i].x;
      accu [1] += cloud[i].x * cloud[i].y;
      accu [2] += cloud[i].x * cloud[i].z;
      accu [3] += cloud[i].y * cloud[i].y;
      accu [4] += cloud[i].y * cloud[i].z;
      accu [5] += cloud[i].z * cloud[i].z;
      accu [6] += cloud[i].x;
      accu [7] += cloud[i].y;
      accu [8] += cloud[i].z;
      ++point_count;
    }
  }
  accu /= static_cast<Scalar> (point_count);
  if (point_count != 0)
  {
    //centroid.head<3> () = accu.tail<3> ();    -- does not compile with Clang 3.0
    centroid[0] = accu[6]; centroid[1] = accu[7]; centroid[2] = accu[8];
    centroid[3] = 0;
    covariance_matrix.coeffRef (0) = accu [0] - accu [6] * accu [6];
    covariance_matrix.coeffRef (1) = accu [1] - accu [6] * accu [7];
    covariance_matrix.coeffRef (2) = accu [2] - accu [6] * accu [8];
    covariance_matrix.coeffRef (4) = accu [3] - accu [7] * accu [7];
    covariance_matrix.coeffRef (5) = accu [4] - accu [7] * accu [8];
    covariance_matrix.coeffRef (8) = accu [5] - accu [8] * accu [8];
    covariance_matrix.coeffRef (3) = covariance_matrix.coeff (1);
    covariance_matrix.coeffRef (6) = covariance_matrix.coeff (2);
    covariance_matrix.coeffRef (7) = covariance_matrix.coeff (5);
  }
  return (static_cast<unsigned int> (point_count));
}
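The accumulator above is the single-pass identity written out coefficient by coefficient: each covariance entry is a raw second moment minus the product of the corresponding means (E[ab] - E[a]E[b]), normalized by the number of finite points. Calling the function is straightforward; a small self-contained sketch with placeholder points:

#include <pcl/point_types.h>
#include <pcl/common/centroid.h>

int main()
{
    pcl::PointCloud<pcl::PointXYZ> cloud;
    cloud.push_back(pcl::PointXYZ(0.0f, 0.0f, 0.0f));
    cloud.push_back(pcl::PointXYZ(1.0f, 0.0f, 0.0f));
    cloud.push_back(pcl::PointXYZ(0.0f, 1.0f, 0.0f));

    Eigen::Matrix3f covariance;
    Eigen::Vector4f centroid;   // (x, y, z, 0)
    unsigned int used = pcl::computeMeanAndCovarianceMatrix(cloud, covariance, centroid);

    // 'used' is the number of finite points that contributed to the estimate.
    return used == 3 ? 0 : 1;
}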