void go_AnytimeRRT(const InstanceFileMap &args, const Agent &agent, const Workspace &workspace,
            const typename Agent::State &start, const typename Agent::State &goal) {

	clock_t startT = clock();

	dfpair(stdout, "planner", "%s", "Anytime RRT");
	// typedef flann::KDTreeSingleIndexParams KDTreeType;
	typedef flann::KDTreeIndexParams KDTreeType;
	typedef FLANN_KDTreeWrapper<KDTreeType, typename Agent::DistanceEvaluator, typename Agent::Edge> KDTree;
	typedef UniformSampler<Workspace, Agent, KDTree> USampler;
	typedef GoalBiasSampler<Agent, USampler> GBSampler;
	typedef TreeInterface<Agent, KDTree, GBSampler> TreeInterface;
	typedef AnytimeRRT<Workspace, Agent, TreeInterface> Planner;

	/* planner config */

	KDTreeType kdtreeType(1);
	KDTree kdtree(kdtreeType, agent.getDistanceEvaluator(), agent.getTreeStateSize());
	USampler uniformsampler(workspace, agent, kdtree);

	double goalBias = args.exists("Goal Bias") ? args.doubleVal("Goal Bias") : 0;
	dfpair(stdout, "goal bias", "%g", goalBias);

	GBSampler goalbiassampler(uniformsampler, goal, goalBias);
	TreeInterface treeInterface(kdtree, goalbiassampler);
	Planner planner(workspace, agent, treeInterface, args);

	go_COMMONANYTIME<Planner, Workspace, Agent>(args, planner, workspace, agent, start, goal, startT);
}
Example No. 2
int main(int argc, char** argv)
{
	// Objects for storing the point cloud, its normals, and the combined cloud of points with normals
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
	pcl::PointCloud<pcl::PointNormal>::Ptr cloudNormals(new pcl::PointCloud<pcl::PointNormal>);		

	// Read a PCD file from disk
	if(pcl::io::loadPCDFile<pcl::PointXYZ>(argv[1],*cloud)!=0)
	{
		return -1;
	}
	
	// Normal estimation
	pcl::NormalEstimation<pcl::PointXYZ,pcl::Normal> normalEstimation;
	normalEstimation.setInputCloud(cloud);
	normalEstimation.setRadiusSearch(0.03);
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	normalEstimation.setSearchMethod(kdtree);
	normalEstimation.compute(*normals);
	
	// The triangulation object requires the points and normals to be stored in the same structure
	pcl::concatenateFields(*cloud,*normals,*cloudNormals);
	// Tree object for searches in this new object
	pcl::search::KdTree<pcl::PointNormal>::Ptr kdtree2(new pcl::search::KdTree<pcl::PointNormal>);
	kdtree2->setInputCloud(cloudNormals);
	
	// Triangulation object
	pcl::GreedyProjectionTriangulation<pcl::PointNormal> triangulation;
	// Output object containing the mesh
	pcl::PolygonMesh triangles;
	// Maximum distance between connected points (maximum edge length)
	triangulation.setSearchRadius(30);
	// Maximum acceptable distance for a point to be considered,
	// relative to the distance of the nearest point
	triangulation.setMu(2.5);
	// Set the number of neighbors searched for 
	triangulation.setMaximumNearestNeighbors(200);
	// Points will not be connected to the current points 
	// if normals deviate more than the specified angle.
	triangulation.setMaximumSurfaceAngle(M_PI / 4); // 45 degrees
	// If false the direction of normals will not be taken into account
	// when computing the angle between them
	triangulation.setNormalConsistency(false);
	// Minimum and maximum angle there can be in a triangle.
	// The first is not guaranteed; the second is.
	triangulation.setMinimumAngle(M_PI / 18); // 10 degrees
	triangulation.setMaximumAngle(2*M_PI/ 3); // 120 degrees
	
	// Triangulate the cloud
	triangulation.setInputCloud(cloudNormals);
	triangulation.setSearchMethod(kdtree2);
	triangulation.reconstruct(triangles);
	
	// Save to disk
	pcl::io::saveVTKFile("mesh.vtk",triangles);
	return 0;
}
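A minimal follow-up sketch, not part of the original example: the reconstructed pcl::PolygonMesh can also be inspected interactively before (or instead of) saving it, assuming the PCL visualization module is available. "viewMesh" is a hypothetical helper name.

#include <pcl/visualization/pcl_visualizer.h>

void viewMesh(const pcl::PolygonMesh &triangles)
{
	// Show the mesh produced by GreedyProjectionTriangulation in an interactive window.
	pcl::visualization::PCLVisualizer viewer("Greedy triangulation");
	viewer.addPolygonMesh(triangles, "mesh");
	while (!viewer.wasStopped())
		viewer.spinOnce(100);
}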
Example No. 3
int
main (int, char** argv)
{
  std::string filename = argv[1];
  std::cout << "Reading " << filename << std::endl;
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);

  if (pcl::io::loadPCDFile <pcl::PointXYZ> (filename.c_str (), *cloud) == -1)
  // load the file
  {
    PCL_ERROR ("Couldn't read file");
    return (-1);
  }
  std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;

  // Compute the normals
  pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
  normal_estimation.setInputCloud (cloud);

  pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree (new pcl::search::KdTree<pcl::PointXYZ>);
  normal_estimation.setSearchMethod (kdtree);

  pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud< pcl::Normal>);
  normal_estimation.setRadiusSearch (100);
  normal_estimation.compute (*normals);

  // Setup spin image computation
  pcl::SpinImageEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153> > spin_image_descriptor(8, .05, 16);
  spin_image_descriptor.setInputCloud (cloud);
  spin_image_descriptor.setInputNormals (normals);

  // Use the same KdTree from the normal estimation
  spin_image_descriptor.setSearchMethod (kdtree);
  pcl::PointCloud<pcl::Histogram<153> >::Ptr spin_images (new pcl::PointCloud<pcl::Histogram<153> >);
  spin_image_descriptor.setRadiusSearch (100);

  // Actually compute the spin images
  spin_image_descriptor.compute (*spin_images);
  std::cout << "SI output points.size (): " << spin_images->points.size () << std::endl;

  // Write every spin image descriptor to a text file named after the input cloud.
  // Note: appending to argv[1] with strcat would write past the argument buffer,
  // so the file name is built in a std::string instead.
  std::string fileToWrite = std::string(argv[1]) + "spinImages.bin";
  std::ofstream myfile (fileToWrite.c_str());

  for (size_t i = 0; i < spin_images->points.size (); ++i) {
	  myfile << spin_images->points[i] << std::endl;
  }
  myfile.close();
    

  return 0;
}
Example No. 4
int
main (int, char** argv)
{
  std::string filename = argv[1];
  std::cout << "Reading " << filename << std::endl;
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);

  if (pcl::io::loadPCDFile <pcl::PointXYZ> (filename.c_str (), *cloud) == -1)
  // load the file
  {
    PCL_ERROR ("Couldn't read file");
    return (-1);
  }
  std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;

  // Compute the normals
  pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
  normal_estimation.setInputCloud (cloud);

  pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree (new pcl::search::KdTree<pcl::PointXYZ>);
  normal_estimation.setSearchMethod (kdtree);

  pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud< pcl::Normal>);
  normal_estimation.setRadiusSearch (0.03);
  normal_estimation.compute (*normals);

  // Setup the shape context computation
  pcl::ShapeContext3DEstimation<pcl::PointXYZ, pcl::Normal, pcl::ShapeContext1980> shape_context;

  // Provide the point cloud
  shape_context.setInputCloud (cloud);
  // Provide normals
  shape_context.setInputNormals (normals);
  // Use the same KdTree from the normal estimation
  shape_context.setSearchMethod (kdtree);
  pcl::PointCloud<pcl::ShapeContext1980>::Ptr shape_context_features (new pcl::PointCloud<pcl::ShapeContext1980>);

  // The minimal radius is generally set to approx. 1/10 of the search radius, while the pt. density radius is generally set to 1/5
  shape_context.setRadiusSearch (0.2);
  shape_context.setPointDensityRadius (0.04);
  shape_context.setMinimalRadius (0.02);

  // Actually compute the shape contexts
  shape_context.compute (*shape_context_features);
  std::cout << "3DSC output points.size (): " << shape_context_features->points.size () << std::endl;

  // Display and retrieve the shape context descriptor vector for the 0th point.
  std::cout << shape_context_features->points[0] << std::endl;
  //float* first_descriptor = shape_context_features->points[0].descriptor; // 1980 elements

  return 0;
}
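A small hedged helper sketch: the comment above gives the usual rule of thumb for the 3DSC radii (minimal radius around 1/10 of the search radius, point density radius around 1/5), so all three setters can be driven from a single search radius. "configureShapeContext" is a hypothetical name, not a PCL API.

void
configureShapeContext (pcl::ShapeContext3DEstimation<pcl::PointXYZ, pcl::Normal, pcl::ShapeContext1980> &sc,
                       double search_radius)
{
  sc.setRadiusSearch (search_radius);
  // Rule of thumb from the example above: ~1/10 and ~1/5 of the search radius.
  sc.setMinimalRadius (search_radius / 10.0);
  sc.setPointDensityRadius (search_radius / 5.0);
}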
Example No. 5
void shot_detector::processImage()
{
    std::cerr << "Processing" << std::endl;
    std::string file = "/home/niko/projects/apc/catkin/src/apc_ros/apc_object_detection/visual_hull_refined_smoothed.obj";
    loadModel(model, file);
    pcl::io::loadPCDFile("/home/niko/projects/apc/catkin/src/apc_ros/apc_object_detection/niko_file.pcd", *scene);
    //Downsample the model and the scene so they have roughly the same resolution
    pcl::PointCloud<PointType>::Ptr scene_filter (new pcl::PointCloud<PointType> ());
    pcl_functions::voxelFilter(scene, scene_filter, voxel_sample_);
    scene = scene_filter;
    pcl::PointCloud<PointType>::Ptr model_filter (new pcl::PointCloud<PointType> ());
    pcl_functions::voxelFilter(model, model_filter, voxel_sample_);
    model = model_filter;
    // Randomly select a couple of keypoints so we don't calculate descriptors for everything
    sampleKeypoints(model, model_keypoints, model_ss_);
    sampleKeypoints(scene, scene_keypoints, scene_ss_);
    //Calculate the Normals
    calcNormals(model, model_normals);
    calcNormals(scene, scene_normals);
    pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
    ourcvfh.setInputCloud(scene);
    ourcvfh.setInputNormals(scene_normals);
    ourcvfh.setSearchMethod(kdtree);
    ourcvfh.setEPSAngleThreshold(5.0 / 180.0 * M_PI); // 5 degrees.
    ourcvfh.setCurvatureThreshold(1.0);
    ourcvfh.setNormalizeBins(false);
    // Set the minimum axis ratio between the SGURF axes. At the disambiguation phase,
    // this will decide if additional Reference Frames need to be created, if ambiguous.
    ourcvfh.setAxisRatio(0.8);
    ourcvfh.compute(vfh_scene_descriptors);
    ourcvfh.setInputCloud(model);
    ourcvfh.setInputNormals(model_normals);
    ourcvfh.compute(vfh_model_descriptors);
    //Calculate the shot descriptors at each keypoint in the scene
    calcSHOTDescriptors(model, model_keypoints, model_normals, model_descriptors);
    calcSHOTDescriptors(scene, scene_keypoints, scene_normals, scene_descriptors);
    // Compare descriptors and try to find correspondences
    //ransac(rototranslations,model,scene);
    //refinePose(rototranslations,model,scene);
    compare(model_descriptors, scene_descriptors);
    groupCorrespondences();
    visualizeCorrespondences();
    visualizeICP();

    /*Eigen::Matrix4f pose;
    if(model_scene_corrs->size ()!=0){
        groupCorrespondences();
        ransac(rototranslations,model,scene);
        pose=refinePose(rototranslations,model,scene);

    }*/
}
Example No. 6
int
main(int argc, char** argv)
{
	// Object for storing the point cloud.
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	// Object for storing the normals.
	pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
	// Object for storing the SHOT descriptors for each point.
	pcl::PointCloud<pcl::SHOT352>::Ptr descriptors(new pcl::PointCloud<pcl::SHOT352>());
 
	// Read a PCD file from disk.
	if (pcl::io::loadPCDFile<pcl::PointXYZ>(argv[1], *cloud) != 0)
	{
		return -1;
	}
 
	// Note: you would usually perform downsampling now. It has been omitted here
	// for simplicity, but be aware that computation can take a long time.
 
	// Estimate the normals.
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normalEstimation;
	normalEstimation.setInputCloud(cloud);
	normalEstimation.setRadiusSearch(10);
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	normalEstimation.setSearchMethod(kdtree);
	normalEstimation.compute(*normals);
 
	// SHOT estimation object.
	pcl::SHOTEstimation<pcl::PointXYZ, pcl::Normal, pcl::SHOT352> shot;
	shot.setInputCloud(cloud);
	shot.setInputNormals(normals);
	// The radius that defines which of the keypoint's neighbors are described.
	// If too large, there may be clutter, and if too small, not enough points may be found.
	shot.setRadiusSearch(2);
 
	shot.compute(*descriptors);

	// Build the output file name "SHOT_<input file basename>" and save the descriptors.
	std::string ss_temp(argv[1]);
	std::size_t found = ss_temp.find_last_of("/\\");
	ss_temp = ss_temp.substr(found + 1);
	std::string ss = "SHOT_";
	ss.append(ss_temp);
	std::cout << ss << '\n';

	pcl::io::savePCDFileASCII(ss, *descriptors);

	return 0;
}
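A hedged follow-up sketch: the descriptors written above form a regular PCD cloud of pcl::SHOT352 points, so they can be reloaded the same way the input cloud was loaded. "reloadDescriptors" and the file-name argument are illustrative only.

bool reloadDescriptors(const std::string &filename)
{
	// Load a previously saved SHOT descriptor cloud back from disk.
	pcl::PointCloud<pcl::SHOT352>::Ptr loaded(new pcl::PointCloud<pcl::SHOT352>());
	if (pcl::io::loadPCDFile<pcl::SHOT352>(filename, *loaded) != 0)
		return false;
	std::cout << "Reloaded " << loaded->points.size() << " SHOT descriptors" << std::endl;
	return true;
}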
Example No. 7
            void findKNNeghbors()
            {
                KdTreeFLANN<PointXYZ>::Ptr kdtree(new KdTreeFLANN<PointXYZ>);
                kdtree->setInputCloud(cloud);

                size_t cloud_size = cloud->points.size();

                std::vector<float> dists;
                neighbors_all.resize(cloud_size);
                for(size_t i = 0; i < cloud_size; ++i)
                {
                    kdtree->nearestKSearch(cloud->points[i], k, neighbors_all[i], dists);
                    sizes.push_back((int)neighbors_all[i].size());
                }
                max_nn_size = *max_element(sizes.begin(), sizes.end());
            }
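            // A hedged companion sketch (an assumption about how the ragged lists built
            // above might be consumed; not part of the original class): flatten
            // neighbors_all into a dense row-major buffer of width max_nn_size,
            // padding unused slots with -1.
            std::vector<int> packNeighbors()
            {
                std::vector<int> packed(neighbors_all.size() * max_nn_size, -1);
                for (size_t i = 0; i < neighbors_all.size(); ++i)
                    for (size_t j = 0; j < neighbors_all[i].size(); ++j)
                        packed[i * max_nn_size + j] = neighbors_all[i][j];
                return packed;
            }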
Example No. 8
int
main (int, char** argv)
{
  std::string filename = argv[1];
  std::cout << "Reading " << filename << std::endl;
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);

  if (pcl::io::loadPCDFile <pcl::PointXYZ> (filename.c_str (), *cloud) == -1)
  // load the file
  {
    PCL_ERROR ("Couldn't read file");
    return (-1);
  }
  std::cout << "Loaded " << cloud->points.size () << " points." << std::endl;

  // Compute the normals
  pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normal_estimation;
  normal_estimation.setInputCloud (cloud);

  pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree (new pcl::search::KdTree<pcl::PointXYZ>);
  normal_estimation.setSearchMethod (kdtree);

  pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud< pcl::Normal>);
  normal_estimation.setRadiusSearch (0.03);
  normal_estimation.compute (*normals);

  // Setup spin image computation
  pcl::SpinImageEstimation<pcl::PointXYZ, pcl::Normal, pcl::Histogram<153> > spin_image_descriptor(8, 0.5, 16);
  spin_image_descriptor.setInputCloud (cloud);
  spin_image_descriptor.setInputNormals (normals);

  // Use the same KdTree from the normal estimation
  spin_image_descriptor.setSearchMethod (kdtree);
  pcl::PointCloud<pcl::Histogram<153> >::Ptr spin_images (new pcl::PointCloud<pcl::Histogram<153> >);
  spin_image_descriptor.setRadiusSearch (0.2);

  // Actually compute the spin images
  spin_image_descriptor.compute (*spin_images);
  std::cout << "SI output points.size (): " << spin_images->points.size () << std::endl;

  // Display and retrieve the spin image descriptor vector for the first point.
  pcl::Histogram<153> first_descriptor = spin_images->points[0];
  std::cout << first_descriptor << std::endl;

  return 0;
}
Example No. 9
std::shared_ptr<Feature> ComputeFPFHFeature(const PointCloud &input,
        const KDTreeSearchParam &search_param/* = KDTreeSearchParamKNN()*/)
{
    auto feature = std::make_shared<Feature>();
    feature->Resize(33, (int)input.points_.size());
    if (input.HasNormals() == false) {
        PrintDebug("[ComputeFPFHFeature] Failed because input point cloud has no normal.\n");
        return feature;
    }
    KDTreeFlann kdtree(input);
    auto spfh = ComputeSPFHFeature(input, kdtree, search_param);
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
    for (int i = 0; i < (int)input.points_.size(); i++) {
        const auto &point = input.points_[i];
        std::vector<int> indices;
        std::vector<double> distance2;
        if (kdtree.Search(point, search_param, indices, distance2) > 1) {
            double sum[3] = {0.0, 0.0, 0.0};
            for (size_t k = 1; k < indices.size(); k++) {
                // skip the point itself
                double dist = distance2[k];
                if (dist == 0.0)
                    continue;
                for (int j = 0; j < 33; j++) {
                    double val = spfh->data_(j, indices[k]) / dist;
                    sum[j / 11] += val;
                    feature->data_(j, i) += val;
                }
            }
            for (int j = 0; j < 3; j++)
                if (sum[j] != 0.0) sum[j] = 100.0 / sum[j];
            for (int j = 0; j < 33; j++) {
                feature->data_(j, i) *= sum[j / 11];
                // The commented line is the fpfh function in the paper.
                // But according to PCL implementation, it is skipped.
                // Our initial test shows that the full fpfh function in the
                // paper seems to be better than PCL implementation. Further
                // test required.
                feature->data_(j, i) += spfh->data_(j, i);
            }
        }
    }
    return feature;
}
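A hedged usage sketch for the function above, based on the (older) Open3D API this snippet appears to come from; KDTreeSearchParamKNN is the default hinted at in the signature comment, and "DescribeWithFPFH" is a hypothetical wrapper name.

std::shared_ptr<Feature> DescribeWithFPFH(const PointCloud &cloud_with_normals)
{
    // Normals must already be present; otherwise ComputeFPFHFeature bails out
    // early, as shown above. 100 nearest neighbours per point is an arbitrary choice.
    return ComputeFPFHFeature(cloud_with_normals, KDTreeSearchParamKNN(100));
}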
void solveWithRRTConnect(const VREPInterface *interface, const InstanceFileMap *args, const VREPInterface::State &start, const VREPInterface::State &goal) {
	dfpair(stdout, "planner", "%s", "RRT Connect");

	typedef flann::KDTreeSingleIndexParams KDTreeType;
	typedef FLANN_KDTreeWrapper<KDTreeType, flann::L2<double>, VREPInterface::Edge> KDTree;
	typedef UniformSampler<VREPInterface, VREPInterface, KDTree> UniformSampler;
	typedef TreeInterface<VREPInterface, KDTree, UniformSampler> TreeInterface;
	typedef RRTConnect<VREPInterface, VREPInterface, TreeInterface> RRTConnect;

	KDTreeType kdtreeType;
	KDTree kdtree(kdtreeType, interface->getTreeStateSize());
	UniformSampler uniformSampler(*interface, *interface, kdtree);
	TreeInterface treeInterface(kdtree, uniformSampler);

	RRTConnect rrtconnect(*interface, *interface, treeInterface, *args);
	rrtconnect.query(start, goal);
	rrtconnect.dfpairs();
}
Example No. 11
            void findRadiusNeghbors(float radius = -1)
            {
                radius = radius == -1 ? this->radius : radius;

                KdTreeFLANN<PointXYZ>::Ptr kdtree(new KdTreeFLANN<PointXYZ>);
                kdtree->setInputCloud(cloud);

                size_t cloud_size = cloud->points.size();

                std::vector<float> dists;
                neighbors_all.resize(cloud_size);
                for(size_t i = 0; i < cloud_size; ++i)
                {
                    kdtree->radiusSearch(cloud->points[i], radius, neighbors_all[i], dists);
                    sizes.push_back((int)neighbors_all[i].size());
                }
                max_nn_size = *max_element(sizes.begin(), sizes.end());
            }
Example No. 12
void contourHandler::getCloseContours(_Points &centresOfMass, int pointDistance, map <int,_Points> & mep )
{
    ///search____________________________________________________________________________
    // Iterate through all centres of mass and query the closest points to each one.
    //   Add a group to a vector if there is more than one nearest neighbour.
    //   Search the vector of groups to see if the current point already exists in a group (skip if yes, search if no).
    vector<int> indices;
    vector<float> dists;
    int pointName=-1;
    // cv::flann's L2 index returns squared distances, so square the threshold once here.
    pointDistance=pointDistance*pointDistance;

    if (centresOfMass.size()>1)
    {
        for (_Points::iterator it = centresOfMass.begin();it!=centresOfMass.end();it++)

        {
            ///Iterate through all points in the centres-of-mass collection.
            /// For each point, find all the nearest neighbours.
            ///  Add all nearest neighbours that meet the distance requirement to a map keyed by the point name.
            ///The indices ALWAYS reference positions in centresOfMass.
            Point c=*it;
            pointName+=1;
            int numOfPoints=centresOfMass.size();
            flann::KDTreeIndexParams indexParams;
            flann::Index kdtree(Mat(centresOfMass).reshape(1), indexParams);
            vector<float> query;
            query.push_back(c.x); //Insert the 2D point we need to find neighbours to the query
            query.push_back(c.y); //Insert the 2D point we need to find neighbours to the query
            kdtree.knnSearch(query,indices,dists,numOfPoints);
            // All nearest neighbours are in centresOfMass, referenced through "indices".

            //        kdtree.radiusSearch(query, indices, dists, range, numOfPoints);
            // "indices" and "dists" are parallel arrays: position i2 holds the i2-th
            // neighbour's index into centresOfMass and its squared distance to the query.
            for (size_t i2 = 0; i2 < indices.size(); ++i2)
            {
                if (dists[i2] < pointDistance && dists[i2] > 1)
                {
                    mep[pointName].push_back(centresOfMass[indices[i2]]);
                }
            }
        }
    }
}
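A standalone hedged sketch of the query pattern used above, with the OpenCV 3+ headers spelled out. cv::flann's default L2 index returns squared distances, which is why getCloseContours squares pointDistance before comparing. "knnDemo" is a hypothetical name, and the points are assumed to be stored as floats.

#include <opencv2/core.hpp>
#include <opencv2/flann.hpp>

void knnDemo(const std::vector<cv::Point2f> &points, const cv::Point2f &query_pt, int k)
{
    // Build a KD-tree over the 2D points and fetch the k nearest neighbours of one query.
    cv::flann::KDTreeIndexParams indexParams;
    cv::flann::Index kdtree(cv::Mat(points).reshape(1), indexParams);

    std::vector<float> query;
    query.push_back(query_pt.x);
    query.push_back(query_pt.y);

    std::vector<int> indices;
    std::vector<float> dists;   // squared L2 distances, parallel to "indices"
    kdtree.knnSearch(query, indices, dists, k);
}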
Example No. 13
	// PointNormal: float x, y, z; float normal[3]; float curvature
	pcl::PointCloud<pcl::PointNormal>::Ptr smoothAndNormalEstimation(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud,
																	 const double radius)
	{
		// Smoothing and normal estimation based on polynomial reconstruction
		// Moving Least Squares (MLS) surface reconstruction method can be used to smooth and resample noisy data

		// Create a KD-Tree
		pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);

		// Init object (second point type is for the normals, even if unused)
		pcl::MovingLeastSquares<pcl::PointXYZ, pcl::PointNormal> mls;

		// Set parameters
		mls.setInputCloud(cloud);
		mls.setPolynomialFit(true);
		mls.setSearchMethod(kdtree);
		mls.setSearchRadius(radius);
		// void 	setPointDensity (int desired_num_points_in_radius);
		// void 	setDilationVoxelSize (float voxel_size)
		// void 	setDilationIterations (int iterations);
		// void 	setSqrGaussParam (double sqr_gauss_param)
		// void 	setPolynomialOrder (int order)
		// void 	setPolynomialFit (bool polynomial_fit)

		// Reconstruct
		// PCL v1.6
#if 1
		mls.setComputeNormals(true);

		// Output has the PointNormal type in order to store the normals calculated by MLS
		pcl::PointCloud<pcl::PointNormal>::Ptr mls_points(new pcl::PointCloud<pcl::PointNormal>());
		mls.process (*mls_points);
		return mls_points;
#else
		mls.reconstruct(*pPoints);

		// Output has the PointNormal type in order to store the normals calculated by MLS
		pPointNormals = mls.getOutputNormals();
		//mls.setOutputNormals(mls_points);
#endif
	}
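A hedged usage sketch for the helper above (the file names, the 3 cm radius, and the calling context are assumptions, not part of the original):

	void smoothFile(const std::string &path)
	{
		// Load a raw cloud, run MLS smoothing + normal estimation, and save the result.
		pcl::PointCloud<pcl::PointXYZ>::Ptr raw(new pcl::PointCloud<pcl::PointXYZ>);
		if (pcl::io::loadPCDFile<pcl::PointXYZ>(path, *raw) != 0)
			return;

		pcl::PointCloud<pcl::PointNormal>::Ptr smoothed = smoothAndNormalEstimation(raw, 0.03);
		pcl::io::savePCDFileBinary("smoothed_with_normals.pcd", *smoothed);
	}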
Example No. 14
int
main(int argc, char** argv)
{
	// Object for storing the point cloud.
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	// Object for storing the normals.
	pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
	// Object for storing the spin image for each point.
	pcl::PointCloud<SpinImage>::Ptr descriptors(new pcl::PointCloud<SpinImage>());

	// Read a PCD file from disk.
	if (pcl::io::loadPCDFile<pcl::PointXYZ>(argv[1], *cloud) != 0)
	{
		cout << "Returning -1\n";
		return -1;
	}

	// Note: you would usually perform downsampling now. It has been omitted here
	// for simplicity, but be aware that computation can take a long time.

	// Estimate the normals.
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normalEstimation;
	normalEstimation.setInputCloud(cloud);
	normalEstimation.setRadiusSearch(0.03);
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	normalEstimation.setSearchMethod(kdtree);
	normalEstimation.compute(*normals);

	// Spin image estimation object.
	pcl::SpinImageEstimation<pcl::PointXYZ, pcl::Normal, SpinImage> si;
	si.setInputCloud(cloud);
	si.setInputNormals(normals);
	// Radius of the support cylinder.
	si.setRadiusSearch(0.02);
	// Set the resolution of the spin image (the number of bins along one dimension).
	// Note: you must change the output histogram size to reflect this.
	si.setImageWidth(8);
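	// Note: with an image width of 8, the spin image has (8+1)*(2*8+1) = 153 bins,
	// which is what the SpinImage / pcl::Histogram<153> output type above assumes.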

	si.compute(*descriptors);
}
Example No. 15
int
main(int argc, char** argv)
{
	// Object for storing the point cloud.
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	// Object for storing the normals.
	pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);

	// Read a PCD file from disk.
	if (pcl::io::loadPCDFile<pcl::PointXYZ>(argv[1], *cloud) != 0)
	{
		return -1;
	}

	// Object for normal estimation.
	pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> normalEstimation;
	normalEstimation.setInputCloud(cloud);
	// For every point, use all neighbors in a radius of 3cm.
	normalEstimation.setRadiusSearch(0.03);
	// A kd-tree is a data structure that makes searches efficient. More about it later.
	// The normal estimation object will use it to find nearest neighbors.
	pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
	normalEstimation.setSearchMethod(kdtree);

	// Calculate the normals.
	normalEstimation.compute(*normals);
	pcl::io::savePCDFileBinary ("normals.pcd", *normals);
	// Visualize them.
	boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer(new pcl::visualization::PCLVisualizer("Normals"));
	viewer->addPointCloud<pcl::PointXYZ>(cloud, "cloud");
	// Display one normal out of 20, as a line of length 3cm.
	viewer->addPointCloudNormals<pcl::PointXYZ, pcl::Normal>(cloud, normals, 20, 0.03, "normals");
	
	while (!viewer->wasStopped())
	{
		viewer->spinOnce(100);
		boost::this_thread::sleep(boost::posix_time::microseconds(100000));
	}
}
Example No. 16
/*
 * Cache the Local Likelihood distributions for Q
 */
void hll_cache(hll_t *hll, double **Q, int n)
{
  int i;

  // allocate space, build kdtree
  double **X = new_matrix2(n, hll->dx);
  double ***S;
  safe_calloc(S, n, double**);
  for (i = 0; i < n; i++)
    S[i] = new_matrix2(hll->dx, hll->dx);
  kdtree_t *Q_kdtree = kdtree(Q, n, hll->dq);

  // precompute LL distributions
  hll_sample(X, S, Q, hll, n);

  // cache in hll
  hll->cache.Q_kdtree = Q_kdtree;
  hll->cache.Q = matrix_clone(Q, n, hll->dq);
  hll->cache.X = X;
  hll->cache.S = S;
  hll->cache.n = n;
}
Example No. 17
void Cup_Segmentation::cloudCallback (const sensor_msgs::PointCloud2::ConstPtr& cloud_in)
{
  ROS_INFO("Processing!");
  /************************ CENTER AND LEFT BOXES ***************************************/
  //Creating point cloud and convert ROS Message
  pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_cloud (new pcl::PointCloud<pcl::PointXYZ> ());
  pcl::PointCloud<pcl::PointXYZ>::Ptr seg_cloud (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::fromROSMsg(*cloud_in, *pcl_cloud);
  //Create and define filter parameters

  pcl::PassThrough<pcl::PointXYZ> pass3;
  pass3.setFilterFieldName("x");
  pass3.setFilterLimits(0, 1.2); //-0.5 0.5
  pass3.setInputCloud(pcl_cloud);
  pass3.filter(*pcl_cloud);

  pcl::PassThrough<pcl::PointXYZ> pass;
  pass.setFilterFieldName("y");
  pass.setFilterLimits(-0.7, 0.1); //-0.5 0.5
  pass.setInputCloud(pcl_cloud);
  pass.filter(*pcl_cloud);

  pcl::PassThrough<pcl::PointXYZ> pass2;
  pass2.setFilterFieldName("z");
  pass2.setFilterLimits(0.0, 1.0); //-0.5 0.5
  pass2.setInputCloud(pcl_cloud);
  pass2.filter(*pcl_cloud);

  //Model fitting process ->RANSAC
  pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
  pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
  pcl::SACSegmentation<pcl::PointXYZ> seg;
  seg.setOptimizeCoefficients (true);
  seg.setModelType (pcl::SACMODEL_PARALLEL_PLANE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setDistanceThreshold (0.03); //0.03
  seg.setAxis (Eigen::Vector3f(1, 0, 0));
  seg.setEpsAngle (0.1); //0.02
  seg.setInputCloud (pcl_cloud);
  seg.segment (*inliers, *coefficients);
  //Verify if inliers is not empty
  if (inliers->indices.size () == 0)
    return;
  pcl::PointCloud<pcl::PointXYZ>::Ptr treated_cloud (new pcl::PointCloud<pcl::PointXYZ> ());
  for (std::vector<int>::const_iterator it = inliers->indices.begin(); it != inliers->indices.end (); ++it)
    treated_cloud->push_back(pcl_cloud->points[*it]);

  pcl::ExtractIndices<pcl::PointXYZ> extract;
	extract.setInputCloud(pcl_cloud);
	extract.setIndices(inliers);
	extract.setNegative(true);
	extract.filter(*seg_cloud);

  //Create and define radial filter parameters
  //pcl::RadiusOutlierRemoval<pcl::PointXYZ> radialFilter;
  //radialFilter.setInputCloud(treated_cloud);
  //radialFilter.setRadiusSearch(0.03);
  //radialFilter.setMinNeighborsInRadius (20);
  //radialFilter.filter (*treated_cloud);

  //Apply clustering algorithm
  pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree (new pcl::search::KdTree<pcl::PointXYZ>);
  kdtree->setInputCloud (seg_cloud);
  std::vector<pcl::PointIndices> cluster_indices;
  pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
  ec.setClusterTolerance (0.06);
  ec.setMinClusterSize (6);
  ec.setMaxClusterSize (150);
  ec.setSearchMethod (kdtree);
  ec.setInputCloud (seg_cloud);
  ec.extract (cluster_indices);
  pcl::PointCloud<pcl::PointXYZI>::Ptr cluster_cloud (new pcl::PointCloud<pcl::PointXYZI> ());
  pcl::PointXYZI cluster_point;
  double cluster_final_average;
  int cluster_id=0;
  float x1 = 0.0, y1 = 0.0, z1 = 0.0;
  float x2 = 0.0, y2 = 0.0, z2 = 0.0;
  float x3 = 0.0, y3 = 0.0, z3 = 0.0;
  int total1 = 0, total2 = 0, total3 = 0;
  bool hasCup1, hasCup2, hasCup3;

  for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end ();
  ++it, cluster_id+=1)
  {
    for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); pit++)
    {
      cluster_point.x = seg_cloud->points[*pit].x;
      cluster_point.y = seg_cloud->points[*pit].y;
      cluster_point.z = seg_cloud->points[*pit].z;
      cluster_point.intensity = cluster_id;
      cluster_cloud->push_back(cluster_point);

      if(cluster_id == 0){
        x1 += seg_cloud->points[*pit].x;
        y1 += seg_cloud->points[*pit].y;
        z1 += seg_cloud->points[*pit].z;
        total1++;
      } else if (cluster_id == 1){
        x2 += seg_cloud->points[*pit].x;
        y2 += seg_cloud->points[*pit].y;
        z2 += seg_cloud->points[*pit].z;
        total2++;
      } else if (cluster_id == 2){
        x3 += seg_cloud->points[*pit].x;
        y3 += seg_cloud->points[*pit].y;
        z3 += seg_cloud->points[*pit].z;
        total3++;
      }
    }
  }
  if(total1 != 0 ){
    x1 = x1/total1;
    y1 = y1/total1;
    z1 = z1/total1;
    hasCup1=true;
  }else{
    hasCup1 = false;
  }
  if(total2 != 0 ){
    x2 = x2/total2;
    y2 = y2/total2;
    z2 = z2/total2;

    hasCup2 = true;
  } else {
    hasCup2 = false;
  }

  if(total3 != 0){
    x3 = x3/total3;
    y3 = y3/total3;
    z3 = z3/total3;
    hasCup3 = true;
  } else{
    hasCup3 = false;
  }


  //Publish message
  sensor_msgs::PointCloud2 cloud;
  pcl::toROSMsg(*cluster_cloud, cloud);
  cloud.header.stamp = ros::Time::now();
  cloud.header.frame_id = cloud_in->header.frame_id;
  treated_cloud_pub_.publish(cloud);


  //Geometry
  geometry_msgs::PointStamped cupPoint1;
  geometry_msgs::PointStamped cupPoint2;
  geometry_msgs::PointStamped cupPoint3;

  tf::Quaternion q;

  geometry_msgs::PointStamped cupSensorPoint1;
  geometry_msgs::PointStamped cupSensorPoint2;
  geometry_msgs::PointStamped cupSensorPoint3;

  static tf::TransformBroadcaster tfBc1;
  static tf::TransformBroadcaster tfBc2;
  static tf::TransformBroadcaster tfBc3;

  tf::Transform tfCup1;
  tf::Transform tfCup2;
  tf::Transform tfCup3;

  cupPoint1.header.frame_id = "base_footprint";
  cupPoint2.header.frame_id = "base_footprint";
  cupPoint3.header.frame_id = "base_footprint";

  cupPoint1.header.stamp = cloud_in->header.stamp;
  cupPoint2.header.stamp = cloud_in->header.stamp;
  cupPoint3.header.stamp = cloud_in->header.stamp;

  cupPoint1.point.x = x1;
  cupPoint2.point.x = x2;
  cupPoint3.point.x = x3;

  cupPoint1.point.y = y1;
  cupPoint2.point.y = y2;
  cupPoint3.point.y = y3;

  cupPoint1.point.z = z1;
  cupPoint2.point.z = z2;
  cupPoint3.point.z = z3;

  ROS_INFO("Cup 1: X=%f Y=%f Z=%f", cupPoint1.point.x, cupPoint1.point.y, cupPoint1.point.z);
  ROS_INFO("Cup 2: X=%f Y=%f Z=%f", cupPoint2.point.x, cupPoint2.point.y, cupPoint2.point.z);
  ROS_INFO("Cup 3: X=%f Y=%f Z=%f", cupPoint3.point.x, cupPoint3.point.y, cupPoint3.point.z);


  tf_listener.transformPoint("sensors_frame", cupPoint1, cupSensorPoint1);
  tf_listener.transformPoint("sensors_frame", cupPoint2, cupSensorPoint2);
  tf_listener.transformPoint("sensors_frame", cupPoint3, cupSensorPoint3);


  tfCup1.setOrigin( tf::Vector3(cupSensorPoint1.point.x, cupSensorPoint1.point.y, cupSensorPoint1.point.z) );
  tfCup2.setOrigin( tf::Vector3(cupSensorPoint2.point.x, cupSensorPoint2.point.y, cupSensorPoint2.point.z) );
  tfCup3.setOrigin( tf::Vector3(cupSensorPoint3.point.x, cupSensorPoint3.point.y, cupSensorPoint3.point.z) );

  ROS_INFO("Sensor Frame Cup 1: X=%f Y=%f Z=%f", cupSensorPoint1.point.x, cupSensorPoint1.point.y, cupSensorPoint1.point.z);
  ROS_INFO("Sensor Frame Cup 2: X=%f Y=%f Z=%f", cupSensorPoint2.point.x, cupSensorPoint2.point.y, cupSensorPoint2.point.z);
  ROS_INFO("Sensor Frame Cup 3: X=%f Y=%f Z=%f", cupSensorPoint3.point.x, cupSensorPoint3.point.y, cupSensorPoint3.point.z);

  q.setRPY(0, 0, 0);

  tfCup1.setRotation(q);
  tfCup2.setRotation(q);
  tfCup3.setRotation(q);

  tfBc1.sendTransform(tf::StampedTransform(tfCup1, cloud_in->header.stamp, "sensors_frame", "cup1"));
  tfBc2.sendTransform(tf::StampedTransform(tfCup2, cloud_in->header.stamp, "sensors_frame", "cup2"));
  tfBc3.sendTransform(tf::StampedTransform(tfCup3, cloud_in->header.stamp, "sensors_frame", "cup3"));
  ROS_INFO("All Sent!");

}
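A hedged alternative sketch (not part of the original callback): the hand-rolled per-cup averaging above can be expressed with pcl::compute3DCentroid from <pcl/common/centroid.h>, which also scales to any number of clusters. "clusterCentroids" is a hypothetical helper name.

#include <pcl/common/centroid.h>

std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> >
clusterCentroids(const pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud,
                 const std::vector<pcl::PointIndices> &cluster_indices)
{
  std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > centroids;
  for (size_t i = 0; i < cluster_indices.size(); ++i)
  {
    // Average all points belonging to cluster i (homogeneous coordinate in w).
    Eigen::Vector4f centroid;
    pcl::compute3DCentroid(*cloud, cluster_indices[i], centroid);
    centroids.push_back(centroid);
  }
  return centroids;
}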
Example No. 18
void qFacets::extractFacets(CellsFusionDlg::Algorithm algo)
{
	//disclaimer accepted?
	if (!ShowDisclaimer(m_app))
		return;

	assert(m_app);
	if (!m_app)
		return;

	//we expect a unique cloud as input
	const ccHObject::Container& selectedEntities = m_app->getSelectedEntities();
	ccPointCloud* pc = (selectedEntities.size() == 1 ? ccHObjectCaster::ToPointCloud(selectedEntities.back()) : 0);
	if (!pc)
	{
		m_app->dispToConsole("Select one and only one point cloud!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	if (algo != CellsFusionDlg::ALGO_FAST_MARCHING && algo != CellsFusionDlg::ALGO_KD_TREE)
	{
		m_app->dispToConsole("Internal error: invalid algorithm type!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}

	//first time: we compute the max edge length automatically
	if (s_lastCloud != pc)
	{
		s_maxEdgeLength = static_cast<double>(pc->getOwnBB().getMinBoxDim())/50;
		s_minPointsPerFacet = std::max<unsigned>(pc->size() / 100000, 10);
		s_lastCloud = pc;
	}

	CellsFusionDlg fusionDlg(algo,m_app->getMainWindow());
	if (algo == CellsFusionDlg::ALGO_FAST_MARCHING)
		fusionDlg.octreeLevelSpinBox->setCloud(pc);

	fusionDlg.octreeLevelSpinBox->setValue(s_octreeLevel);
	fusionDlg.useRetroProjectionCheckBox->setChecked(s_fmUseRetroProjectionError);
	fusionDlg.minPointsPerFacetSpinBox->setValue(s_minPointsPerFacet);
	fusionDlg.errorMeasureComboBox->setCurrentIndex(s_errorMeasureType);
	fusionDlg.maxRMSDoubleSpinBox->setValue(s_errorMaxPerFacet);
	fusionDlg.maxAngleDoubleSpinBox->setValue(s_kdTreeFusionMaxAngle_deg);
	fusionDlg.maxRelativeDistDoubleSpinBox->setValue(s_kdTreeFusionMaxRelativeDistance);
	fusionDlg.maxEdgeLengthDoubleSpinBox->setValue(s_maxEdgeLength);
	//"no normal" warning
	fusionDlg.noNormalWarningLabel->setVisible(!pc->hasNormals());

	if (!fusionDlg.exec())
		return;

	s_octreeLevel						= fusionDlg.octreeLevelSpinBox->value();
	s_fmUseRetroProjectionError			= fusionDlg.useRetroProjectionCheckBox->isChecked();
	s_minPointsPerFacet					= fusionDlg.minPointsPerFacetSpinBox->value();
	s_errorMeasureType					= fusionDlg.errorMeasureComboBox->currentIndex();
	s_errorMaxPerFacet					= fusionDlg.maxRMSDoubleSpinBox->value();
	s_kdTreeFusionMaxAngle_deg			= fusionDlg.maxAngleDoubleSpinBox->value();
	s_kdTreeFusionMaxRelativeDistance	= fusionDlg.maxRelativeDistDoubleSpinBox->value();
	s_maxEdgeLength						= fusionDlg.maxEdgeLengthDoubleSpinBox->value();

	//convert 'errorMeasureComboBox' index to enum
	CCLib::DistanceComputationTools::ERROR_MEASURES errorMeasure = CCLib::DistanceComputationTools::RMS;
	switch (s_errorMeasureType)
	{
	case 0:
		errorMeasure = CCLib::DistanceComputationTools::RMS;
		break;
	case 1:
		errorMeasure = CCLib::DistanceComputationTools::MAX_DIST_68_PERCENT;
		break;
	case 2:
		errorMeasure = CCLib::DistanceComputationTools::MAX_DIST_95_PERCENT;
		break;
	case 3:
		errorMeasure = CCLib::DistanceComputationTools::MAX_DIST_99_PERCENT;
		break;
	case 4:
		errorMeasure = CCLib::DistanceComputationTools::MAX_DIST;
		break;
	default:
		assert(false);
		break;
	}
	
	//create scalar field to host the fusion result
	const char c_defaultSFName[] = "facet indexes";
	int sfIdx = pc->getScalarFieldIndexByName(c_defaultSFName);
	if (sfIdx < 0)
		sfIdx = pc->addScalarField(c_defaultSFName);
	if (sfIdx < 0)
	{
		m_app->dispToConsole("Couldn't allocate a new scalar field for computing fusion labels! Try to free some memory ...",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		return;
	}
	pc->setCurrentScalarField(sfIdx);
	
	//computation
	QElapsedTimer eTimer;
	eTimer.start();
	ccProgressDialog pDlg(true,m_app->getMainWindow());

	bool success = true;
	if (algo == CellsFusionDlg::ALGO_KD_TREE)
	{
		//we need a kd-tree
		QElapsedTimer eTimer;
		eTimer.start();
		ccKdTree kdtree(pc);

		if (kdtree.build(s_errorMaxPerFacet/2,errorMeasure,s_minPointsPerFacet,1000,&pDlg))
		{
			qint64 elapsedTime_ms = eTimer.elapsed();
			m_app->dispToConsole(QString("[qFacets] Kd-tree construction timing: %1 s").arg(static_cast<double>(elapsedTime_ms)/1.0e3,0,'f',3),ccMainAppInterface::STD_CONSOLE_MESSAGE);
		
			success = ccKdTreeForFacetExtraction::FuseCells(
				&kdtree,
				s_errorMaxPerFacet,
				errorMeasure,
				s_kdTreeFusionMaxAngle_deg,
				static_cast<PointCoordinateType>(s_kdTreeFusionMaxRelativeDistance),
				true,
				&pDlg);
		}
		else
		{
			m_app->dispToConsole("Failed to build Kd-tree! (not enough memory?)",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			success = false;
		}
	}
	else if (algo == CellsFusionDlg::ALGO_FAST_MARCHING)
	{
		int result = FastMarchingForFacetExtraction::ExtractPlanarFacets(
			pc,
			static_cast<unsigned char>(s_octreeLevel),
			static_cast<ScalarType>(s_errorMaxPerFacet),
			errorMeasure,
			s_fmUseRetroProjectionError,
			&pDlg,
			pc->getOctree().data());

		success = (result >= 0);
	}

	if (success)
	{
		pc->setCurrentScalarField(sfIdx); //for AutoSegmentationTools::extractConnectedComponents
		
		CCLib::ReferenceCloudContainer components;
		if (!CCLib::AutoSegmentationTools::extractConnectedComponents(pc,components))
		{
			m_app->dispToConsole("Failed to extract fused components! (not enough memory?)",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
		}
		else
		{
			//we remove the temporary scalar field (otherwise it will be copied to the sub-clouds!)
			ccScalarField* indexSF = static_cast<ccScalarField*>(pc->getScalarField(sfIdx));
			indexSF->link(); //to prevent deletion below
			pc->deleteScalarField(sfIdx);
			sfIdx = -1;

			bool error = false;
			ccHObject* group = createFacets(pc,components,s_minPointsPerFacet,s_maxEdgeLength,false,error);

			if (group)
			{
				switch(algo)
				{
				case CellsFusionDlg::ALGO_KD_TREE:
					group->setName(group->getName() + QString(" [Kd-tree][error < %1][angle < %2 deg.]").arg(s_errorMaxPerFacet).arg(s_kdTreeFusionMaxAngle_deg));
					break;
				case CellsFusionDlg::ALGO_FAST_MARCHING:
					group->setName(group->getName() + QString(" [FM][level %2][error < %1]").arg(s_octreeLevel).arg(s_errorMaxPerFacet));
					break;
				default:
					break;
				}

				unsigned count = group->getChildrenNumber();
				m_app->dispToConsole(QString("[qFacets] %1 facet(s) where created from cloud '%2'").arg(count).arg(pc->getName()));

				if (error)
				{
					m_app->dispToConsole("Error(s) occurred during the generation of facets! Result may be incomplete",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
				}
				else
				{
					//we put back the scalar field
					if (indexSF)
						sfIdx = pc->addScalarField(indexSF);
				}

				//pc->setEnabled(false);
				m_app->addToDB(group);
				group->prepareDisplayForRefresh();
			}
			else if (error)
			{
				m_app->dispToConsole("An error occurred during the generation of facets!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
			}
			else
			{
				m_app->dispToConsole("No facet remains! Check the parameters (min size, etc.)",ccMainAppInterface::WRN_CONSOLE_MESSAGE);
			}
		}
	}
	else
	{
		m_app->dispToConsole("An error occurred during the fusion process!",ccMainAppInterface::ERR_CONSOLE_MESSAGE);
	}

	if (sfIdx >= 0)
	{
		pc->getScalarField(sfIdx)->computeMinAndMax();
#ifdef _DEBUG
		pc->setCurrentDisplayedScalarField(sfIdx);
		pc->showSF(true);
#endif
	}

	//currently selected entities appearance may have changed!
	m_app->redrawAll();
}
int main(int argc, char** argv)
{

	boost::program_options::variables_map vm;
	if (!parseCommandLine(argc, argv, vm))
		return 0;

	const float radius_nms = vm["radiusNMS"].as<float>();
	const float radius_features = vm["radiusFeatures"].as<float>();
	const float threshold = vm["threshold"].as<float>();

	const std::string path_rf = vm["pathRF"].as<std::string>();
	const std::string path_cloud = vm["pathCloud"].as<std::string>();

	//create detector
	pcl::keypoints::KeypointLearningDetector<PointInT, KeypointT>::Ptr detector(new pcl::keypoints::KeypointLearningDetector<PointInT, KeypointT>());
	detector->setNAnnulus(ANNULI);
	detector->setNBins(BINS);
	detector->setNonMaxima(true);
	detector->setNonMaxRadius(radius_nms);
	detector->setNonMaximaDrawsRemove(false);
	detector->setPredictionThreshold(threshold);
	detector->setRadiusSearch(radius_features);  // e.g. 40 mm; change according to the unit of measure used in your point cloud
	
	if (detector->loadForest(path_rf)) 
	{
		std::cout << "Detector created." << std::endl;
	}
	else 
	{
		return -1;
	}

	//load and subsample point cloud
	pcl::PointCloud<PointInT>::Ptr cloud(new pcl::PointCloud<PointInT>());
	pcl::io::loadPCDFile(path_cloud, *cloud);

 	pcl::UniformSampling<PointInT>::Ptr source_uniform_sampling(new pcl::UniformSampling<PointInT>());

	bool sub_sampling = vm.count("subSampling") > 0;
	float leaf = 0.0;

	if(sub_sampling)
	{
		leaf = vm["leaf"].as<float>();

		source_uniform_sampling->setRadiusSearch(leaf);
		source_uniform_sampling->setInputCloud(cloud);
		source_uniform_sampling->filter(*cloud);
	}

	std::cout << "Point cloud loaded" << std::endl;

	//Compute normals
	pcl::NormalEstimation<PointInT, PointNormalT> ne;
	pcl::PointCloud<PointNormalT>::Ptr normals(new pcl::PointCloud<PointNormalT>);
	ne.setInputCloud(cloud);

	pcl::search::KdTree<PointInT>::Ptr kdtree(new pcl::search::KdTree<PointInT>());
	ne.setKSearch(10);
	ne.setSearchMethod(kdtree);
	ne.compute(*normals);
	std::cout << "Normals Computed" << std::endl;

	//Set true with laser scanner dataset
	bool flip_normals = vm.count("flipNormals") > 0;
	if (flip_normals)
	{
		std::cout << "Flipping "<< std::endl;
		//Sensor origin is inside the model, flip normals to make normal sign coherent with scenes
		std::transform(normals->points.begin(), normals->points.end(), normals->points.begin(), [](pcl::Normal p) -> pcl::Normal {auto q = p; q.normal[0] *= -1; q.normal[1] *= -1; q.normal[2] *= -1; return q; });
	}

	//setup the detector
	detector->setInputCloud(cloud);
	detector->setNormals(normals);

	//detect keypoints
	pcl::PointCloud<KeypointT>::Ptr keypoint(new pcl::PointCloud<KeypointT>());
	detector->compute(*keypoint);
	std::cout << "Keypoint computed" << std::endl;

	//show results
	boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer;
	viewer.reset(new pcl::visualization::PCLVisualizer);
	viewer->setBackgroundColor(0, 0, 0);
	

	pcl::visualization::PointCloudColorHandlerCustom<PointInT> blu(cloud, 0, 0, 255);

	viewer->addPointCloud<pcl::PointXYZ>(cloud, blu,"model cloud");

	pcl::visualization::PointCloudColorHandlerCustom<KeypointT> red(keypoint, 255, 0, 0);
	viewer->addPointCloud<KeypointT>(keypoint, red, "Keypoint");
	viewer->setPointCloudRenderingProperties(pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 6, "Keypoint");
	std::cout << "viewer ON" << std::endl;
	while (!viewer->wasStopped())
	{
		viewer->spinOnce(100);
		boost::this_thread::sleep(boost::posix_time::microseconds(100000));
	}
	viewer->removeAllPointClouds();
	std::cout << "DONE" << std::endl;

	if (vm.count("pathKP")) 
	{
		const std::string path = vm["pathKP"].as<std::string>();
		pcl::io::savePCDFileASCII<KeypointT>(path, *keypoint);
	}
	

}
// Methods bound to input/inlets
t_jit_err jit_pcl_segeuclidean_matrix_calc(t_jit_pcl_segeuclidean *x, void *inputs, void *outputs)
{
    t_jit_err			err = JIT_ERR_NONE;
    long				in_savelock;
    long				out_savelock;
    t_jit_matrix_info	in_minfo;
    t_jit_matrix_info	out_minfo;
    char				*in_bp;
    char				*out_bp;
    long				i, j;
    long				dimcount;
    long				planecount;
    long				dim[JIT_MATRIX_MAX_DIMCOUNT];
    void				*in_matrix;
    void				*out_matrix;
    
    long rowstride;
    float *fip, *fop;
    
    
    in_matrix 	= jit_object_method(inputs,_jit_sym_getindex,0);
    out_matrix 	= jit_object_method(outputs,_jit_sym_getindex,0);
    
    if (x && in_matrix && out_matrix)
    {
        in_savelock = (long) jit_object_method(in_matrix, _jit_sym_lock, 1);
        out_savelock = (long) jit_object_method(out_matrix, _jit_sym_lock, 1);
        
        jit_object_method(in_matrix, _jit_sym_getinfo, &in_minfo);
        jit_object_method(in_matrix, _jit_sym_getdata, &in_bp);
        
        if (!in_bp) {
            err=JIT_ERR_INVALID_INPUT;
            goto out;
        }
        
        //get dimensions/planecount
        dimcount   = in_minfo.dimcount;
        planecount = in_minfo.planecount;
        
        if( planecount < 3 )
        {
            object_error((t_object *)x, "jit.pcl requires a 3 plane matrix (xyz)");
            goto out;
        }
        if( in_minfo.type != _jit_sym_float32)
        {
            object_error((t_object *)x, "received: %s jit.pcl uses only float32 matrixes", in_minfo.type->s_name );
            goto out;
        }
        
        //if dimsize is 1, treat as infinite domain across that dimension.
        //otherwise truncate if less than the output dimsize
        
        for (i=0; i<dimcount; i++) {
            dim[i] = in_minfo.dim[i];
            if ( in_minfo.dim[i]<dim[i] && in_minfo.dim[i]>1) {
                dim[i] = in_minfo.dim[i];
            }
        }
        
        //******
        // PCL stuff
        
        pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
        cloud->width    = (uint32_t)dim[0];
        cloud->height   = (uint32_t)dim[1];
        cloud->is_dense = false;
        cloud->points.resize (cloud->width * cloud->height);
        
        rowstride = in_minfo.dimstride[1];// >> 2L;
        size_t count = 0;
        
        for (j = 0; j < dim[0]; j++)
        {
            fip =  (float *)(in_bp + j * in_minfo.dimstride[0]);
            
            for( i = 0; i < dim[1]; i++)
            {
                cloud->points[count].x = ((float *)fip)[0];
                cloud->points[count].y = ((float *)fip)[1];
                cloud->points[count].z = ((float *)fip)[2];
        //        cloud->points[count].r = (uint8_t)(((float *)fip)[3] * 255.0);
        //        cloud->points[count].g = (uint8_t)(((float *)fip)[4] * 255.0);
        //        cloud->points[count].b = (uint8_t)(((float *)fip)[5] * 255.0);
                count++;
                fip += rowstride;
            }
        }
        
      
        {

            /*
            //filter
            pcl::VoxelGrid<pcl::PointXYZRGB> grid;
            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_voxel_ (new pcl::PointCloud<pcl::PointXYZRGB>);
            
            grid.setLeafSize (x->leafsize, x->leafsize, x->leafsize);
            grid.setInputCloud (cloud);
            grid.filter (*cloud_voxel_);
            
            
            pcl::StatisticalOutlierRemoval<pcl::PointXYZRGB> sor;
            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_sor_ (new pcl::PointCloud<pcl::PointXYZRGB>);
            
            sor.setInputCloud(cloud_voxel_);
            sor.setMeanK( (uint32_t)x->npoints );
            sor.setStddevMulThresh(x->stdthresh);
            sor.filter(*cloud_sor_);
            */
            
            pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
            kdtree->setInputCloud(cloud);
            
            // Euclidean clustering object.
            pcl::EuclideanClusterExtraction<pcl::PointXYZ> clustering;
            // Set the cluster tolerance (small values may cause objects to be divided
            // into several clusters, whereas big values may join different objects in the same cluster).
            clustering.setClusterTolerance(x->cluster_tol);
            // Set the minimum and maximum number of points that a cluster can have.
            clustering.setMinClusterSize(10);
            clustering.setMaxClusterSize(25000);
            clustering.setSearchMethod(kdtree);
            clustering.setInputCloud(cloud);
            
            std::vector<pcl::PointIndices> clusters;
            clustering.extract(clusters);
            // For every cluster...
            
            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cluster(new pcl::PointCloud<pcl::PointXYZRGB>);
            cluster->points.resize(cloud->size());
            cluster->width = (uint32_t)cloud->points.size();
            cluster->height = 1;
            cluster->is_dense = true;


            if( clusters.size() > 0 )
            {
                double segment_inc = 255. / clusters.size();
          
                int count = 0;
                int clusterN = 0;
                for (std::vector<pcl::PointIndices>::const_iterator i = clusters.begin(); i != clusters.end(); ++i)
                {

                    for (std::vector<int>::const_iterator p = i->indices.begin(); p != i->indices.end(); ++p)
                    {

                        cluster->points[count].x = cloud->points[ *p ].x;
                        cluster->points[count].y = cloud->points[ *p ].y;
                        cluster->points[count].z = cloud->points[ *p ].z;

                        cluster->points[count].r = (uint8_t)(segment_inc * clusterN);
                        cluster->points[count].g = (uint8_t)(segment_inc * clusterN);
                        cluster->points[count].b = 255;

                        count++;
                    }
                    clusterN++;
                }
            }
            err = jit_xyzrgb2jit(x, cluster, &out_minfo, &out_matrix );
            if( err != JIT_ERR_NONE )
                goto out;
            
        }
      
        // unable to make use of jitter's parallel methods since we need all the data together
        //jit_parallel_ndim_simplecalc2((method)jit_pcl_segeuclidean_calculate_ndim,
        //	x, dimcount, dim, planecount, &in_minfo, in_bp, &out_minfo, out_bp,
        //	0 /* flags1 */, 0 /* flags2 */);
        
    }
    else
        return JIT_ERR_INVALID_PTR;
    
out:
    jit_object_method( out_matrix, _jit_sym_lock, out_savelock );
    jit_object_method( in_matrix, _jit_sym_lock, in_savelock );
    return err;
}
int main(int argc, char* argv[])
{
    //-- Main program parameters (default values)
    double threshold = 0.03;
    std::string output_histogram_image = "histogram_image.m";
    std::string output_rsd_data = "rsd_data.m";
    double rsd_normal_radius = 0.05;
    double rsd_curvature_radius = 0.07;
    double rsd_plane_threshold = 0.2;

    //-- Show usage
    if (pcl::console::find_switch(argc, argv, "-h") || pcl::console::find_switch(argc, argv, "--help"))
    {
        show_usage(argv[0]);
        return 0;
    }

    //-- Check for parameters in arguments
    if (pcl::console::find_switch(argc, argv, "-t"))
        pcl::console::parse_argument(argc, argv, "-t", threshold);
    else if (pcl::console::find_switch(argc, argv, "--threshold"))
        pcl::console::parse_argument(argc, argv, "--threshold", threshold);

    if (pcl::console::find_switch(argc, argv, "--histogram"))
        pcl::console::parse_argument(argc, argv, "--histogram", output_histogram_image);

    if (pcl::console::find_switch(argc, argv, "-r"))
        pcl::console::parse_argument(argc, argv, "-r", output_rsd_data);
    else if (pcl::console::find_switch(argc, argv, "--rsd"))
        pcl::console::parse_argument(argc, argv, "--rsd", output_rsd_data);

    if (pcl::console::find_switch(argc, argv, "--rsd-params"))
        pcl::console::parse_3x_arguments(argc, argv, "--rsd-params", rsd_normal_radius, rsd_curvature_radius,
                                         rsd_plane_threshold);

    //-- Get point cloud file from arguments
    std::vector<int> filenames;
    bool file_is_pcd = false;

    filenames = pcl::console::parse_file_extension_argument(argc, argv, ".ply");

    if (filenames.size() != 1)
    {
        filenames = pcl::console::parse_file_extension_argument(argc, argv, ".pcd");

        if (filenames.size() != 1)
        {
            std::cerr << "No input file specified!" << std::endl;
            show_usage(argv[0]);
            return -1;
        }

        file_is_pcd = true;
    }

    //-- Print arguments to user
    std::cout << "Selected arguments: " << std::endl
              << "\tRANSAC threshold: " << threshold << std::endl
#ifdef HISTOGRAM
              << "\tHistogram image: " << output_histogram_image << std::endl
#endif
#ifdef CURVATURE
              << "\tRSD:" << std::endl
              << "\t\tFile: " << output_rsd_data << std::endl
              << "\t\tNormal search radius: " << rsd_normal_radius << std::endl
              << "\t\tCurvature search radius: " << rsd_curvature_radius << std::endl
              << "\t\tPlane threshold: " << rsd_plane_threshold << std::endl
#endif
              << "\tInput file: " << argv[filenames[0]] << std::endl;



    //-- Load point cloud data
    pcl::PointCloud<pcl::PointXYZ>::Ptr source_cloud(new pcl::PointCloud<pcl::PointXYZ>);

    if (file_is_pcd)
    {
        if (pcl::io::loadPCDFile(argv[filenames[0]], *source_cloud) < 0)
        {
            std::cout << "Error loading point cloud " << argv[filenames[0]] << std::endl << std::endl;
            show_usage(argv[0]);
            return -1;
        }
    }
    else
    {
        if (pcl::io::loadPLYFile(argv[filenames[0]], *source_cloud) < 0)
        {
            std::cout << "Error loading point cloud " << argv[filenames[0]] << std::endl << std::endl;
            show_usage(argv[0]);
            return -1;
        }
    }

    /********************************************************************************************
    * Stuff goes on here
    *********************************************************************************************/
    //-- Initial pre-processing of the mesh
    //------------------------------------------------------------------------------------
    std::cout << "[+] Pre-processing the mesh..." << std::endl;
    pcl::PointCloud<pcl::PointXYZ>::Ptr garment_points(new pcl::PointCloud<pcl::PointXYZ>);
    MeshPreprocessor<pcl::PointXYZ> preprocessor;
    preprocessor.setRANSACThresholdDistance(threshold);
    preprocessor.setInputCloud(source_cloud);
    preprocessor.process(*garment_points);

    //-- Find bounding box (not really required)
    pcl::MomentOfInertiaEstimation<pcl::PointXYZ> feature_extractor;
    pcl::PointXYZ min_point_AABB, max_point_AABB;
    pcl::PointXYZ min_point_OBB,  max_point_OBB;
    pcl::PointXYZ position_OBB;
    Eigen::Matrix3f rotational_matrix_OBB;

    feature_extractor.setInputCloud(garment_points);
    feature_extractor.compute();
    feature_extractor.getAABB(min_point_AABB, max_point_AABB);
    feature_extractor.getOBB(min_point_OBB, max_point_OBB, position_OBB, rotational_matrix_OBB);

#ifdef CURVATURE
    //-- Curvature stuff
    //-----------------------------------------------------------------------------------
    std::cout << "[+] Calculating curvature descriptors..." << std::endl;
    // Object for storing the normals.
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    // Object for storing the RSD descriptors for each point.
    pcl::PointCloud<pcl::PrincipalRadiiRSD>::Ptr descriptors(new pcl::PointCloud<pcl::PrincipalRadiiRSD>());


    // Note: you would usually perform downsampling now. It has been omitted here
    // for simplicity, but be aware that computation can take a long time.

    // Estimate the normals.
    pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> normalEstimation;
    normalEstimation.setInputCloud(garment_points);
    normalEstimation.setRadiusSearch(rsd_normal_radius);
    pcl::search::KdTree<pcl::PointXYZ>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZ>);
    normalEstimation.setSearchMethod(kdtree);
    normalEstimation.setViewPoint(0, 0 , 2);
    normalEstimation.compute(*normals);

    // RSD estimation object.
    pcl::RSDEstimation<pcl::PointXYZ, pcl::Normal, pcl::PrincipalRadiiRSD> rsd;
    rsd.setInputCloud(garment_points);
    rsd.setInputNormals(normals);
    rsd.setSearchMethod(kdtree);
    // Search radius, to look for neighbors. Note: the value given here has to be
    // larger than the radius used to estimate the normals.
    rsd.setRadiusSearch(rsd_curvature_radius);
    // Plane radius. Any radius larger than this is considered infinite (a plane).
    rsd.setPlaneRadius(rsd_plane_threshold);
    // Do we want to save the full distance-angle histograms?
    rsd.setSaveHistograms(false);

    rsd.compute(*descriptors);
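    // `descriptors` now holds one pcl::PrincipalRadiiRSD (r_min, r_max) per input
    // point, in the same order as `garment_points`.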

    //-- Save the results to a plain-text file (one "x y z r_min r_max" row per point)
    std::ofstream rsd_file(output_rsd_data.c_str());
    for (size_t i = 0; i < garment_points->points.size(); i++)
    {
        rsd_file << garment_points->points[i].x << " "
                 << garment_points->points[i].y << " "
                 << garment_points->points[i].z << " "
                 << descriptors->points[i].r_min << " "
                 << descriptors->points[i].r_max << "\n";
    }
    rsd_file.close();

#endif

#ifdef HISTOGRAM
    //-- Obtain histogram image
    //-----------------------------------------------------------------------------------
    std::cout << "[+] Calculating histogram image..." << std::endl;
    HistogramImageCreator<pcl::PointXYZ> histogram_image_creator;
    histogram_image_creator.setInputPointCloud(garment_points);
    histogram_image_creator.setResolution(1024);
    histogram_image_creator.setUpsampling(true);
    histogram_image_creator.compute();
    Eigen::MatrixXi image = histogram_image_creator.getDepthImageAsMatrix();

    //-- Temporary workaround: export the image to disk as a text file
    std::ofstream file(output_histogram_image.c_str());
    file << image;
    file.close();
#endif

    return 0;
}
Example No. 22
int
main (int argc, char** argv)
{
  srand ((unsigned int) time (NULL));
  time_t start, end;
  double dift;

  float resolution = 5.0f;

  pcl::SegmentDifferences<PointT> sd;
    
  pcl::PointCloud<PointT>::Ptr cloudA (new pcl::PointCloud<PointT>); 

  time(&start);
  cloudA->width = 128;
  cloudA->height = 1;
  cloudA->points.resize (cloudA->width * cloudA->height);  
  for (size_t i = 0; i < cloudA->points.size (); ++i)
  {
    cloudA->points[i].x = 64.0f * rand () / (RAND_MAX + 1.0f);
    cloudA->points[i].y = 64.0f * rand () / (RAND_MAX + 1.0f);
    cloudA->points[i].z = 64.0f * rand () / (RAND_MAX + 1.0f);
    cloudA->points[i].intensity = rand () % 255 + 1.0f;
  }
  time(&end);

  dift = difftime (end,start);
  std::cout<<"It took "<<dift<<" secs to read in the first point cloud"<<std::endl;
  
  pcl::PointCloud<PointT>::Ptr cloudB (new pcl::PointCloud<PointT>);

  time(&start);
  cloudB->width = 128;
  cloudB->height = 1;
  cloudB->points.resize (cloudB->width * cloudB->height);
  for (size_t i = 0; i < cloudB->points.size (); ++i)
  {
    cloudB->points[i].x = 64.0f * rand () / (RAND_MAX + 1.0f);
    cloudB->points[i].y = 64.0f * rand () / (RAND_MAX + 1.0f);
    cloudB->points[i].z = 64.0f * rand () / (RAND_MAX + 1.0f);
    cloudB->points[i].intensity = rand () % 255 + 1.0f;
  }
  time(&end);

  dift = difftime (end,start);
  std::cout<<"It took "<<dift<<" secs to read in the second point cloud"<<std::endl;
 

  pcl::PointCloud<PointT> cloudC;
  cloudC.width = 0;
  cloudC.height = 0;
  cloudC.points.resize (cloudC.width * cloudC.height);

  pcl::search::KdTree<PointT>::Ptr kdtree(new pcl::search::KdTree<PointT>);
  kdtree->setInputCloud(cloudB);
  sd.setDistanceThreshold (resolution);
  sd.setInputCloud (cloudA);
  sd.setTargetCloud (cloudB);
  sd.setSearchMethod (kdtree);  
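  // segment() keeps the points of cloudA that have no neighbor in cloudB within
  // the distance threshold, i.e. the spatial difference A \ B.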
  sd.segment (cloudC);

  std::cout<<"The points in cloudC are "<<std::endl;
  for (size_t i = 0; i < cloudC.points.size (); ++i)
  {
	std::cout<<cloudC.points[i].x<<" "<<cloudC.points[i].y<<" "<<cloudC.points[i].z<<" "<<cloudC.points[i].intensity<<std::endl;
  }
  system("pause");
}
Example No. 23
int
main(int argc, char** argv)
{
	// Object for storing the point cloud.
	pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGBA>);
 
	// Read a PCD file from disk.
	if (pcl::io::loadPCDFile<pcl::PointXYZRGBA>(argv[1], *cloud) != 0)
	{
		return -1;
	}

	// Begin timer
	struct timeval start, end;
	long mtime, seconds, useconds;

	gettimeofday(&start, NULL);
 
	// kd-tree object for searches.
	pcl::search::KdTree<pcl::PointXYZRGBA>::Ptr kdtree(new pcl::search::KdTree<pcl::PointXYZRGBA>);
	kdtree->setInputCloud(cloud);
 
	// Euclidean clustering object.
	pcl::EuclideanClusterExtraction<pcl::PointXYZRGBA> clustering;
	// Set the cluster tolerance to 2 cm (small values may split one object into
	// several clusters, whereas large values may merge distinct objects into one).
	clustering.setClusterTolerance(0.02);
	// Set the minimum and maximum number of points a cluster can have
	// (taken from the command line).
	clustering.setMinClusterSize(atoi(argv[2]));
	clustering.setMaxClusterSize(atoi(argv[3]));
	clustering.setSearchMethod(kdtree);
	clustering.setInputCloud(cloud);
	std::vector<pcl::PointIndices> clusters;
	clustering.extract(clusters);
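	// `clusters` now holds one pcl::PointIndices entry per detected cluster; each
	// entry stores the indices of that cluster's points within `cloud`.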
 
	// For every cluster...
	int currentClusterNum = 1;
	for (std::vector<pcl::PointIndices>::const_iterator i = clusters.begin(); i != clusters.end(); ++i)
	{
		// ...add all its points to a new cloud...
		pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cluster(new pcl::PointCloud<pcl::PointXYZRGBA>);
		for (std::vector<int>::const_iterator point = i->indices.begin(); point != i->indices.end(); point++)
			cluster->points.push_back(cloud->points[*point]);
		cluster->width = cluster->points.size();
		cluster->height = 1;
		cluster->is_dense = true;
 
		// ...and save it to disk.
		if (cluster->points.empty())
			break;
		std::cout << "Cluster " << currentClusterNum << " has " << cluster->points.size() << " points." << std::endl;
		std::string fileName = "clusters/cluster" + boost::to_string(currentClusterNum) + ".pcd";
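		// Note: the "clusters/" output directory must already exist;
		// savePCDFileASCII will not create it.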
		pcl::io::savePCDFileASCII(fileName, *cluster);
 
		currentClusterNum++;
	}

	gettimeofday(&end, NULL);

	seconds  = end.tv_sec  - start.tv_sec;
	useconds = end.tv_usec - start.tv_usec;

	mtime = ((seconds) * 1000 + useconds / 1000.0) + 0.5;
	printf("Elapsed time: %ld milliseconds\n", mtime);

	return 0;
}
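// Usage sketch (binary and file names are illustrative, argument order is taken
// from the argv handling above):
//   ./euclidean_clustering scene.pcd 100 25000
// i.e. <input .pcd> <min cluster size> <max cluster size>; each extracted cluster
// is written to clusters/cluster<N>.pcd.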
Example No. 24
template <unsigned K>
static void dispatch(int n, int m, double *x_ptr, double *c_ptr, double *d_ptr, int nlhs, mxArray *plhs[])
{
  // Read parameters
  typename KDTree<K, double>::points_type X;
  
  typename KDTree<K, double>::point_type v;
  
  for (int i = 0; i < n; ++i)
    {
      for (unsigned k = 0; k < K; ++k)
	v[k] = x_ptr[i + (n * k)];
      X.push_back(v);
    }

  typename KDTree<K, double>::points_type C;

  std::vector<double> R;

  for (int i = 0; i < m; ++i)
    {
      for (unsigned k = 0; k < K; ++k)
	v[k] = c_ptr[i + (m * k)];
      C.push_back(v);
      R.push_back(d_ptr[i]);
    }

  // Build kd-tree
  KDTree<K, double> kdtree(X);

  // Compute queries
  std::list<typename KDTree<K, double>::set_type > res_list;
  typename KDTree<K, double>::set_type res;
	
  for (int i = 0; i < m; ++i)
    {
      kdtree.ball_query(C[i], R[i], res);

      res_list.push_back(res);
			
      res.clear();
    }

  // Write output
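  // Output layout (inferred from how the result sets are unpacked below): the
  // first output cell array holds 1-based point indices for each ball query,
  // the second holds the corresponding first components of the result pairs
  // (most likely the distances).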
  if (nlhs > 0)
    {
      const mwSize size[2] = { static_cast<mwSize>(res_list.size()), 1 };
      plhs[0] = mxCreateCellArray(2, size);
			
      int cnt = 0;
      for (typename std::list<typename KDTree<K, double>::set_type>::const_iterator it = res_list.begin(); it != res_list.end(); ++it, ++cnt)
	{
	  if (it->size())
	    {
	      mxArray * pArray = mxCreateDoubleMatrix(it->size(), 1, mxREAL);
	      double * p = mxGetPr(pArray);
				
	      for (typename KDTree<K, double>::set_type::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
		*p++ = it2->second + 1;
				
	      mxSetCell(plhs[0], cnt, pArray);
	    }
	}
    }

  if (nlhs > 1)
    {
      const mwSize size[2] = { static_cast<mwSize>(res_list.size()), 1 };
      plhs[1] = mxCreateCellArray(2, size);
			
      int cnt = 0;
      for (typename std::list<typename KDTree<K, double>::set_type>::const_iterator it = res_list.begin(); it != res_list.end(); ++it, ++cnt)
	{
	  if (it->size())
	    {
	      mxArray * pArray = mxCreateDoubleMatrix(it->size(), 1, mxREAL);
	      double * p = mxGetPr(pArray);
				
	      for (typename KDTree<K, double>::set_type::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
		*p++ = it2->first;
				
	      mxSetCell(plhs[1], cnt, pArray);
	    }
	}
    }
}
Example No. 25
void test_kdtree(int argc, char *argv[])
{
  if (argc < 3) {
    printf("usage: %s <n> <d> <x> -- where x is a d-vector\n", argv[0]);
    return;
  }

  int i, j, n = atoi(argv[1]), d = atoi(argv[2]);

  if (argc < d+3) {
    printf("usage: %s <n> <d> <x> -- where x is a d-vector\n", argv[0]);
    return;
  }

  double *x;
  safe_malloc(x, d, double);
  for (i = 0; i < d; i++)
    x[i] = atof(argv[i+3]);

  /*
  double X_data[6][2] = {{-1, -1},
			 {-1, 1},
			 {1, -1},
			 {1, 1},
			 {-2, 0},
			 {2, 0}};
  */

  double **X = new_matrix2(n, d);
  //memcpy(X[0], X_data, n*d*sizeof(double));
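  // Fill X with n random d-dimensional points in [0, 100)^d and record the
  // brute-force nearest neighbor of x for later comparison.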
  int nn = 0;
  double dmin = DBL_MAX;
  for (i = 0; i < n; i++) {
    for (j = 0; j < d; j++)
      X[i][j] = 100*frand();
    double dx = dist(x, X[i], d);
    if (dx < dmin) {
      dmin = dx;
      nn = i;
    }
  }

  double t = get_time_ms();

  kdtree_t *tree = kdtree(X, n, d);

  double tree_time = get_time_ms() - t;

  //print_kdtree(tree, 0);

  t = get_time_ms();

  printf("\n------------ NN query -------------\n");
  int kdnn = kdtree_NN(tree, x);
  printf("  KD-tree NN to x is node %d\n", kdnn);
  printf("  Real NN to x is node %d\n", nn);

  if (kdnn != nn)
    printf("\n*****  Error: Kd-tree NN != Real NN  *****\n");

  double nn_time = get_time_ms() - t;

  printf("\n --> Built KD-tree with %d nodes in %.2f ms\n", n, tree_time);
  printf(" --> Found NN to x in %.2f ms\n", nn_time);
  printf("\n");
}
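// Example invocation (program name and values are illustrative): test_kdtree 1000 3 50 50 50
// builds a kd-tree over 1000 random 3-D points drawn from [0, 100)^3 and checks the
// kd-tree nearest neighbor of (50, 50, 50) against the brute-force answer.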