vector<vector<int> > kmeansModule::getClusters(float** data, int M, vector <int>& pts)
{
    int N = pts.size();
    
    // K clusters; clusters[c] collects the point ids from pts assigned to cluster c
    vector<vector<int> > clusters(K);
    for (int i = 0; i < N; i++)
        clusters[clusterMemberships[i]].push_back(pts[i]);
    return clusters;
}
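This snippet depends on class members that the excerpt does not show. A minimal sketch of the assumed surrounding class (K and clusterMemberships are taken from the code above; everything else is hypothetical):

// Hypothetical context for the excerpt above; only K and
// clusterMemberships are actually used by getClusters().
class kmeansModule {
public:
    vector<vector<int> > getClusters(float** data, int M, vector<int>& pts);
private:
    int K;                           // number of clusters
    vector<int> clusterMemberships;  // clusterMemberships[i] = cluster of pts[i]
};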
Example #2
template <typename Graph>
std::size_t FilterGraph<Graph>::numClusters() const {
    // clusters() is assumed to return a begin/end iterator pair; count the range
    std::size_t c = 0;
    for (auto it = clusters(); it.first != it.second; ++it.first)
        ++c;
    return c;
}
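Since clusters() is assumed to return a begin/end iterator pair, the same count can be written with std::distance from <iterator>; a sketch under that assumption:

template <typename Graph>
std::size_t FilterGraph<Graph>::numClusters() const {
    // std::distance walks the range once for non-random-access iterators
    auto range = clusters();
    return static_cast<std::size_t>(std::distance(range.first, range.second));
}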
Example #3
File: asp.cpp Project: Danvil/dasp
		std::vector<std::vector<int>> clusters_from_labels(const std::vector<int>& labels, unsigned int cluster_count)
		{
			std::vector<std::vector<int>> clusters(cluster_count);
			for(unsigned int i=0; i<labels.size(); i++) {
				int label = labels[i];
				if(label >= 0) {
					clusters[label].push_back(i);
				}
			}
			return clusters;
		}
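A quick illustration of the inversion this helper performs (the label values here are made up for the example):

// labels: point index -> cluster label; negative labels are skipped
std::vector<int> labels = {0, 2, 0, -1, 1};
std::vector<std::vector<int>> clusters = clusters_from_labels(labels, 3);
// clusters[0] == {0, 2}, clusters[1] == {4}, clusters[2] == {1}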
Example #4
  // represents an OR search tree for AStar
  SearchTree train_search_tree_agglomerative(map<string,NNTemplateType>&allTemplates)
  {
    SearchTree search_tree_root;

    // initialize the bottom layer
    unordered_map<int, unordered_map<int,double> > costs;    
    list<SearchTree > clusters(allTemplates.size());
    function<void (SearchTree&node)> update_dists_for_node = [&](SearchTree&node)
    {
      static atomic<int> completed(0);
      log_once(printfpp("++computed distance %d",completed++));
      for(auto & node_k : clusters)
      {
	double cost = node.Templ.merge_cost(node_k.Templ);
	static mutex m; lock_guard<mutex> l(m);
	costs[node.id][node_k.id] = cost;
	costs[node_k.id][node.id] = cost;
      }
      
    };
    auto iter = allTemplates.begin();
    for(int cter = 0; iter != allTemplates.end(); ++cter, ++iter)
    {
      auto jter = clusters.begin();
      std::advance(jter,cter);
      SearchTree&curTree = *jter;
      curTree.Templ = iter->second;
      curTree.id = seq_id++;
      curTree.pose_uuid = iter->first;
    }
    TaskBlock compute_init_dists("compute_init_dists");
    log_once(printfpp("++Computing Initial Distances for %d clusters",(int)clusters.size()));
    for(auto & curTree : clusters)
    {
      SearchTree*tp = &curTree;
      compute_init_dists.add_callee([&,tp]()
      {
	update_dists_for_node(*tp);
      });
    }
    compute_init_dists.execute();
    log_once(printfpp("--Computing Initial Distances for %d clusters",(int)clusters.size()));
    
    // merge layers until we get the root
    merge_clusters_agglom(seq_id,clusters,costs,update_dists_for_node);
    
    // store the root
    search_tree_root = clusters.front();
    
    // print the search tree
    log_search_tree(search_tree_root);

    return search_tree_root;
  }
Example #5
vector< vector<u32> > kmeans(const vector< vector<f64> >& points, u32 k,
                             vector< vector<f64> >& centers)
{
    if (points.size() == 0)
        throw eInvalidArgument("There must be points to cluster!");
    if (points[0].size() == 0)
        throw eInvalidArgument("There must be at least one dimension!");
    for (size_t i = 1; i < points.size(); i++)
        if (points[i].size() != points[0].size())
            throw eInvalidArgument("All data points must have the same dimensionality.");
    if (k == 0)
        throw eInvalidArgument("Clustering into zero clusters makes no sense.");
    if ((u32)centers.size() != k)
        throw eInvalidArgument("You must supply k initial centers to this version of kmeans().");
    for (u32 i = 0; i < k; i++)
        if (centers[i].size() != points[0].size())
            throw eInvalidArgument("All initial centers must have the same dimensionality as the data points.");

    vector< vector<u32> > clusters(k);

    while (true)
    {
        vector< vector<u32> > newClusters(k);

        for (size_t i = 0; i < points.size(); i++)
        {
            f64 dist = s_distanceSquared(points[i], centers[0]);
            u32 closest = 0;
            for (u32 c = 1; c < k; c++)
            {
                f64 distHere = s_distanceSquared(points[i], centers[c]);
                if (distHere < dist)
                {
                    closest = c;
                    dist = distHere;
                }
            }
            newClusters[closest].push_back((u32)i);
        }

        for (u32 i = 0; i < k; i++)
            if (newClusters[i].size() > 0)
                centers[i] = s_calcCenter(points, newClusters[i]);
            // Else, what should I do? Leave it? Randomize a new center? (one option is sketched below)

        if (clusters == newClusters)
            break;

        clusters = newClusters;
    }

    return clusters;
}
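The comment in the update loop above leaves empty clusters unhandled. One common option is to re-seed an empty cluster's center from a randomly chosen data point; a minimal sketch (the RNG choice is illustrative, and s_calcCenter is the helper already used above):

// Re-seed empty clusters instead of leaving their centers stale.
for (u32 i = 0; i < k; i++)
{
    if (newClusters[i].size() > 0)
        centers[i] = s_calcCenter(points, newClusters[i]);
    else
        centers[i] = points[rand() % points.size()];  // assumes <cstdlib>
}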
Example #6
	const cluster_t *find_nearest_cluster( const sample_type& s, int& nearest_index) const
	{
		const_iterator it( clusters().begin());
		real_type min_dist = traits_type::distance( s, it->mean);
		nearest_index = 0;
		const cluster_t *cluster = &(*it);
		++it;

		// start at index 1: the first cluster was already consumed above
		for( int index = 1; it != clusters().end(); ++it, ++index)
		{
			real_type d = traits_type::distance( s, it->mean);

			if( d < min_dist)
			{
				min_dist = d;
				nearest_index = index;
				cluster = &(*it);
			}
		}
		
		return cluster;
	}
Example #7
	std::vector<DataVector> sampleSelectionKMeans(std::vector<DataVector>& samples, std::size_t n, std::size_t max_iter)
	{
		if(samples.size() <= n) return samples;
		std::size_t dim = samples[0].dim();
		std::vector<DataVector> centers = sampleSelectionInitialize(samples, n);

		std::size_t n_clusters = centers.size();

		// clusters[i] contains the id of points in the i-th cluster
		std::vector<std::vector<std::size_t> > clusters(n_clusters);
		for(std::size_t iter = 0; iter < max_iter; ++iter)
		{
			// reset assignments each iteration; otherwise point ids accumulate across iterations
			for(std::size_t c = 0; c < n_clusters; ++c)
				clusters[c].clear();

			for(std::size_t i = 0; i < samples.size(); ++i)
			{
				std::size_t min_cluster_id = -1;
				double min_cluster_dist = (std::numeric_limits<double>::max)();

				for(std::size_t j = 0; j < centers.size(); ++j)
				{
					double dist = 0;
					for(std::size_t d = 0; d < dim; ++d)
					{
						double v = centers[j][d] - samples[i][d];
						dist += v * v;
					}

					if(dist < min_cluster_dist) { min_cluster_dist = dist; min_cluster_id = j; }
				}

				clusters[min_cluster_id].push_back(i);
			}

			// for each cluster, compute the mean as the new center
			for(std::size_t i = 0; i < clusters.size(); ++i)
			{
				if(clusters[i].size() == 0) continue;

				DataVector mean(dim);
				mean.setZero();

				for(std::size_t j = 0; j < clusters[i].size(); ++j)
					mean += samples[clusters[i][j]];
				mean *= (1.0 / clusters[i].size());

				centers[i] = mean;
			}
		}

		return centers;
	}
Example #8
File: test.cpp Project: wxwidget/cluster
using namespace cluster;

int main(int argc, char* argv[])
{
  ClusterId num_clusters = 30;
  PointId num_points = 3000;
  Dimensions num_dimensions = 10;

  PointsSpace ps(num_points, num_dimensions);
  //std::cout << "PointSpace" << ps;

  Clusters clusters(num_clusters, ps);

  clusters.k_means();
  
  std::cout << clusters;

}
Example #9
PartitionalClustering *
RecursivePartitionalClusteringMetric::create_full_clustering(
    PartitionalClustering *center_cluster) {
  IMP::base::Vector<Ints> clusters(center_cluster->get_number_of_clusters());
  Ints reps(clusters.size());
  for (unsigned int i = 0; i < clusters.size(); ++i) {
    Ints outer = center_cluster->get_cluster(i);
    reps[i] = clustering_->get_cluster_representative(
        center_cluster->get_cluster_representative(i));
    for (unsigned int j = 0; j < outer.size(); ++j) {
      Ints inner = clustering_->get_cluster(outer[j]);
      clusters[i].insert(clusters[i].end(), inner.begin(), inner.end());
    }
  }
  IMP_NEW(internal::TrivialPartitionalClustering, ret, (clusters, reps));
  validate_partitional_clustering(ret, metric_->get_number_of_items());
  return ret.release();
}
Example #10
    std::vector<Index> GenerateClusters(Count k, Rng& rng) {
        std::vector<Index> clusters(g.nodeCount());

        std::uniform_int_distribution<> distr_v(0, g.nodeCount()-1);

        auto& origs = centers_;
        origs.clear();
        for (auto i = 0; i < k;) {
            auto n = distr_v(rng);
            auto it = lower_bound(origs.begin(), origs.end(), n);
            if (it != origs.end() && *it == n) {  // guard: lower_bound may return end()
                continue;
            }
            // vector...
            origs.insert(it, n);
            ++i;
        }

        return GenerateClusters(origs);
    }
Example #11
    std::vector<Index> GenerateClusters(const std::vector<Index>& origs) {
        std::vector<Index> clusters(g.nodeCount());
        for (auto i = 0; i < origs.size(); ++i) {
            clusters[origs[i]] = i;
        }

        radius_.resize(centers_.size());
        std::fill(radius_.begin(), radius_.end(), 0);
        auto proc = [&](Index to, Index from, Index edge, Value val) {
            clusters[to] = clusters[from];

            if (val > radius_[clusters[from]]) {
                radius_[clusters[from]] = val;
            }
        };

        WeightedBFS<EdgedGraph, Value> bfs(g, vals);
        bfs.runPrev(origs, proc);
        return clusters;

    }
Example #12
void KMeansSlaveClient::KMeansJob(const Points& portion_of_data, const size_t K) const {
  const size_t portion_size = portion_of_data.size();
  const size_t dimensions = portion_of_data[0].size();
  std::vector<size_t> clusters(portion_size);
  while (true) {
    std::string centroids_message = Recieve();
//    std::cout << "recv centroids ok" << std::endl;

    std::stringstream stream_with_centroids(centroids_message);
    Points centroids = ExtractPointsFromMessage(stream_with_centroids, K, dimensions);

    Points new_centroids(K, Point(dimensions));
//    std::cout << "      a" << centroids.size() << " " << centroids[0][2] << std::endl;
    std::vector<size_t> clusters_sizes(K);
    bool converged = JobItself(portion_of_data, centroids, clusters, new_centroids, clusters_sizes);

    std::stringstream converging_info_message_stream;
    converging_info_message_stream << converged << kComponentsDelimiter;
    Send(converging_info_message_stream.str());
//    std::cout <<"send conv ok"<<std::endl;

    std::string ok_or_bye_message = Recieve();
//    std::cout << "recv okbye ok: " << ok_or_bye_message << std::endl;
    if (ok_or_bye_message == "BYE") {
      break;
    }

//    std::cout <<"                b"<<new_centroids[0][0]<<std::endl;
    std::stringstream new_centroids_and_clusters_subsizes_message;
    new_centroids_and_clusters_subsizes_message.precision(20);
    InsertPointsToMessage(new_centroids_and_clusters_subsizes_message, new_centroids.cbegin(),
                          new_centroids.cend());
    for (const auto& cluster_size : clusters_sizes) {
      new_centroids_and_clusters_subsizes_message << cluster_size << kComponentsDelimiter;
    }
    Send(new_centroids_and_clusters_subsizes_message.str());
//    std::cout << "send new centroids ok" << std::endl;
  }
}
Example #13
vector<int>
labelIntraFramePoints(vector<MotionPoint*>& points)
{
	vector<Node<int>*> vnodes;
	for (int i = 0; i < points.size(); ++i)
	{
		vnodes.push_back(makeset(i));
	}
	for (int i = 0; i < points.size(); ++i)
	{
		for (int j = i + 1; j < points.size(); ++j)
		{
			//if (i == j) continue;
			for (int m = 0; m < points[i]->candidates.size(); ++m)
			{
				for (int n = 0; n<points[j]->candidates.size(); ++n)
				{
					//if (points[i].GetLink(j, m, n) > .5) {
					if (points[i]->getLink(m, j, n) > 0.5) //probLink[j][m][n] > .5)
					{
						merge(vnodes[i], vnodes[j]);
					}
				}
			}
		}
	}
	vector<Node<int>*> labels = clusters(vnodes);
	vector<int> ilabels(points.size());
	for (int i = 0; i < points.size(); ++i)
	{
		ilabels[i] = distance(labels.begin(), find(labels.begin(), labels.end(), findset(vnodes[i])));
	}
	for (int i = 0; i < points.size(); ++i)
	{
		delete vnodes[i];
	}
	return ilabels;
}
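labelIntraFramePoints relies on disjoint-set helpers (makeset, findset, merge, and a clusters() that collects the distinct set representatives) that this excerpt does not include. A minimal union-find sketch consistent with how they are called above; the member names are assumptions:

#include <algorithm>
#include <vector>

template <typename T>
struct Node {
    T value;
    Node* parent;  // nullptr for a set representative
    int rank;
};

template <typename T>
Node<T>* makeset(T v) { return new Node<T>{v, nullptr, 0}; }

template <typename T>
Node<T>* findset(Node<T>* n) {
    // find the root, then compress the path behind us
    Node<T>* root = n;
    while (root->parent) root = root->parent;
    while (n->parent) { Node<T>* next = n->parent; n->parent = root; n = next; }
    return root;
}

template <typename T>
void merge(Node<T>* a, Node<T>* b) {
    // union by rank
    Node<T>* ra = findset(a);
    Node<T>* rb = findset(b);
    if (ra == rb) return;
    if (ra->rank < rb->rank) std::swap(ra, rb);
    rb->parent = ra;
    if (ra->rank == rb->rank) ++ra->rank;
}

template <typename T>
std::vector<Node<T>*> clusters(const std::vector<Node<T>*>& nodes) {
    // one representative per distinct set, in first-seen order
    std::vector<Node<T>*> reps;
    for (Node<T>* n : nodes) {
        Node<T>* r = findset(n);
        if (std::find(reps.begin(), reps.end(), r) == reps.end())
            reps.push_back(r);
    }
    return reps;
}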
Example #14
void cloud_cb(const sensor_msgs::PointCloud2ConstPtr& input){
	
	sensor_msgs::PointCloud2::Ptr clusters (new sensor_msgs::PointCloud2);	
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>), cloud_f (new pcl::PointCloud<pcl::PointXYZ>);
	pcl::fromROSMsg(*input, *cloud);
	pcl::PointCloud<pcl::PointXYZ>::Ptr clustered_cloud (new pcl::PointCloud<pcl::PointXYZ>);	
		
	std::cout << "PointCloud before filtering has: " << cloud->points.size () << " data points." << std::endl;
	
	// Create the filtering object: downsample the dataset using a leaf size of 1cm
  	pcl::VoxelGrid<pcl::PointXYZ> vg;
  	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
  	vg.setInputCloud (cloud);
  	vg.setLeafSize (0.01f, 0.01f, 0.01f);
  	vg.filter (*cloud_filtered);
  	std::cout << "PointCloud after filtering has: " << cloud_filtered->points.size ()  << " data points." << std::endl;

	// Create the segmentation object for the planar model and set all the parameters
  	pcl::SACSegmentation<pcl::PointXYZ> seg;
  	pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
  	pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
  	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_plane (new pcl::PointCloud<pcl::PointXYZ> ());
  	pcl::PCDWriter writer;
  	seg.setOptimizeCoefficients (true);
  	seg.setModelType (pcl::SACMODEL_PLANE);
  	seg.setMethodType (pcl::SAC_RANSAC);
  	seg.setMaxIterations (100);
  	seg.setDistanceThreshold (0.02);

  	int i=0, nr_points = (int) cloud_filtered->points.size ();
  	while (cloud_filtered->points.size () > 0.3 * nr_points)
  	{
    	// Segment the largest planar component from the remaining cloud
    	seg.setInputCloud (cloud_filtered);
    	seg.segment (*inliers, *coefficients);
    	if (inliers->indices.size () == 0)
    	{
      		std::cout << "Could not estimate a planar model for the given dataset." << std::endl;
      		break;
    	}

    	// Extract the planar inliers from the input cloud
   	pcl::ExtractIndices<pcl::PointXYZ> extract;
    	extract.setInputCloud (cloud_filtered);
    	extract.setIndices (inliers);
    	extract.setNegative (false);

    	// Get the points associated with the planar surface
    	extract.filter (*cloud_plane);
    	std::cout << "PointCloud representing the planar component: " << cloud_plane->points.size () << " data points." << std::endl;

    	// Remove the planar inliers, extract the rest
    	extract.setNegative (true);
    	extract.filter (*cloud_f);
    	*cloud_filtered = *cloud_f;
  	}

  	// Creating the KdTree object for the search method of the extraction
 	 pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
  	tree->setInputCloud (cloud_filtered);

  	std::vector<pcl::PointIndices> cluster_indices;
  	pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
  	ec.setClusterTolerance (0.02); // 2cm
  	ec.setMinClusterSize (10);
  	ec.setMaxClusterSize (2500);
  	ec.setSearchMethod (tree);
  	ec.setInputCloud (cloud_filtered);
  	ec.extract (cluster_indices);
	std::vector<pcl::PointIndices>::const_iterator it;
	std::vector<int>::const_iterator pit;

	 int j = 0;	
	for(it = cluster_indices.begin(); it != cluster_indices.end(); ++it) {
        	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
        	for(pit = it->indices.begin(); pit != it->indices.end(); pit++) {
        	//push_back: add a point to the end of the existing vector
                	cloud_cluster->points.push_back(cloud_filtered->points[*pit]); 
			
			/*cloud_cluster->width = cloud_cluster->points.size ();
    			cloud_cluster->height = 1;
    			cloud_cluster->is_dense = true;


			std::stringstream ss;
    			ss << "cloud_cluster_" << j << ".pcd";
    			writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster, false); //*
    			j++;*/
        	}

        	//Merge current clusters to whole point cloud
    		*clustered_cloud += *cloud_cluster;

 	 }

	pcl::toROSMsg (*clustered_cloud , *clusters);
	
	clusters->header.frame_id = "/camera_depth_frame";
	clusters->header.stamp=ros::Time::now();
	pub.publish (*clusters);
	//sensor_msgs::PointCloud2::Ptr output (new sensor_msgs::PointCloud2);	
	//pcl::toROSMsg(*cloud_filtered, *output);
	//pub_plane.publish(*output);
}
Example #15
std::vector< std::set<idxtype> > srmclWithGraph(GraphType *graph,std::vector<std::set<idxtype> >& indices,Options opt )
{
	GraphType *cgraph,*t,*cgraphFinest, *cgraphCoarest;
	int nvtxs=graph->nvtxs;
	CtrlType ctrl;
	int levels=0;
	Matrix *M=NULL;

	int ct = ctrl.CoarsenTo = opt.coarsenTo, runMcl=0;
	ctrl.CType=opt.matchType;

	ctrl.optype=OP_KMETIS;
	ctrl.dbglvl=1;

	if ( ct < 5 )
		ctrl.maxvwgt = (int) ceil( (1.5*nvtxs)/ct );
	else if ( ct <= 100 )
		ctrl.maxvwgt = (int) ceil( (2.0*nvtxs)/ct );
	else if ( ct > 100 )
	{
		// we can allow some imbalance here.
		ctrl.maxvwgt = (int) ceil( 10.0 * nvtxs/ct );
	}

	my_AllocateWorkSpace(&ctrl,graph);

	cgraphCoarest = cgraph = Coarsen2Way(&ctrl,graph);
	

	FreeWorkSpace(&ctrl, graph);


	for(levels=0,t=cgraph ; t->finer!=NULL ; t=t->finer,levels++);

	int num_level = levels;
	printf("Coarsened %d levels\n", levels);
	printf("Number of vertices in coarsest graph:%d\n", cgraph->nvtxs);
	fflush(stdout);


	int times_rmcl= 1;	

	while ( cgraph != NULL )
	{
		Matrix *M0, *Mnew;
		int iters;

		M0=setupCanonicalMatrix(cgraph->nvtxs, cgraph->nedges, cgraph->xadj, cgraph->adjncy, cgraph->adjwgt, opt.ncutify);  //do weight normalization here
#ifdef TEST_OUTPUT
		//printf("level: %d, M0:\n",levels);
		//printMatrix(M0);
#endif


		if ( cgraph->coarser == NULL )
		{
		// if this is the coarsest graph, flow matrix is same as
		// the transition matrix. 
			M=M0;
	//		dumpMatrix(M);
		}

		if (cgraph->finer == NULL){
			iters=opt.num_last_iter;
			cgraphFinest = cgraph;
		}
		else{
			iters=opt.iter_per_level;
		}

		//printMatrix(M);
		Mnew=dprmcl(M,M0,cgraph, opt, iters, levels);        //YK: main process!
		M=Mnew;

		if ( cgraph->finer != NULL )  //YK: map nodes to the finer graph
		{
			int nnzRefinedGraph = 2*M->nnz;
			if ( ctrl.CType == MATCH_POWERLAW_FC )
			{
				int ii;
				nnzRefinedGraph = 0;
				for ( ii=0; ii<cgraph->finer->nvtxs; ii++ )
				{
					int tx=cgraph->finer->cmap[ii];
					nnzRefinedGraph +=
					M->xadj[tx+1]-M->xadj[tx];
				}
			}
			else if ( ctrl.CType == MATCH_HASH  && levels <= 3 )
			{
				int ii;
				nnzRefinedGraph=0;
				for ( ii=0; ii<cgraph->nvtxs; ii++ )
				{
					nnzRefinedGraph += cgraph->vwgt[ii] *
							(M->xadj[ii+1] - M->xadj[ii]);
				}
			}
//			dumpMatrix(M);
			Mnew=propagateFlow(M,cgraph,cgraph->finer,nnzRefinedGraph);
			//printf("times:%d, levels:%d Mnew\n",times_rmcl, levels);
			//printMatrix(Mnew);
			if ( M != NULL )
			{
				freeMatrix(M);
			}
			M=Mnew;
		}
		cgraph=cgraph->finer;  //change to the finer graph


		levels--;
		printf("Done level %d (%d times MLR-MCL)\n", levels+1,times_rmcl);
		fflush(stdout);
	}
	fflush(stdout);
	idxtype* firstIndices =  (idxtype*) malloc(sizeof(idxtype) * M->nvtxs);
	idxcopy(nvtxs, M->attractors, firstIndices);


	bool* isAttractor = (bool*) malloc(sizeof(bool) * M->nvtxs);  
	getAttractorsForAll(M, isAttractor);
	int** countAttractor = (int**) malloc(sizeof(int*) * (num_level+1));  //count the number of times the node being an attractor node
	for(int i=0; i <= num_level ; i++){
		countAttractor[i] = (int*) malloc (sizeof(int) * nvtxs);
		for(int j=0; j<M->nvtxs; j++){
			countAttractor[i][j] = 0;
		}
	}
#ifdef TEST_OUTPUT
	{
		for(int i=0; i<M->nvtxs; i++){
			int level = 1, nodeIdx = i;
			for(cgraph = cgraphFinest; cgraph->coarser!=NULL; cgraph = cgraph->coarser, level++){
				idxtype oldNodeIdx = nodeIdx;
				nodeIdx = cgraph->cmap[oldNodeIdx];
				printf("  cmap: level:%d  vid: %d to %d \n", level, oldNodeIdx+1, nodeIdx+1);
			}
		}
	}
#endif
	for(int i=0; i<M->nvtxs; i++){
		
		if(isAttractor[i]){
			countAttractor[0][i]++;
#ifdef TEST_OUTPUT
			printf("attractor vid:%d(%d times), %d-th times MLRMCL\n",i+1,countAttractor[0][i],times_rmcl);fflush(stdout);
#endif
			int level = 1, nodeIdx = i;
			for(cgraph = cgraphFinest; cgraph->coarser!=NULL; cgraph = cgraph->coarser, level++){
				idxtype oldNodeIdx = nodeIdx;
				nodeIdx = cgraph->cmap[nodeIdx];

				countAttractor[level][nodeIdx]++;
#ifdef TEST_OUTPUT
				//printf("countAttractor[%d][%d] = %d;\n", level, nodeIdx,countAttractor[level][nodeIdx]);
#endif
			}
			
		}
	}
	
	//construct hash table
	idxtype* hashtable = idxmalloc(nvtxs,"Hashtable");
	for(int i=0;i<nvtxs;i++)
		hashtable[i]=-1;  	// clear hashtable.
	int num_clusters = 0;  //include singleton
	for(int i=0;i<nvtxs;i++){
		if ( hashtable[firstIndices[i]] == -1){
			hashtable[firstIndices[i]] = num_clusters;
			num_clusters++;
		}
	}
	/*idxtype* invHashTable = idxmalloc(num_clusters,"inverse hashtable");  //map cluster id back to the attractor id
	for(int i=0;i<nvtxs;i++){
		if ( hashtable[firstIndices[i]] != -1){
			invHashTable[hashtable[firstIndices[i]]] = firstIndices[i];
		}
	}*/
	//construct clusters
	std::vector< std::set<idxtype> > clusters(num_clusters);   //clusters[i] contains nodes' numbers (in original graph) in cluster i
	for(int i=0; i<nvtxs ; i++){
		if( firstIndices[i] != -1){
			idxtype cID = hashtable[firstIndices[i]];
			indices[i].insert(cID); 
			if(cID == -1)
				continue;
			clusters[ cID  ].insert( i );
		}
	}
	idxtype* lastIndices = firstIndices;
	bool* lastAttractor = isAttractor;
	idxtype* oldHashtable = hashtable;

	printf("number of clusters in 1st time MLR-MCL: %d\n", num_clusters);fflush(stdout);

	idxtype* thisIndices =  (idxtype*) malloc(sizeof(idxtype) * M->nvtxs);
	idxtype* newHashtable = idxmalloc(nvtxs,"newHashtable");
	bool* thisAttractor = (bool*) malloc(sizeof(bool) * M->nvtxs);  
	while(times_rmcl < opt.time_rmcl){  //do more than one times R-MCL but penalize attractor nodes (by assigning them higher inflation rate)

		times_rmcl++;
		cgraph = cgraphCoarest;
		for(levels=0,t=cgraph ; t->finer!=NULL ; t=t->finer,levels++);

		while ( cgraph != NULL )
		{
			Matrix *M0, *Mnew;
			int iters;

			M0=setupCanonicalMatrix(cgraph->nvtxs, cgraph->nedges, cgraph->xadj, cgraph->adjncy, cgraph->adjwgt, opt.ncutify);  //do weight normalization here

			if ( cgraph->coarser == NULL )
			{
				M=M0;
			}
			

			if (cgraph->finer == NULL)
				iters=opt.num_last_iter;
			else
				iters=opt.iter_per_level;

			//Mnew=dprmcl(M,M0,cgraph, opt, iters,levels);  //original
			Mnew=dprmcl_penalizeAttractors(M,M0,cgraph, opt, iters, levels, countAttractor);        //YK: main process!
			
			M=Mnew;

			if ( cgraph->finer != NULL )  //YK: map nodes to the finer graph
			{
				int nnzRefinedGraph = 2*M->nnz;
				if ( ctrl.CType == MATCH_POWERLAW_FC )
				{
					int ii;
					nnzRefinedGraph = 0;
					for ( ii=0; ii<cgraph->finer->nvtxs; ii++ )
					{
						int tx=cgraph->finer->cmap[ii];
						nnzRefinedGraph +=
						M->xadj[tx+1] - M->xadj[tx];
					}
				}
				else if ( ctrl.CType == MATCH_HASH  && levels <= 3 )
				{
					int ii;
					nnzRefinedGraph=0;
					for ( ii=0; ii<cgraph->nvtxs; ii++ )
					{
						nnzRefinedGraph += cgraph->vwgt[ii] *
								(M->xadj[ii+1] - M->xadj[ii]);
					}
				}
				Mnew=propagateFlow(M,cgraph,cgraph->finer,nnzRefinedGraph);
				//printf("times:%d, levels:%d Mnew\n",times_rmcl, levels);
				//printMatrix(Mnew);
				if ( M != NULL )
				{
					freeMatrix(M);
				}
				M=Mnew;
			}
			cgraph=cgraph->finer;  //change to the finer graph

			// These two didn't get freed earlier, when we freed
			// gdata.
			

			levels--;
			printf("Done level %d (%d-th times MLR-MCL)\n", levels+1,times_rmcl);fflush(stdout);

		}
		//update clusters
		
		idxcopy(nvtxs, M->attractors, thisIndices);

		
		getAttractorsForAll(M, thisAttractor);
		for(int i=0; i<M->nvtxs; i++){
			if(thisAttractor[i]){
				countAttractor[0][i]++;
	#ifdef TEST_OUTPUT
					printf("attractor vid:%d(%d times), %d-th times MLRMCL\n",i+1,countAttractor[0][i],times_rmcl);
	#endif
				int level = 1, nodeIdx = i;
				for(cgraph = cgraphFinest; cgraph->coarser!=NULL; cgraph = cgraph->coarser, level++){
					nodeIdx = cgraph->cmap[nodeIdx];
					countAttractor[level][nodeIdx]++;
				}
				//printf("attractor:%d, times:%d\n",i+1,times_rmcl);
			}
		}
	
		//construct hash table

		for(int i=0;i<nvtxs;i++)
			newHashtable[i]=-1;  	// clear newHashtable.

		int new_num_clusters = 0 ;
		for(int i=0;i<nvtxs;i++){
			if ( newHashtable[thisIndices[i]] == -1){
				newHashtable[thisIndices[i]] = num_clusters+new_num_clusters;  //cluster ID
				if(num_clusters+new_num_clusters == 22704 || num_clusters+new_num_clusters == 29925)
					printf("cluster #%d's attractor node %d\n",num_clusters+new_num_clusters,thisIndices[i]);
				new_num_clusters++;
			}
		}
		//construct clusters
		for(int i=0; i<new_num_clusters; i++){
			clusters.push_back(std::set<idxtype>());  // construct in place; avoids leaking a heap-allocated set
		}
		for(int i=0; i<nvtxs ; i++){
			if( thisIndices[i] != -1){
				idxtype cID = newHashtable[thisIndices[i]];
				indices[i].insert(cID); 
				if(cID == -1)
					continue;
				clusters[ cID ].insert( i );
			}
		}
		printf("number of clusters in %dth times MLR-MCL: %d\ttotal # clusters:%zu\n", times_rmcl, new_num_clusters,clusters.size());
		num_clusters += new_num_clusters;

		//detect last attractor is split to multiple clusters
		/*
		double* countToCluster = (double*) malloc(sizeof(double) * num_clusters) ; 
		for(idxtype vID=0; vID<nvtxs ; vID++){
			
			//printf("test:%d\n",vID+1);fflush(stdout);
			if(lastAttractor[vID] && !thisAttractor[vID]){
				for(int i=0; i<num_clusters; i++)
					countToCluster[i] = 0;
				int lastCluster = oldHashtable[lastIndices[vID]];
				//printf("lastCluster:cid%d (attractor:%d size:%zu), num clusters:%d\n",lastCluster,vID+1,clusters[lastCluster].size(),num_clusters);fflush(stdout);
				
				for(std::set<idxtype>::iterator nodeIterator = clusters[lastCluster].begin(); nodeIterator != clusters[lastCluster].end(); nodeIterator++){
					
					int currentCluster = newHashtable[thisIndices[*nodeIterator]];
					countToCluster[currentCluster] ++;
				}
				for(idxtype cID = 0; cID < num_clusters; cID++){
					if(countToCluster[cID] / clusters[lastCluster].size() >= opt.ratio_nodes_another_cluster && clusters[cID].find(vID)==clusters[cID].end()  ){
						//clusters[ cID ].insert(vID);
						//indices[ vID ].insert(cID);
#ifdef TEST_OUTPUT
						printf("^add extra cluster: vid:%d to cid:%d\n",vID+1, cID);fflush(stdout);
#endif
					}
				}
				
				
			}
		}	
		free(countToCluster);*/
		

		lastIndices = thisIndices;
		lastAttractor = thisAttractor;
		oldHashtable = newHashtable;
		
	}
	printf("total number of clusters after %d times MLR-MCL:%zu\n", opt.time_rmcl, clusters.size());fflush(stdout);

	free(hashtable);
	for(int i=0; i <= num_level ; i++)
		free(countAttractor[i]);
	free(countAttractor);
	free(isAttractor);
	free(firstIndices);
	free(thisIndices);
	free(newHashtable);
	free(thisAttractor);
	freeMatrix(M);
	return clusters;
}
Example #16
int main()
{
  // LOAD DATA
  arma::mat X;

  // A) Toy Data
  //  char inputFile[] = "../data_files/toyclusters/toyclusters.dat";
  
  // B) X4.dat
  //char inputFile[] = "./X4.dat";
  
  // C) fisher data
  //char inputFile[] = "./fisher.dat";

  // D) MNIST data
  //char inputFile[] = "../data_files/MNIST/MNIST.dat";
  
  // E) Reduced MNIST (5000x400)
  //char inputFile[] = "../data_files/MNIST/MNISTreduced.dat";
  
  // F) Reduced MNIST (0 and 1) (1000x400)
  //char inputFile[] = "../data_files/MNIST/MNISTlittle.dat";

  // G) Girl.png (512x768, RGB, already unrolled)
  //char inputFile[] = "girl.dat";

  // H) Pool.png (383x512, RGB, already unrolled)
  //char inputFile[] = "pool.dat";

  // I) Cat.png (733x490, RGB, unrolled)
  //char inputFile[] = "cat.dat";
  
  // J) Airplane.png (512x512, RGB, unrolled)
  //char inputFile[] = "airplane.dat";

  // K) Monarch.png (512x768, RGB, unrolled)
  //char inputFile[] = "monarch.dat";

  // L) tulips.png (512x768 ,RGB, unrolled)
  //char inputFile[] = "tulips.dat";
  
  // M) demo.dat (2d data)
  char inputFile[] = "demo.dat";

  // INITIALIZE PARAMETERS
  X.load(inputFile);
  const arma::uword N = X.n_rows;
  const arma::uword D = X.n_cols;

  arma::umat ids(N,1);	// needed to shuffle indices later
  arma::umat shuffled_ids(N,1);
  for (arma::uword i = 0; i < N; ++i) {
    ids(i,0) = i;
  }
  
  arma::arma_rng::set_seed_random(); // set arma rng
  // int seed = time(NULL);	// set RNG seed to current time
  // srand(seed);

  arma::uword initial_K = 32;   // initial number of clusters
  arma::uword K = initial_K;
  
  arma::umat clusters(N,1); // contains cluster assignments for each data point
  for (arma::uword i = 0; i < N; ++i) {
    clusters(i, 0) = i%K;		// initialize as [0,1,...,K-1,0,1,...,K-1,0,...]
  }

  arma::umat cluster_sizes(N,1,arma::fill::zeros); // contains num data points in cluster k
  for (arma::uword i = 0; i < N; ++i) {
    cluster_sizes(clusters(i,0), 0) += 1;
  }

  arma::mat mu(N, D, arma::fill::zeros); // contains cluster mean parameters
  arma::mat filler(D,D,arma::fill::eye);
  std::vector<arma::mat> sigma(N,filler); // contains cluster covariance parameters

  if (K < N) {			// set parameters not belonging to any cluster to -999
    mu.rows(K,N-1).fill(-999);
    for (arma::uword k = K; k < N; ++k) {
      sigma[k].fill(-999);
    }
  }

  arma::umat uword_dummy(1,1);	// dummy 1x1 matrix;
  // for (arma::uword i = 0; i <N; ++i) {
  //   std::cout << sigma[i] << std::endl;
  // }
  // std::cout << X << std::endl
  // 	    << N << std::endl
  // 	    << D << std::endl
  // 	    << K << std::endl
  // 	    << clusters << std::endl
  // 	    << cluster_sizes << std::endl
  //	    << ids << std::endl;

  // INITIALIZE HYPER PARAMETERS
  // Dirichlet Process concentration parameter is alpha:
  double alpha = 1;
  // Dirichlet Process base distribution (i.e. prior) is 
  // H(mu,sigma) = NIW(mu,Sigma|m_0,k_0,S_0,nu_0) = N(mu|m_0,Sigma/k_0)IW(Sigma|S_0,nu_0)
  arma::mat perturbation(D,D,arma::fill::eye);
  perturbation *= 0.000001;
  //const arma::mat S_0 = arma::cov(X,X,1) + perturbation; // S_xbar / N
  const arma::mat S_0(D,D,arma::fill::eye);
  const double nu_0 = D + 2;
  const arma::mat m_0 = mean(X).t();
  const double k_0 = 0.01;
  // std::cout << "S_0" << S_0 << std::endl;
  // std::cout << S_0 << std::endl
  // 	    << nu_0 << std::endl
  // 	    << m_0 << std::endl
  // 	    << k_0 << std::endl;


  // INITIALIZE SAMPLING PARAMETERS
  arma::uword NUM_SWEEPS = 250;	// number of Gibbs iterations
  bool SAVE_CHAIN = false;	// save output of each Gibbs iteration?
  // arma::uword BURN_IN = NUM_SWEEPS - 10;
  // arma::uword CHAINSIZE = NUM_SWEEPS - BURN_IN;
  // std::vector<arma::uword> chain_K(CHAINSIZE, K); // Initialize chain variable to initial parameters for convenience
  // std::vector<arma::umat> chain_clusters(CHAINSIZE, clusters);
  // std::vector<arma::umat> chain_clusterSizes(CHAINSIZE, cluster_sizes);
  // std::vector<arma::mat> chain_mu(CHAINSIZE, mu);
  // std::vector<std::vector<arma::mat> > chain_sigma(CHAINSIZE, sigma);
  
  // for (arma::uword sweep = 0; sweep < CHAINSIZE; ++sweep) {
  //   std::cout << sweep << " K\n" << chain_K[sweep] << std::endl
  // 		<< sweep << " clusters\n" << chain_clusters[sweep] << std::endl
  // 		<< sweep << " sizes\n" << chain_clusterSizes[sweep] << std::endl
  // 		<< sweep << " mu\n" << chain_mu[sweep] << std::endl;
  //   for (arma::uword i = 0; i < N; ++i) { 
  // 	std::cout << sweep << " " << i << " sigma\n" << chain_sigma[sweep][i] << std::endl;
  //   }
  // }
  

  // START CHAIN
  std::cout << "Starting Algorithm with K = " << K << std::endl;
  for (arma::uword sweep = 0; sweep < NUM_SWEEPS; ++sweep) { 
    // shuffle indices
    shuffled_ids = shuffle(ids);
    // std::cout << shuffled_ids << std::endl;

    // SAMPLE CLUSTERS
    for (arma::uword j = 0; j < N; ++j){
      //      std::cout << "j = " << j << std::endl;
      arma::uword i = shuffled_ids(j);
      arma::mat x = X.row(i).t(); // current data point
      
      // Remove i's statistics and any empty clusters
      arma::uword c = clusters(i,0); // current cluster
      cluster_sizes(c,0) -= 1;
      //std::cout << "old c = " << c << std::endl;

      if (cluster_sizes(c,0) == 0) { // remove empty cluster
        cluster_sizes(c,0) = cluster_sizes(K-1,0); // move entries for the last cluster (K-1) onto position c
        mu.row(c) = mu.row(K-1);
        sigma[c] = sigma[K-1];
        uword_dummy(0,0) = c;
        arma::uvec idx = find(clusters == K - 1);
        clusters.each_row(idx) = uword_dummy;
        cluster_sizes(K-1,0) = 0;
        mu.row(K-1).fill(-999);
        sigma[K-1].fill(-999);
        --K;
      }

      // quick test of logMvnPdf:
      // arma::mat m_(2,1);
      // arma::mat s_(2,2);
      // arma::mat t_(2,1);
      // m_ << 1 << arma::endr << 2;
      // s_ << 3 << -0.2 << arma::endr << -0.2 << 1;
      // t_ << -3 << arma::endr << -3;
      // double lpdf = logMvnPdf(t_, m_, s_);
      // std::cout << lpdf << std::endl; // should be -19.1034 (works)


      
      // Find categorical distribution over clusters (tested)
      arma::mat logP(K+1, 1, arma::fill::zeros);
      
      // quick test of logInvWishPdf
      // arma::mat si_(2,2), s_(2,2);
      // double nu_ = 4;
      // si_ << 1 << 0.5 << arma::endr << 0.5 << 4;
      // s_ << 3 << -0.2 << arma::endr << -0.2 << 1;
      // double lpdf = logInvWishPdf(si_, s_, nu_);
      // std::cout << lpdf << std::endl; // should be -7.4399 (it is)

      // quick test for logNormInvWishPdf
      // arma::mat si_(2,2), s_(2,2);
      // arma :: mat mu_(2,1), m_(2,1);
      // double k_ = 0.5, nu_ = 4;
      // si_ << 1 << 0.5 << arma::endr << 0.5 << 4;
      // s_ << 3 << -0.2 << arma::endr << -0.2 << 1;
      // mu_ << -3 << arma::endr << -3;
      // m_ << 1 << arma::endr << 2;
      // double lp = logNormInvWishPdf(mu_,si_,m_,k_,s_,nu_);
      // std::cout << lp << std::endl; // should equal -15.2318 (it is)
      
      // p(existing clusters) (tested)
      for (arma::uword k = 0; k < K; ++k) {
	arma::mat m_ = mu.row(k).t();
	arma::mat s_ = sigma[k];
	logP(k,0) = log(cluster_sizes(k,0)) - log(N-1+alpha) + logMvnPdf(x,m_,s_);
      }

      // p(new cluster): find partition function (tested)
      // arma::mat dummy_mu(D, 1, arma::fill::zeros);
      // arma::mat dummy_sigma(D, D, arma::fill::eye);
      // double logPrior, logLklihd, logPstr, logPartition; 
      // posterior hyperparameters (tested)
      arma::mat m_1(D,1), S_1(D,D);
      double k_1, nu_1;
      k_1 = k_0 + 1;
      nu_1 = nu_0 + 1;
      m_1 = (k_0*m_0 + x) / k_1;
      S_1 = S_0 + x * x.t() + k_0 * (m_0 * m_0.t()) - k_1 * (m_1 * m_1.t());
      // std::cout << k_1 << std::endl
      // 		<< nu_1 << std::endl
      // 		<< m_1 << std::endl
      // 		<< S_1 << std::endl;
      
      // // partition = likelihood*prior/posterior (tested)
      // // (perhaps a direct computation of the partition function would be better)
      // logPrior = logNormInvWishPdf(dummy_mu, dummy_sigma, m_0, k_0, S_0, nu_0);
      // logLklihd = logMvnPdf(x, dummy_mu, dummy_sigma);
      // logPstr = logNormInvWishPdf(dummy_mu, dummy_sigma, m_1, k_1, S_1, nu_1);
      // logPartition = logPrior + logLklihd - logPstr;      
      // std::cout << "log  Prior = " << logPrior << std::endl
      // 		<< "log Likelihood = " << logLklihd << std::endl
      // 		<< "log Posterior = " << logPstr << std::endl
      // 		<< "log Partition = " << logPartition << std::endl;
      
      // Computing partition directly
      double logS0,signS0,logS1,signS1;
      arma::log_det(logS0,signS0,S_0);
      arma::log_det(logS1,signS1,S_1);
      /*std::cout << "log(det(S_0)) = " << logS0 << std::endl
	<< "log(det(S_1)) = " << logS1 << std::endl;*/
      double term1 = 0.5*D*(log(k_0)-log(k_1));
      double term2 = -0.5*D*log(arma::datum::pi);
      double term3 = 0.5*(nu_0*logS0 - nu_1*logS1);
      double term4 = lgamma(0.5*nu_1);
      double term5 = -lgamma(0.5*(nu_1-D));
      double logPartition = term1+term2+term3+term4+term5;
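      // The five terms assemble the log marginal likelihood of x under the
      // NIW prior (a sketch of the identity, using nu_1 = nu_0 + 1):
      //   log p(x) = (D/2)(log k_0 - log k_1) - (D/2) log(pi)
      //            + (nu_0/2) log|S_0| - (nu_1/2) log|S_1|
      //            + lgamma(nu_1/2) - lgamma((nu_1 - D)/2)
      // The last two terms are what remains of the ratio of multivariate
      // gamma functions when the degrees of freedom differ by exactly one.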
      /*double logPartition = 0.5*D*(log(k_0)-log(k_1)-log(arma::datum::pi))
	+ 0.5*(nu_0*logS0 - nu_1*logS1) + lgamma(0.5*nu_1) - lgamma(0.5*(nu_1-D));*/
      
      /*      std::cout << "term1 = " << term1 << std::endl
		<< "term2 = " << term2 << std::endl
		<< "term3 = " << term3 << std::endl
		<< "term4 = " << term4 << std::endl
		<< "term5 = " << term5 << std::endl;*/
      
      //std::cout << "logP = " << logPartition << std::endl;

      // p(new cluster): (tested)
      logP(K,0) = log(alpha) - log(N - 1 + alpha) + logPartition;
      //      std::cout << "logP(new cluster) = " << logP(K,0) << std::endl;
      //if(i == 49)
      //assert(false);
      // sample cluster for i
      
      

      arma::uword c_ = logCatRnd(logP);
      clusters(i,0) = c_;
      
      //if (j % 10 == 0){
      //std::cout << "New c = " << c_ << std::endl;
      //std::cout << "logP = \n" << logP << std::endl;
      //}
      // quick test for mvnRnd
      // arma::mat mu, si;
      // mu << 1 << arma::endr << 2;
      // si << 1 << 0.9 << arma::endr << 0.9 << 1;
      // arma::mat m = mvnRnd(mu, si);
      // std::cout << m << std::endl;

      // quick test for invWishRnd
      // double df = 4;
      // arma::mat si(2,2);
      // si << 1 << 1 << arma::endr << 1 << 1;
      // arma::mat S = invWishRnd(si,df);
      // std::cout << S << std::endl;

      if (c_ == K) {	// Sample parameters for any new-born clusters from posterior
	cluster_sizes(K, 0) = 1;
	arma::mat si_ = invWishRnd(S_1, nu_1);
	//arma::mat si_ = S_1;
	arma::mat mu_ = mvnRnd(m_1, si_/k_1);
	//arma::mat mu_ = m_1;
	mu.row(K) = mu_.t();
	sigma[K] = si_;
	K += 1;
      } else {
	cluster_sizes(c_,0) += 1;
      }
      // if (sweep == 0)
      // 	std::cout << " K = " << K << std::endl;
      // // if (j == N-1) {
      // // 	std::cout << logP << std::endl;
      // // 	std::cout << K << std::endl;
      // // 	assert(false);
      // // }
      // std::cout << "K = " << K << "\n" << std::endl;
    }
    
    // sample CLUSTER PARAMETERS FROM POSTERIOR
    for (arma::uword k = 0; k < K; ++k) {
      // std::cout << "k = " << k << std::endl;
      // cluster data
      arma::mat Xk = X.rows(find(clusters == k));
      arma::uword Nk = cluster_sizes(k,0);

      // posterior hyperparameters
      arma::mat m_Nk(D,1), S_Nk(D,D);
      double k_Nk, nu_Nk;
      
      arma::mat sum_k = sum(Xk,0).t();
      arma::mat cov_k(D, D, arma::fill::zeros);
      for (arma::uword l = 0; l < Nk; ++l) { 
	cov_k += Xk.row(l).t() * Xk.row(l);
      }
      
      k_Nk = k_0 + Nk;
      nu_Nk = nu_0 + Nk;
      m_Nk = (k_0 * m_0 + sum_k) / k_Nk;
      S_Nk = S_0 + cov_k + k_0 * (m_0 * m_0.t()) - k_Nk * (m_Nk * m_Nk.t());
      
      // sample fresh parameters
      arma::mat si_ = invWishRnd(S_Nk, nu_Nk);
      //arma::mat si_ = S_Nk;
      arma::mat mu_ = mvnRnd(m_Nk, si_/k_Nk);
      //arma::mat mu_ = m_Nk;
      mu.row(k) = mu_.t();
      sigma[k] = si_;
    }
    std::cout << "Iteration " << sweep + 1 << "/" << NUM_SWEEPS<< " done. K = " << K << std::endl;
        
    // // STORE CHAIN
    // if (SAVE_CHAIN) {
    //   if (sweep >= BURN_IN) { 
    // 	chain_K[sweep - BURN_IN] = K;
    // 	chain_clusters[sweep - BURN_IN] = clusters;
    // 	chain_clusterSizes[sweep - BURN_IN] = cluster_sizes;
    // 	chain_mu[sweep - BURN_IN] = mu;
    // 	chain_sigma[sweep - BURN_IN] = sigma;
    //   }
    // }
	
  }
  std::cout << "Final cluster sizes: " << std::endl
	    << cluster_sizes.rows(0, K-1) << std::endl;






  
  // WRITE OUTPUT DATA TO FILE
  arma::mat MU = mu.rows(0,K-1);
  arma::mat SIGMA(D*K,D);
  for (arma::uword k = 0; k < K; ++k) { 
    SIGMA.rows(k*D,(k+1)*D-1) = sigma[k];
  }
  arma::umat IDX = clusters;

  // A) toycluster data
  // char MuFile[] = "../data_files/toyclusters/dpmMU.out";
  // char SigmaFile[] = "../data_files/toyclusters/dpmSIGMA.out";
  // char IdxFile[] = "../data_files/toyclusters/dpmIDX.out";

  // B) X4.dat
  char MuFile[] = "dpmMU.out";
  char SigmaFile[] = "dpmSIGMA.out";
  char IdxFile[] = "dpmIDX.out";
  std::ofstream KFile("K.out");
  
  MU.save(MuFile, arma::raw_ascii);
  SIGMA.save(SigmaFile, arma::raw_ascii);
  IDX.save(IdxFile, arma::raw_ascii);
  KFile << "K = " << K << std::endl;
  
  if (SAVE_CHAIN) {}
  //   std::ofstream chainKFile("chainK.out");
  //   std::ofstream chainClustersFile("chainClusters.out");
  //   std::ofstream chainClusterSizesFile("chainClusterSizes.out");
  //   std::ofstream chainMuFile("chainMu.out");
  //   std::ofstream chainSigmaFile("chainSigma.out");
    
  //   chainKFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
  // 	       << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
  // 	       << "Burn-In: " << BURN_IN << std::endl
  // 	       << "Initial number of clusters: " << initial_K << std::endl
  // 	       << "Output: Number of cluster (K)\n" << std::endl; 
  //   chainClustersFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
  // 		      << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
  // 		      << "Burn-In: " << BURN_IN << std::endl
  // 		      << "Initial number of clusters: " << initial_K << std::endl
  // 		      << "Output: Cluster identities (clusters)\n" << std::endl; 
  //   chainClusterSizesFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
  // 			  << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
  // 			  << "Burn-In: " << BURN_IN << std::endl
  // 			  << "Initial number of clusters: " << initial_K << std::endl
  // 			  << "Output: Size of clusters (cluster_sizes)\n" << std::endl; 
  //   chainMuFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
  // 		<< "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
  // 		<< "Burn-In: " << BURN_IN << std::endl
  // 		<< "Initial number of clusters: " << initial_K << std::endl
  // 		<< "Output: Samples for cluster mean parameters (mu. Note: means stored in rows)\n" << std::endl; 
  //   chainSigmaFile << "Dirichlet Process Mixture Model.\nInput: " << inputFile << std::endl
  // 		   << "Number of iterations of Gibbs Sampler: " << NUM_SWEEPS << std::endl
  // 		   << "Burn-In " << BURN_IN << std::endl
  // 		   << "Initial number of clusters: " << initial_K << std::endl
  // 		   << "Output: Samples for cluster covariances (sigma)\n" << std::endl; 


  //   for (arma::uword sweep = 0; sweep < CHAINSIZE; ++sweep) {
  //     arma::uword K = chain_K[sweep];
  //     chainKFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_K[sweep] << std::endl;
  //     chainClustersFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_clusters[sweep]  << std::endl;
  //     chainClusterSizesFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_clusterSizes[sweep].rows(0, K - 1) << std::endl;
  //     chainMuFile << "Sweep #" << BURN_IN + sweep + 1 << "\n" << chain_mu[sweep].rows(0, K - 1) << std::endl;
  //     chainSigmaFile << "Sweep #" << BURN_IN + sweep + 1<< "\n";
  //     for (arma::uword i = 0; i < K; ++i) { 
  // 	chainSigmaFile << chain_sigma[sweep][i] << std::endl;
  //     }
  //     chainSigmaFile << std::endl;
  //   }
  // }

  return 0;
}
Example #17
int
main (int argc, char** argv)
{
  // Data containers used
  pcl::PointCloud<PointTypeIO>::Ptr cloud_in (new pcl::PointCloud<PointTypeIO>), cloud_out (new pcl::PointCloud<PointTypeIO>);
  pcl::PointCloud<PointTypeFull>::Ptr cloud_with_normals (new pcl::PointCloud<PointTypeFull>);
  pcl::IndicesClustersPtr clusters (new pcl::IndicesClusters), small_clusters (new pcl::IndicesClusters), large_clusters (new pcl::IndicesClusters);
  pcl::search::KdTree<PointTypeIO>::Ptr search_tree (new pcl::search::KdTree<PointTypeIO>);
  pcl::console::TicToc tt;

  // Load the input point cloud
  std::cerr << "Loading...\n", tt.tic ();
  pcl::io::loadPCDFile (argv[1], *cloud_in);
  std::cerr << ">> Done: " << tt.toc () << " ms, " << cloud_in->points.size () << " points\n";

  // Downsample the cloud using a Voxel Grid class
  std::cerr << "Downsampling...\n", tt.tic ();
  pcl::VoxelGrid<PointTypeIO> vg;
  vg.setInputCloud (cloud_in);
  vg.setLeafSize (80.0, 80.0, 80.0);
  vg.setDownsampleAllData (true);
  vg.filter (*cloud_out);
  std::cerr << ">> Done: " << tt.toc () << " ms, " << cloud_out->points.size () << " points\n";

  // Set up a Normal Estimation class and merge data in cloud_with_normals
  std::cerr << "Computing normals...\n", tt.tic ();
  pcl::copyPointCloud (*cloud_out, *cloud_with_normals);
  pcl::NormalEstimation<PointTypeIO, PointTypeFull> ne;
  ne.setInputCloud (cloud_out);
  ne.setSearchMethod (search_tree);
  ne.setRadiusSearch (300.0);
  ne.compute (*cloud_with_normals);
  std::cerr << ">> Done: " << tt.toc () << " ms\n";

  // Set up a Conditional Euclidean Clustering class
  std::cerr << "Segmenting to clusters...\n", tt.tic ();
  pcl::ConditionalEuclideanClustering<PointTypeFull> cec (true);
  cec.setInputCloud (cloud_with_normals);
  cec.setConditionFunction (&customRegionGrowing);
  cec.setClusterTolerance (500.0);
  cec.setMinClusterSize (cloud_with_normals->points.size () / 1000);
  cec.setMaxClusterSize (cloud_with_normals->points.size () / 5);
  cec.segment (*clusters);
  cec.getRemovedClusters (small_clusters, large_clusters);
  std::cerr << ">> Done: " << tt.toc () << " ms\n";

  // Using the intensity channel for lazy visualization of the output
  for (int i = 0; i < small_clusters->size (); ++i)
    for (int j = 0; j < (*small_clusters)[i].indices.size (); ++j)
      cloud_out->points[(*small_clusters)[i].indices[j]].intensity = -2.0;
  for (int i = 0; i < large_clusters->size (); ++i)
    for (int j = 0; j < (*large_clusters)[i].indices.size (); ++j)
      cloud_out->points[(*large_clusters)[i].indices[j]].intensity = +10.0;
  for (int i = 0; i < clusters->size (); ++i)
  {
    int label = rand () % 8;
    for (int j = 0; j < (*clusters)[i].indices.size (); ++j)
      cloud_out->points[(*clusters)[i].indices[j]].intensity = label;
  }

  // Save the output point cloud
  std::cerr << "Saving...\n", tt.tic ();
  pcl::io::savePCDFile ("output.pcd", *cloud_out);
  std::cerr << ">> Done: " << tt.toc () << " ms\n";

  return (0);
}
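The condition callback customRegionGrowing is not shown in this excerpt. A minimal sketch of a compatible callback, assuming PointTypeFull carries intensity and normals; the thresholds are illustrative:

// Hypothetical condition: keep growing a region when the two points have
// similar intensity or nearly parallel normals.
bool customRegionGrowing (const PointTypeFull& point_a, const PointTypeFull& point_b, float squared_distance)
{
  Eigen::Map<const Eigen::Vector3f> point_a_normal = point_a.getNormalVector3fMap ();
  Eigen::Map<const Eigen::Vector3f> point_b_normal = point_b.getNormalVector3fMap ();
  if (std::fabs (point_a.intensity - point_b.intensity) < 8.0f)
    return (true);
  if (std::fabs (point_a_normal.dot (point_b_normal)) > 0.966f)  // within ~15 degrees
    return (true);
  return (false);
}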
Example #18
void JLinkage::clusterPSMatrix(std::vector<std::vector<int> >& result)
{
  /* allocate memory */
	std::vector<std::vector<double> > distances(line_count, 
			std::vector<double>(line_count, 0));
	std::vector<std::vector<int> > clusters(line_count,
			std::vector<int>(line_count, 0));
	std::vector<int> indicators(line_count, 1);

	/* initialization */
	double minDis = 1;
	int indexA = 0, indexB = 0;
	for(int i = 0; i < line_count; i++)
	{
		clusters[i][i] = 1;
		for(int j = i + 1; j < line_count; j++)
		{
			const double jd = jaccardDist(PSMatrix[i], PSMatrix[j]);
			distances[i][j] = jd;
			distances[j][i] = jd;
			if(jd < minDis)
			{
				minDis = jd;
				indexA = i;
				indexB = j;
			}
		}
	}

	while(minDis != 1)
	{
		/* merge two clusters */
		for(int i = 0; i < line_count; i++)
		{
			if(clusters[indexA][i] || clusters[indexB][i])
				clusters[indexA][i] = clusters[indexB][i] = 1;
		}
		indicators[indexB] = 0;
		/* merge preference sets: both rows become their intersection */
		PSMatrix[indexA] = PSMatrix[indexB] = PSMatrix[indexA] & PSMatrix[indexB];

		/* recalculate distance */
		for(int i = 0; i < line_count; i++)
		{
			distances[indexA][i] = jaccardDist(PSMatrix[indexA], PSMatrix[i]);
			distances[i][indexA] = distances[indexA][i];
		}

		/* find minimum distance */
		minDis = 1;
		for(int i = 0; i < line_count; i++)
		{
			if(indicators[i] == 0) continue;
			for(int j = i + 1; j < line_count; j++)
			{
				if(indicators[j] == 0) continue;
				if(distances[i][j] < minDis)
				{
					minDis = distances[i][j];
					indexA = i;
					indexB = j;
				}
			} 
		}
	}

 	/* calculate cluster size */
	std::vector<int> clusterSizes(line_count);
	for(int i = 0; i < line_count; i++)
	{
		int cs = 0;
		if(indicators[i])
		{
			for(int j = 0; j < line_count; j++)
			{
				if(clusters[i][j]) ++cs;
			}
		}
		clusterSizes[i] = cs;
	}

	const int cluster_num = 3;
	result.clear();
	result.resize(cluster_num, std::vector<int>(line_count)); /* choose the largest three clusters */


	int count = 0;
	while(count < cluster_num)
	{
		int max_index = 0;
		int max_size = clusterSizes[0];
		for(int i = 1; i < line_count; i++)
		{
			if(max_size < clusterSizes[i])
			{
				max_size = clusterSizes[i];
				max_index = i;
			}
		}
		result[count] = clusters[max_index];
		count++;
		clusterSizes[max_index] = 0;
	}

	/* print clusters */
	/*
	for(int i = 0; i < cluster_num; i++)
	{
		printf("Cluster %d:\n", i);
		for(int j = 0; j < line_count; j++)
		{
			if(result[i][j])
				printf("%d ", j);
		}
		printf("\n");
	}
	*/
}
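jaccardDist is assumed to operate on the bitset-like preference sets stored in PSMatrix (the & above suggests std::bitset or similar). A minimal sketch over std::bitset, reusing the JLINKAGE_MODEL_SIZE constant from the code above:

#include <bitset>

// Jaccard distance between two preference sets:
// 1 - |A∩B| / |A∪B|, with 1 by convention when both sets are empty.
static double jaccardDist(const std::bitset<JLINKAGE_MODEL_SIZE>& a,
                          const std::bitset<JLINKAGE_MODEL_SIZE>& b)
{
	const double uni = static_cast<double>((a | b).count());
	if (uni == 0) return 1.0;
	const double inter = static_cast<double>((a & b).count());
	return (uni - inter) / uni;
}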
Example #19
  void AverageLinkage::operator()(DistanceMatrix<float> & original_distance, std::vector<BinaryTreeNode> & cluster_tree, const float threshold /*=1*/) const
  {
    // input MUST have >= 2 elements!
    if (original_distance.dimensionsize() < 2)
    {
      throw ClusterFunctor::InsufficientInput(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Distance matrix to start from only contains one element");
    }

    std::vector<std::set<Size> > clusters(original_distance.dimensionsize());
    for (Size i = 0; i < original_distance.dimensionsize(); ++i)
    {
      clusters[i].insert(i);
    }

    cluster_tree.clear();
    cluster_tree.reserve(original_distance.dimensionsize() - 1);

    // Initial minimum-distance pair
    original_distance.updateMinElement();
    std::pair<Size, Size> min = original_distance.getMinElementCoordinates();

    Size overall_cluster_steps(original_distance.dimensionsize());
    startProgress(0, original_distance.dimensionsize(), "clustering data");

    while (original_distance(min.second, min.first) < threshold)
    {
      //grow the tree
      cluster_tree.push_back(BinaryTreeNode(*(clusters[min.second].begin()), *(clusters[min.first].begin()), original_distance(min.first, min.second)));
      if (cluster_tree.back().left_child > cluster_tree.back().right_child)
      {
        std::swap(cluster_tree.back().left_child, cluster_tree.back().right_child);
      }

      if (original_distance.dimensionsize() > 2)
      {
        //pick minimum-distance pair i,j and merge them

        //calculate parameter for lance-williams formula
        float alpha_i = (float)(clusters[min.first].size() / (float)(clusters[min.first].size() + clusters[min.second].size()));
        float alpha_j = (float)(clusters[min.second].size() / (float)(clusters[min.first].size() + clusters[min.second].size()));
        //~ std::cout << alpha_i << '\t' << alpha_j << std::endl;

        //merge elements of first into second (and then erase first)
        clusters[min.second].insert(clusters[min.first].begin(), clusters[min.first].end());
        // erase first one
        clusters.erase(clusters.begin() + min.first);

        //update original_distance matrix
        //average linkage: new distance between clusters is the average distance between elements of each cluster
        //lance-williams update for d((i,j),k): (m_i/m_i+m_j)* d(i,k) + (m_j/m_i+m_j)* d(j,k) ; m_x is the number of elements in cluster x
        for (Size k = 0; k < min.second; ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(min.second, k, (alpha_i * dik + alpha_j * djk));
        }
        for (Size k = min.second + 1; k < original_distance.dimensionsize(); ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(k, min.second, (alpha_i * dik + alpha_j * djk));
        }

        //reduce
        original_distance.reduce(min.first);

        //update minimum-distance pair
        original_distance.updateMinElement();

        //get min-pair from triangular matrix
        min = original_distance.getMinElementCoordinates();
      }
      else
      {
        break;
      }
      setProgress(overall_cluster_steps - original_distance.dimensionsize());

      //repeat until only two cluster remains, last step skips matrix operations
    }
    //fill tree with dummy nodes
    Size sad(*clusters.front().begin());
    for (Size i = 1; (i < clusters.size()) && (cluster_tree.size() < cluster_tree.capacity()); ++i)
    {
      cluster_tree.push_back(BinaryTreeNode(sad, *clusters[i].begin(), -1.0));
    }

    endProgress();
  }
Example #20
File: MinDist.cpp Project: ios4u/grt
bool MinDist::loadModelFromFile(fstream &file){
    
    trained = false;
    numInputDimensions = 0;
    numClasses = 0;
    models.clear();
    classLabels.clear();
    
    if(!file.is_open())
    {
        errorLog << "loadModelFromFile(string filename) - Could not open file to load model" << endl;
        return false;
    }
    
    std::string word;
    
    file >> word;
    
    //Check to see if we should load a legacy file
    if( word == "GRT_MINDIST_MODEL_FILE_V1.0" ){
        return loadLegacyModelFromFile( file );
    }
    
    //Find the file type header
    if(word != "GRT_MINDIST_MODEL_FILE_V2.0"){
        errorLog << "loadModelFromFile(string filename) - Could not find Model File Header" << endl;
        return false;
    }
    
    //Load the base settings from the file
    if( !Classifier::loadBaseSettingsFromFile(file) ){
        errorLog << "loadModelFromFile(string filename) - Failed to load base settings from file!" << endl;
        return false;
    }
    
    if( trained ){
        
        //Resize the buffer
        models.resize(numClasses);
        classLabels.resize(numClasses);
        
        //Load each of the K models
        for(UINT k=0; k<numClasses; k++){
            double rejectionThreshold;
            double gamma;
            double trainingSigma;
            double trainingMu;
            
            file >> word;
            if( word != "ClassLabel:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the class label for class " << k << endl;
                return false;
            }
            file >> classLabels[k];
            
            file >> word;
            if( word != "NumClusters:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the NumClusters for class " << k << endl;
                return false;
            }
            file >> numClusters;
            
            file >> word;
            if( word != "RejectionThreshold:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the RejectionThreshold for class " << k << endl;
                return false;
            }
            file >> rejectionThreshold;
            
            file >> word;
            if( word != "Gamma:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the Gamma for class " << k << endl;
                return false;
            }
            file >> gamma;
            
            file >> word;
            if( word != "TrainingMu:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the TrainingMu for class " << k << endl;
                return false;
            }
            file >> trainingMu;
            
            file >> word;
            if( word != "TrainingSigma:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the TrainingSigma for class " << k << endl;
                return false;
            }
            file >> trainingSigma;
            
            file >> word;
            if( word != "ClusterData:" ){
                errorLog << "loadModelFromFile(string filename) - Could not load the ClusterData for class " << k << endl;
                return false;
            }
            
            //Load the cluster data
            MatrixDouble clusters(numClusters,numInputDimensions);
            for(UINT i=0; i<numClusters; i++){
                for(UINT j=0; j<numInputDimensions; j++){
                    file >> clusters[i][j];
                }
            }
            
            models[k].setClassLabel( classLabels[k] );
            models[k].setClusters( clusters );
            models[k].setGamma( gamma );
            models[k].setRejectionThreshold( rejectionThreshold );
            models[k].setTrainingSigma( trainingSigma );
            models[k].setTrainingMu( trainingMu );
        }
        
        //Recompute the null rejection thresholds
        recomputeNullRejectionThresholds();
        
        //Resize the prediction results to make sure it is setup for realtime prediction
        maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
        bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
        classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
        classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
    }
    
    return true;
}
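
The loader above fixes the on-disk layout of each class block. For reference, here is a minimal sketch of a matching writer for one block, emitting the keys in exactly the order the parser consumes them; the function name and the idea of serializing from plain values (rather than GRT's own save path) are assumptions for illustration, and it assumes MatrixDouble exposes getNumCols() as in GRT.

void writeMinDistClassBlock( fstream &file, UINT classLabel, UINT numClusters,
                             double rejectionThreshold, double gamma,
                             double trainingMu, double trainingSigma,
                             const MatrixDouble &clusters ){
    //Write the key/value pairs in the order loadModelFromFile() reads them back
    file << "ClassLabel: " << classLabel << endl;
    file << "NumClusters: " << numClusters << endl;
    file << "RejectionThreshold: " << rejectionThreshold << endl;
    file << "Gamma: " << gamma << endl;
    file << "TrainingMu: " << trainingMu << endl;
    file << "TrainingSigma: " << trainingSigma << endl;
    file << "ClusterData:" << endl;
    for(UINT i=0; i<numClusters; i++){
        for(UINT j=0; j<clusters.getNumCols(); j++){
            file << clusters[i][j] << "\t";
        }
        file << endl;
    }
}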
Example #21
  void SingleLinkage::operator()(DistanceMatrix<float> & original_distance, std::vector<BinaryTreeNode> & cluster_tree, const float threshold /*=1*/) const
  {
    // input MUST have >= 2 elements!
    if (original_distance.dimensionsize() < 2)
    {
      throw ClusterFunctor::InsufficientInput(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Distance matrix to start from only contains one element");
    }

    cluster_tree.clear();
    if (threshold < 1)
    {
      LOG_ERROR << "You tried to use Single Linkage clustering with a threshold. This is currently not supported!" << std::endl;
      throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
    }

    //SLINK
    std::vector<Size> pi;
    pi.reserve(original_distance.dimensionsize());
    std::vector<float> lambda;
    lambda.reserve(original_distance.dimensionsize());

    startProgress(0, original_distance.dimensionsize(), "clustering data");

    //initialize first pointer values
    pi.push_back(0);
    lambda.push_back(std::numeric_limits<float>::max());

    for (Size k = 1; k < original_distance.dimensionsize(); ++k)
    {
      std::vector<float> row_k;
      row_k.reserve(k);

      //initialize pointer values for element to cluster
      pi.push_back(k);
      lambda.push_back(std::numeric_limits<float>::max());

      // get the right distances
      for (Size i = 0; i < k; ++i)
      {
        row_k.push_back(original_distance.getValue(i, k));
      }

      //calculate pointer values for element k
      for (Size i = 0; i < k; ++i)
      {
        if (lambda[i] >= row_k[i])
        {
          row_k[pi[i]] = std::min(row_k[pi[i]], lambda[i]);
          lambda[i] = row_k[i];
          pi[i] = k;
        }
        else
        {
          row_k[pi[i]] = std::min(row_k[pi[i]], row_k[i]);
        }
      }

      //update clustering if necessary
      for (Size i = 0; i < k; ++i)
      {
        if (lambda[i] >= lambda[pi[i]])
        {
          pi[i] = k;
        }
      }
      setProgress(k);
    }

    for (Size i = 0; i < pi.size() - 1; ++i)
    {
      //strict order is always kept in algorithm: i < pi[i]
      cluster_tree.push_back(BinaryTreeNode(i, pi[i], lambda[i]));
      //~ std::cout << i << '\n' << pi[i] << '\n' << lambda[i] << std::endl;
    }

    //sort pre-tree
    std::sort(cluster_tree.begin(), cluster_tree.end(), compareBinaryTreeNode);

    // convert pre-tree to correct format
    for (Size i = 0; i < cluster_tree.size(); ++i)
    {
      if (cluster_tree[i].right_child < cluster_tree[i].left_child)
      {
        std::swap(cluster_tree[i].left_child, cluster_tree[i].right_child);
      }
      for (Size k = i + 1; k < cluster_tree.size(); ++k)
      {
        if (cluster_tree[k].left_child == cluster_tree[i].right_child)
        {
          cluster_tree[k].left_child = cluster_tree[i].left_child;
        }
        else if (cluster_tree[k].left_child > cluster_tree[i].right_child)
        {
          --cluster_tree[k].left_child;
        }
        if (cluster_tree[k].right_child == cluster_tree[i].right_child)
        {
          cluster_tree[k].right_child = cluster_tree[i].left_child;
        }
        else if (cluster_tree[k].right_child > cluster_tree[i].right_child)
        {
          --cluster_tree[k].right_child;
        }
      }

    }
    //~ prepare to redo clustering to get all indices for binarytree in min index element representation
    std::vector<std::set<Size> > clusters(original_distance.dimensionsize());
    for (Size i = 0; i < original_distance.dimensionsize(); ++i)
    {
      clusters[i].insert(i);
    }
    for (Size cluster_step = 0; cluster_step < cluster_tree.size(); ++cluster_step)
    {
      Size new_left_child = *(clusters[cluster_tree[cluster_step].left_child].begin());
      Size new_right_child = *(clusters[cluster_tree[cluster_step].right_child].begin());
      clusters[cluster_tree[cluster_step].left_child].insert(clusters[cluster_tree[cluster_step].right_child].begin(), clusters[cluster_tree[cluster_step].right_child].end());
      clusters.erase(clusters.begin() + cluster_tree[cluster_step].right_child);
      std::swap(cluster_tree[cluster_step].left_child, new_left_child);
      std::swap(cluster_tree[cluster_step].right_child, new_right_child);
      if (cluster_tree[cluster_step].left_child > cluster_tree[cluster_step].right_child)
      {
        std::swap(cluster_tree[cluster_step].left_child, cluster_tree[cluster_step].right_child);
      }
    }

    endProgress();
  }
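
The cluster_tree built above is a list of merge steps sorted by distance; cutting it at a distance threshold recovers flat clusters. A minimal sketch using a union-find pass, with a simplified BinaryTreeNode carrying the same three fields as the OpenMS type (cutTree and findRoot are illustrative names, not OpenMS API):

#include <cstddef>
#include <vector>

struct BinaryTreeNode { std::size_t left_child, right_child; float distance; };

static std::size_t findRoot(std::vector<std::size_t>& parent, std::size_t i)
{
  // path halving keeps the chains short
  while (parent[i] != i) { parent[i] = parent[parent[i]]; i = parent[i]; }
  return i;
}

// assigns every element 0..n-1 a cluster id after applying all merges below `cut`
std::vector<std::size_t> cutTree(const std::vector<BinaryTreeNode>& tree, std::size_t n, float cut)
{
  std::vector<std::size_t> parent(n);
  for (std::size_t i = 0; i < n; ++i) parent[i] = i;
  for (const BinaryTreeNode& node : tree)
  {
    if (node.distance < 0 || node.distance >= cut) continue; // negative distances mark dummy nodes
    parent[findRoot(parent, node.right_child)] = findRoot(parent, node.left_child);
  }
  std::vector<std::size_t> label(n);
  for (std::size_t i = 0; i < n; ++i) label[i] = findRoot(parent, i);
  return label;
}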
Example #22
void InvaderDetectOperation::operate( const std::vector<ImageBufferPtr>& inputList, const std::vector<double>&, ImageBufferPtr output ) {
	assert(inputList.size() == 2);
	min.clear();
	max.clear();

	CpuImage input = readBuffer(inputList[0]);
	Matrix<int> clusters( input.height(), input.width() );

	for(unsigned int y = 0; y < input.height(); y++)
	for(unsigned int x = 0; x < input.width(); x++) {
		clusters(y,x) = -1;
	}

	int lastCluster = 0;
	for(unsigned int y = 0; y < input.height(); y++ )
	for(unsigned int x = 0; x < input.width(); x++ ) {
		if( clusters(y,x).get() != -1 )
			continue;
		if( ! input(y,x).get().r ){
			clusters(y,x) = 0;
			continue;
		}
		clusters(y,x) = ++lastCluster;

		std::queue<Recursive> recurse;
		recurse.push(Recursive(y,x));

		while( !recurse.empty() ) {
			Recursive recursive = recurse.front();
			recurse.pop();
			int ry = recursive.ry;
			int rx = recursive.rx;

			for( int line = -1; line <= 1; line++){
			for( int column = -1; column <= 1; column++){
				if ( clusters(ry,rx).neighbour(line,column).get() == -1 && input(ry,rx).neighbour(line,column).get().r != 0 ){
					clusters(ry, rx).neighbour(line, column) = lastCluster;
					recurse.push( Recursive(ry+line, rx+column) );
				}
			}
			}	
		}
	}

	CpuImage backImage = readBuffer(inputList[1]);
	for( unsigned int y = 0; y < input.height(); y++)
	for( unsigned int x = 0; x < input.width(); x++) {
		maximize( backImage(y,x) , clusters(y,x).get() );
		minimize( backImage(y,x) , clusters(y,x).get() );
	}

	{
		auto minI = min.begin();
		auto maxI = max.begin();
		for( ; minI != min.end(); ++minI,++maxI) {
			if( minI->first == 0 )
				continue;

			int clusterWidth = maxI->second.first-minI->second.first;
			int clusterHeight = maxI->second.second-minI->second.second;
			int area = clusterHeight*clusterWidth;
			if( area < 500 )
				continue;
			double ratio = double(clusterHeight)/double(clusterWidth);
		//	std::cout << minI->first << ": (" << clusterWidth << "," << clusterHeight << "), ratio:" << ratio;
		//	std::cout << ", area:" << area << std::endl;

			int color = ratio > 1.2 ? 3 : 2;
			Point line = minI->second;
			for( ; line.second <= maxI->second.second; ++line.second) {
				colorize( backImage(line.second,line.first), color );
			}
			for( ; line.first <= maxI->second.first; ++line.first) {
				colorize( backImage(line.second,line.first), color );
			}
			for( ; line.second >= minI->second.second; --line.second) {
				colorize( backImage(line.second,line.first), color );
			}
			for( ; line.first >= minI->second.first; --line.first) {
				colorize( backImage(line.second,line.first), color );
			}
		}
	}

	writeBuffer(backImage,output);
}
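
The detector above labels 8-connected foreground pixels with an explicit queue instead of recursion, which avoids stack overflows on large blobs. The same idea on a plain 2-D grid, stripped of the project-specific Matrix/neighbour wrappers (a sketch, not this operation's actual API):

#include <queue>
#include <utility>
#include <vector>

// labels 8-connected components of nonzero cells; background stays 0
std::vector<std::vector<int>> labelComponents( const std::vector<std::vector<int>>& img ) {
	const int h = (int)img.size();
	const int w = h ? (int)img[0].size() : 0;
	std::vector<std::vector<int>> label( h, std::vector<int>(w, -1) );
	int lastCluster = 0;
	for(int y = 0; y < h; y++)
	for(int x = 0; x < w; x++) {
		if( label[y][x] != -1 )
			continue;
		if( !img[y][x] ) {
			label[y][x] = 0;
			continue;
		}
		label[y][x] = ++lastCluster;
		std::queue<std::pair<int,int>> frontier;
		frontier.push( std::make_pair(y,x) );
		while( !frontier.empty() ) {
			int ry = frontier.front().first;
			int rx = frontier.front().second;
			frontier.pop();
			for( int line = -1; line <= 1; line++)
			for( int column = -1; column <= 1; column++) {
				int ny = ry + line, nx = rx + column;
				if( ny < 0 || ny >= h || nx < 0 || nx >= w ) // the bounds check the wrappers hide
					continue;
				if( label[ny][nx] == -1 && img[ny][nx] ) {
					label[ny][nx] = lastCluster;
					frontier.push( std::make_pair(ny,nx) );
				}
			}
		}
	}
	return label;
}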
Example #23
int spheroidsToSTL(const string& out, const shared_ptr<DemField>& dem, Real tol, const string& solid, int mask, bool append, bool clipCell, bool merge){
	if(tol==0 || isnan(tol)) throw std::runtime_error("tol must be non-zero.");
	#ifndef WOO_GTS
		if(merge) throw std::runtime_error("woo.triangulated.spheroidsToSTL: merge=True only possible in builds with the 'gts' feature.");
	#endif
	// first traversal to find reference radius
	auto particleOk=[&](const shared_ptr<Particle>&p){ return (mask==0 || (p->mask & mask)) && (p->shape->isA<Sphere>() || p->shape->isA<Ellipsoid>() || p->shape->isA<Capsule>()); };
	int numTri=0;

	if(tol<0){
		LOG_DEBUG("tolerance is negative, taken as relative to minimum radius.");
		Real minRad=Inf;
		for(const auto& p: *dem->particles){
			if(particleOk(p)) minRad=min(minRad,p->shape->equivRadius());
		}
		if(isinf(minRad) || isnan(minRad)) throw std::runtime_error("Minimum radius not found (relative tolerance specified); no matching particles?");
		tol=-minRad*tol;
		LOG_DEBUG("Minimum radius "<<minRad<<".");
	}
	LOG_DEBUG("Triangulation tolerance is "<<tol);
	
	std::ofstream stl(out,append?(std::ofstream::app|std::ofstream::binary):std::ofstream::binary); // binary better, anyway
	if(!stl.good()) throw std::runtime_error("Failed to open output file "+out+" for writing.");

	Scene* scene=dem->scene;
	if(!scene) throw std::logic_error("DEM field has not associated scene?");

	// periodicity, cache that for later use
	AlignedBox3r cell;

	/*
	wasteful memory-wise, but we need to store the whole triangulation in case *merge* is in effect,
	when it is only an intermediary result and will not be output as-is
	*/
	vector<vector<Vector3r>> ppts;
	vector<vector<Vector3i>> ttri;
	vector<Particle::id_t> iid;

	for(const auto& p: *dem->particles){
		if(!particleOk(p)) continue;
		const auto sphere=dynamic_cast<Sphere*>(p->shape.get());
		const auto ellipsoid=dynamic_cast<Ellipsoid*>(p->shape.get());
		const auto capsule=dynamic_cast<Capsule*>(p->shape.get());
		vector<Vector3r> pts;
		vector<Vector3i> tri;
		if(sphere || ellipsoid){
			Real r=sphere?sphere->radius:ellipsoid->semiAxes.minCoeff();
			// 1 is for icosahedron
			int tess=ceil(M_PI/(5*acos(1-tol/r)));
			LOG_DEBUG("Tesselation level for #"<<p->id<<": "<<tess);
			tess=max(tess,0);
			auto uSphTri(CompUtils::unitSphereTri20(/*0 for icosahedron*/max(tess-1,0)));
			const auto& uPts=std::get<0>(uSphTri); // unit sphere point coords
			pts.resize(uPts.size());
			const auto& node=(p->shape->nodes[0]);
			Vector3r scale=(sphere?sphere->radius*Vector3r::Ones():ellipsoid->semiAxes);
			for(size_t i=0; i<uPts.size(); i++){
				pts[i]=node->loc2glob(uPts[i].cwiseProduct(scale));
			}
			tri=std::get<1>(uSphTri); // this makes a copy, but we need our own for capsules
		}
		if(capsule){
			#ifdef WOO_VTK
				int subdiv=max(4.,ceil(M_PI/(acos(1-tol/capsule->radius))));
				std::tie(pts,tri)=VtkExport::triangulateCapsule(static_pointer_cast<Capsule>(p->shape),subdiv);
			#else
				throw std::runtime_error("Triangulation of capsules is (for internal and entirely fixable reasons) only available when compiled with the 'vtk' features.");
			#endif
		}
		// do not write out directly, store first for later
		ppts.push_back(pts);
		ttri.push_back(tri);
		LOG_TRACE("#"<<p->id<<" triangulated: "<<tri.size()<<","<<pts.size()<<" faces,vertices.");

		if(scene->isPeriodic){
			// make sure we have aabb, in skewed coords and such
			if(!p->shape->bound){
				// this is a bit ugly, but should do the trick; otherwise we would recompute all that ourselves here
				if(sphere) Bo1_Sphere_Aabb().go(p->shape);
				else if(ellipsoid) Bo1_Ellipsoid_Aabb().go(p->shape);
				else if(capsule) Bo1_Capsule_Aabb().go(p->shape);
			}
			assert(p->shape->bound);
			const AlignedBox3r& box(p->shape->bound->box);
			cell=AlignedBox3r(Vector3r::Zero(),scene->cell->getSize()); // possibly in skewed coords
			// central offset
			Vector3i off0;
			scene->cell->canonicalizePt(p->shape->nodes[0]->pos,off0); // computes off0
			Vector3i off; // offset from the original cell
			//cerr<<"#"<<p->id<<" at "<<p->shape->nodes[0]->pos.transpose()<<", off0="<<off0<<endl;
			for(off[0]=off0[0]-1; off[0]<=off0[0]+1; off[0]++) for(off[1]=off0[1]-1; off[1]<=off0[1]+1; off[1]++) for(off[2]=off0[2]-1; off[2]<=off0[2]+1; off[2]++){
				Vector3r dx=scene->cell->intrShiftPos(off);
				//cerr<<"  off="<<off.transpose()<<", dx="<<dx.transpose()<<endl;
				AlignedBox3r boxOff(box); boxOff.translate(dx);
				//cerr<<"  boxOff="<<boxOff.min()<<";"<<boxOff.max()<<" | cell="<<cell.min()<<";"<<cell.max()<<endl;
				if(boxOff.intersection(cell).isEmpty()) continue;
				// copy the entire triangulation, offset by dx
				vector<Vector3r> pts2(pts); for(auto& p: pts2) p+=dx;
				vector<Vector3i> tri2(tri); // same topology
				ppts.push_back(pts2);
				ttri.push_back(tri2);
				LOG_TRACE("  offset "<<off.transpose()<<": #"<<p->id<<": "<<tri2.size()<<","<<pts2.size()<<" faces,vertices.");
			}
		}
	}

	if(!merge){
		LOG_DEBUG("Will export (unmerged) "<<ppts.size()<<" particles to STL.");
		stl<<"solid "<<solid<<"\n";
		for(size_t i=0; i<ppts.size(); i++){
			const auto& pts(ppts[i]);
			const auto& tri(ttri[i]);
			LOG_TRACE("Exporting "<<i<<" with "<<tri.size()<<" faces.");
			for(const Vector3i& t: tri){
				Vector3r pp[]={pts[t[0]],pts[t[1]],pts[t[2]]};
				// skip triangles which are entirely out of the canonical periodic cell
				if(scene->isPeriodic && clipCell && (!scene->cell->isCanonical(pp[0]) && !scene->cell->isCanonical(pp[1]) && !scene->cell->isCanonical(pp[2]))) continue;
				numTri++;
				Vector3r n=(pp[1]-pp[0]).cross(pp[2]-pp[1]).normalized();
				stl<<"  facet normal "<<n.x()<<" "<<n.y()<<" "<<n.z()<<"\n";
				stl<<"    outer loop\n";
				for(auto p: {pp[0],pp[1],pp[2]}){
					stl<<"      vertex "<<p[0]<<" "<<p[1]<<" "<<p[2]<<"\n";
				}
				stl<<"    endloop\n";
				stl<<"  endfacet\n";
			}
		}
		stl<<"endsolid "<<solid<<"\n";
		stl.close();
		return numTri;
	}

#ifdef WOO_GTS
	/*****
	Convert all triangulation to GTS surfaces, find their distances, isolate connected components,
	merge these components incrementally and write to STL
	*****/

	// total number of points
	const size_t N(ppts.size());
	// bounds for collision detection
	struct Bound{
		Bound(Real _coord, int _id, bool _isMin): coord(_coord), id(_id), isMin(_isMin){};
		Bound(): coord(NaN), id(-1), isMin(false){}; // just for allocation
		Real coord;
		int id;
		bool isMin;
		bool operator<(const Bound& b) const { return coord<b.coord; }
	};
	vector<Bound> bounds[3]={vector<Bound>(2*N),vector<Bound>(2*N),vector<Bound>(2*N)};
	/* construct GTS surface objects; all objects must be deleted explicitly! */
	vector<GtsSurface*> ssurf(N);
	vector<vector<GtsVertex*>> vvert(N);
	vector<vector<GtsEdge*>> eedge(N);
	vector<AlignedBox3r> boxes(N);
	for(size_t i=0; i<N; i++){
		LOG_TRACE("** Creating GTS surface for #"<<i<<", with "<<ttri[i].size()<<" faces, "<<ppts[i].size()<<" vertices.");
		AlignedBox3r box;
		// new surface object
		ssurf[i]=gts_surface_new(gts_surface_class(),gts_face_class(),gts_edge_class(),gts_vertex_class());
		// copy over all vertices
		vvert[i].reserve(ppts[i].size());
		eedge[i].reserve(size_t(1.5*ttri[i].size())); // each triangle consumes 1.5 edges, for closed surfs
		for(size_t v=0; v<ppts[i].size(); v++){
			vvert[i].push_back(gts_vertex_new(gts_vertex_class(),ppts[i][v][0],ppts[i][v][1],ppts[i][v][2]));
			box.extend(ppts[i][v]);
		}
		// create faces, and create edges on the fly as needed
		std::map<std::pair<int,int>,int> edgeIndices;
		for(size_t t=0; t<ttri[i].size(); t++){
			//const Vector3i& t(ttri[i][t]);
			//LOG_TRACE("Face with vertices "<<ttri[i][t][0]<<","<<ttri[i][t][1]<<","<<ttri[i][t][2]);
			Vector3i eIxs;
			for(int a:{0,1,2}){
				int A(ttri[i][t][a]), B(ttri[i][t][(a+1)%3]);
				auto AB=std::make_pair(min(A,B),max(A,B));
				auto ABI=edgeIndices.find(AB);
				if(ABI==edgeIndices.end()){ // this edge not created yet
					edgeIndices[AB]=eedge[i].size(); // last index 
					eIxs[a]=eedge[i].size();
					//LOG_TRACE("  New edge #"<<eIxs[a]<<": "<<A<<"--"<<B<<" (length "<<(ppts[i][A]-ppts[i][B]).norm()<<")");
					eedge[i].push_back(gts_edge_new(gts_edge_class(),vvert[i][A],vvert[i][B]));
				} else {
					eIxs[a]=ABI->second;
					//LOG_TRACE("  Found edge #"<<ABI->second<<" for "<<A<<"--"<<B);
				}
			}
			//LOG_TRACE("  New face: edges "<<eIxs[0]<<"--"<<eIxs[1]<<"--"<<eIxs[2]);
			GtsFace* face=gts_face_new(gts_face_class(),eedge[i][eIxs[0]],eedge[i][eIxs[1]],eedge[i][eIxs[2]]);
			gts_surface_add_face(ssurf[i],face);
		}
		// make sure the surface is OK
		if(!gts_surface_is_orientable(ssurf[i])) LOG_ERROR("Surface of #"+to_string(iid[i])+" is not orientable (expect troubles).");
		if(!gts_surface_is_closed(ssurf[i])) LOG_ERROR("Surface of #"+to_string(iid[i])+" is not closed (expect troubles).");
		assert(!gts_surface_is_self_intersecting(ssurf[i]));
		// copy bounds
		LOG_TRACE("Setting bounds of surf #"<<i);
		boxes[i]=box;
		for(int ax:{0,1,2}){
			bounds[ax][2*i+0]=Bound(box.min()[ax],/*id*/i,/*isMin*/true);
			bounds[ax][2*i+1]=Bound(box.max()[ax],/*id*/i,/*isMin*/false);
		}
	}

	/*
	broad-phase collision detection between GTS surfaces
	only those will be probed with exact algorithms below and merged if needed
	*/
	for(int ax:{0,1,2}) std::sort(bounds[ax].begin(),bounds[ax].end());
	vector<Bound>& bb(bounds[0]); // run the search along x-axis, does not matter really
	std::list<std::pair<int,int>> int0; // broad-phase intersections
	for(size_t i=0; i<2*N; i++){
		if(!bb[i].isMin) continue; // only start with lower bound
		// go up to the upper bound, but handle overflow safely (no idea why it would happen here) as well
		for(size_t j=i+1; j<2*N && bb[j].id!=bb[i].id; j++){
			if(bb[j].isMin) continue; // this is handled by symmetry
			#if EIGEN_VERSION_AT_LEAST(3,2,5)
				if(!boxes[bb[i].id].intersects(boxes[bb[j].id])) continue; // no intersection along all axes
			#else
				// old, less elegant
				if(boxes[bb[i].id].intersection(boxes[bb[j].id]).isEmpty()) continue; 
			#endif
			int0.push_back(std::make_pair(min(bb[i].id,bb[j].id),max(bb[i].id,bb[j].id)));
			LOG_TRACE("Broad-phase collision "<<int0.back().first<<"+"<<int0.back().second);
		}
	}

	/*
	narrow-phase collision detection between GTS surface
	this must be done via gts_surface_inter_new, since gts_surface_distance always succeeds
	*/
	std::list<std::pair<int,int>> int1;
	for(const std::pair<int,int> ij: int0){
		LOG_TRACE("Testing narrow-phase collision "<<ij.first<<"+"<<ij.second);
		#if 0
			GtsRange gr1, gr2;
			gts_surface_distance(ssurf[ij.first],ssurf[ij.second],/*delta ??*/(gfloat).2,&gr1,&gr2);
			if(gr1.min>0 && gr2.min>0) continue;
			LOG_TRACE("  GTS reports collision "<<ij.first<<"+"<<ij.second<<" (min. distances "<<gr1.min<<", "<<gr2.min);
		#else
			GtsSurface *s1(ssurf[ij.first]), *s2(ssurf[ij.second]);
			GNode* t1=gts_bb_tree_surface(s1);
			GNode* t2=gts_bb_tree_surface(s2);
			GtsSurfaceInter* I=gts_surface_inter_new(gts_surface_inter_class(),s1,s2,t1,t2,/*is_open_1*/false,/*is_open_2*/false);
			GSList* l=gts_surface_intersection(s1,s2,t1,t2); // list of edges describing intersection
			int n1=g_slist_length(l);
			// extra check by looking at number of faces of the intersected surface
			#if 1
				GtsSurface* s12=gts_surface_new(gts_surface_class(),gts_face_class(),gts_edge_class(),gts_vertex_class());
				gts_surface_inter_boolean(I,s12,GTS_1_OUT_2);
				gts_surface_inter_boolean(I,s12,GTS_2_OUT_1);
				int n2=gts_surface_face_number(s12);
				gts_object_destroy(GTS_OBJECT(s12));
			#endif
			gts_bb_tree_destroy(t1,TRUE);
			gts_bb_tree_destroy(t2,TRUE);
			gts_object_destroy(GTS_OBJECT(I));
			g_slist_free(l);
			if(n1==0) continue;
			#if 1
				if(n2==0){ LOG_ERROR("n1=="<<n1<<" but n2==0 (no narrow-phase collision)"); continue; }
			#endif
			LOG_TRACE("  GTS reports collision "<<ij.first<<"+"<<ij.second<<" ("<<n<<" edges describe the intersection)");
		#endif
		int1.push_back(ij);
	}
	/*
	connected components on the graph: graph nodes are 0…(N-1), graph edges are in int1
	see http://stackoverflow.com/a/37195784/761090
	*/
	typedef boost::subgraph<boost::adjacency_list<boost::vecS,boost::vecS,boost::undirectedS,boost::property<boost::vertex_index_t,int>,boost::property<boost::edge_index_t,int>>> Graph;
	Graph graph(N);
	for(const auto& ij: int1) boost::add_edge(ij.first,ij.second,graph);
	vector<size_t> clusters(boost::num_vertices(graph));
	size_t numClusters=boost::connected_components(graph,clusters.data());
	for(size_t n=0; n<numClusters; n++){
		// beginning cluster #n
		// first, count how many surfaces are in this cluster; if 1, things are easier
		int numThisCluster=0; int cluster1st=-1;
		for(size_t i=0; i<N; i++){ if(clusters[i]!=n) continue; numThisCluster++; if(cluster1st<0) cluster1st=(int)i; }
		GtsSurface* clusterSurf=NULL;
		LOG_DEBUG("Cluster "<<n<<" has "<<numThisCluster<<" surfaces.");
		if(numThisCluster==1){
			clusterSurf=ssurf[cluster1st]; 
		} else {
			clusterSurf=ssurf[cluster1st]; // surface of the cluster itself
			LOG_TRACE("  Initial cluster surface from "<<cluster1st<<".");
			/* composed surface */
			for(size_t i=0; i<N; i++){
				if(clusters[i]!=n || ((int)i)==cluster1st) continue;
				LOG_TRACE("   Adding "<<i<<" to the cluster");
				// ssurf[i] now belongs to cluster #n
				// trees need to be rebuild every time anyway, since the merged surface keeps changing in every cycle
				//if(gts_surface_face_number(clusterSurf)==0) LOG_ERROR("clusterSurf has 0 faces.");
				//if(gts_surface_face_number(ssurf[i])==0) LOG_ERROR("Surface #"<<i<<" has 0 faces.");
				GNode* t1=gts_bb_tree_surface(clusterSurf);
				GNode* t2=gts_bb_tree_surface(ssurf[i]);
				GtsSurfaceInter* I=gts_surface_inter_new(gts_surface_inter_class(),clusterSurf,ssurf[i],t1,t2,/*is_open_1*/false,/*is_open_2*/false);
				GtsSurface* merged=gts_surface_new(gts_surface_class(),gts_face_class(),gts_edge_class(),gts_vertex_class());
				gts_surface_inter_boolean(I,merged,GTS_1_OUT_2);
				gts_surface_inter_boolean(I,merged,GTS_2_OUT_1);
				gts_object_destroy(GTS_OBJECT(I));
				gts_bb_tree_destroy(t1,TRUE);
				gts_bb_tree_destroy(t2,TRUE);
				if(gts_surface_face_number(merged)==0){
					LOG_ERROR("Cluster #"<<n<<": 0 faces after fusing #"<<i<<" (why?), adding #"<<i<<" separately!");
					// this will cause an extra 1-particle cluster to be created
					clusters[i]=numClusters;
					numClusters+=1;
				} else {
					// not from global vectors (cleanup at the end), explicit delete!
					if(clusterSurf!=ssurf[cluster1st]) gts_object_destroy(GTS_OBJECT(clusterSurf));
					clusterSurf=merged;
				}
			}
		}
		#if 0
			LOG_TRACE("  GTS surface cleanups...");
			pygts_vertex_cleanup(clusterSurf,.1*tol); // cleanup 10× smaller than tolerance
			pygts_edge_cleanup(clusterSurf);
			pygts_face_cleanup(clusterSurf);
		#endif
		LOG_TRACE("  STL: cluster "<<n<<" output");
		stl<<"solid "<<solid<<"_"<<n<<"\n";
		/* output cluster to STL here */
		_gts_face_to_stl_data data(stl,scene,clipCell,numTri);
		gts_surface_foreach_face(clusterSurf,(GtsFunc)_gts_face_to_stl,(gpointer)&data);
		stl<<"endsolid\n";
		if(clusterSurf!=ssurf[cluster1st]) gts_object_destroy(GTS_OBJECT(clusterSurf));
	}
	// this deallocates also edges and vertices
	for(size_t i=0; i<ssurf.size(); i++) gts_object_destroy(GTS_OBJECT(ssurf[i]));
	return numTri;
#endif /* WOO_GTS */
}
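
Before the exact (and expensive) GTS intersection tests, spheroidsToSTL prunes candidate pairs by sweeping sorted bounds along one axis and then confirming overlap on all three. The broad phase in isolation, over plain Eigen boxes (broadPhase is an illustrative name; as the version guard above notes, AlignedBox::intersects needs Eigen >= 3.2.5):

#include <algorithm>
#include <list>
#include <utility>
#include <vector>
#include <Eigen/Geometry>

std::list<std::pair<int,int>> broadPhase(const std::vector<Eigen::AlignedBox3d>& boxes){
	struct Bound { double coord; int id; bool isMin; };
	std::vector<Bound> bb;
	bb.reserve(2*boxes.size());
	for(int i=0; i<(int)boxes.size(); i++){
		bb.push_back({boxes[i].min().x(),i,true });
		bb.push_back({boxes[i].max().x(),i,false});
	}
	std::sort(bb.begin(),bb.end(),[](const Bound& a, const Bound& b){ return a.coord<b.coord; });
	std::list<std::pair<int,int>> overlaps;
	for(size_t i=0; i<bb.size(); i++){
		if(!bb[i].isMin) continue; // start sweeping at a lower bound
		// scan up to this box's own upper bound; pairs starting later are found by symmetry
		for(size_t j=i+1; j<bb.size() && bb[j].id!=bb[i].id; j++){
			if(bb[j].isMin) continue;
			if(!boxes[bb[i].id].intersects(boxes[bb[j].id])) continue; // confirm on all axes
			overlaps.push_back({std::min(bb[i].id,bb[j].id),std::max(bb[i].id,bb[j].id)});
		}
	}
	return overlaps;
}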
Example #24
// Zhu et al. "A Rank-Order Distance based Clustering Algorithm for Face Tagging", CVPR 2011
br::Clusters br::ClusterGallery(const QStringList &simmats, float aggressiveness, const QString &csv)
{
    qDebug("Clustering %d simmat(s)", simmats.size());

    // Read in gallery parts, keeping top neighbors of each template
    Neighborhood neighborhood = getNeighborhood(simmats);
    const int cutoff = neighborhood.first().size();
    const float threshold = 3*cutoff/4 * aggressiveness/5;

    // Initialize clusters
    Clusters clusters(neighborhood.size());
    for (int i=0; i<neighborhood.size(); i++)
        clusters[i].append(i);

    bool done = false;
    while (!done) {
        // nextClusterIDs[i] = j means that cluster i is set to merge into cluster j
        QVector<int> nextClusterIDs(neighborhood.size());
        for (int i=0; i<neighborhood.size(); i++) nextClusterIDs[i] = i;

        // For each cluster
        for (int clusterID=0; clusterID<neighborhood.size(); clusterID++) {
            const Neighbors &neighbors = neighborhood[clusterID];
            int nextClusterID = nextClusterIDs[clusterID];

            // Check its neighbors
            foreach (const Neighbor &neighbor, neighbors) {
                int neighborID = neighbor.first;
                int nextNeighborID = nextClusterIDs[neighborID];

                // Don't bother if they have already merged
                if (nextNeighborID == nextClusterID) continue;

                // Flag for merge if similar enough
                if (normalizedROD(neighborhood, clusterID, neighborID) < threshold) {
                    if (nextClusterID < nextNeighborID) nextClusterIDs[neighborID] = nextClusterID;
                    else                                nextClusterIDs[clusterID] = nextNeighborID;
                }
            }
        }

        // Transitive merge
        for (int i=0; i<neighborhood.size(); i++) {
            int nextClusterID = i;
            while (nextClusterID != nextClusterIDs[nextClusterID]) {
                assert(nextClusterIDs[nextClusterID] < nextClusterID);
                nextClusterID = nextClusterIDs[nextClusterID];
            }
            nextClusterIDs[i] = nextClusterID;
        }

        // Construct new clusters
        QHash<int, int> clusterIDLUT;
        QList<int> allClusterIDs = QSet<int>::fromList(nextClusterIDs.toList()).values();
        for (int i=0; i<neighborhood.size(); i++)
            clusterIDLUT[i] = allClusterIDs.indexOf(nextClusterIDs[i]);

        Clusters newClusters(allClusterIDs.size());
        Neighborhood newNeighborhood(allClusterIDs.size());

        for (int i=0; i<neighborhood.size(); i++) {
            int newID = clusterIDLUT[i];
            newClusters[newID].append(clusters[i]);
            newNeighborhood[newID].append(neighborhood[i]);
        }

        // Update indices and trim
        for (int i=0; i<newNeighborhood.size(); i++) {
            Neighbors &neighbors = newNeighborhood[i];
            int size = qMin(neighbors.size(),cutoff);
            std::partial_sort(neighbors.begin(), neighbors.begin()+size, neighbors.end(), compareNeighbors);
            for (int j=0; j<size; j++)
                neighbors[j].first = clusterIDLUT[neighbors[j].first];
            neighbors = neighbors.mid(0, cutoff);
        }

        // Update results
        done = true; //(newClusters.size() >= clusters.size());
        clusters = newClusters;
        neighborhood = newNeighborhood;
    }

    return clusters;
}
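
The transitive-merge step above collapses pointer chains: each cluster records the id it merges into, and chains are followed to their fixed point, relying on the invariant (checked by the assert) that links always point to strictly smaller ids. The same step on a plain vector (a sketch; transitiveMerge is an illustrative name):

#include <cassert>
#include <vector>

// next[i] == i marks a root; otherwise next[i] < i points toward the merge target
void transitiveMerge(std::vector<int>& next)
{
    for (int i=0; i<(int)next.size(); i++) {
        int root = i;
        while (root != next[root]) {
            assert(next[root] < root); // strictly decreasing, so the walk terminates
            root = next[root];
        }
        next[i] = root; // point directly at the root
    }
}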
Example #25
  void RegionGrowingMultiplePlaneSegmentation::segment(
    const sensor_msgs::PointCloud2::ConstPtr& msg,
    const sensor_msgs::PointCloud2::ConstPtr& normal_msg)
  {
    boost::mutex::scoped_lock lock(mutex_);
    if (!done_initialization_) {
      return;
    }
    vital_checker_->poke();
    {
      jsk_recognition_utils::ScopedWallDurationReporter r
        = timer_.reporter(pub_latest_time_, pub_average_time_);
      pcl::PointCloud<PointT>::Ptr cloud(new pcl::PointCloud<PointT>);
      pcl::PointCloud<pcl::Normal>::Ptr normal(new pcl::PointCloud<pcl::Normal>);
      pcl::fromROSMsg(*msg, *cloud);
      pcl::fromROSMsg(*normal_msg, *normal);
      // concatenate fields
      pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr
        all_cloud (new pcl::PointCloud<pcl::PointXYZRGBNormal>);
      pcl::concatenateFields(*cloud, *normal, *all_cloud);
      pcl::PointIndices::Ptr indices (new pcl::PointIndices);
      for (size_t i = 0; i < all_cloud->points.size(); i++) {
        if (!isnan(all_cloud->points[i].x) &&
            !isnan(all_cloud->points[i].y) &&
            !isnan(all_cloud->points[i].z) &&
            !isnan(all_cloud->points[i].normal_x) &&
            !isnan(all_cloud->points[i].normal_y) &&
            !isnan(all_cloud->points[i].normal_z) &&
            !isnan(all_cloud->points[i].curvature)) {
          if (all_cloud->points[i].curvature < max_curvature_) {
            indices->indices.push_back(i);
          }
        }
      }
      pcl::ConditionalEuclideanClustering<pcl::PointXYZRGBNormal> cec (true);
    
      // vector of pcl::PointIndices
      pcl::IndicesClustersPtr clusters (new pcl::IndicesClusters);
      cec.setInputCloud (all_cloud);
      cec.setIndices(indices);
      cec.setClusterTolerance(cluster_tolerance_);
      cec.setMinClusterSize(min_size_);
      //cec.setMaxClusterSize(max_size_);
      {
        boost::mutex::scoped_lock lock2(global_custom_condigion_function_mutex);
        setCondifionFunctionParameter(angular_threshold_, distance_threshold_);
        cec.setConditionFunction(
          &RegionGrowingMultiplePlaneSegmentation::regionGrowingFunction);
        //ros::Time before = ros::Time::now();
        cec.segment (*clusters);
        // ros::Time end = ros::Time::now();
        // ROS_INFO("segment took %f sec", (before - end).toSec());
      }
      // Publish raw cluster information 
      // pcl::IndicesClusters is typedefed as std::vector<pcl::PointIndices>
      jsk_recognition_msgs::ClusterPointIndices ros_clustering_result;
      ros_clustering_result.cluster_indices
        = pcl_conversions::convertToROSPointIndices(*clusters, msg->header);
      ros_clustering_result.header = msg->header;
      pub_clustering_result_.publish(ros_clustering_result);

      // estimate planes
      std::vector<pcl::PointIndices::Ptr> all_inliers;
      std::vector<pcl::ModelCoefficients::Ptr> all_coefficients;
      jsk_recognition_msgs::PolygonArray ros_polygon;
      ros_polygon.header = msg->header;
      for (size_t i = 0; i < clusters->size(); i++) {
        pcl::PointIndices::Ptr plane_inliers(new pcl::PointIndices);
        pcl::ModelCoefficients::Ptr plane_coefficients(new pcl::ModelCoefficients);
        pcl::PointIndices::Ptr cluster = boost::make_shared<pcl::PointIndices>((*clusters)[i]);
        ransacEstimation(cloud, cluster,
                         *plane_inliers, *plane_coefficients);
        if (plane_inliers->indices.size() > 0) {
          jsk_recognition_utils::ConvexPolygon::Ptr convex
            = jsk_recognition_utils::convexFromCoefficientsAndInliers<pcl::PointXYZRGB>(
              cloud, plane_inliers, plane_coefficients);
          if (convex) {
            if (min_area_ > convex->area() || convex->area() > max_area_) {
              continue;
            }
            {
              // check direction consistency of coefficients and convex
              Eigen::Vector3f coefficient_normal(plane_coefficients->values[0],
                                                 plane_coefficients->values[1],
                                                 plane_coefficients->values[2]);
              if (convex->getNormalFromVertices().dot(coefficient_normal) < 0) {
                convex = boost::make_shared<jsk_recognition_utils::ConvexPolygon>(convex->flipConvex());
              }
            }
            // Normal should direct to origin
            {
              Eigen::Vector3f p = convex->getPointOnPlane();
              Eigen::Vector3f n = convex->getNormal();
              if (p.dot(n) > 0) {
                convex = boost::make_shared<jsk_recognition_utils::ConvexPolygon>(convex->flipConvex());
                Eigen::Vector3f new_normal = convex->getNormal();
                plane_coefficients->values[0] = new_normal[0];
                plane_coefficients->values[1] = new_normal[1];
                plane_coefficients->values[2] = new_normal[2];
                plane_coefficients->values[3] = convex->getD();
              }
            }
          
            geometry_msgs::PolygonStamped polygon;
            polygon.polygon = convex->toROSMsg();
            polygon.header = msg->header;
            ros_polygon.polygons.push_back(polygon);
            all_inliers.push_back(plane_inliers);
            all_coefficients.push_back(plane_coefficients);
          }
        }
      }
    
      jsk_recognition_msgs::ClusterPointIndices ros_indices;
      ros_indices.cluster_indices =
        pcl_conversions::convertToROSPointIndices(all_inliers, msg->header);
      ros_indices.header = msg->header;
      pub_inliers_.publish(ros_indices);
      jsk_recognition_msgs::ModelCoefficientsArray ros_coefficients;
      ros_coefficients.header = msg->header;
      ros_coefficients.coefficients =
        pcl_conversions::convertToROSModelCoefficients(
          all_coefficients, msg->header);
      pub_coefficients_.publish(ros_coefficients);
      pub_polygons_.publish(ros_polygon);
    }
  }
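
pcl::ConditionalEuclideanClustering grows a cluster only when a user-supplied predicate accepts a candidate point pair. A sketch of a plane-growing condition in the spirit of regionGrowingFunction above, with hard-coded thresholds standing in for the mutex-guarded parameters that setCondifionFunctionParameter installs (the real jsk implementation differs in how it reads its thresholds):

#include <algorithm>
#include <cmath>
#include <pcl/point_types.h>

bool planeRegionCondition(const pcl::PointXYZRGBNormal& a,
                          const pcl::PointXYZRGBNormal& b,
                          float /*squared_distance*/)
{
  const float angular_threshold = 0.05f;  // [rad], assumed value
  const float distance_threshold = 0.01f; // [m], assumed value
  Eigen::Vector3f na = a.getNormalVector3fMap();
  Eigen::Vector3f nb = b.getNormalVector3fMap();
  // the two normals must be nearly parallel (sign ignored)
  if (std::acos(std::min(1.0f, std::fabs(na.dot(nb)))) > angular_threshold) {
    return false;
  }
  // and b must lie close to the plane through a
  Eigen::Vector3f pa = a.getVector3fMap();
  Eigen::Vector3f pb = b.getVector3fMap();
  return std::fabs(na.dot(pb - pa)) < distance_threshold;
}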
Example #26
KMeansMethodResult KMeansMethod::Clustering(const std::vector<std::vector<double>> &inputs, int dim, int countOfCluster) {
    int size = inputs.size();
    std::vector<std::vector<double>> clusters(countOfCluster, std::vector<double>(dim, 0));

    std::cout << "- Clustering -" << std::endl;

    // Pick the initial cluster assignments with k-means++
    std::vector<int> clusterOfInputs = getInitialClusterOfInputs(inputs, dim, countOfCluster);
    std::cout << "initialized" << std::endl;

    while (true) {
        // Compute the center of each cluster
        for (int i = 0; i < countOfCluster; i++) {
            std::vector<double> center(dim, 0);
            int count = 0;
            // Find the inputs assigned to cluster i
            for (int j = 0; j < size; j++) {
                if (clusterOfInputs[j] == i) {
                    // Accumulate the sum for the cluster center
                    for (int k = 0; k < dim; k++) {
                        center[k] += inputs[j][k];
                    }
                    // Count the members
                    count++;
                }
            }

            if (count > 0) {
                // Divide the sum by the member count to get the center
                for (int j = 0; j < dim; j++) {
                    center[j] /= count;
                }
                // Store the new cluster center
                for (int j = 0; j < dim; j++) {
                    clusters[i][j] = center[j];
                }
            }
        }

        bool isChanged = false;

        // Assign each input to its nearest cluster
        for (int i = 0; i < size; i++) {
            std::vector<double> input = inputs[i];

            double minDistance = std::numeric_limits<double>::max();
            int minIndex = 0;
            // Find the nearest cluster
            for (int j = 0; j < countOfCluster; j++) {
                double distance = 0;
                std::vector<double> cluster = clusters[j];
                for (int k = 0; k < dim; k++) {
                    distance += pow(input[k] - cluster[k], 2.0);
                }
                if (distance < minDistance) {
                    minDistance = distance;
                    minIndex = j;
                }
            }

            // Reassign the input if a closer cluster was found
            if (clusterOfInputs[i] != minIndex) {
                clusterOfInputs[i] = minIndex;
                isChanged = true;
            }
        }

        // Stop once no assignment changed
        if (isChanged == false) {
            break;
        }
    }

    // Remove clusters that have no members
    for (int i = countOfCluster - 1; i >= 0; i--) {
        int count = 0;
        for (int j = 0; j < size; j++) {
            if (clusterOfInputs[j] == i) {
                count++;
            }
        }
        if (count == 0) {
            clusters.erase(clusters.begin() + i);
            for (int j = 0; j < size; j++) {
                if (clusterOfInputs[j] > i) {
                    clusterOfInputs[j] -= 1;
                }
            }
        }
    }

    std::cout << "clusters.size() -> " << clusters.size() << std::endl;

    KMeansMethodResult result;
    result.clusters = clusters;
    result.indexOfCluster = clusterOfInputs;
    return result;
}
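
A minimal driver for the routine above: cluster random 2-D points into at most three clusters and print the surviving centers (this assumes Clustering can be called on a default-constructed KMeansMethod, as its signature suggests):

#include <iostream>
#include <random>
#include <vector>

int main() {
    std::mt19937 rng(42);
    std::uniform_real_distribution<double> u(0.0, 1.0);
    std::vector<std::vector<double>> inputs;
    for (int i = 0; i < 300; i++) {
        inputs.push_back({u(rng), u(rng)});
    }
    KMeansMethod kmeans;
    KMeansMethodResult result = kmeans.Clustering(inputs, /*dim=*/2, /*countOfCluster=*/3);
    for (size_t i = 0; i < result.clusters.size(); i++) {
        std::cout << "center " << i << ": ("
                  << result.clusters[i][0] << ", " << result.clusters[i][1] << ")" << std::endl;
    }
    return 0;
}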
Example #27
  void CompleteLinkage::operator()(DistanceMatrix<float> & original_distance, std::vector<BinaryTreeNode> & cluster_tree, const float threshold /*=1*/) const
  {
    // attention: the clustering process operates on indices pointing
    // to elements in the input vector and distances in the input matrix

    // input MUST have >= 2 elements!
    if (original_distance.dimensionsize() < 2)
    {
      throw ClusterFunctor::InsufficientInput(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION, "Distance matrix to start from only contains one element");
    }

    std::vector<std::set<Size> > clusters(original_distance.dimensionsize());
    for (Size i = 0; i < original_distance.dimensionsize(); ++i)
    {
      clusters[i].insert(i);
    }

    cluster_tree.clear();
    cluster_tree.reserve(original_distance.dimensionsize() - 1);

    // Initial minimum-distance pair
    original_distance.updateMinElement();
    std::pair<Size, Size> min = original_distance.getMinElementCoordinates();

    Size overall_cluster_steps(original_distance.dimensionsize());
    startProgress(0, original_distance.dimensionsize(), "clustering data");

    while (original_distance(min.first, min.second) < threshold)
    {
      //grow the tree
      cluster_tree.push_back(BinaryTreeNode(*(clusters[min.second].begin()), *(clusters[min.first].begin()), original_distance(min.first, min.second)));
      if (cluster_tree.back().left_child > cluster_tree.back().right_child)
      {
        std::swap(cluster_tree.back().left_child, cluster_tree.back().right_child);
      }

      if (original_distance.dimensionsize() > 2)
      {
        //pick minimum-distance pair i,j and merge them

        //pushback elements of second to first (and then erase second)
        clusters[min.second].insert(clusters[min.first].begin(), clusters[min.first].end());
        // erase first one
        clusters.erase(clusters.begin() + min.first);

        //update original_distance matrix
        //complete linkage: new distance between clusters is the maximum distance between elements of each cluster
        //lance-williams update for d((i,j),k): 0.5* d(i,k) + 0.5* d(j,k) + 0.5* |d(i,k)-d(j,k)|
        for (Size k = 0; k < min.second; ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(min.second, k, (0.5f * dik + 0.5f * djk + 0.5f * std::fabs(dik - djk)));
        }
        for (Size k = min.second + 1; k < original_distance.dimensionsize(); ++k)
        {
          float dik = original_distance.getValue(min.first, k);
          float djk = original_distance.getValue(min.second, k);
          original_distance.setValueQuick(k, min.second, (0.5f * dik + 0.5f * djk + 0.5f * std::fabs(dik - djk)));
        }

        //reduce
        original_distance.reduce(min.first);

        //update minimum-distance pair
        original_distance.updateMinElement();

        //get new min-pair
        min = original_distance.getMinElementCoordinates();
      }
      else
      {
        break;
      }
      setProgress(overall_cluster_steps - original_distance.dimensionsize());

      //repeat until only two clusters remain or the threshold is exceeded; the last step skips matrix operations
    }
    //fill tree with dummy nodes
    Size sad(*clusters.front().begin());
    for (Size i = 1; i < clusters.size() && (cluster_tree.size() < cluster_tree.capacity()); ++i)
    {
      cluster_tree.push_back(BinaryTreeNode(sad, *clusters[i].begin(), -1.0));
    }
    //~ while(cluster_tree.size() < cluster_tree.capacity())
    //~ {
    //~ cluster_tree.push_back(BinaryTreeNode(0,1,-1.0));
    //~ }

    endProgress();
  }
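
The Lance-Williams coefficients used above are an algebraic form of the maximum: 0.5*d(i,k) + 0.5*d(j,k) + 0.5*|d(i,k) - d(j,k)| = max(d(i,k), d(j,k)), which is exactly the complete-linkage distance between the merged cluster and cluster k. A quick standalone check:

#include <algorithm>
#include <cassert>
#include <cmath>

int main()
{
    const float pairs[][2] = { {1.0f, 4.0f}, {4.0f, 1.0f}, {2.5f, 2.5f}, {0.0f, 7.0f} };
    for (const auto& p : pairs)
    {
        float dik = p[0], djk = p[1];
        float lw = 0.5f * dik + 0.5f * djk + 0.5f * std::fabs(dik - djk);
        assert(lw == std::max(dik, djk)); // identical in exact arithmetic
    }
    return 0;
}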
Example #28
    /** Compute the pose of the table plane
     * @param inputs
     * @param outputs
     * @return
     */
    int
    process(const tendrils& inputs, const tendrils& outputs)
    {
      std::vector<tabletop_object_detector::TabletopObjectRecognizer<pcl::PointXYZ>::TabletopResult > results;

      // Process each table
      pose_results_->clear();

      std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters_merged;
      // Map to store the transformation for each cluster (table_index)
      std::map<pcl::PointCloud<pcl::PointXYZ>::Ptr, size_t> cluster_table;

      std::vector<cv::Vec3f> translations(clusters_->size());
      std::vector<cv::Matx33f> rotations(clusters_->size());
      for (size_t table_index = 0; table_index < clusters_->size(); ++table_index)
      {
        getPlaneTransform((*table_coefficients_)[table_index], rotations[table_index], translations[table_index]);

        // Make the clusters be in the table frame
        size_t n_clusters = (*clusters_)[table_index].size();
        std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters(n_clusters);

        cv::Matx33f Rinv = rotations[table_index].t();
        cv::Vec3f Tinv = -Rinv*translations[table_index];

        for (size_t cluster_index = 0; cluster_index < n_clusters; ++cluster_index)
        {
          clusters[cluster_index] = pcl::PointCloud<pcl::PointXYZ>::Ptr(new pcl::PointCloud<pcl::PointXYZ>());
          for(size_t i = 0; i < (*clusters_)[table_index][cluster_index].size(); ++i)
          {
            cv::Vec3f res = Rinv*(*clusters_)[table_index][cluster_index][i] + Tinv;
            clusters[cluster_index]->push_back(pcl::PointXYZ(res[0], res[1], res[2]));
          }
          cluster_table.insert(std::pair<pcl::PointCloud<pcl::PointXYZ>::Ptr, size_t>(clusters[cluster_index], table_index));
        }

        clusters_merged.insert(clusters_merged.end(), clusters.begin(), clusters.end());
      }

      object_recognizer_.objectDetection(clusters_merged, confidence_cutoff_, perform_fit_merge_, results);

      for (size_t i = 0; i < results.size(); ++i)
      {
        const tabletop_object_detector::TabletopObjectRecognizer<pcl::PointXYZ>::TabletopResult & result = results[i];
        const size_t table_index = cluster_table[result.cloud_];

        PoseResult pose_result;

        // Add the object id
        std::stringstream ss;
        ss << result.object_id_;
        pose_result.set_object_id(db_, ss.str());

        // Add the pose
        const geometry_msgs::Pose &pose = result.pose_;
        cv::Vec3f T(pose.position.x, pose.position.y, pose.position.z);
        Eigen::Quaternionf quat(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z);

        cv::Vec3f new_T = rotations[table_index] * T + translations[table_index];
        pose_result.set_T(cv::Mat(new_T));

        pose_result.set_R(quat);
        cv::Mat R = cv::Mat(rotations[table_index] * pose_result.R<cv::Matx33f>());
        pose_result.set_R(R);
        pose_result.set_confidence(result.confidence_);

        // Add the cluster of points
        std::vector<sensor_msgs::PointCloud2ConstPtr> ros_clouds (1);
        sensor_msgs::PointCloud2Ptr cluster_cloud (new sensor_msgs::PointCloud2());

#if PCL_VERSION_COMPARE(>=,1,7,0)
        ::pcl::PCLPointCloud2 pcd_tmp;
        ::pcl::toPCLPointCloud2(*result.cloud_, pcd_tmp);
        pcl_conversions::fromPCL(pcd_tmp, *cluster_cloud);
#else
        pcl::toROSMsg(*result.cloud_, *cluster_cloud);
#endif
        ros_clouds[0] = cluster_cloud;
        pose_result.set_clouds(ros_clouds);

        pose_results_->push_back(pose_result);
      }
      return ecto::OK;
    }
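
Because the recognizer runs in each table's local frame, every resulting pose has to be composed back into the camera frame with the stored table transform: under a rigid transform (R, t), an object pose (R_obj, T_obj) becomes (R * R_obj, R * T_obj + t), which is what the new_T and set_R lines above compute. The composition in isolation (composePose is an illustrative name):

#include <opencv2/core/core.hpp>

// Compose an object pose expressed in the table frame into the camera frame
void composePose(const cv::Matx33f& R_table, const cv::Vec3f& t_table,
                 cv::Matx33f& R_obj, cv::Vec3f& T_obj)
{
  T_obj = R_table * T_obj + t_table; // rotate, then translate the position
  R_obj = R_table * R_obj;           // rotate the orientation
}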
Example #29
File: MinDist.cpp Project: ios4u/grt
bool MinDist::loadLegacyModelFromFile( fstream &file ){
    
    string word;
    
    file >> word;
    if(word != "NumFeatures:"){
        errorLog << "loadModelFromFile(string filename) - Could not find NumFeatures " << endl;
        return false;
    }
    file >> numInputDimensions;
    
    file >> word;
    if(word != "NumClasses:"){
        errorLog << "loadModelFromFile(string filename) - Could not find NumClasses" << endl;
        return false;
    }
    file >> numClasses;
    
    file >> word;
    if(word != "UseScaling:"){
        errorLog << "loadModelFromFile(string filename) - Could not find UseScaling" << endl;
        return false;
    }
    file >> useScaling;
    
    file >> word;
    if(word != "UseNullRejection:"){
        errorLog << "loadModelFromFile(string filename) - Could not find UseNullRejection" << endl;
        return false;
    }
    file >> useNullRejection;
    
    ///Read the ranges if needed
    if( useScaling ){
        //Resize the ranges buffer
        ranges.resize(numInputDimensions);
        
        file >> word;
        if(word != "Ranges:"){
            errorLog << "loadModelFromFile(string filename) - Could not find the Ranges" << endl;
            return false;
        }
        for(UINT n=0; n<ranges.size(); n++){
            file >> ranges[n].minValue;
            file >> ranges[n].maxValue;
        }
    }
    
    //Resize the buffer
    models.resize(numClasses);
    classLabels.resize(numClasses);
    
    //Load each of the K models
    for(UINT k=0; k<numClasses; k++){
        double rejectionThreshold;
        double gamma;
        double trainingSigma;
        double trainingMu;
        
        file >> word;
        if( word != "ClassLabel:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the class label for class " << k << endl;
            return false;
        }
        file >> classLabels[k];
        
        file >> word;
        if( word != "NumClusters:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the NumClusters for class " << k << endl;
            return false;
        }
        file >> numClusters;
        
        file >> word;
        if( word != "RejectionThreshold:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the RejectionThreshold for class " << k << endl;
            return false;
        }
        file >> rejectionThreshold;
        
        file >> word;
        if( word != "Gamma:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the Gamma for class " << k << endl;
            return false;
        }
        file >> gamma;
        
        file >> word;
        if( word != "TrainingMu:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the TrainingMu for class " << k << endl;
            return false;
        }
        file >> trainingMu;
        
        file >> word;
        if( word != "TrainingSigma:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the TrainingSigma for class " << k << endl;
            return false;
        }
        file >> trainingSigma;
        
        file >> word;
        if( word != "ClusterData:" ){
            errorLog << "loadModelFromFile(string filename) - Could not load the ClusterData for class " << k << endl;
            return false;
        }
        
        //Load the cluster data
        MatrixDouble clusters(numClusters,numInputDimensions);
        for(UINT i=0; i<numClusters; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                file >> clusters[i][j];
            }
        }
        
        models[k].setClassLabel( classLabels[k] );
        models[k].setClusters( clusters );
        models[k].setGamma( gamma );
        models[k].setRejectionThreshold( rejectionThreshold );
        models[k].setTrainingSigma( trainingSigma );
        models[k].setTrainingMu( trainingMu );
    }
    
    //Recompute the null rejection thresholds
    recomputeNullRejectionThresholds();
    
    //Resize the prediction results to make sure it is setup for realtime prediction
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
    bestDistance = DEFAULT_NULL_DISTANCE_VALUE;
    classLikelihoods.resize(numClasses,DEFAULT_NULL_LIKELIHOOD_VALUE);
    classDistances.resize(numClasses,DEFAULT_NULL_DISTANCE_VALUE);
    
    trained = true;
    
    return true;
}
Example #30
void HAC::train(Matrix& features, Matrix& labels)
{

	_instanceToCluster = std::vector<size_t>(features.rows()); 
	_adjMatrix = std::vector<std::vector<double> >(features.rows(), std::vector<double>(features.rows(), 0));
	for(size_t i = 0; i < features.rows(); i++)
	{
		_instanceToCluster[i] = i;
		for(size_t j = 0; j < features.rows(); j++)
		{
			double dist = 0;
			if(i != j)
				dist = ClusteringUtils::dist(features, i, j);
			_adjMatrix[i][j] = dist;
		}
	}

	size_t numClusters = features.rows();
	while(numClusters > _numClusters)
	{
		size_t cluster0 = 0;
		size_t cluster1 = 0;

#if SINGLE_LINK
		// find the min-dist (single link)
		double minDist = DBL_MAX;
		for(size_t i = 0; i < features.rows(); i++)
		{
			for(size_t j = i + 1; j < features.rows(); j++)
			{
				if(_adjMatrix[i][j] < minDist && _instanceToCluster[i] != _instanceToCluster[j])
				{
					minDist = _adjMatrix[i][j];
					cluster0 = _instanceToCluster[i];
					cluster1 = _instanceToCluster[j];
				}
			}
		}
		if(_logOn)
		{
			std::cout << "Merging clusters: " << cluster0 << " and " << cluster1 << std::endl;
			std::cout << "Distance: " << minDist << std::endl;
		}
#else // complete link (find the minimum largest distance between clusters)
		// use 0.0, not DBL_MIN (the smallest *positive* double), as the identity for a running maximum of distances
		std::vector<std::vector<double> > clusterDistances(numClusters, std::vector<double>(numClusters, 0.0));
		for(size_t i = 0; i < features.rows(); i++)
		{
			for(size_t j = i + 1; j < features.rows(); j++)
			{
				if(_instanceToCluster[i] == _instanceToCluster[j])
					continue;

				size_t lowClusterIndex = std::min(_instanceToCluster[i],_instanceToCluster[j]);
				size_t highClusterIndex = std::max(_instanceToCluster[i],_instanceToCluster[j]);

				if(_adjMatrix[i][j] > clusterDistances[lowClusterIndex][highClusterIndex])
					clusterDistances[lowClusterIndex][highClusterIndex] = _adjMatrix[i][j];
			}
		}

		double minLargestDist = DBL_MAX;
		for(size_t i = 0; i < clusterDistances.size(); i++)
		{
			for(size_t j = i + 1; j < clusterDistances.size(); j++)
			{
				if(clusterDistances[i][j] < minLargestDist)
				{
					minLargestDist = clusterDistances[i][j];
					cluster0 = i; 
					cluster1 = j; 
				}
			}
		}

		if(_logOn)
		{
			std::cout << "Merging clusters: " << cluster0 << " and " << cluster1 << std::endl;
			std::cout << "Distance: " << minLargestDist << std::endl;
		}
#endif

		// group clusters
		assert(cluster0 != cluster1);
		for(size_t i = 0; i < _instanceToCluster.size(); i++)
		{
			if(_instanceToCluster[i] == cluster1)
				_instanceToCluster[i] = cluster0;
		}
		if(_logOn)
			std::cout << std::endl;
		
		normalizeLabels();
		numClusters -= 1;
	}


	std::vector<Matrix> clusters(_numClusters, features);
	for(size_t i = 0; i < _instanceToCluster.size(); i++)
	{
		size_t clusterIndex = _instanceToCluster[i];
		clusters[clusterIndex].copyRow(features[i]);
	}

	// calculate centroids
	std::vector<std::vector<double> > clusterMeans(_numClusters, std::vector<double>(features.cols()));
	for(size_t i = 0; i < _numClusters; i++)
	{
		for(size_t j = 0; j < features.cols(); j++)
		{
			if(features.valueCount(j) == 0) // continuous data
				clusterMeans[i][j] = clusters[i].columnMean(j);
			else
				clusterMeans[i][j] = clusters[i].mostCommonValue(j);
		}

		if(_logOn)
		{
			std::cout << "Centroid " << i << ": ";
			std::cout << getInstanceString(clusterMeans[i], features) << std::endl;
		}
	}

	// calculate SSE
	double sse = 0.0;
	for(size_t i = 0; i < _instanceToCluster.size(); i++)
	{
		double d = ClusteringUtils::dist(features, i, clusterMeans[_instanceToCluster[i]]);
		sse += d * d;	
	}

	if(_logOn)
	{
		std::cout << "Total sse: " << sse << std::endl;
	}

	if(_logOn)
	{
		std::vector<Matrix> clusters(_numClusters, features); 
		for(size_t i = 0; i < _instanceToCluster.size(); i++)
			clusters[_instanceToCluster[i]].copyRow(features[i]);

		ClusteringUtils::outputClusterStats(clusters, std::cout);
	}
}
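
The SSE reported by train() is the standard within-cluster sum of squared distances to the centroids. The same quantity over plain vectors, without the toolkit's Matrix and ClusteringUtils types (a sketch; the toolkit's dist() also handles nominal attributes, which this omits):

#include <cstddef>
#include <vector>

double sumSquaredError(const std::vector<std::vector<double>>& points,
                       const std::vector<std::size_t>& assignment,
                       const std::vector<std::vector<double>>& centroids)
{
	double sse = 0.0;
	for(std::size_t i = 0; i < points.size(); i++)
	{
		const std::vector<double>& c = centroids[assignment[i]];
		double d2 = 0.0;
		for(std::size_t j = 0; j < points[i].size(); j++)
		{
			double diff = points[i][j] - c[j];
			d2 += diff * diff; // squared Euclidean distance, accumulated directly
		}
		sse += d2;
	}
	return sse;
}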