/**
 * @function process
 */
void process( pcl::PointCloud<pcl::PointXYZRGBA>::Ptr _cloud ) {

  // Segment
  TabletopSegmentor<pcl::PointXYZRGBA> tts;
  tts.processCloud( _cloud );
  mTableCoeffs = tts.getTableCoeffs();
  int n = tts.getNumClusters();

  // Set segmented variables (store each cluster and give it a random display color)
  gClusters.resize(0);
  gColors.resize(n);

  for( int i = 0; i < n; ++i ) {
    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cluster( new pcl::PointCloud<pcl::PointXYZRGBA>() );
    *cluster = tts.getCluster(i);
    gClusters.push_back( cluster );

    Eigen::Vector3i def;
    def(0) = rand() % 255;
    def(1) = rand() % 255;
    def(2) = rand() % 255;
    gColors[i] << (double)def(0) / 255.0, (double)def(1) / 255.0, (double)def(2) / 255.0;
  }
}
int main(int argc, char* argv[]) {
  // sort-mode is optional, so only the first four arguments are required
  if (argc < 5) {
    std::cout << "Usage: " << argv[0] << " in out-image out-csv threshold [sort-mode]\n";
    std::cout << "  sort-mode: sort-by-color, sort-by-count\n";
    return 0;
  }

  cv::Mat Z;
  std::string f = argv[1];
  if (fs::is_directory(f)) {
    Z = loadThumbnails(f);
  } else if (fs::path(f).extension() == ".png" || fs::path(f).extension() == ".jpg") {
    Z = loadImage(f);
  } else {
    Z = loadVideo(f);
  }

  std::cout << "Clustering...\n";
  std::vector<Cluster> centers;
  // An integral threshold selects a fixed cluster count; a decimal one selects a distance threshold.
  if (std::string(argv[4]).find('.') == std::string::npos) {
    cluster(Z, atoi(argv[4]), centers);
  } else {
    cluster(Z, (float)atof(argv[4]), centers);
  }

  std::string sort_mode = argc >= 6 ? argv[5] : "sort-by-count";
  if (sort_mode == "sort-by-color") {
    sortCentersByColor(centers);
  } else {
    sortCentersByCount(centers);
  }

  exportCenters(centers, argv[2], argv[3]);
}
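A plausible shape for the integer-count overload of cluster() above is k-means over one pixel sample per row. The sketch below is a guess at that helper, not the program's actual code: it assumes Z is a 3-column CV_32F matrix, and ClusterStat is an illustrative stand-in for the program's Cluster type.

// Hedged sketch of the integer-count cluster() overload via cv::kmeans.
// ClusterStat and clusterByCount are hypothetical names, not this project's API.
#include <opencv2/core.hpp>
#include <vector>

struct ClusterStat { cv::Vec3f center; int count; };

static void clusterByCount(const cv::Mat& Z, int k, std::vector<ClusterStat>& centers) {
    cv::Mat labels, centroids;
    cv::kmeans(Z, k, labels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
               3 /*attempts*/, cv::KMEANS_PP_CENTERS, centroids);
    centers.assign(k, ClusterStat{cv::Vec3f(), 0});
    for (int i = 0; i < k; ++i)
        centers[i].center = cv::Vec3f(centroids.at<float>(i, 0),
                                      centroids.at<float>(i, 1),
                                      centroids.at<float>(i, 2));
    for (int i = 0; i < labels.rows; ++i)          // tally membership per center
        centers[labels.at<int>(i, 0)].count++;
}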
TEST(podio, equality) {
  auto cluster = ExampleCluster();
  auto rel = ExampleWithOneRelation();
  rel.cluster(cluster);
  auto returned_cluster = rel.cluster();
  EXPECT_EQ(cluster, returned_cluster);
  EXPECT_EQ(returned_cluster, cluster);
}
char *ban_it(char *nick, char *user, char *host, char *ip)
{
	static char banstr[BIG_BUFFER_SIZE / 4 + 1];
	char *t = user;
	char *t1 = user;
	char *tmp;

	*banstr = 0;
	/* keep at most the last 9 characters of the username */
	while (strlen(t1) > 9)
		t1++;
	t1 = clear_server_flags(t1);

	switch (defban)
	{
	case 7: /* IP */
		if (ip)
		{
			snprintf(banstr, sizeof banstr, "*!*@%s", cluster(ip));
			break;
		}
		/* no IP available: fall through to "Better" */
	case 2: /* Better */
		snprintf(banstr, sizeof banstr, "*!*%s@%s", t1, cluster(host));
		break;
	case 3: /* Host */
		snprintf(banstr, sizeof banstr, "*!*@%s", host);
		break;
	case 4: /* Domain */
		tmp = strrchr(host, '.');
		if (tmp)
			snprintf(banstr, sizeof banstr, "*!*@*%s", tmp);
		else
			snprintf(banstr, sizeof banstr, "*!*@%s", host);
		break;
	case 5: /* User */
		snprintf(banstr, sizeof banstr, "*!%s@%s", t, cluster(host));
		break;
	case 6: /* Screw */
		snprintf(banstr, sizeof banstr, "*!*%s@%s", t1, host);
		screw(banstr);
		break;
	case 1: /* Normal */
	default:
		snprintf(banstr, sizeof banstr, "%s!*%s@%s", nick, t1, host);
		break;
	}
	return banstr;
}
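The cluster() helper used above reduces a hostname (or IP) to a wildcard mask so one ban covers the whole domain or subnet. As a rough illustration only — this is not the actual BitchX implementation — a minimal masking helper might look like:

/* Hypothetical, simplified stand-in for cluster(): wildcard the most
 * specific hostname component. Not the real implementation. */
static char *mask_host(const char *host)
{
	static char masked[256];
	const char *dot = strchr(host, '.');

	if (dot && strchr(dot + 1, '.'))
		/* a.b.c -> *.b.c */
		snprintf(masked, sizeof masked, "*%s", dot);
	else
		/* too short to mask meaningfully; return as-is */
		snprintf(masked, sizeof masked, "%s", host);
	return masked;
}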
bool Package::Depend(const Package& p) const
{
    bool depends(false);

    std::string cluster(fs::basename(fs::path(m_Package->first).leaf()));
    if (cluster.empty())
    {
        cluster = fs::basename(fs::absolute(m_Package->first).filename());
    }

    // Emit a Graphviz subgraph for this package and its objects
    std::cout << "subgraph cluster_" << cluster << "{" << std::endl
              << "label=" << cluster << ';' << std::endl;

    for (std::set<Object>::const_iterator it = m_Package->second.object.begin();
         it != m_Package->second.object.end(); ++it)
    {
        std::cout << '"' << fs::basename(fs::path(it->Name()).leaf()) << "\";" << std::endl;
    }

    for (std::set<Package>::const_iterator it = m_Package->second.package.begin();
         it != m_Package->second.package.end(); ++it)
    {
        it->Depend(p);
    }

    std::cout << "}" << std::endl;
    return depends;
}
std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> MovingObjectsIdentificator::extractClusters() {
    if (verbose) std::cout << "Extracting clusters ... ";

    pcl::search::KdTree<pcl::PointXYZ>::Ptr kdTree(new pcl::search::KdTree<pcl::PointXYZ>);
    kdTree->setInputCloud(workingCloud);

    std::vector<pcl::PointIndices> indices;
    pcl::EuclideanClusterExtraction<pcl::PointXYZ> ece;
    ece.setClusterTolerance(clusterTolerance);
    ece.setMinClusterSize(minClusterSize);
    ece.setSearchMethod(kdTree);
    ece.setInputCloud(workingCloud);
    ece.extract(indices);

    std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters;
    for (std::vector<pcl::PointIndices>::iterator it = indices.begin(); it != indices.end(); ++it) {
        pcl::PointCloud<pcl::PointXYZ>::Ptr cluster(new pcl::PointCloud<pcl::PointXYZ>);
        pcl::ExtractIndices<pcl::PointXYZ> extract;
        extract.setInputCloud(workingCloud);

        pcl::PointIndices::Ptr pi(new pcl::PointIndices);
        pi->indices = it->indices;
        extract.setIndices(pi);
        extract.setNegative(false);
        extract.filter(*cluster);
        clusters.push_back(cluster);
    }

    if (verbose) std::cout << "done!" << std::endl;
    return clusters;
}
void findDoorCentroids(const cloud_t::Ptr &cloud, const std::vector<pcl::PointIndices> &indices, std::vector<point_t> &centroids) {
    // X-coord width in meters
    const float min_width = .8;
    const float max_width = .96;

    // Loop through clusters
    for (std::vector<pcl::PointIndices>::const_iterator it = indices.begin(); it != indices.end(); ++it) {
        // Create cluster from indices
        cloud_t::Ptr cluster(new cloud_t(*cloud, (*it).indices));

        // Second check: door width
        point_t min;
        point_t max;
        pcl::getMinMax3D(*cluster, min, max);
        float door_width = sqrt(pow(max.x - min.x, 2.) + pow(max.y - min.y, 2.));
        if ((door_width > min_width) && (door_width < max_width)) {
            // Return door centroid
            Eigen::Matrix<float, 4, 1> centroid;
            pcl::compute3DCentroid(*cluster, centroid);
            centroids.push_back(point_t(centroid(0), centroid(1), centroid(2)));
            std::cout << "Found door with centroid at: " << centroid(0) << ", "
                      << centroid(1) << ", " << centroid(2) << std::endl;
        }
    }
}
void segment_region_growing(pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud, int index, pcl::PointCloud<pcl::PointXYZRGBA>::Ptr output_cloud) {
    pcl::search::Search<pcl::PointXYZRGBA>::Ptr tree =
        boost::shared_ptr<pcl::search::Search<pcl::PointXYZRGBA> >(new pcl::search::KdTree<pcl::PointXYZRGBA>);

    // Estimate per-point normals for the region-growing criterion
    pcl::PointCloud<pcl::Normal>::Ptr normals(new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimation<pcl::PointXYZRGBA, pcl::Normal> normal_estimator;
    normal_estimator.setSearchMethod(tree);
    normal_estimator.setInputCloud(cloud);
    normal_estimator.setKSearch(50);
    normal_estimator.compute(*normals);

    // Filter the huge quantity of points at the origin that mean nothing
    pcl::IndicesPtr indices(new std::vector<int>);
    pcl::PassThrough<pcl::PointXYZRGBA> pass;
    pass.setInputCloud(cloud);
    pass.setFilterFieldName("z");
    pass.setFilterLimits(-0.01, 0.01);
    pass.setFilterLimitsNegative(true);
    pass.filter(*indices);

    pcl::PointCloud<pcl::PointXYZRGBA>::Ptr temp_visualization(new pcl::PointCloud<pcl::PointXYZRGBA>);
    pcl::PointIndices::Ptr cluster_ptr(new pcl::PointIndices());
    cluster_ptr->indices = *indices;
    extract_indices(cloud, temp_visualization, *cluster_ptr);

    pcl::RegionGrowing<pcl::PointXYZRGBA, pcl::Normal> reg;
    reg.setMinClusterSize(10);
    reg.setMaxClusterSize(100000);
    reg.setSearchMethod(tree);
    reg.setNumberOfNeighbours(30);
    reg.setIndices(indices);
    reg.setInputCloud(cloud);
    reg.setInputNormals(normals);
    reg.setSmoothnessThreshold(15.0 / 180.0 * M_PI); // More relaxed angle constraints
    reg.setCurvatureThreshold(1.0);

    // Grow a single region from the seed point and extract it
    pcl::PointIndices::Ptr cluster(new pcl::PointIndices());
    reg.getSegmentFromPoint(index, *cluster);
    extract_indices(cloud, output_cloud, *cluster);
}
void CAttributeDlg::DoCheckSyntax()
{
    m_view.SyntaxChecking();
    CString ecl;
    m_view.GetText(ecl);
    CString cluster(GetIConfig(QUERYBUILDER_CFG)->Get(GLOBAL_CLUSTER));
    CString module(m_attribute->GetModuleQualifiedLabel());
    CString attribute(m_attribute->GetLabel());

    if (CComPtr<IEclCC> eclcc = CreateIEclCC())
    {
        if (!DoSave(false))
            return;
    }

    if (m_attribute->GetType() == CreateIAttributeECLType())
    {
        clib::thread run(__FUNCTION__, boost::bind(&EclCheckSyntax, this, ecl, cluster, module, attribute, _T(""), _T(""), false, false));
    }
    else
    {
        IAttributeVector attrs;
        Dali::CEclExceptionVector errors;
        m_attribute->PreProcess(PREPROCESS_SYNTAXCHECK, ecl, attrs, errors);
        SendMessage(CWM_SUBMITDONE, Dali::WUActionCheck, (LPARAM)&errors);
    }
}
void darts::hwloc::AbstractMachine::discoverTopologyWithLLC(void)
{
    unsigned nbSockets = hwloc_get_nbobjs_by_type(_topology, HWLOC_OBJ_SOCKET);
    hwloc_obj_t o = hwloc_get_obj_by_type(_topology, HWLOC_OBJ_SOCKET, 0);
    hwloc_obj_t obj;

    // Walk down from the first socket until the first cache level is found
    for (obj = o->first_child; obj && obj->type != HWLOC_OBJ_CACHE; obj = obj->first_child)
        ;

    _nbClusters = nbSockets;
    if (obj) {
        int n = hwloc_get_nbobjs_inside_cpuset_by_type(_topology, obj->cpuset, HWLOC_OBJ_PU);
        _nbClusters = _nbTotalUnits / n; // XXX assumes homogeneous distribution of PUs
    }
    _clusterMap = new Cluster[_nbClusters];

    // TODO Refactor this code and the next function's code into a single one
    for (o = obj; o; o = o->next_cousin) {
        int nUnits = hwloc_get_nbobjs_inside_cpuset_by_type(_topology, o->cpuset, HWLOC_OBJ_PU);
        Unit *units = new Unit[nUnits];
        for (int i = 0; i < nUnits; ++i) {
            hwloc_obj_t t = hwloc_get_obj_inside_cpuset_by_type(_topology, o->cpuset, HWLOC_OBJ_PU, i);
            Unit hwu(o->logical_index, t->logical_index, t->os_index);
            units[i] = hwu; // simple shallow copy
        }
        Cluster cluster(o->logical_index, o->logical_index, nUnits, units);
        _clusterMap[o->logical_index] = cluster; // simple shallow copy
    }
}
bool try_initialize_more (Ekiga::ServiceCore& core, int* /*argc*/, char** /*argv*/[])
{
  bool result = false;

  boost::shared_ptr<Ekiga::PresenceCore> presence_core = core.get<Ekiga::PresenceCore> ("presence-core");
  boost::shared_ptr<Ekiga::CallCore> call_core = core.get<Ekiga::CallCore> ("call-core");
  boost::shared_ptr<Ekiga::PersonalDetails> details = core.get<Ekiga::PersonalDetails> ("personal-details");

  if (presence_core && call_core && details) {

    boost::shared_ptr<Avahi::PresencePublisher> publisher (new Avahi::PresencePublisher (core, *details, *call_core));
    if (core.add (publisher)) {
      presence_core->add_presence_publisher (publisher);
      result = true;
    }

    boost::shared_ptr<Avahi::Cluster> cluster (new Avahi::Cluster (core));
    if (core.add (cluster)) {
      presence_core->add_cluster (cluster);
      result = true;
    }
  }

  return result;
}
void build(const std::string& ip_prefix, int num_nodes) {
  test_utils::CassClusterPtr cluster(cass_cluster_new());
  test_utils::initialize_contact_points(cluster.get(), ip_prefix, num_nodes, 0);
  cass_cluster_set_load_balance_round_robin(cluster.get());
  cass_cluster_set_token_aware_routing(cluster.get(), cass_false);

  test_utils::CassSessionPtr session(test_utils::create_session(cluster.get()));

  for (int i = 0; i < num_nodes; ++i) {
    test_utils::CassStatementPtr statement(
        cass_statement_new("SELECT tokens, data_center FROM system.local", 0));
    test_utils::CassFuturePtr future(cass_session_execute(session.get(), statement.get()));
    test_utils::wait_and_check_error(future.get());

    test_utils::CassResultPtr result(cass_future_get_result(future.get()));
    const CassRow* row = cass_result_first_row(result.get());
    const CassValue* data_center = cass_row_get_column_by_name(row, "data_center");
    const CassValue* token_set = cass_row_get_column_by_name(row, "tokens");

    CassString str;
    cass_value_get_string(data_center, &str.data, &str.length);
    std::string dc(str.data, str.length);
    std::string ip = cass::get_host_from_future(future.get());

    // Map every token owned by this host to its address and data center
    test_utils::CassIteratorPtr iterator(cass_iterator_from_collection(token_set));
    while (cass_iterator_next(iterator.get())) {
      cass_value_get_string(cass_iterator_get_value(iterator.get()), &str.data, &str.length);
      std::string token(str.data, str.length);
      tokens[boost::lexical_cast<int64_t>(token)] = Host(ip, dc);
    }
  }
}
int main()
{
    // CREATE GRAPH FROM A SORTED EDGE LIST FILE
    createGraph();

    int i = 0, flg = 0;
    /* verify the edge list is sorted by weight (list is assumed 1-indexed) */
    for (i = 1; i < NO_OF_ENTRIES; ++i) {
        if (list[i]->w > list[i + 1]->w) {
            printf("\n%d) %d %d %d\n", i, list[i]->s, list[i]->d, list[i]->w);
            flg++;
        }
    }
    if (flg == 0) {
        printf("\nArray is sorted. Now to apply clustering...");
        getch();
    } else {
        return -1;
    }

    i = cluster(4);
    printf("\nMaximum spacing=%d. (At edge no.=%d)", list[i]->w, i);
    return 0;
}
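cluster(4) above presumably performs single-linkage clustering over the sorted edge list until four clusters remain; the maximum spacing is then the weight of the next edge that would merge two distinct clusters. A self-contained sketch of that idea with union-find follows — the types and the node count N are illustrative, not the program's own list/graph structures.

/* Hedged sketch: max-spacing k-clustering over a pre-sorted edge list
 * using union-find with path compression. All names are hypothetical. */
#define N 500 /* number of nodes (assumed) */

static int parent[N];

static int find(int x) { return parent[x] == x ? x : (parent[x] = find(parent[x])); }

/* e[i] = {src, dst, weight}, sorted by ascending weight; returns the spacing */
static int max_spacing(const int (*e)[3], int m, int k)
{
    int i, components = N;
    for (i = 0; i < N; i++)
        parent[i] = i;
    for (i = 0; i < m; i++) {
        int a = find(e[i][0]), b = find(e[i][1]);
        if (a == b)
            continue;                    /* edge lies inside an existing cluster */
        if (components == k)
            return e[i][2];              /* first inter-cluster edge: the spacing */
        parent[a] = b;                   /* merge the two clusters */
        components--;
    }
    return -1;                           /* never reached k components */
}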
template <typename PointT> void
pcl::people::HeadBasedSubclustering<PointT>::mergeClustersCloseInFloorCoordinates (std::vector<pcl::people::PersonCluster<PointT> >& input_clusters,
    std::vector<pcl::people::PersonCluster<PointT> >& output_clusters)
{
  float min_distance_between_cluster_centers = 0.4; // meters
  float normalize_factor = std::pow(sqrt_ground_coeffs_, 2); // sqrt_ground_coeffs ^ 2 (precomputed for speed)
  Eigen::Vector3f head_ground_coeffs = ground_coeffs_.head(3); // ground plane normal (precomputed for speed)
  std::vector <std::vector<int> > connected_clusters;
  connected_clusters.resize(input_clusters.size());
  std::vector<bool> used_clusters; // 0 in correspondence of clusters remained to process, 1 for already used clusters
  used_clusters.resize(input_clusters.size());

  for(size_t i = 0; i < input_clusters.size(); i++) // for every cluster
  {
    Eigen::Vector3f theoretical_center = input_clusters[i].getTCenter();
    float t = theoretical_center.dot(head_ground_coeffs) / normalize_factor; // height from the ground
    Eigen::Vector3f current_cluster_center_projection = theoretical_center - head_ground_coeffs * t; // projection of the point on the groundplane
    for(size_t j = i+1; j < input_clusters.size(); j++) // for every remaining cluster
    {
      theoretical_center = input_clusters[j].getTCenter();
      float t = theoretical_center.dot(head_ground_coeffs) / normalize_factor; // height from the ground
      Eigen::Vector3f new_cluster_center_projection = theoretical_center - head_ground_coeffs * t; // projection of the point on the groundplane
      if (((new_cluster_center_projection - current_cluster_center_projection).norm()) < min_distance_between_cluster_centers)
      {
        connected_clusters[i].push_back(j);
      }
    }
  }

  for(size_t i = 0; i < connected_clusters.size(); i++) // for every cluster
  {
    if (!used_clusters[i]) // if this cluster has not been used yet
    {
      used_clusters[i] = true;
      if (connected_clusters[i].empty()) // no other clusters to merge
      {
        output_clusters.push_back(input_clusters[i]);
      }
      else
      {
        // Copy cluster points into new cluster:
        pcl::PointIndices point_indices;
        point_indices = input_clusters[i].getIndices();
        for(const int &cluster : connected_clusters[i])
        {
          if (!used_clusters[cluster]) // if this cluster has not been used yet
          {
            used_clusters[cluster] = true;
            for(std::vector<int>::const_iterator points_iterator = input_clusters[cluster].getIndices().indices.begin();
                points_iterator != input_clusters[cluster].getIndices().indices.end(); points_iterator++)
            {
              point_indices.indices.push_back(*points_iterator);
            }
          }
        }
        pcl::people::PersonCluster<PointT> cluster(cloud_, point_indices, ground_coeffs_, sqrt_ground_coeffs_, head_centroid_, vertical_);
        output_clusters.push_back(cluster);
      }
    }
  }
}
vector<Detection> NonMaximumSuppression::eliminateRedundantDetections(vector<Detection> candidates) const {
	if (overlapThreshold == 1.0) // with this threshold, there would be an endless loop - this check assumes distinct bounding boxes
		return candidates;
	sortByScore(candidates);
	vector<vector<Detection>> clusters = cluster(candidates);
	return getMaxima(clusters);
}
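The cluster(candidates) call presumably groups detections greedily by overlap: the highest-scoring remaining detection seeds a cluster, absorbs everything that overlaps it enough, and the process repeats on what is left (each pass removes at least the seed, which is why it terminates for thresholds below 1.0). A minimal sketch of that greedy clustering follows; Box and iou() are hypothetical stand-ins, not this project's Detection type or overlap measure.

// Hedged sketch of greedy overlap clustering for non-maximum suppression.
#include <algorithm>
#include <vector>

struct Box { float x, y, w, h, score; };

static float iou(const Box& a, const Box& b) {
	float x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y);
	float x2 = std::min(a.x + a.w, b.x + b.w), y2 = std::min(a.y + a.h, b.y + b.h);
	float inter = std::max(0.f, x2 - x1) * std::max(0.f, y2 - y1);
	return inter / (a.w * a.h + b.w * b.h - inter);
}

// candidates must already be sorted by descending score
static std::vector<std::vector<Box>> clusterByOverlap(std::vector<Box> candidates, float overlapThreshold) {
	std::vector<std::vector<Box>> clusters;
	while (!candidates.empty()) {
		std::vector<Box> cluster{candidates.front()}; // best remaining box seeds the cluster
		std::vector<Box> rest;
		for (size_t i = 1; i < candidates.size(); ++i) {
			if (iou(cluster.front(), candidates[i]) > overlapThreshold)
				cluster.push_back(candidates[i]);
			else
				rest.push_back(candidates[i]);
		}
		clusters.push_back(cluster);
		candidates.swap(rest);
	}
	return clusters;
}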
int main(int argc, char** argv)
{
  try {
    variable_set_type variables;
    options(argc, argv, variables);

    if (variables.count("temporary") && ! variables["temporary"].as<std::string>().empty())
      ::setenv("TMPDIR_SPEC", variables["temporary"].as<std::string>().data(), 1);

    if (! variables.count("input"))
      throw std::runtime_error("no input file");
    if (! variables.count("output"))
      throw std::runtime_error("no output file");

    const std::string input_path = variables["input"].as<std::string>();
    const std::string output_path = variables["output"].as<std::string>();

    cicada::Cluster cluster(input_path);

    ::sync();

    cluster.write(output_path);
  }
  catch (const std::exception& err) {
    std::cerr << "error: " << err.what() << std::endl;
    return 1;
  }
  return 0;
}
bool process_ros(duplo_v0::Process_PCD::Request &req, duplo_v0::Process_PCD::Response &res)
{
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloudz(new pcl::PointCloud<pcl::PointXYZRGB>);        // Filtered cloud
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_inliers(new pcl::PointCloud<pcl::PointXYZRGB>); // Inliers after removing outliers
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr planar_cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr object_cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cluster1(new pcl::PointCloud<pcl::PointXYZRGB>);
	pcl::fromROSMsg(req.pcd_in, *cloud);

	/* Filter out the non-table points */
	if (pass_through(cloud, cloudz) != 0) {
		std::cerr << "Pass-through filter error" << std::endl;
		return false;
	}

	/* Segmentation of the cloud into table and object clouds */
	planar_seg(cloudz, planar_cloud, object_cloud, "table_try.pcd", "object_try.pcd");

	/* Cluster objects based on Euclidean distance */
	int num_clusters_found = cluster(object_cloud);
	if (num_clusters_found == 0)
		the_chosen_one.data.clear();

	res.n_clusters = num_clusters_found;
	processed_pointcloud = true;
	return true;
}
TEST(podio, OneToOneRelations) {
  bool success = true;
  auto cluster = ExampleCluster();
  auto rel = ExampleWithOneRelation();
  rel.cluster(cluster);
  EXPECT_EQ(true, success);
}
/*
 * Cluster all the samples again.
 */
bool OfflineKMeans::reCluster() {
	bool clusterChanged = false;
	for(int i = 0; i < sampleNum; ++i) {
		const float* sample = samples.getSample(i);
		int oldCluster = clusterIndexes[i];
		int newCluster = cluster(sample, featureNum);
		if(newCluster != oldCluster) {
			clusterChanged = true;
#ifdef OKM_DBG
			std::cout<<"i = "<<i<<", oldCluster = "<<oldCluster<<", newCluster = "
				<<newCluster<<std::endl;
#endif
			// move sample from the old cluster to the new cluster.
			clusters[oldCluster].second.erase(const_cast<float*>(sample));
			clusters[newCluster].second.insert(const_cast<float*>(sample));

			/*
			 * set clusterIndexes[i] to newCluster to indicate now sample
			 * indexed by 'i' is in cluster indexed by 'newCluster'.
			 */
			clusterIndexes[i] = newCluster;
		}
	}

	if(clusterChanged == true)
		updateCentroids();
	return clusterChanged;
}
void redis_client_cluster::set_all_slot(const char* addr, int max_conns)
{
	redis_client client(addr, 30, 60, false);
	redis_cluster cluster(&client);

	const std::vector<redis_slot*>* slots = cluster.cluster_slots();
	if (slots == NULL)
		return;

	std::vector<redis_slot*>::const_iterator cit;
	for (cit = slots->begin(); cit != slots->end(); ++cit)
	{
		const redis_slot* slot = *cit;
		const char* ip = slot->get_ip();
		if (*ip == 0)
			continue;
		int port = slot->get_port();
		if (port <= 0)
			continue;

		size_t slot_min = slot->get_slot_min();
		size_t slot_max = slot->get_slot_max();
		if ((int) slot_max >= max_slot_ || slot_max < slot_min)
			continue;

		// Reuse an existing connection pool for this node or create one
		char buf[128];
		safe_snprintf(buf, sizeof(buf), "%s:%d", ip, port);
		redis_client_pool* conns = (redis_client_pool*) get(buf);
		if (conns == NULL)
			set(buf, max_conns);

		// Map every slot in [slot_min, slot_max] to this node
		for (size_t i = slot_min; i <= slot_max; i++)
			set_slot((int) i, buf);
	}
}
bool try_initialize_more (Ekiga::ServiceCore& core, int* /*argc*/, char** /*argv*/[])
{
  bool result = false;

  boost::shared_ptr<Ekiga::PresenceCore> presence = core.get<Ekiga::PresenceCore> ("presence-core");
  boost::shared_ptr<Ekiga::AccountCore> account = core.get<Ekiga::AccountCore> ("account-core");
  boost::shared_ptr<Ekiga::ChatCore> chat = core.get<Ekiga::ChatCore> ("chat-core");
  boost::shared_ptr<Ekiga::PersonalDetails> details = core.get<Ekiga::PersonalDetails> ("personal-details");

  if (presence && account && chat && details) {

    LM::DialectPtr dialect(new LM::Dialect (core));
    LM::ClusterPtr cluster(new LM::Cluster (dialect, details));
    LM::BankPtr bank (new LM::Bank (details, dialect, cluster));

    if (core.add (bank)) {
      chat->add_dialect (dialect);
      account->add_bank (bank);
      presence->add_cluster (cluster);
      result = true;
    }
  }

  return result;
}
virtual void init()
{
    OwnedRoxieString fname(helper->getFileName());
    dlfn.set(fname);
    isLocal = 0 != (TIWlocal & helper->getFlags());

    unsigned minSize = helper->queryDiskRecordSize()->getMinRecordSize();
    if (minSize > KEYBUILD_MAXLENGTH)
        throw MakeActivityException(this, 0, "Index minimum record length (%d) exceeds %d internal limit", minSize, KEYBUILD_MAXLENGTH);
    unsigned maxSize = helper->queryDiskRecordSize()->getRecordSize(NULL);
    if (maxSize > KEYBUILD_MAXLENGTH)
        throw MakeActivityException(this, 0, "Index maximum record length (%d) exceeds %d internal limit. Minimum size = %d, try setting index MAXLENGTH", maxSize, KEYBUILD_MAXLENGTH, minSize);

    singlePartKey = 0 != (helper->getFlags() & TIWsmall) || dlfn.isExternal();

    clusters.kill();
    unsigned idx = 0;
    while (true)
    {
        OwnedRoxieString cluster(helper->getCluster(idx));
        if (!cluster)
            break;
        clusters.append(cluster);
        idx++;
    }

    IArrayOf<IGroup> groups;
    if (singlePartKey)
    {
        isLocal = true;
        buildTlk = false;
    }
    else if (!isLocal || globals->getPropBool("@buildLocalTlks", true))
        buildTlk = true;

    fillClusterArray(container.queryJob(), fname, clusters, groups);

    unsigned restrictedWidth = 0;
    if (TIWhaswidth & helper->getFlags())
    {
        restrictedWidth = helper->getWidth();
        if (restrictedWidth > container.queryJob().querySlaves())
            throw MakeActivityException(this, 0, "Unsupported, can't refactor to width(%d) larger than host cluster(%d)", restrictedWidth, container.queryJob().querySlaves());
        else if (restrictedWidth < container.queryJob().querySlaves())
        {
            if (!isLocal)
                throw MakeActivityException(this, 0, "Unsupported, refactoring to few parts only supported for local indexes.");
            assertex(!singlePartKey);
            unsigned gwidth = groups.item(0).ordinality();
            if (0 != container.queryJob().querySlaves() % gwidth)
                throw MakeActivityException(this, 0, "Unsupported, refactored target size (%d) must be factor of thor cluster width (%d)", groups.item(0).ordinality(), container.queryJob().querySlaves());
            if (0 == restrictedWidth)
                restrictedWidth = gwidth;
            ForEachItemIn(g, groups)
            {
                IGroup &group = groups.item(g);
                if (gwidth != groups.item(g).ordinality())
                    throw MakeActivityException(this, 0, "Unsupported, cannot output multiple refactored widths, targeting cluster '%s' and '%s'", clusters.item(0), clusters.item(g));
                if (gwidth != restrictedWidth)
                    groups.replace(*group.subset((unsigned)0, restrictedWidth), g);
            }
            refactor = true;
        }
    } // remainder of init() omitted in the original excerpt
}
void make_graph(const char* fn)
{
	FILE *fw = mustOpen(fn, "w");
	int i, j, cnt;
	int rec_num = 0;
	if (po->COL_WIDTH == 2)
		po->COL_WIDTH = MAX(cols/20, 2);

	/* edge_ptr describe edges */
	AllocArray(edge_list, HEAP_SIZE);

	/* Allocating heap structure */
	struct fibheap *heap;
	heap = fh_makeheap();
	fh_setcmp(heap, edge_cmpr);

	/* Generating seed list and push into heap */
	progress("Generating seed list (minimum weight %d)", po->COL_WIDTH);
	Edge __cur_min = {0, 0, po->COL_WIDTH};
	Edge *_cur_min = &__cur_min;
	Edge **cur_min = &_cur_min;

	/* iterate over all genes to retrieve all edges */
	for (i = 0; i < rows; i++)
		for (j = i+1; j < rows; j++)
		{
			cnt = str_intersect_r(arr_c[i], arr_c[j]);
			if (cnt < (*cur_min)->score)
				continue;
			AllocVar(edge_ptr);
			edge_ptr->gene_one = i;
			edge_ptr->gene_two = j;
			edge_ptr->score = cnt;
			fh_insert_fixed(heap, edge_ptr, cur_min);
			rec_num++;
		}

	if (rec_num == 0)
		errAbort("Not enough overlap between genes");

	/* sort the seeds */
	uglyTime("%d seeds generated", rec_num);
	ReAllocArray(edge_list, rec_num);
	fh_dump(heap, edge_list);

	/* bi-clustering */
	int n_blocks = 0;
	progress("Clustering started");
	n_blocks = cluster(fw, edge_list, rec_num);
	uglyTime("%d clusters are written to %s", n_blocks, fn);

	/* clean up */
	for (i = 0; i < rec_num; i++)
		free(edge_list[i]);
	free(edge_list);
}
//
// Main
//
int clusterMain(int argc, char** argv)
{
    Timer* pTimer = new Timer("sga cluster");
    parseClusterOptions(argc, argv);
    cluster();
    delete pTimer;
    return 0;
}
/**
 * @function grabberCallback
 */
void grabberCallback( const pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr &_cloud ) {

  if( !gViewer->wasStopped() ) {
    gViewer->showCloud( _cloud );
  }

  if( gProcessCloud ) {

    std::stringstream stream;
    stream << "inputCloud" << gFilesSaved << ".pcd";
    std::string filename = stream.str();

    if( pcl::io::savePCDFile( filename, *_cloud, true ) == 0 ) {
      gFilesSaved++;
      std::cout << "Saved " << filename << "." << std::endl;
    } else {
      PCL_ERROR( "[!] Problem saving %s.\n", filename.c_str() );
    }

    /** Process cloud */
    gTs.processCloud( _cloud );

    // Get camera parameters
    double f, cx, cy;
    int width, height;
    XnMapOutputMode m = ((pcl::OpenNIGrabber*)gKinectGrabber)->getDevice()->getDepthOutputMode();
    width = (int) m.nXRes;
    height = (int) m.nYRes;
    f = (double)((pcl::OpenNIGrabber*)gKinectGrabber)->getDevice()->getDepthFocalLength(0);
    cx = width >> 1;
    cy = height >> 1;

    /** Set mindGapper (to generate mirror pointclouds) */
    mindGapper<pcl::PointXYZRGBA> mG;
    mG.setTablePlane( gTs.getTableCoeffs() );
    mG.setFittingParams( 6, 5, 0.01, M_PI/9.0 );
    mG.setDeviceParams( width, height, f, (double)cx, (double)cy );

    /** Generate mirror pointclouds */
    for( int i = 0; i < gTs.getNumClusters(); ++i ) {
      pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cluster( new pcl::PointCloud<pcl::PointXYZRGBA>() );
      *cluster = gTs.getCluster(i);
      mG.complete( cluster );
      char name[50];
      sprintf( name, "mirror_%d.pcd", i );
      pcl::io::savePCDFile( name, *cluster, true );
    }

    gProcessCloud = false;
  }
}
void OfflineKMeans::initClusters() {
	// calculate the minimum and maximum values of each feature
	std::pair<float, float>* featureRanges = new std::pair<float, float>[featureNum];
	for(int j = 0; j < featureNum; ++j) {
		float minFeatureJ = std::numeric_limits<float>::max();
		// note: -max(), not min(); float min() is the smallest positive value
		float maxFeatureJ = -std::numeric_limits<float>::max();
		for(int i = 0; i < sampleNum; ++i) {
			float feature = samples.getSample(i)[j];
			if(minFeatureJ > feature)
				minFeatureJ = feature;
			if(maxFeatureJ < feature)
				maxFeatureJ = feature;
		}
		featureRanges[j].first = minFeatureJ;
		featureRanges[j].second = maxFeatureJ;
	}

	// initialize centroids of clusters.
	clusters = new std::pair<float*, std::set<float*> >[clusterNum];
	srand(1); // fixed seed keeps the initialization reproducible
	for(int i = 0; i < clusterNum; ++i) {
		float* centroid = new float[featureNum];
		for(int j = 0; j < featureNum; ++j) {
			// draw a fresh scale per feature; a single scale per centroid
			// would place every centroid on the diagonal of the bounding box
			float scale = static_cast<float>(rand()) / RAND_MAX;
			float minFeatureJ = featureRanges[j].first;
			float maxFeatureJ = featureRanges[j].second;
			centroid[j] = minFeatureJ + (maxFeatureJ - minFeatureJ) * scale;
		}
		clusters[i].first = centroid;
	}
	delete[] featureRanges;

#ifdef OKM_DBG
	std::cout<<"Centroids of clusters initialized randomly:"<<std::endl;
	for(int i = 0; i < clusterNum; ++i) {
		std::cout<<"<";
		float* centroid = clusters[i].first;
		int j;
		for(j = 0; j < featureNum-1; ++j)
			std::cout<<centroid[j]<<" ";
		std::cout<<centroid[j]<<">"<<std::endl;
	}
#endif

	/* cluster all samples for the first time.
	 * setup mapping from samples to clusters and centroids of clusters. */
	clusterIndexes = new int[sampleNum];
	for(int i = 0; i < sampleNum; ++i) {
		const float* sample = samples.getSample(i);
		int targetCluster = cluster(sample, featureNum);
		clusterIndexes[i] = targetCluster;
		clusters[targetCluster].second.insert(const_cast<float*>(sample));
	}
	updateCentroids();
}
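Both reCluster() and initClusters() delegate per-sample assignment to cluster(sample, featureNum). A plausible implementation, offered as a sketch rather than the class's actual code, is a nearest-centroid search under squared Euclidean distance over the same clusters/clusterNum members used above:

// Hedged sketch of OfflineKMeans::cluster(): index of the centroid nearest
// to `sample`. A guess at the helper's behavior, not the real implementation.
#include <limits>

int OfflineKMeans::cluster(const float* sample, int featureNum) const {
	int best = 0;
	float bestDist = std::numeric_limits<float>::max();
	for(int c = 0; c < clusterNum; ++c) {
		const float* centroid = clusters[c].first;
		float dist = 0.0f;
		for(int j = 0; j < featureNum; ++j) {
			float d = sample[j] - centroid[j];
			dist += d * d; // squared Euclidean distance, no sqrt needed for argmin
		}
		if(dist < bestDist) {
			bestDist = dist;
			best = c;
		}
	}
	return best;
}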
void QTClusterFinder::computeClustering_(Grid & grid, list<QTCluster> & clustering)
{
	clustering.clear();
	distances_.clear();
	// FeatureDistance produces normalized distances (between 0 and 1):
	const DoubleReal max_distance = 1.0;

	// iterate over all grid cells:
	for (Grid::iterator it = grid.begin(); it != grid.end(); ++it)
	{
		const Grid::CellIndex & act_coords = it.index();
		const Int x = act_coords[0], y = act_coords[1];

		GridFeature * center_feature = it->second;
		QTCluster cluster(center_feature, num_maps_, max_distance, use_IDs_);

		// iterate over neighboring grid cells (1st dimension):
		for (int i = x - 1; i <= x + 1; ++i)
		{
			// iterate over neighboring grid cells (2nd dimension):
			for (int j = y - 1; j <= y + 1; ++j)
			{
				try
				{
					const Grid::CellContent & act_pos = grid.grid_at(Grid::CellIndex(i, j));
					for (Grid::const_cell_iterator it_cell = act_pos.begin(); it_cell != act_pos.end(); ++it_cell)
					{
						GridFeature * neighbor_feature = it_cell->second;
						// consider only "real" neighbors, not the element itself:
						if (center_feature != neighbor_feature)
						{
							DoubleReal dist = getDistance_(center_feature, neighbor_feature);
							if (dist == FeatureDistance::infinity)
							{
								continue; // conditions not satisfied
							}
							// if neighbor point is a possible cluster point, add it:
							if (!use_IDs_ || compatibleIDs_(cluster, neighbor_feature))
							{
								cluster.add(neighbor_feature, dist);
							}
						}
					}
				}
				catch (std::out_of_range &)
				{
					// neighboring cell lies outside the grid - nothing to do
				}
			}
		}
		clustering.push_back(cluster);
	}
}
QList<OneDimensionalCluster> OneDimensionalCluster::createOneDimensionalClusterList(const QList<double> &oneDimensionalCoordinateList)
{
    QList<OneDimensionalCluster> oneDimensionalClusterList;
    foreach (double coordinate, oneDimensionalCoordinateList) {
        // wrap each coordinate in a singleton cluster
        QList<double> initialList;
        initialList.append(coordinate);
        OneDimensionalCluster cluster(initialList);
        oneDimensionalClusterList.append(cluster);
    }
    return oneDimensionalClusterList;
}
int main(int argc, char *argv[])
{
  ros::init(argc, argv, "EuclideanClusterNode");
  ros::NodeHandle nh;
  ros::NodeHandle n("~");
  EuclideanCluster cluster(nh, n);
  cluster.run();
  return 0;
}
int main() {
    srand(time(NULL));
    data **zee = init(MAX);
    int i;
    for (i = 0; i < 5000; i++) {
        cluster(zee);
    }
    print_cluster(zee);
    return 0;
}