static double score2(const boost::unordered_map<int, double> &m0, const boost::unordered_map<int, double> &m1)
{
    // Count keys present in both maps by lookup. A sorted-merge walk over two
    // iterators is not valid here: boost::unordered_map iterates in an unspecified
    // order, so the original two-pointer key comparison miscounted shared keys.
    int shared = 0;
    for (auto i0 = m0.begin(); i0 != m0.end(); ++i0)
    {
        if (m1.find(i0->first) != m1.end())
            ++shared;
    }
    // |m0| + |m1| - |m0 intersect m1| = size of the key union
    return m0.size() + m1.size() - shared;
}
void IfcTreeWidget::slotObjectsSelected( boost::unordered_map<int, shared_ptr<IfcPPEntity> >& map )
{
    if( m_block_selection_signals )
    {
        return;
    }

    if( map.size() < 1 )
    {
        return;
    }

    // take the first object from map and highlight it
    shared_ptr<IfcPPEntity> object = (*(map.begin())).second;
    int selected_id = object->m_id;

    for( int i=0; i<topLevelItemCount(); ++i )
    {
        QTreeWidgetItem* toplevel_item = topLevelItem( i );
        QTreeWidgetItem* selected_item = ViewerUtil::findItemByIfcId( toplevel_item, selected_id );
        if( selected_item != 0 )
        {
            blockSignals(true);
            m_block_selection_signals = true;
            setCurrentItem( selected_item, 1, QItemSelectionModel::SelectCurrent );
            blockSignals(false);
            m_block_selection_signals = false;
            break;
        }
    }
}
void IndexingVariables::set( boost::unordered_map<std::string, boost::shared_ptr<ArrayStorage> > as, int offset) { hm.clear(); hm.insert(as.begin(), as.end()); this->offset = offset; }
//! Dump Legendre polynomial cache data to stream (table and history).
void dumpLegendrePolynomialCacheData( std::ostream& outputStream,
                                      boost::unordered_map< Point, double > cacheTable,
                                      boost::circular_buffer< Point > cacheHistory )
{
    outputStream << "Table:\n";

    for ( boost::unordered_map< Point, double >::iterator iteratorCacheTable = cacheTable.begin( );
          iteratorCacheTable != cacheTable.end( ); iteratorCacheTable++ )
    {
        outputStream << "\t" << writeLegendrePolynomialStructureToString( iteratorCacheTable->first ).c_str( )
                     << " => " << iteratorCacheTable->second << std::endl;
    }

    outputStream << "History:\n";

    for ( boost::circular_buffer< Point >::iterator iteratorCacheHistory = cacheHistory.begin( );
          iteratorCacheHistory != cacheHistory.end( ); iteratorCacheHistory++ )
    {
        outputStream << "\t" << writeLegendrePolynomialStructureToString( *iteratorCacheHistory ).c_str( )
                     << ", ";
    }

    outputStream << std::endl;
}
TKmerMapSimple kmerify_graph_simple(String<TVertexDescriptor const> const & order, TGraph const & graph, std::vector<VertexLabels> & vertex_vector, boost::unordered_set<TVertexDescriptor> const & free_nodes, boost::unordered_map< std::pair<TVertexDescriptor, TVertexDescriptor>, boost::dynamic_bitset<> > & edge_ids, int const & kmer_size ) { TKmerMapSimple kmer_map; for (Iterator<String<TVertexDescriptor const> const>::Type it = begin(order) ; it != end(order) ; ++it) { TVertexDescriptor const & source_vertex = *it; if (free_nodes.count(source_vertex) == 0) { boost::dynamic_bitset<> id_bits(edge_ids.begin()->second.size()); id_bits.flip(); check_kmers_simple(vertex_vector[source_vertex].dna, graph, source_vertex, vertex_vector, free_nodes, edge_ids, id_bits, kmer_map, static_cast<std::size_t>(kmer_size)); } } return kmer_map; }
TKmerMap kmerifyGraph(String<TVertexDescriptor const> const & order,
                      TGraph const & graph,
                      std::vector<VertexLabels> & vertex_vector,
                      boost::unordered_set<TVertexDescriptor> const & free_nodes,
                      boost::unordered_map< std::pair<TVertexDescriptor, TVertexDescriptor>, boost::dynamic_bitset<> > & edge_ids,
                      int const & kmer_size
                     )
{
  TKmerMap kmer_map;

  for (Iterator<String<TVertexDescriptor const> const>::Type it = begin(order) ; it != end(order) ; ++it)
  {
    TVertexDescriptor const & source_vertex = *it;

    if (free_nodes.count(source_vertex) == 0)
    {
      boost::dynamic_bitset<> id_bits(edge_ids.begin()->second.size());
      id_bits.flip();

      checkKmers(vertex_vector[source_vertex].dna, source_vertex, source_vertex, graph, vertex_vector, free_nodes, edge_ids, id_bits, kmer_map, static_cast<std::size_t>(kmer_size));
      // std::cout << "source_vertex = " << source_vertex << " kmer_map.size() = " << kmer_map.size() << " kmer_size = " << kmer_size;
      // std::cout << " vertex_vector[source_vertex].level = " << vertex_vector[source_vertex].level << std::endl;
    }
  }

  return kmer_map;
}
void Streamer::processPickups(Player &player, const std::vector<SharedCell> &cells) { static boost::unordered_map<int, Item::SharedPickup> discoveredPickups; for (std::vector<SharedCell>::const_iterator c = cells.begin(); c != cells.end(); ++c) { for (boost::unordered_map<int, Item::SharedPickup>::const_iterator p = (*c)->pickups.begin(); p != (*c)->pickups.end(); ++p) { boost::unordered_map<int, Item::SharedPickup>::iterator d = discoveredPickups.find(p->first); if (d == discoveredPickups.end()) { if (checkPlayer(p->second->players, player.playerID, p->second->interiors, player.interiorID, p->second->worlds, player.worldID)) { if (boost::geometry::comparable_distance(player.position, p->second->position) <= p->second->streamDistance) { boost::unordered_map<int, int>::iterator i = internalPickups.find(p->first); if (i == internalPickups.end()) { p->second->worldID = !p->second->worlds.empty() ? player.worldID : -1; } discoveredPickups.insert(*p); } } } } } if (processingFinalPlayer) { boost::unordered_map<int, int>::iterator i = internalPickups.begin(); while (i != internalPickups.end()) { boost::unordered_map<int, Item::SharedPickup>::iterator d = discoveredPickups.find(i->first); if (d == discoveredPickups.end()) { DestroyPickup(i->second); i = internalPickups.erase(i); } else { discoveredPickups.erase(d); ++i; } } for (boost::unordered_map<int, Item::SharedPickup>::iterator d = discoveredPickups.begin(); d != discoveredPickups.end(); ++d) { if (internalPickups.size() == visiblePickups) { break; } int internalID = CreatePickup(d->second->modelID, d->second->type, d->second->position[0], d->second->position[1], d->second->position[2], d->second->worldID); if (internalID == INVALID_ALTERNATE_ID) { break; } internalPickups.insert(std::make_pair(d->second->pickupID, internalID)); } discoveredPickups.clear(); } }
///Calculates the Gini impurity of a node. The impurity is defined as
///1-sum_j p(j|t)^2
///i.e. 1 minus the sum of the squared probabilities of observing class j in node t
double CARTTrainer::gini(boost::unordered_map<std::size_t, std::size_t>& countMatrix, std::size_t n){
    double res = 0;
    boost::unordered_map<std::size_t, std::size_t>::iterator it;
    double denominator = n;
    for ( it=countMatrix.begin() ; it != countMatrix.end(); it++ ){
        res += sqr(it->second/denominator);
    }
    return 1-res;
}
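// The impurity formula in the comment above can be checked on a tiny count table. The
// following standalone snippet is an illustration only (it is not part of CARTTrainer and
// assumes sqr(x) == x*x, as in the trainer code): for class counts {3, 1} it prints
// 1 - (0.75^2 + 0.25^2) = 0.375.
#include <boost/unordered_map.hpp>
#include <cstddef>
#include <iostream>

static double sqr(double x) { return x * x; }

int main()
{
    boost::unordered_map<std::size_t, std::size_t> counts;
    counts[0] = 3;  // three samples of class 0
    counts[1] = 1;  // one sample of class 1
    const double denominator = 4;  // total number of samples in the node

    double res = 0;
    for (boost::unordered_map<std::size_t, std::size_t>::iterator it = counts.begin(); it != counts.end(); ++it)
        res += sqr(it->second / denominator);

    std::cout << 1 - res << std::endl;  // prints 0.375
    return 0;
}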
bool buildNameLookupTable(Kompex::SQLiteStatement * stmt,
                          boost::unordered_map<std::string,int32_t> &table_names)
{
    std::string stmt_insert = "INSERT INTO name_lookup";
    stmt_insert += "(name_id,name_lookup) VALUES(@name_id,@name_lookup);";

    try {
        stmt->BeginTransaction();
        stmt->Sql(stmt_insert);
    }
    catch(Kompex::SQLiteException &exception) {
        qDebug() << "ERROR: SQLite exception with insert statement:"
                 << QString::fromStdString(exception.GetString());
        return false;
    }

    // keep track of the number of transactions and
    // commit after a certain limit
    size_t transaction_limit=5000;
    size_t transaction_count=0;

    // write name lookup info to the database
    boost::unordered_map<std::string,int32_t>::iterator it;
    for(it = table_names.begin(); it != table_names.end(); ++it) {
        // prepare sql
        try {
            //[name_id, name_lookup]
            stmt->BindInt(1,it->second);
            stmt->BindString(2,it->first);
            stmt->Execute();
            stmt->Reset();

            if(transaction_count > transaction_limit) {
                stmt->FreeQuery();
                stmt->CommitTransaction();
                stmt->BeginTransaction();
                stmt->Sql(stmt_insert);
                transaction_count=0;
            }
            transaction_count++;
        }
        catch(Kompex::SQLiteException &exception) {
            qDebug() << "ERROR: SQLite exception writing tile data:"
                     << QString::fromStdString(exception.GetString());
            qDebug() << "ERROR: name_id:" << it->second;
            qDebug() << "ERROR: name_lookup:" << QString::fromStdString(it->first);
            return false;
        }
    }
    stmt->FreeQuery();
    stmt->CommitTransaction();

    return true;
}
void read_db_users()
{
    m_read_db_users_time = srv_time();
    if (!m_use_sql)
        return;
    try
    {
        Csql_query q(m_database, "select @uid");
        if (m_read_users_can_leech)
            q += ", can_leech";
        if (m_read_users_peers_limit)
            q += ", peers_limit";
        if (m_read_users_torrent_pass)
            q += ", torrent_pass";
        q += ", torrent_pass_version";
        if (m_read_users_wait_time)
            q += ", wait_time";
        q += " from @users";
        Csql_result result = q.execute();
        // m_users.reserve(result.size());
        for (auto& i : m_users)
            i.second.marked = true;
        m_users_torrent_passes.clear();
        while (Csql_row row = result.fetch_row())
        {
            t_user& user = m_users[row[0].i()];
            user.marked = false;
            int c = 0;
            user.uid = row[c++].i();
            if (m_read_users_can_leech)
                user.can_leech = row[c++].i();
            if (m_read_users_peers_limit)
                user.peers_limit = row[c++].i();
            if (m_read_users_torrent_pass)
            {
                if (row[c].size() == 32)
                    m_users_torrent_passes[to_array<char, 32>(row[c])] = &user;
                c++;
            }
            user.torrent_pass_version = row[c++].i();
            if (m_read_users_wait_time)
                user.wait_time = row[c++].i();
        }
        for (auto i = m_users.begin(); i != m_users.end(); )
        {
            if (i->second.marked)
                m_users.erase(i++);
            else
                i++;
        }
    }
    catch (bad_query&)
    {
    }
}
static void serialize(storage_type& s, const boost::unordered_map<T1, T2>& o) { uint32_t sz = o.size(); s.serialize(sz); for (typename boost::unordered_map<T1,T2>::const_iterator it = o.begin(), it_end = o.end(); it != it_end; ++it) { s.serialize(it->first); s.serialize(it->second); } }
///Calculates the Gini impurity of a node. The impurity is defined as
///1-sum_j p(j|t)^2
///i.e. 1 minus the sum of the squared probabilities of observing class j in node t
double RFTrainer::gini(boost::unordered_map<std::size_t, std::size_t> & countMatrix, std::size_t n){
    double res = 0;
    boost::unordered_map<std::size_t, std::size_t>::iterator it;
    if(n){
        n = n*n;
        for ( it=countMatrix.begin() ; it != countMatrix.end(); it++ ){
            res += sqr(it->second)/(double)n;
        }
    }
    return 1-res;
}
int TwoSum::find_range(int lower, int upper) {
    int count = 0;

    // Build the list of candidate sums [lower, upper].
    // reserve() (not resize()) is used here: resize() followed by push_back()
    // would leave upper-lower stray zero entries in front of the real values.
    numbers_to_check.clear();
    numbers_to_check.reserve(upper - lower + 1);
    for (int i = lower; i <= upper; i++) {
        numbers_to_check.push_back(i);
    }

    for (auto it = numbers.begin(); it != numbers.end(); ++it) {
        if (find_sum(it->first)) {
            ++count;
        }
    }
    return count;
}
RealVector CARTTrainer::hist(boost::unordered_map<std::size_t, std::size_t> countMatrix){
    //create a normed histogram
    std::size_t totalElements = 0;
    RealVector normedHistogram(m_maxLabel+1);
    normedHistogram.clear();
    boost::unordered_map<std::size_t, std::size_t>::iterator it;
    for ( it=countMatrix.begin() ; it != countMatrix.end(); it++ ){
        normedHistogram(it->first) = it->second;
        totalElements += it->second;
    }
    normedHistogram /= totalElements;
    return normedHistogram;
}
void Task::drawKeypoints(const base::samples::frame::Frame &frame2,
                         const std::vector<cv::KeyPoint> &keypoints1,
                         const std::vector<cv::KeyPoint> &keypoints2,
                         const std::vector<cv::DMatch> &matches,
                         const boost::unordered_map<boost::uuids::uuid, StereoFeature> &hash)
{
    /** Convert Images to opencv **/
    cv::Mat img2 = frameHelperLeft.convertToCvMat(frame2);
    ::cv::Mat img_out(img2);

    cv::drawKeypoints (img2, keypoints1, img_out, cv::Scalar(0, 255, 0)); //green previous

    for (std::vector<cv::DMatch>::const_iterator it= matches.begin(); it!= matches.end(); ++it)
    {
        cv::Point point1 = keypoints1[it->queryIdx].pt;
        cv::Point point2 = keypoints2[it->trainIdx].pt;

        //cv::circle(img_out, point1, 3, cv::Scalar(0, 255, 0), 1); // green previous
        cv::circle(img_out, point2, 3, cv::Scalar(255, 0, 0), 1); // blue current
        cv::line (img_out, point1, point2, cv::Scalar(0, 0, 255)); // red line
    }

    if (_draw_hash_uuid_features.value())
    {
        /** Draw hash features **/
        for (boost::unordered_map<boost::uuids::uuid, StereoFeature>::const_iterator it = hash.begin(); it != hash.end(); ++it)
        {
            /** Only current features but with the hash id and the image frame index in the label **/
            if (it->second.img_idx == this->fcurrent_left.img_idx)
            {
                float red = 255.0 - (255.0/(this->frame_window_hash_size)*(this->ffinal_left.img_idx-it->second.img_idx));
                cv::circle(img_out, it->second.keypoint_left.pt, 5, cv::Scalar(0, 0, red), 2);
                std::vector<std::string> uuid_tokens = this->split(boost::lexical_cast<std::string>(it->first), '-');
                cv::putText(img_out, uuid_tokens[uuid_tokens.size()-1]+"["+boost::lexical_cast<std::string>(it->second.img_idx)+"]",
                            it->second.keypoint_left.pt, cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cv::Scalar(0,0,0), 2.5);
            }
        }
    }

    ::base::samples::frame::Frame *frame_ptr = this->inter_frame_out.write_access();
    frameHelperLeft.copyMatToFrame(img_out, *frame_ptr);

    frame_ptr->time = this->frame_pair.time;
    this->inter_frame_out.reset(frame_ptr);
    _inter_frame_samples_out.write(this->inter_frame_out);

    return;
}
void save(Archive & ar, const boost::unordered_map<T,U> & t, unsigned int version)
{
    // write the size
    // TODO: should we handle bucket size as well?
    typedef typename boost::unordered_map<T, U>::size_type size_type;
    typedef typename boost::unordered_map<T,U>::const_iterator const_iterator;
    size_type size = t.size();
    ar & BOOST_SERIALIZATION_NVP(size);
    for (const_iterator it = t.begin(); it != t.end(); it++)
    {
        // The original wrote make_nvp("pair" + count, *it): adding an integer to a
        // string literal is pointer arithmetic, not concatenation, and reads past the
        // end of the literal once count > 4. A fixed element name avoids that.
        ar & boost::serialization::make_nvp("item", *it);
    }
}
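// A matching load() is not shown in the original. The sketch below is a hypothetical
// counterpart under the same conventions (size first, then "item" pairs); it assumes
// <boost/serialization/utility.hpp> is included for std::pair support and is an
// illustration, not the project's actual deserializer.
template<class Archive, class T, class U>
void load(Archive & ar, boost::unordered_map<T, U> & t, unsigned int version)
{
    typedef typename boost::unordered_map<T, U>::size_type size_type;
    size_type size = 0;
    ar & BOOST_SERIALIZATION_NVP(size);
    t.clear();
    for (size_type i = 0; i < size; ++i)
    {
        std::pair<T, U> item;                               // read one key/value pair
        ar & boost::serialization::make_nvp("item", item);
        t.insert(item);
    }
}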
RealVector RFTrainer::hist(boost::unordered_map<std::size_t, std::size_t> countMatrix){ RealVector histogram(m_maxLabel+1,0.0); std::size_t totalElements = 0; boost::unordered_map<std::size_t, std::size_t>::iterator it; for ( it=countMatrix.begin() ; it != countMatrix.end(); it++ ){ histogram(it->first) = (double)it->second; totalElements += it->second; } histogram /= totalElements; return histogram; }
bool TwoSum::find_sum(long int target) { long int bucket; for( auto it = numbers_to_check.begin(); it != numbers_to_check.end(); it++ ) { if( numbers.find((*it) - target) == numbers.end() ) { continue; }; bucket = numbers.bucket( (*it) - target ); for(auto it_local = numbers.begin(bucket); it_local != numbers.end(bucket); ++it_local) { if ( ( it_local->first + target ) != (*it) ) { continue; }; if ( it_local->first != target || (it_local->first == target && multi_numbers.find(target) != multi_numbers.end()) ) { std::cout << target << " + " << it_local->first << " = " << (*it) << " ( " << target + it_local->first << std::endl; numbers_to_check.erase(it); return true; }; }; }; return false; };
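// For comparison, a hypothetical lookup-based variant of the membership test above (not part
// of the original TwoSum class): it assumes `numbers` maps each value to its multiplicity,
// which makes the manual bucket scan unnecessary.
#include <boost/unordered_map.hpp>
#include <vector>

static bool find_sum_by_lookup(long int target,
                               const boost::unordered_map<long int, long int> &numbers,
                               const std::vector<long int> &numbers_to_check)
{
    for (std::vector<long int>::const_iterator it = numbers_to_check.begin();
         it != numbers_to_check.end(); ++it)
    {
        long int other = *it - target;  // partner needed so that target + other == *it
        boost::unordered_map<long int, long int>::const_iterator hit = numbers.find(other);
        if (hit == numbers.end())
            continue;
        // require a distinct partner unless that value occurs more than once
        if (other != target || hit->second > 1)
            return true;
    }
    return false;
}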
void printIdMap(boost::unordered_map<std::string, long> &my_id_map) {
    // std::cout << my_id_map.size() << std::endl;
    std::string max_id;
    long max_count = 0;
    for (boost::unordered_map<std::string, long>::iterator it = my_id_map.begin();
         it != my_id_map.end(); ++it) {
        printf("%8s: %4ld\n", it->first.c_str(), it->second);
        if (it->second > max_count) {
            max_count = it->second;
            max_id = it->first;
        }
    }
    std::cout << max_id << " wins!" << std::endl;
}
inline void operator<< (object::with_zone& o, const boost::unordered_map<K,V>& v) { o.type = type::MAP; if(v.empty()) { o.via.map.ptr = NULL; o.via.map.size = 0; } else { object_kv* p = (object_kv*)o.zone->malloc(sizeof(object_kv)*v.size()); object_kv* const pend = p + v.size(); o.via.map.ptr = p; o.via.map.size = v.size(); typename boost::unordered_map<K,V>::const_iterator it(v.begin()); do { p->key = object(it->first, o.zone); p->val = object(it->second, o.zone); ++p; ++it; } while(p < pend); } }
void dump_call_profile()
{
#ifdef TORRENT_PROFILE_CALLS
    FILE* out = fopen("blocking_calls.txt", "w+");

    std::map<int, std::string> profile;

    mutex::scoped_lock l(g_calls_mutex);
    for (boost::unordered_map<std::string, int>::const_iterator i = g_blocking_calls.begin()
        , end(g_blocking_calls.end()); i != end; ++i)
    {
        profile[i->second] = i->first;
    }
    for (std::map<int, std::string>::const_reverse_iterator i = profile.rbegin()
        , end(profile.rend()); i != end; ++i)
    {
        fprintf(out, "\n\n%d\n%s\n", i->first, i->second.c_str());
    }
    fclose(out);
#endif
}
static std::pair<double, double> score(const boost::unordered_map<int, double> &real, const boost::unordered_map<int, double> &fake, double threshold = 0.0) { double res = 0.0; int cnt = 0; double e = std::numeric_limits<double>::epsilon(); for (auto i = fake.begin(); i != fake.end(); ++i) if (i->second >= threshold && real.find(i->first) != real.end()) { ++cnt; double p = real.at(i->first); double q = fake.at(i->first); if (p <= e) res += (1-p) * log((1-p)/(1-q)); else if (1-p <= e || 1-q <= e) res += p * log(p/q); else res += p * log(p/q) + (1-p) * log((1-p)/(1-q)); } return {res, cnt ? res / cnt : 0.0}; }
void Task::cleanHashFeatures (const int current_image_idx, const int max_number_img,
                              boost::unordered_map<boost::uuids::uuid, StereoFeature> &hash)
{
    if (current_image_idx > max_number_img)
    {
        const int border_image_idx = (current_image_idx - max_number_img);

        /** All the images with idx <= border_image_idx should be eliminated **/
        /** erase() invalidates the erased iterator, so advance via its return value
            instead of incrementing a dangling iterator **/
        for (boost::unordered_map<boost::uuids::uuid, StereoFeature>::const_iterator it = hash.begin(); it != hash.end(); )
        {
            if (it->second.img_idx <= border_image_idx)
            {
                #ifdef DEBUG_PRINTS
                std::cout<<"[CLEAN_HASH_FEATURES] Remove from hash "<<boost::lexical_cast<std::string>(it->first)<<"\n";
                #endif

                it = hash.erase(it);
            }
            else
            {
                ++it;
            }
        }
    }
    return;
}
void read_db_torrents() { m_read_db_torrents_time = srv_time(); if (m_use_sql) read_db_torrents_sql(); else if (!m_config.m_auto_register) { std::set<t_torrent*> new_torrents; std::ifstream is("xbt_torrents.txt"); for (std::string s; getline(is, s); ) { s = hex_decode(s); if (s.size() == 20) new_torrents.insert(&m_torrents[to_array<char, 20>(s)]); } for (auto i = m_torrents.begin(); i != m_torrents.end(); ) { if (new_torrents.count(&i->second)) i++; else m_torrents.erase(i++); } } }
iterator begin() { return iterator(xmls.begin()); }
// Iterate over every point to generate the voxel grid, then compute the parameters of each voxel.
void Voxelize::generateUmap(pcl::PointCloud<pcl::PointXYZ> &cloud, double resolution,
                            boost::unordered_map<unordered_map_voxel,un_key> &m)
{
    double t0 = ros::Time::now().toSec();
    un_key tempp; // the key cannot be modified, so all per-voxel data lives in the value

    for(int i=0;i<cloud.size();i++)
    {
        unordered_map_voxel tempv(cloud[i].x,cloud[i].y,cloud[i].z,resolution);
        // the pair stores the return value of insert(), telling whether this voxel already exists
        pair<umap::iterator,bool> pair_insert = m.insert(umap::value_type(tempv,tempp));
        pair_insert.first->second.vec_index_point.push_back(i);
        /*
        if(!pair_insert.second) // if the voxel already exists, increment its counter
        {
            (pair_insert.first)->second.cout++;
            pair_insert.first->second.vec_index_point.push_back(i);
        }
        else{
            pair_insert.first->second.vec_index_point.push_back(i);
        }
        */
    }
    double t2 = ros::Time::now().toSec();
    //cout << "time spent building the voxel grid: " << (t2-t0) << endl;

    Matrix3d tempmat;
    tempmat.setZero();
    pcl::PointXYZ temppoint(0,0,0);

    for(umap::iterator iter=m.begin();iter!=m.end();)
    {
        temppoint.x=temppoint.y=temppoint.z=0;

        if(iter->second.vec_index_point.size()<10)
        {
            m.erase(iter++);
            continue;
        }
        for(int j = 0;j<iter->second.vec_index_point.size();j++)
        {
            double x_ = cloud[iter->second.vec_index_point[j]].x;
            double y_ = cloud[iter->second.vec_index_point[j]].y;
            double z_ = cloud[iter->second.vec_index_point[j]].z;

            tempmat += (Vector3d(x_,y_,z_) * RowVector3d(x_,y_,z_)); // accumulate the sum of pi*pi^T

            temppoint.x += x_;
            temppoint.y += y_;
            temppoint.z += z_;
        }
        int n = iter->second.vec_index_point.size();
        //cout << "voxel size: " << n << endl;
        Vector3d v(temppoint.x/n,temppoint.y/n,temppoint.z/n); // voxel centroid
        RowVector3d vT(temppoint.x/n,temppoint.y/n,temppoint.z/n);
        tempmat /= n;
        tempmat -= v*vT; // S
        iter->second.matS = tempmat;
        //cout << "S matrix = " << tempmat << endl;

        vector<eigen_sort> vector1(3); // used for sorting
        //cout << "tempmat=" << endl << tempmat << endl;
        EigenSolver<MatrixXd> es(tempmat);
        VectorXcd eivals = es.eigenvalues();
        MatrixXcd eigenvectors = es.eigenvectors();
        //cout << "eigenvalues = " << eivals << endl;
        vector1[0]=eigen_sort(eivals(0).real(),es.eigenvectors().col(0));
        vector1[1]=eigen_sort(eivals(1).real(),es.eigenvectors().col(1));
        vector1[2]=eigen_sort(eivals(2).real(),es.eigenvectors().col(2));
        sort(vector1.begin(),vector1.end());

        double lamada1 = vector1[0].eigen_value;
        double lamada2 = vector1[1].eigen_value;
        double lamada3 = vector1[2].eigen_value;
        //cout <<"lamada="<<lamada1<<" "<<lamada2<<" "<<lamada3<<endl;

        if(vector1[0].eigen_vector(2).real()<0)
        {
            vector1[0].eigen_vector(2).real() = -vector1[0].eigen_vector(2).real();
        }
        if(vector1[2].eigen_vector(2).real()<0)
        {
            vector1[2].eigen_vector(2).real() = -vector1[2].eigen_vector(2).real();
        }

        iter->second.c = (lamada3-lamada2)/(lamada1+lamada2+lamada3);
        iter->second.p = 2*(lamada2-lamada1)/(lamada1+lamada2+lamada3);
        iter->second.u = v;
        iter->second.v1 = vector1[0].eigen_vector;
        iter->second.v3 = vector1[2].eigen_vector;

        double c = iter->second.c;
        double p = iter->second.p;

        iter->second.vector_9D << v(0),v(1),v(2),p*vector1[0].eigen_vector(0).real(),p*vector1[0].eigen_vector(1).real(),
                p*vector1[0].eigen_vector(2).real(),c*vector1[2].eigen_vector(0).real(),c*vector1[2].eigen_vector(1).real(),
                p*vector1[2].eigen_vector(2).real();
        //cout << "9D vector = " << iter->second.vector_9D << endl;
        //ArrayXd ad = iter->second.vector_9D;
        //cout << "ad = " << ad << endl;
        //cout << "ad square=" << ad.square() << endl;

        tempmat.setZero();
        iter++;
    }
    double tn = ros::Time::now().toSec();
    //cout << "time spent processing the voxels: " << (tn - t2) << endl;
    //cout << "number of voxels: " << m.size() << endl;
}
//------------------------------------------------------------------------------
bool MzIDIO::insertMZIDValues(boost::unordered_map<PercolatorOutFeatures, string, PercolatorOutFeatures> &pout_values,
                              vector <string> &filenames, bool multiplemzidfiles) {
  ifstream fpr;
  ofstream fpw;
  ostream *wout;
  string mzidname,s1,psmid;
  int i1,vi1,n,xmlindent;

  n=0;
  psmid="";
  try {
    for (vi1=0; vi1<filenames.size(); vi1++) {
      mzidname=boost::lexical_cast<boost::filesystem::path>(filenames[vi1]).stem().string();
      if (!multiplemzidfiles)
        mzidname="";
      fpr.open(filenames[vi1].c_str());
      if (outputfileending.length()>0) {
        fpw.open(setOutputFileName(filenames[vi1]).c_str());
        wout=&fpw;
      }
      else
        wout=&cout;
      while (getline(fpr,s1)) {
        if (s1.find(MZID_PARAM::END_INSERT_TAG)!=string::npos) {
          if (psmid.length()==0)
            THROW_ERROR_VALUE(PRINT_TEXT::BAD_XML,filenames[vi1]);
          for (i1=0; i1<ARRAYSIZE(MZID_PARAM::ELEMENT_DATA::ELEMENTS); i1++) {
            if (pout_values.find(PercolatorOutFeatures(mzidname,psmid,i1))==pout_values.end())
              continue;
            n++;
            switch (MZID_PARAM::ELEMENT_DATA::ELEMENTS[i1]) {
              case MZID_PARAM::CVPARAM: {
                *wout << boost::format(MZID_PARAM::CVPARAM_TAG)
                         % string(xmlindent,' ')
                         % MZID_PARAM::ELEMENT_DATA::ACCESSIONS[i1]
                         % MZID_PARAM::ELEMENT_DATA::CVREFS[i1]
                         % MZID_PARAM::ELEMENT_DATA::NAMES[i1]
                         % pout_values[PercolatorOutFeatures(mzidname,psmid,i1)];
                break;
              }
              case MZID_PARAM::USERPARAM: {
                *wout << boost::format(MZID_PARAM::USERPARAM_TAG)
                         % string(xmlindent,' ')
                         % MZID_PARAM::ELEMENT_DATA::NAMES[i1]
                         % pout_values[PercolatorOutFeatures(mzidname,psmid,i1)];
                break;
              }
            }
            pout_values.erase(PercolatorOutFeatures(mzidname,psmid,i1));
          }
          psmid="";
        }
        xmlindent=s1.find_first_not_of(" ");
        *wout << s1 << endl;
        if (s1.find(MZID_PARAM::PSMID_TAG)!=string::npos && s1.find(MZID_PARAM::START_INSERT_TAG)!=string::npos) {
          i1=s1.find(MZID_PARAM::PSMID_TAG)+strlen(MZID_PARAM::PSMID_TAG);
          psmid=s1.substr(i1,s1.find("\"",i1)-i1);
        }
      }
      fpr.close();
      fpw.close();
    }
    clog << boost::format(PRINT_TEXT::INSERTED) % n << endl;
    for (boost::unordered_map<PercolatorOutFeatures, string, PercolatorOutFeatures>::iterator it=pout_values.begin(); it!=pout_values.end(); it++)
      cerr << boost::format(PRINT_TEXT::PSM_NOT_ENTERED) % it->first.filename % it->first.psmid << endl;
    return true;
  }
  catch (exception &e) {
    cerr << e.what() << endl;
    fpr.close();
    fpw.close();
    return false;
  }
}
//-----------------------------------------------------------------------------
std::size_t DofMapBuilder::build_constrained_vertex_indices(
  const Mesh& mesh,
  const std::map<unsigned int, std::pair<unsigned int, unsigned int> >& slave_to_master_vertices,
  std::vector<std::size_t>& modified_global_indices)
{
  // MPI communicator
  const MPI_Comm mpi_comm = mesh.mpi_comm();

  // Get vertex sharing information (local index, [(sharing process p,
  // local index on p)])
  const boost::unordered_map<unsigned int,
    std::vector<std::pair<unsigned int, unsigned int> > > shared_vertices
    = DistributedMeshTools::compute_shared_entities(mesh, 0);

  // Mark shared vertices
  std::vector<bool> vertex_shared(mesh.num_vertices(), false);
  boost::unordered_map<unsigned int,
    std::vector<std::pair<unsigned int, unsigned int> > >::const_iterator shared_vertex;
  for (shared_vertex = shared_vertices.begin(); shared_vertex != shared_vertices.end(); ++shared_vertex)
  {
    dolfin_assert(shared_vertex->first < vertex_shared.size());
    vertex_shared[shared_vertex->first] = true;
  }

  // Mark slave vertices
  std::vector<bool> slave_vertex(mesh.num_vertices(), false);
  std::map<unsigned int, std::pair<unsigned int, unsigned int> >::const_iterator slave;
  for (slave = slave_to_master_vertices.begin(); slave != slave_to_master_vertices.end(); ++slave)
  {
    dolfin_assert(slave->first < slave_vertex.size());
    slave_vertex[slave->first] = true;
  }

  // MPI process number
  const std::size_t proc_num = MPI::rank(mesh.mpi_comm());

  // Communication data structures
  std::vector<std::vector<std::size_t> > new_shared_vertex_indices(MPI::size(mesh.mpi_comm()));

  // Compute modified global vertex indices
  std::size_t new_index = 0;
  modified_global_indices = std::vector<std::size_t>(mesh.num_vertices(), std::numeric_limits<std::size_t>::max());
  for (VertexIterator vertex(mesh); !vertex.end(); ++vertex)
  {
    const std::size_t local_index = vertex->index();
    if (slave_vertex[local_index])
    {
      // Do nothing, will get new master index later
    }
    else if (vertex_shared[local_index])
    {
      // If shared, let lowest rank process number the vertex
      boost::unordered_map<unsigned int,
        std::vector<std::pair<unsigned int, unsigned int> > >::const_iterator it
        = shared_vertices.find(local_index);
      dolfin_assert(it != shared_vertices.end());
      const std::vector<std::pair<unsigned int, unsigned int> >& sharing_procs = it->second;

      // Figure out if this is the lowest rank process sharing the vertex
      std::vector<std::pair<unsigned int, unsigned int> >::const_iterator min_sharing_rank
        = std::min_element(sharing_procs.begin(), sharing_procs.end());
      std::size_t _min_sharing_rank = proc_num + 1;
      if (min_sharing_rank != sharing_procs.end())
        _min_sharing_rank = min_sharing_rank->first;

      if (proc_num <= _min_sharing_rank)
      {
        // Re-number vertex
        modified_global_indices[vertex->index()] = new_index;

        // Add to list to communicate
        std::vector<std::pair<unsigned int, unsigned int> >::const_iterator p;
        for (p = sharing_procs.begin(); p != sharing_procs.end(); ++p)
        {
          dolfin_assert(p->first < new_shared_vertex_indices.size());

          // Local index on remote process
          new_shared_vertex_indices[p->first].push_back(p->second);

          // Modified global index
          new_shared_vertex_indices[p->first].push_back(new_index);
        }

        new_index++;
      }
    }
    else
      modified_global_indices[vertex->index()] = new_index++;
  }

  // Send number of owned entities to compute offset
  std::size_t offset = MPI::global_offset(mpi_comm, new_index, true);

  // Add process offset to modified indices
  for (std::size_t i = 0; i < modified_global_indices.size(); ++i)
    modified_global_indices[i] += offset;

  // Add process offset to shared vertex indices before sending
  for (std::size_t p = 0; p < new_shared_vertex_indices.size(); ++p)
    for (std::size_t i = 1; i < new_shared_vertex_indices[p].size(); i += 2)
      new_shared_vertex_indices[p][i] += offset;

  // Send/receive new indices for shared vertices
  std::vector<std::vector<std::size_t> > received_vertex_data;
  MPI::all_to_all(mesh.mpi_comm(), new_shared_vertex_indices, received_vertex_data);

  // Set index for shared vertices that have been numbered by another
  // process
  for (std::size_t p = 0; p < received_vertex_data.size(); ++p)
  {
    const std::vector<std::size_t>& received_vertex_data_p = received_vertex_data[p];
    for (std::size_t i = 0; i < received_vertex_data_p.size(); i += 2)
    {
      const unsigned int local_index = received_vertex_data_p[i];
      const std::size_t recv_new_index = received_vertex_data_p[i + 1];
      dolfin_assert(local_index < modified_global_indices.size());
      modified_global_indices[local_index] = recv_new_index;
    }
  }

  // Request master vertex index from master owner
  std::vector<std::vector<std::size_t> > master_send_buffer(MPI::size(mpi_comm));
  std::vector<std::vector<std::size_t> > local_slave_index(MPI::size(mpi_comm));
  std::map<unsigned int, std::pair<unsigned int, unsigned int> >::const_iterator master;
  for (master = slave_to_master_vertices.begin(); master != slave_to_master_vertices.end(); ++master)
  {
    const unsigned int local_index = master->first;
    const unsigned int master_proc = master->second.first;
    const unsigned int remote_master_local_index = master->second.second;
    dolfin_assert(master_proc < local_slave_index.size());
    dolfin_assert(master_proc < master_send_buffer.size());
    local_slave_index[master_proc].push_back(local_index);
    master_send_buffer[master_proc].push_back(remote_master_local_index);
  }

  // Send/receive master local indices for slave vertices
  std::vector<std::vector<std::size_t> > received_slave_vertex_indices;
  MPI::all_to_all(mpi_comm, master_send_buffer, received_slave_vertex_indices);

  // Send back new master vertex index
  std::vector<std::vector<std::size_t> > master_vertex_indices(MPI::size(mpi_comm));
  for (std::size_t p = 0; p < received_slave_vertex_indices.size(); ++p)
  {
    const std::vector<std::size_t>& local_master_indices = received_slave_vertex_indices[p];
    for (std::size_t i = 0; i < local_master_indices.size(); ++i)
    {
      std::size_t master_local_index = local_master_indices[i];
      dolfin_assert(master_local_index < modified_global_indices.size());
      master_vertex_indices[p].push_back(modified_global_indices[master_local_index]);
    }
  }

  // Send/receive new global master indices for slave vertices
  std::vector<std::vector<std::size_t> > received_new_slave_vertex_indices;
  MPI::all_to_all(mpi_comm, master_vertex_indices, received_new_slave_vertex_indices);

  // Set index for slave vertices
  for (std::size_t p = 0; p < received_new_slave_vertex_indices.size(); ++p)
  {
    const std::vector<std::size_t>& new_indices = received_new_slave_vertex_indices[p];
    const std::vector<std::size_t>& local_indices = local_slave_index[p];
    for (std::size_t i = 0; i < new_indices.size(); ++i)
    {
      const std::size_t local_index = local_indices[i];
      const std::size_t new_global_index = new_indices[i];
      dolfin_assert(local_index < modified_global_indices.size());
      modified_global_indices[local_index] = new_global_index;
    }
  }

  // Send new indices to processes that share a vertex but were not
  // responsible for re-numbering
  return MPI::sum(mpi_comm, new_index);
}
void HDBScan::get_clusters(vector<CondensedTree*>& tree,
                           boost::unordered_map<int, double>& stability,
                           vector<int>& out_labels,
                           vector<double>& out_probs,
                           vector<double>& out_stabilities,
                           int cluster_selection_method,
                           bool allow_single_cluster,
                           bool match_reference_implementation)
{
    vector<int> node_list;
    boost::unordered_map<int, double>::iterator sit;
    for (sit = stability.begin(); sit!=stability.end(); sit++) {
        node_list.push_back(sit->first);
    }
    std::sort(node_list.begin(), node_list.end(), std::greater<int>());

    if (!allow_single_cluster) {
        // exclude root
        node_list.pop_back();
    }

    vector<CondensedTree*> cluster_tree;
    for (int i=0; i<tree.size(); i++) {
        if (tree[i]->child_size > 1) {
            cluster_tree.push_back(tree[i]);
        }
    }

    boost::unordered_map<int, bool> is_cluster;
    for (int i=0; i<node_list.size(); i++) {
        is_cluster[node_list[i]] = true;
    }

    // largest child id among leaf entries; the original initialized this with DBL_MIN,
    // which truncates to 0 when stored in an int, so INT_MIN is used to express the intent
    int num_points = INT_MIN;
    for (int i=0; i<tree.size(); i++) {
        if (tree[i]->child_size == 1) {
            if (tree[i]->child > num_points) {
                num_points = tree[i]->child;
            }
        }
    }
    num_points += 1;

    double max_lambda = tree[0]->lambda_val;
    for (int i=1; i<tree.size(); i++) {
        if (tree[i]->lambda_val > max_lambda) {
            max_lambda = tree[i]->lambda_val;
        }
    }

    if (cluster_selection_method == 0) {
        // eom
        for (int i=0; i<node_list.size(); i++) {
            int node = node_list[i];
            double subtree_stability = 0;
            for (int j=0; j<cluster_tree.size(); j++) {
                if (cluster_tree[j]->parent == node) {
                    //child_selection.push_back(node);
                    int child = cluster_tree[j]->child;
                    subtree_stability += stability[child];
                }
            }
            if (subtree_stability > stability[node]) {
                is_cluster[node] = false;
                stability[node] = subtree_stability;
            } else {
                vector<int> sub_nodes = bfs_from_cluster_tree(cluster_tree, node);
                for (int j=0; j<sub_nodes.size(); j++) {
                    int sub_node = sub_nodes[j];
                    if (sub_node != node) {
                        is_cluster[sub_node] = false;
                    }
                }
            }
        }
    } else if (cluster_selection_method == 1) {
        // leaf
        int parent_min = tree[0]->parent;
        for (int i=1; i<tree.size(); i++) {
            if (tree[i]->parent < parent_min) {
                parent_min = tree[i]->parent;
            }
        }
        boost::unordered_map<int, bool>::iterator it;
        vector<int> leaves_ = get_cluster_tree_leaves(cluster_tree);
        set<int> leaves;
        for (int i=0; i<leaves_.size(); i++) {
            leaves.insert(leaves_[i]);
        }
        if ( leaves.empty()) {
            for (it=is_cluster.begin(); it!=is_cluster.end(); it++) {
                is_cluster[it->first] = false;
            }
            is_cluster[parent_min] = true;
        }
        for (it=is_cluster.begin(); it!=is_cluster.end(); it++) {
            int c = it->first;
            if (leaves.find(c) != leaves.end()) {
                is_cluster[c] = true;
            } else {
                is_cluster[c] = false;
            }
        }
    }

    set<int> clusters;
    boost::unordered_map<int, bool>::iterator it;
    for (it = is_cluster.begin(); it!= is_cluster.end(); it++) {
        int c = it->first;
        if (it->second) {
            clusters.insert(c);
        }
    }
    vector<int> _clusters;
    set<int>::iterator _it;
    for (_it =clusters.begin(); _it != clusters.end(); _it++) {
        _clusters.push_back(*_it);
    }
    std::sort(_clusters.begin(), _clusters.end());

    boost::unordered_map<int, int> cluster_map, reverse_cluster_map;
    for (int i=0; i<_clusters.size(); i++) {
        cluster_map[_clusters[i]] = i;
        reverse_cluster_map[i] = _clusters[i];
    }

    // do_labelling(tree, clusters, cluster_map, allow_single_cluster, match_reference_implementation)
    out_labels = do_labelling(tree, clusters, cluster_map, allow_single_cluster, match_reference_implementation);

    // probs = get_probabilities(tree, reverse_cluster_map, labels)
    out_probs = get_probabilities(tree, reverse_cluster_map, out_labels);

    // stabilities = get_stability_scores(labels, clusters, stability, max_lambda)
    out_stabilities = get_stability_scores(out_labels, clusters, stability, max_lambda);
}
void Task::featuresOut(const int current_image_idx, const boost::unordered_map<boost::uuids::uuid, StereoFeature> &hash)
{
    ExteroFeatures features_samples;
    base::samples::Pointcloud features_points;
    double const &baseline(cameracalib.extrinsic.tx);
    cv::Mat cv_image1;

    /** Uncertainty in the images (left and right) planes **/
    Eigen::Matrix4d px_var;
    px_var << this->pxleftVar, Eigen::Matrix2d::Zero(),
              Eigen::Matrix2d::Zero(), this->pxrightVar;

    #ifdef DEBUG_PRINTS
    std::cout<<"[FEATURES_OUT] Current hash.size(): "<<hash.size()<<"\n";
    #endif

    /** Parse the hash table of features **/
    for (boost::unordered_map<boost::uuids::uuid, StereoFeature>::const_iterator it = hash.begin(); it != hash.end(); ++it)
    {
        //std::cout<<"[FEATURES_OUT] img_idx: "<<it->second.img_idx<<" current_idx: "<<current_image_idx<<"\n";
        if (it->second.img_idx == current_image_idx)
        {
            Feature feature;

            /** Get the uuid **/
            feature.index = it->first;

            /** Take the "Stereo" 2d point **/
            cv::Point const &left_pt(it->second.keypoint_left.pt);
            cv::Point const &right_pt(it->second.keypoint_right.pt);
            feature.stereo_point = base::Vector3d(left_pt.x, right_pt.x, left_pt.y);

            /** Compute the 3d point **/
            double disparity = std::min(right_pt.x - left_pt.x, -1);
            base::Vector4d image_point (left_pt.x, left_pt.y, disparity, 1);
            base::Vector4d homogeneous_point = Q * image_point;
            feature.point_3d = base::Vector3d (homogeneous_point(0)/homogeneous_point(3),
                                               homogeneous_point(1)/homogeneous_point(3),
                                               homogeneous_point(2)/homogeneous_point(3));

            #ifdef DEBUG_PRINTS
            std::cout<<"[FEATURES_OUT] Image point[left]: "<<left_pt.x<<" "<<left_pt.y<<"\n";
            std::cout<<"[FEATURES_OUT] Image point[right]: "<<right_pt.x<<" "<<right_pt.y<<"\n";
            std::cout<<"[FEATURES_OUT] 3D point:\n"<<feature.point_3d<<"\n";
            #endif

            if (_output_debug.value())
            {
                /** 3D point coordinates in base point cloud structure **/
                features_points.points.push_back(base::Vector3d (homogeneous_point(0)/homogeneous_point(3),
                                                                 homogeneous_point(1)/homogeneous_point(3),
                                                                 homogeneous_point(2)/homogeneous_point(3)));

                /** Color **/
                cv_image1 = frame_helper::FrameHelper::convertToCvMat(this->left_color_frame);
                cv::Vec3b color = cv_image1.at<cv::Vec3b>(left_pt.y, left_pt.x);
                base::Vector4d color4d;
                color4d[0] = color[0]/255.0; //R
                color4d[1] = color[1]/255.0; //G
                color4d[2] = color[2]/255.0; //B
                color4d[3] = 1.0; //Alpha
                features_points.colors.push_back(color4d);
            }

            /** Compute the covariance **/
            Eigen::Matrix<double, 3, 4> noise_jacobian; /** Jacobian Matrix for the triangulation noise model */
            double disparity_power = pow(disparity,2);

            noise_jacobian << -(baseline*right_pt.x)/disparity_power, 0.00, (baseline*left_pt.x)/disparity_power, 0.00,
                              -(baseline*left_pt.y)/disparity_power, baseline/disparity, (baseline*left_pt.y)/disparity_power, 0.00,
                              -(baseline*cameracalib.camLeft.fx)/disparity_power, 0.00, (baseline*cameracalib.camLeft.fx)/disparity_power, 0.00;

            feature.cov_3d = noise_jacobian * px_var * noise_jacobian.transpose();

            /** Store the feature in the vector **/
            features_samples.features.push_back(feature);
        }
    }

    #ifdef DEBUG_PRINTS
    std::cout<<"[FEATURES_OUT] Sent "<<features_samples.features.size()<<" vector of uuids\n";
    #endif

    features_samples.time = this->frame_pair.time;
    features_samples.img_idx = current_image_idx;
    _features_samples_out.write(features_samples);

    if (_output_debug.value())
    {
        features_points.time = this->frame_pair.time;
        _features_point_samples_out.write(features_points);
    }

    return;
}