void multimenu_button::select_options(boost::dynamic_bitset<> states)
{
    assert(states.size() == values_.size());

    toggle_states_ = states;
    update_config_from_toggle_states();
    update_label();
}

const bool PartialPage::getBitmap(const unsigned long sequence) const
{
    // Valid indices are [0, bitmap.size()); reject out-of-range requests.
    if (sequence >= bitmap.size()) {
        return false;
    }
    return (bitmap[sequence] == 1);
}

void operator()( space_type & ss, boost::dynamic_bitset<> & indices,
                 result_type column_margin, result_type row_margin )
{
    typedef typename space_type::raw_block_pointer iterator;

    size_type N = ss.row_count();
    size_type i = 0;
    while ( i < N ) {
        if( !indices.test(i) ) {
            ++i;
            continue;
        }

        iterator start = ss.begin_row(i);
        iterator end = ss.end_row(i);

        size_type j = 0, M = 0;
        while( start != end ) {
            block_type b = *start++;
            while( b ) {
                unsigned int b_idx = bit_walker_type::unset_next_index( b ) + j;
                column_margin[ b_idx ] += 1;
                ++M;
            }
            j += bit_helper_type::BITS_PER_BLOCK;
        }
        row_margin[ i ] = M;
        ++i;
    }
}

void normalize_split( boost::dynamic_bitset<>& split,
                      const vector< vector< int > >& splitmap,
                      int target_size )
{
    boost::dynamic_bitset<> newsplit( target_size );
    for( int i = 0; i < splitmap.size(); i++ )
        for( int j = 0; j < splitmap[i].size(); j++ )
            newsplit.set( splitmap[i][j], split.test(i) );
    split = newsplit;
}

const bool PartialPage::setBitmap(const unsigned long sequence)
{
    // Valid indices are [0, bitmap.size()); reject out-of-range requests.
    if (sequence >= bitmap.size()) {
        return false;
    }
    bitmap[sequence] = 1;
    return true;
}

void DigitalInputWriter::setDigitalInput(const ::boost::dynamic_bitset<>& bitset)
{
    for (::std::size_t i = 0; i < bitset.size(); ++i) {
        this->setDigitalInput(i, bitset[i]);
    }
}

void checkKmers(DnaString const & kmer,
                TVertexDescriptor const & starting_vertex,
                TVertexDescriptor const & source_vertex,
                TGraph const & graph,
                std::vector<VertexLabels> & vertex_vector,
                boost::unordered_set<TVertexDescriptor> const & free_nodes,
                boost::unordered_map< std::pair<TVertexDescriptor, TVertexDescriptor>, boost::dynamic_bitset<> > & edge_ids,
                boost::dynamic_bitset<> const & id_bits,
                TKmerMap & kmer_map,
                std::size_t const & kmer_size)
{
    if (id_bits.none())
        return;

    if (length(kmer) == kmer_size) {
        KmerLabels new_kmer_label = {
            starting_vertex,
            source_vertex,
            id_bits
        };

        if (kmer_map.count(kmer) == 0) {
            std::vector<KmerLabels> new_vector(1, new_kmer_label);
            kmer_map[kmer] = new_vector;
        } else {
            kmer_map[kmer].push_back(new_kmer_label);
        }

        return;
    }

    for (Iterator<TGraph, OutEdgeIterator>::Type out_edge_iterator(graph, source_vertex);
         !atEnd(out_edge_iterator);
         ++out_edge_iterator) {
        DnaString new_kmer(kmer);
        TVertexDescriptor const & target_vertex = targetVertex(out_edge_iterator);
        boost::dynamic_bitset<> new_id_bits(id_bits);

        if (free_nodes.count(target_vertex) == 0) {
            seqan::appendValue(new_kmer, vertex_vector[target_vertex].dna);
            std::pair<TVertexDescriptor, TVertexDescriptor> edge_pair(source_vertex, target_vertex);

            if (edge_ids.count(edge_pair) == 1) {
                new_id_bits = id_bits & edge_ids[edge_pair];
            }
        }

        checkKmers(new_kmer, starting_vertex, target_vertex, graph, vertex_vector,
                   free_nodes, edge_ids, new_id_bits, kmer_map, kmer_size);
    }
}

unsigned int cliqueHelper3(const boost_graph &g, const std::vector<VertexID> &nodes,
                           const std::vector<EdgeID> &edges, double &score)
{
    static boost::dynamic_bitset<> empty;
    empty.resize(nodes.size());

    unsigned largest_clique = 1;
    boost::dynamic_bitset<> set(nodes.size());
    for (unsigned i = 0; i < nodes.size(); ++i) {
        set[i] = true;
    }

    int i = 0;
    while ((largest_clique <= 1 || i < 50000) && i < 100000) {
        // Build the next potential clique.
        // Every bit that is a 1 in 'set' means that that node is in the clique.
        std::vector<VertexID> clique;
        randomize(set);
        set[0] = true; // the first bit is the query node. must always be on.
        for (unsigned loop = 0; loop < nodes.size(); ++loop) {
            if (set[loop]) {
                clique.push_back(nodes[loop]);
            }
        }

        // If this is a clique...
        double maybe_score = 0;
        if ((clique.size() > largest_clique) && isClique(g, clique, edges, maybe_score)) {
            largest_clique = clique.size();
            score = maybe_score;
            std::cout << "Largest clique is " << largest_clique << std::endl;
        }

        //decrement(set);
        //increment(set);
        i++;
    }
    return largest_clique;
}

template<class T>
std::vector<T> bitset_to_vector(const boost::dynamic_bitset<>& b)
{
    std::vector<T> v(b.size());
    for (size_t i = 0; i < v.size(); ++i) {
        if (b[i])
            v[i] = 1;
    }
    return v;
}

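// A minimal standalone sketch of how bitset_to_vector above might be used; the
// helper name example_bitset_to_vector is hypothetical and not part of the
// original code. Note that bit 0 of a dynamic_bitset constructed from a string
// is the rightmost character, so the vector comes out "reversed" relative to
// the string.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <string>
#include <vector>

void example_bitset_to_vector()
{
    boost::dynamic_bitset<> b(std::string("0011"));
    std::vector<char> v = bitset_to_vector<char>(b);
    assert(v.size() == 4);
    assert(v[0] == 1 && v[1] == 1 && v[2] == 0 && v[3] == 0);
}
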
void decrement(boost::dynamic_bitset<> &bitset)
{
    // Binary subtract-one: flip bits starting from the LSB and stop at the
    // first bit that flips from 1 to 0 (i.e. where the borrow is absorbed).
    for (unsigned loop = 0; loop < bitset.size(); ++loop) {
        if ((bitset[loop] ^= 0x1) == 0x0) {
            break;
        }
    }
}

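// A small standalone sketch (hypothetical helper name, not from the original
// code) showing that decrement above behaves like binary subtract-one on the
// bitset, with bit 0 as the least significant bit.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <string>

void example_decrement()
{
    boost::dynamic_bitset<> b(std::string("100")); // value 4
    decrement(b);
    assert(b == boost::dynamic_bitset<>(std::string("011"))); // value 3
}
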
std::string format_binary(const boost::dynamic_bitset<>& b)
{
    std::ostringstream oss;
    oss.imbue(std::locale::classic());
    oss.put('"');
    oss << std::hex << std::setw(1);

    unsigned c = b.size();
    unsigned n = (4 - (c % 4)) & 3;
    oss << n;
    for (unsigned i = 0; i < c + n;) {
        unsigned accum = 0;
        for (int j = 0; j < 4; ++j, ++i) {
            unsigned bit = i < n ? 0 : b.test(c - i + n - 1) ? 1 : 0;
            accum |= bit << (3 - j);
        }
        oss << accum;
    }
    oss.put('"');
    return oss.str();
}

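// The string produced by format_binary above is wrapped in double quotes; its
// first digit records how many zero bits were prepended to pad the length to a
// multiple of four, and the remaining hex digits encode the bitset from the
// most significant bit down. A standalone sketch (hypothetical helper name),
// assuming format_binary above is in scope:
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <string>

void example_format_binary()
{
    boost::dynamic_bitset<> b(std::string("101")); // 3 bits -> 1 padding bit
    // One pad bit plus the nibble 0101 -> pad count '1', hex digit '5'.
    assert(format_binary(b) == "\"15\"");
}
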
template<typename PointT, typename NormalT> bool
pcl::NormalSpaceSampling<PointT, NormalT>::isEntireBinSampled (boost::dynamic_bitset<> &array,
                                                               unsigned int start_index,
                                                               unsigned int length)
{
  // Returns true only if every bit in [start_index, start_index + length) is set.
  bool status = true;
  for (unsigned int i = start_index; i < start_index + length; i++)
  {
    status = status & array.test (i);
  }
  return status;
}

void listbox::set_row_shown(const boost::dynamic_bitset<>& shown)
{
    assert(generator_);
    assert(shown.size() == get_item_count());

    if(generator_->get_items_shown() == shown) {
        LOG_GUI_G << LOG_HEADER << " returning early" << std::endl;
        return;
    }

    window* window = get_window();
    assert(window);

    const int selected_row = get_selected_row();

    bool resize_needed = false;

    // Local scope for invalidate_layout_blocker
    {
        window::invalidate_layout_blocker invalidate_layout_blocker(*window);

        for(size_t i = 0; i < shown.size(); ++i) {
            generator_->set_item_shown(i, shown[i]);
        }

        point best_size = generator_->calculate_best_size();
        generator_->place(generator_->get_origin(),
            {std::max(best_size.x, content_visible_area().w), best_size.y});

        resize_needed = !content_resize_request();
    }

    if(resize_needed) {
        window->invalidate_layout();
    } else {
        content_grid_->set_visible_rectangle(content_visible_area());
        set_is_dirty(true);
    }

    if(selected_row != get_selected_row()) {
        fire(event::NOTIFY_MODIFIED, *this, nullptr);
    }
}

void setInTreeTaxBits()
{
    No* n;
    for(int i = 0; i < nNo; i++) {
        n = nodes[i];
        if(n->isLeaf == 1) {
            taxBits[n->taxNum] = 1;
        }
    }
    const std::size_t first_one = taxBits.find_first();
    firstOnePos = first_one;
}

/******************************************************************************
* Remaps the bonds after some of the particles have been deleted.
* Dangling bonds are removed too.
******************************************************************************/
void BondsObject::particlesDeleted(const boost::dynamic_bitset<>& deletedParticlesMask)
{
    // Build map that maps old particle indices to new indices.
    std::vector<size_t> indexMap(deletedParticlesMask.size());
    auto index = indexMap.begin();
    size_t oldParticleCount = deletedParticlesMask.size();
    size_t newParticleCount = 0;
    for(size_t i = 0; i < deletedParticlesMask.size(); i++)
        *index++ = deletedParticlesMask.test(i) ? std::numeric_limits<size_t>::max() : newParticleCount++;

    auto result = modifiableStorage()->begin();
    auto bond = modifiableStorage()->begin();
    auto last = modifiableStorage()->end();
    for(; bond != last; ++bond) {
        // Remove invalid bonds.
        if(bond->index1 >= oldParticleCount || bond->index2 >= oldParticleCount)
            continue;

        // Remove dangling bonds whose particles have gone.
        if(deletedParticlesMask.test(bond->index1) || deletedParticlesMask.test(bond->index2))
            continue;

        // Keep but remap particle indices.
        result->pbcShift = bond->pbcShift;
        result->index1 = indexMap[bond->index1];
        result->index2 = indexMap[bond->index2];
        ++result;
    }
    modifiableStorage()->erase(result, last);
    changed();
}

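// A standalone sketch of just the old-to-new index map built at the top of
// particlesDeleted above, with a made-up mask and none of the OVITO types:
// surviving particles receive consecutive new indices, deleted ones a sentinel.
// The helper name example_particle_index_map is hypothetical.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <cstddef>
#include <limits>
#include <vector>

void example_particle_index_map()
{
    boost::dynamic_bitset<> deleted(5);
    deleted.set(1);
    deleted.set(4);

    const std::size_t npos = std::numeric_limits<std::size_t>::max();
    std::vector<std::size_t> indexMap(deleted.size());
    std::size_t newIndex = 0;
    for(std::size_t i = 0; i < deleted.size(); i++)
        indexMap[i] = deleted.test(i) ? npos : newIndex++;

    // Old indices {0, 1, 2, 3, 4} map to {0, npos, 1, 2, npos}.
    assert(indexMap[0] == 0 && indexMap[1] == npos && indexMap[2] == 1
        && indexMap[3] == 2 && indexMap[4] == npos);
}
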
void stacked_widget::select_layers(const boost::dynamic_bitset<>& mask)
{
    assert(mask.size() == get_layer_count());

    select_layer_impl([&](unsigned int i) {
        if(mask[i]) {
            update_selected_layer_index(i);
        }
        return mask[i];
    });
}

void tlistbox::set_row_shown(const boost::dynamic_bitset<>& shown)
{
    assert(generator_);
    assert(shown.size() == get_item_count());

    if (generator_->get_items_shown() == shown) {
        LOG_GUI_G << LOG_HEADER << " returning early" << std::endl;
        return;
    }

    twindow* window = get_window();
    assert(window);

    const int selected_row = get_selected_row();

    bool resize_needed;
    {
        twindow::tinvalidate_layout_blocker invalidate_layout_blocker(*window);

        for(size_t i = 0; i < shown.size(); ++i) {
            generator_->set_item_shown(i, shown[i]);
        }

        tpoint best_size = generator_->calculate_best_size();
        generator_->place(generator_->get_origin(),
            { std::max(best_size.x, content_visible_area().w), best_size.y });

        resize_needed = !content_resize_request();
    }

    if(resize_needed) {
        window->invalidate_layout();
    } else {
        content_grid_->set_visible_rectangle(content_visible_area());
        set_is_dirty(true);
    }

    if(selected_row != get_selected_row() && callback_value_changed_) {
        callback_value_changed_(*this);
    }
}

void EditableSceneBody::setLinkVisibilities(const boost::dynamic_bitset<>& visibilities)
{
    int i;
    const int m = numSceneLinks();
    const int n = std::min(m, (int)visibilities.size());
    for(i = 0; i < n; ++i){
        sceneLink(i)->setVisible(visibilities[i]);
    }
    while(i < m){
        sceneLink(i)->setVisible(false);
        ++i;
    }
    notifyUpdate(impl->modified);
}

// Removes all local destinations from the gids/addrs sequences, keeping only
// the remote ones, and returns an iterator to the new end of the gids range.
// If the returned iterator equals gids.begin(), every destination was local.
inline std::vector<naming::gid_type>::iterator
remove_local_destinations(std::vector<naming::gid_type>& gids,
    std::vector<naming::address>& addrs,
    boost::dynamic_bitset<> const& locals)
{
    HPX_ASSERT(gids.size() == addrs.size());

    std::vector<naming::gid_type>::iterator gids_it = gids.begin();
    std::vector<naming::gid_type>::iterator gids_end = gids.end();
    std::vector<naming::address>::iterator addrs_it = addrs.begin();

    // gids_it = find_if(gids_it, gids_end, pred)
    std::size_t i = 0;
    for (/**/; gids_it != gids_end; ++gids_it, ++addrs_it)
    {
        if (locals.test(i++))
            break;
    }

    if (gids_it == gids_end)
        return gids_it;

    // gids_next = remove_if(gids_it, gids_end, pred)
    std::vector<naming::gid_type>::iterator gids_next = gids_it;
    std::vector<naming::address>::iterator addrs_next = addrs_it;

    for (++gids_it, ++addrs_it; gids_it != gids_end; ++gids_it, ++addrs_it)
    {
        if (!locals.test(i++))
        {
            *gids_next++ = std::move(*gids_it);
            *addrs_next++ = std::move(*addrs_it);
        }
    }

    return gids_next;
}

void randomize(boost::dynamic_bitset<> &bitset)
{
    // Each bit is set independently with probability 0.1.
    for (unsigned loop = 0; loop < bitset.size(); ++loop) {
        double rand_num = (double)rand() / RAND_MAX;
        if (rand_num < 0.1) {
            bitset[loop] = true;
        } else {
            bitset[loop] = false;
        }
    }
}

void table_manager::init_migrate_done_set(boost::dynamic_bitset<> &migrate_done_set,
                                          const vector<uint64_t> &current_state_table)
{
    int bucket_number = 0;
    for (size_t i = 0; i < current_state_table.size(); i += this->copy_count) {
        bucket_number = (int)current_state_table[i++]; // skip the bucket_number
        bool has_migrated = false;
        for (size_t j = 0; j < this->copy_count; ++j) {
            if (current_state_table[i + j] != server_table[bucket_number + j * this->bucket_count]) {
                has_migrated = true;
                break;
            }
        }
        if (has_migrated) {
            migrate_done_set.set(bucket_number, true);
            log_debug("bucket[%d] has migrated", bucket_number);
        }
    }
}

bool executive::ReconvergenceBarrier::eval_Bra(executive::CTAContext &context,
    const ir::PTXInstruction &instr,
    const boost::dynamic_bitset<> & branch,
    const boost::dynamic_bitset<> & fallthrough)
{
    bool isDivergent = false;

    if (instr.uni) {
        // uniform branch: all active threads take the same path
        if (branch.count()) {
            // all threads branch
            context.PC = instr.branchTargetInstruction;
        }
        else {
            // all threads fall through
            context.PC++;
        }
    }
    else {
        // divergence - complicated
        CTAContext branchContext(context), fallthroughContext(context);

        branchContext.active = branch;
        branchContext.PC = instr.branchTargetInstruction;

        fallthroughContext.active = fallthrough;
        fallthroughContext.PC++;

        runtimeStack.pop_back();

        if (branchContext.active.any()) {
            runtimeStack.push_back(branchContext);
        }
        if (fallthroughContext.active.any()) {
            runtimeStack.push_back(fallthroughContext);
        }
        isDivergent = true;
    }

    return isDivergent;
}

/*
 * Finds the set of SSATmps that should be considered for allocation
 * to a full XMM register. These are the SSATmps that satisfy all the
 * following conditions:
 *   a) it requires 2 64-bit registers
 *   b) it's defined in a load instruction
 *   c) all its uses are simple stores to memory
 *
 * The computed set of SSATmps is stored in m_fullXMMCandidates.
 */
void LinearScan::findFullXMMCandidates() {
  boost::dynamic_bitset<> notCandidates(m_irFactory->numTmps());
  m_fullXMMCandidates.reset();
  for (auto* block : m_blocks) {
    for (auto& inst : *block) {
      for (SSATmp& tmp : inst.dsts()) {
        if (tmp.numNeededRegs() == 2 && inst.isLoad()) {
          m_fullXMMCandidates[tmp.id()] = true;
        }
      }
      int idx = 0;
      for (SSATmp* tmp : inst.srcs()) {
        if (tmp->numNeededRegs() == 2 && !inst.storesCell(idx)) {
          notCandidates[tmp->id()] = true;
        }
        idx++;
      }
    }
  }
  m_fullXMMCandidates -= notCandidates;
}

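// The final line above relies on dynamic_bitset's set-difference operator
// (a -= b clears in a every bit that is set in b). A tiny standalone
// illustration, unrelated to the HHVM types; the helper name is hypothetical.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <string>

void example_bitset_difference()
{
    boost::dynamic_bitset<> candidates(std::string("1110"));
    boost::dynamic_bitset<> excluded(std::string("0110"));
    candidates -= excluded; // keep bits set in 'candidates' but not in 'excluded'
    assert(candidates == boost::dynamic_bitset<>(std::string("1000")));
}
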
bool BDDCalculator::calculate(const boost::dynamic_bitset<> &varValues) const
{
    if(!m_proot)
        return false;

    BDDCalculator::Node *pnode = m_proot->m_pnode;
    if(!pnode)
        return false;

    for(;;) {
        if(pnode->m_fixTrue && pnode->m_fixFalse) {
            // Internal node: follow the branch selected by the current variable's value.
            if(varValues.test(pnode->m_varId))
                pnode = pnode->m_fixTrue;
            else
                pnode = pnode->m_fixFalse;
        }
        else
            return pnode->m_value; // terminal node
    }
}

void SparseBipartiteGraph::convert_to_bitset(boost::dynamic_bitset<>& bits) const
{
    std::vector<int> adj;
    unsigned int n = getNumberOfNodes() / 2;
    bits.resize(n * n);

    for (int u = 0; u < n; u++) {
        getNeighbors(u, adj);
        for (std::vector<int>::iterator it = adj.begin(); it != adj.end(); ++it) {
            int v = *it;
            unsigned int i = n * u + v - n;
            bits[n * n - i - 1] = 1;
        }
    }
}

bool executive::ReconvergenceTFGen6::eval_Bra(executive::CTAContext &context,
    const ir::PTXInstruction &instr,
    const boost::dynamic_bitset<> & branch,
    const boost::dynamic_bitset<> & fallthrough)
{
    report("eval_Bra([PC " << context.PC << "])");

    // handle nops
    if (!context.active.count()) {
        context.PC++;
        return false;
    }

    for (unsigned int id = 0, end = branch.size(); id != end; ++id) {
        if (branch[id]) {
            threadPCs[id] = instr.branchTargetInstruction;
        }
    }
    for (unsigned int id = 0, end = fallthrough.size(); id != end; ++id) {
        if (fallthrough[id]) {
            ++threadPCs[id];
        }
    }

    bool divergent = true;
    if (branch.count() == branch.size()) {
        context.PC = instr.branchTargetInstruction;
        divergent = false;
    }
    else if (fallthrough.count() == fallthrough.size()) {
        ++context.PC;
        divergent = false;
    }
    else {
        context.PC = instr.reconvergeInstruction;
    }

    return divergent;
}

/// \brief Check if the split is informative: it must have at least
///        two taxa on each side.
///
/// \param p The split
bool informative(const boost::dynamic_bitset<>& p)
{
    int N = p.size();
    int C = p.count();
    return (C >= 2) and ((N - C) >= 2);
}

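// A standalone check of the definition above (hypothetical helper name,
// assuming informative is in scope): a split that isolates a single taxon
// is never informative.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <string>

void example_informative()
{
    // 5 taxa, 2 on one side and 3 on the other -> informative.
    assert(informative(boost::dynamic_bitset<>(std::string("00011"))));
    // 5 taxa, a single taxon split off -> not informative.
    assert(!informative(boost::dynamic_bitset<>(std::string("00001"))));
}
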
bool mark (unsigned key)
{
    if (flags_[key])
        return false;
    flags_.set(key);
    return true;
}

void reset () { flags_.reset(); }
inline void insert_if_exists(boost::dynamic_bitset<>& cont, const boost::dynamic_bitset<> *other)
{
    if (other && !other->empty()) {
        // dynamic_bitset::operator|= requires both operands to have the same
        // size, so grow both to the larger of the two sizes before merging.
        cont.resize(std::max(cont.size(), other->size()));
        boost::dynamic_bitset<> tmp(*other);
        tmp.resize(cont.size());
        cont |= tmp;
    }
}

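// A standalone sketch (hypothetical helper name) of merging bitsets of
// different lengths with insert_if_exists above; a null or empty second
// argument leaves the destination untouched.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <string>

void example_insert_if_exists()
{
    boost::dynamic_bitset<> a(std::string("0101")); // 4 bits
    boost::dynamic_bitset<> b(std::string("110"));  // 3 bits
    insert_if_exists(a, &b);                        // a grows as needed, then ORs in b
    assert(a == boost::dynamic_bitset<>(std::string("0111")));
    insert_if_exists(a, nullptr);                   // ignored
    assert(a == boost::dynamic_bitset<>(std::string("0111")));
}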