/// Try to get a connection to \a l from the cache. /// /// \returns A usable connection to \a l if a connection could be /// found, otherwise a default constructed connection. /// /// \note The connection must be returned to the cache by calling /// \a reclaim(). connection_type get(key_type const& l) { mutex_type::scoped_lock lock(mtx_); // Check if this key already exists in the cache. typename cache_type::iterator const it = cache_.find(l); // Check if this key already exists in the cache. if (it != cache_.end()) { // Key exists in cache. // Update LRU meta data. key_tracker_.splice( key_tracker_.end() , key_tracker_ , boost::get<2>(it->second) ); // If connections to the locality are available in the cache, // remove the oldest one and return it. if (!boost::get<0>(it->second).empty()) { connection_type result = boost::get<0>(it->second).front(); boost::get<0>(it->second).pop_front(); check_invariants(); return result; } } // If we get here then the item is not in the cache. check_invariants(); return connection_type(); }
/// Insert \a key into this (non-full) node and attach \a other as the
/// child immediately to the right of the newly placed key. Ownership of
/// \a other is transferred to this node and its parent link is fixed up.
void node::add(key_t key, unique_ptr<node>& other)
{
    check_invariants();
    assert(size < range_max);
    assert(other.get() != nullptr);

    const auto slot = find_pos(key);
    place_key(key, slot);

    // The new child lives one slot to the right of the key.
    auto& right_child = children[slot + 1];
    right_child = move(other);
    right_child->parent = this;

    check_invariants();
}
/// Shift keys[pos..size) and their right-hand children one slot to the
/// right, then store \a key at \a pos and bump the key count.
void node::place_key(const key_t& key, const size_t pos)
{
    check_invariants();

    // Walk from the end of the used range down to the insertion point,
    // opening up a gap at `pos`.
    auto i = size;
    while (i > pos)
    {
        keys[i] = keys[i - 1];
        // The destination child slot must be vacant before we move into it.
        assert(!children[i + 1]);
        children[i + 1] = move(children[i]);
        --i;
    }

    keys[pos] = key;
    ++size;

    check_invariants();
}
/* Saturation-based reachability: repeatedly saturate each level until the
 * visited set reaches a fixpoint.
 *
 * reach_proc    - per-level reachability procedure
 * visited       - set of visited states (input/output)
 * reach_groups  - bitvector selecting the transition groups to apply
 * eg_count/next_count/guard_count - statistics counters (output)
 */
void
reach_sat_loop(reach_proc_t reach_proc, vset_t visited,
               bitvector_t *reach_groups, long *eg_count, long *next_count,
               long *guard_count)
{
    bitvector_t groups[max_sat_levels];
    int empty_groups[max_sat_levels];
    vset_t old_vis = vset_create(domain, -1, NULL);
    /* BUG FIX: prev_vis is indexed by saturation level (0..max_sat_levels-1)
     * below, but was previously declared with nGrps elements; whenever
     * max_sat_levels > nGrps that read and wrote past the end of the array.
     * Size it by max_sat_levels to match every use. */
    vset_t prev_vis[max_sat_levels];

    for (int k = 0; k < max_sat_levels; k++)
        bitvector_create(&groups[k], nGrps);

    initialize_levels(groups, empty_groups, NULL, reach_groups);

    /* prev_vis[] entries are only materialized when saturation levels
     * are being saved; otherwise they stay NULL. */
    for (int i = 0; i < max_sat_levels; i++)
        prev_vis[i] = save_sat_levels ? vset_create(domain, -1, NULL) : NULL;

    /* Outer fixpoint: keep sweeping over the levels until a full sweep
     * no longer adds states. */
    while (!vset_equal(old_vis, visited)) {
        vset_copy(old_vis, visited);

        for (int k = 0; k < max_sat_levels; k++) {
            if (empty_groups[k]) continue;  /* nothing to do at this level */
            Warning(infoLong, "Saturating level: %d", k);
            reach_proc(visited, prev_vis[k], &groups[k], eg_count,
                       next_count, guard_count);
            check_invariants(visited, -1);
            if (save_sat_levels) vset_copy(prev_vis[k], visited);
        }
    }

    /* Release per-level resources. */
    for (int k = 0; k < max_sat_levels; k++)
        bitvector_free(&groups[k]);

    vset_destroy(old_vis);

    if (save_sat_levels)
        for (int i = 0; i < max_sat_levels; i++)
            vset_destroy(prev_vis[i]);
}
/// Returns a connection for \a l to the cache. /// /// \note The cache must already be aware of the connection, through /// a prior call to \a get() or \a get_or_reserve(). void reclaim(key_type const& l, connection_type const& conn) { mutex_type::scoped_lock lock(mtx_); // Search for an entry for this key. typename cache_type::iterator const ct = cache_.find(l); // Key should already exist in the cache. FIXME: This should // probably throw as could easily be triggered by caller error. BOOST_ASSERT(ct != cache_.end()); // Update LRU meta data. key_tracker_.splice( key_tracker_.end() , key_tracker_ , boost::get<2>(ct->second) ); // Add the connection to the entry. boost::get<0>(ct->second).push_back(conn); // FIXME: Again, this should probably throw instead of asserting, // as invariants could be invalidated here due to caller error. check_invariants(); }
void clear() { boost::lock_guard<mutex_type> lock(mtx_); key_tracker_.clear(); cache_.clear(); check_invariants(); }
/* Verify the structural invariants of SET's AVL tree and that the cached
   element count matches the number of nodes actually present.  */
void
gl_avltree_oset_check_invariants (gl_oset_t set)
{
  size_t node_count = 0;

  if (set->root != NULL)
    check_invariants (set->root, NULL, &node_count);

  /* Every node must have been counted exactly once.  */
  if (set->count != node_count)
    abort ();
}
/* For debugging.
   Recursively verify the AVL invariants of the subtree rooted at NODE:
   correct parent links, balance in {-1,0,1} and matching the stored
   balance field.  Increments *COUNTERP once per node visited and returns
   the height of the subtree.  */
static unsigned int
check_invariants (gl_oset_node_t node, gl_oset_node_t parent,
                  size_t *counterp)
{
  unsigned int height_l =
    (node->left != NULL ? check_invariants (node->left, node, counterp) : 0);
  unsigned int height_r =
    (node->right != NULL ? check_invariants (node->right, node, counterp) : 0);
  int balance = (int) height_r - (int) height_l;

  if (node->parent != parent)
    abort ();
  if (balance < -1 || balance > 1)
    abort ();
  if (node->balance != balance)
    abort ();

  (*counterp)++;

  return 1 + (height_l > height_r ? height_l : height_r);
}
/// Destroys all connections in the cache, and resets all counts. /// /// \note Calling this function while connections are still checked out /// of the cache is a bad idea, and will violate this classes /// invariants. void clear() { mutex_type::scoped_lock lock(mtx_); key_tracker_.clear(); cache_.clear(); connections_ = 0; // FIXME: This should probably throw instead of asserting, as it // can be triggered by caller error. check_invariants(); }
/* For debugging.
   Recursively verify the red-black invariants of the subtree rooted at
   NODE: correct parent links, a valid color, a black root, and equal
   black-heights on both sides.  Increments *COUNTERP once per node and
   returns the black-height of the subtree.  */
static unsigned int
check_invariants (gl_oset_node_t node, gl_oset_node_t parent,
                  size_t *counterp)
{
  unsigned int bh_left =
    (node->left != NULL ? check_invariants (node->left, node, counterp) : 0);
  unsigned int bh_right =
    (node->right != NULL ? check_invariants (node->right, node, counterp) : 0);

  if (node->parent != parent)
    abort ();
  if (node->color != BLACK && node->color != RED)
    abort ();
  /* The root of the tree must be black.  */
  if (parent == NULL && node->color != BLACK)
    abort ();
  if (bh_left != bh_right)
    abort ();

  (*counterp)++;

  return bh_left + (node->color == BLACK ? 1 : 0);
}
/// Returns a connection for \a l to the cache. /// /// \note The cache must already be aware of the connection, through /// a prior call to \a get() or \a get_or_reserve(). void reclaim(key_type const& l, connection_type const& conn) { std::lock_guard<mutex_type> lock(mtx_); // Search for an entry for this key. typename cache_type::iterator const ct = cache_.find(l); if (ct != cache_.end()) { // Update LRU meta data. key_tracker_.splice( key_tracker_.end() , key_tracker_ , lru_reference(ct->second) ); // Return the connection back to the cache only if the number // of connections does not need to be shrunk. if (num_existing_connections(ct->second) <= max_num_connections(ct->second)) { // Add the connection to the entry. cached_connections(ct->second).push_back(conn); ++reclaims_; #if defined(HPX_TRACK_STATE_OF_OUTGOING_TCP_CONNECTION) conn->set_state(Connection::state_reclaimed); #endif } else { // Adjust the number of existing connections for this key. decrement_connection_count(ct->second); // do the accounting ++evictions_; // the connection itself will go out of scope on return #if defined(HPX_TRACK_STATE_OF_OUTGOING_TCP_CONNECTION) conn->set_state(Connection::state_deleting); #endif } // FIXME: Again, this should probably throw instead of asserting, // as invariants could be invalidated here due to caller error. check_invariants(); } // else { // // Key should already exist in the cache. FIXME: This should // // probably throw as could easily be triggered by caller error. // HPX_ASSERT(shutting_down_); // } }
/// Insert \a key into the subtree rooted at this node, descending to the
/// appropriate child and splitting full children along the way.
void node::insert(key_t key)
{
    check_invariants();

    // Leaf: the key is stored directly in this node.
    if (is_leaf())
    {
        place_key(key, find_pos(key));
        check_invariants();
        return;
    }

    // Interior node: route the key to the correct child.
    auto pos = find_pos(key);
    if (!children[pos])
    {
        // No child yet at this slot; create one.
        children[pos] = unique_ptr<node>(new node(this));
    }
    else if (children[pos]->size == range_max)
    {
        // Child is full: split it first. Splitting promotes a key into
        // this node, so the target slot has to be recomputed.
        split(children[pos].get());
        pos = find_pos(key);
    }
    children[pos]->insert(key);

    check_invariants();
}
/// Try to obtain a cached buffer of at least \a size bytes.
///
/// \returns true and assigns the buffer to \a conn if a suitable entry
///          was found (the entry is removed from the cache); false
///          otherwise (\a conn is left untouched).
bool get(key_type const& size, connection_type& conn)
{
    boost::lock_guard<mutex_type> guard(mtx_);

    // Smallest cached buffer that is at least as large as requested.
    cache_type::iterator const entry = cache_.lower_bound(size);

    if (entry == cache_.end())
    {
        // Nothing big enough is cached.
        check_invariants();
        return false;
    }

    // Hand the buffer out and drop the entry from cache and LRU list.
    conn = boost::get<0>(entry->second);
    key_tracker_.erase(boost::get<1>(entry->second));
    cache_.erase(entry);

    check_invariants();
    return true;
}
/* For debugging.
   Recursively verify the AVL invariants of the subtree rooted at NODE:
   correct parent links, a branch_size equal to the sizes of both child
   branches plus one, and a balance in {-1,0,1} matching the stored
   balance field.  Returns the height of the subtree.  */
static unsigned int
check_invariants (gl_list_node_t node, gl_list_node_t parent)
{
  unsigned int height_l =
    (node->left != NULL ? check_invariants (node->left, node) : 0);
  unsigned int height_r =
    (node->right != NULL ? check_invariants (node->right, node) : 0);
  int balance = (int) height_r - (int) height_l;

  if (node->parent != parent)
    abort ();
  if (node->branch_size
      != ((node->left != NULL ? node->left->branch_size : 0)
          + 1
          + (node->right != NULL ? node->right->branch_size : 0)))
    abort ();
  if (balance < -1 || balance > 1)
    abort ();
  if (node->balance != balance)
    abort ();

  return 1 + (height_l > height_r ? height_l : height_r);
}
/// Try to get a connection to \a l from the cache. /// /// \returns A usable connection to \a l if a connection could be /// found, otherwise a default constructed connection. /// /// \note The connection must be returned to the cache by calling /// \a reclaim(). connection_type get(key_type const& l) { std::lock_guard<mutex_type> lock(mtx_); // Check if this key already exists in the cache. typename cache_type::iterator const it = cache_.find(l); // Check if this key already exists in the cache. if (it != cache_.end()) { // Key exists in cache. // Update LRU meta data. key_tracker_.splice( key_tracker_.end() , key_tracker_ , lru_reference(it->second) ); // If connections to the locality are available in the cache, // remove the oldest one and return it. if (!cached_connections(it->second).empty()) { value_type& connections = cached_connections(it->second); connection_type result = connections.front(); connections.pop_front(); ++hits_; check_invariants(); return result; } } // If we get here then the item is not in the cache. ++misses_; check_invariants(); return connection_type(); }
/* BFS-with-fixpoint reachability for saturation=sat-fix: on every level,
 * first expand the selected transition relations, then run a least
 * fixpoint over all groups, optionally checking for deadlocks.
 *
 * reach_proc   - unused in this strategy
 * visited      - set of visited states (input/output)
 * reach_groups - bitvector selecting the transition groups to apply
 * eg_count/next_count - statistics counters (output)
 * guard_count  - unused (guard-splitting unsupported here)
 */
void
reach_sat_fix(reach_proc_t reach_proc, vset_t visited,
              bitvector_t *reach_groups, long *eg_count, long *next_count,
              long *guard_count)
{
    (void) reach_proc;
    (void) guard_count;

    if (PINS_USE_GUARDS)
        Abort("guard-splitting not supported with saturation=sat-fix");

    int level = 0;
    vset_t old_vis = vset_create(domain, -1, NULL);
    /* Auxiliary sets are only needed when deadlock detection is on. */
    vset_t deadlocks = dlk_detect ? vset_create(domain, -1, NULL) : NULL;
    vset_t dlk_temp = dlk_detect ? vset_create(domain, -1, NULL) : NULL;
    LACE_ME;

    /* Iterate until the visited set no longer grows. */
    while (!vset_equal(visited, old_vis)) {
        if (trc_output != NULL) save_level(visited);
        vset_copy(old_vis, visited);
        stats_and_progress_report(NULL, visited, level);
        level++;

        /* Expand the transition relations for all selected groups. */
        for (int i = 0; i < nGrps; i++) {
            if (!bitvector_is_set(reach_groups, i)) continue;
            expand_group_next(i, visited);
            reach_chain_stop();
            (*eg_count)++;
        }

        /* Remember the frontier before the fixpoint for deadlock checks. */
        if (dlk_detect) vset_copy(deadlocks, visited);

        if (USE_PARALLELISM)
            vset_least_fixpoint_par(visited, visited, group_next, nGrps);
        else
            vset_least_fixpoint(visited, visited, group_next, nGrps);
        (*next_count)++;

        check_invariants(visited, level);

        if (dlk_detect) {
            /* A state deadlocks if no group has a successor from it:
             * remove all states with predecessors under some group. */
            for (int i = 0; i < nGrps; i++) {
                vset_prev(dlk_temp, visited, group_next[i], deadlocks);
                reduce(i, dlk_temp);
                vset_minus(deadlocks, dlk_temp);
                vset_clear(dlk_temp);
            }
            deadlock_check(deadlocks, reach_groups);
        }

        vset_reorder(domain);
    }

    vset_destroy(old_vis);
    if (dlk_detect) {
        vset_destroy(deadlocks);
        vset_destroy(dlk_temp);
    }
}
/* For debugging.
   Recursively verify the red-black invariants of the subtree rooted at
   NODE: correct parent links, a branch_size equal to the sizes of both
   child branches plus one, a valid color, a black root, and equal
   black-heights on both sides.  Returns the black-height of the
   subtree.  */
static unsigned int
check_invariants (gl_list_node_t node, gl_list_node_t parent)
{
  unsigned int bh_left =
    (node->left != NULL ? check_invariants (node->left, node) : 0);
  unsigned int bh_right =
    (node->right != NULL ? check_invariants (node->right, node) : 0);

  if (node->parent != parent)
    abort ();
  if (node->branch_size
      != ((node->left != NULL ? node->left->branch_size : 0)
          + 1
          + (node->right != NULL ? node->right->branch_size : 0)))
    abort ();
  if (node->color != BLACK && node->color != RED)
    abort ();
  /* The root of the tree must be black.  */
  if (parent == NULL && node->color != BLACK)
    abort ();
  if (bh_left != bh_right)
    abort ();

  return bh_left + (node->color == BLACK ? 1 : 0);
}
/* Full saturation reachability (saturation=sat): register on-the-fly
 * expansion callbacks for the selected transition groups and run a single
 * least fixpoint, optionally followed by a deadlock check.
 *
 * reach_proc   - unused in this strategy
 * visited      - set of visited states (input/output)
 * reach_groups - bitvector selecting the transition groups to apply
 * eg_count     - statistics counter, shared by all expansion callbacks
 * next_count/guard_count - unused
 */
void
reach_sat(reach_proc_t reach_proc, vset_t visited,
          bitvector_t *reach_groups, long *eg_count, long *next_count,
          long *guard_count)
{
    (void) reach_proc;
    (void) next_count;
    (void) guard_count;

    if (PINS_USE_GUARDS)
        Abort("guard-splitting not supported with saturation=sat");

    if (act_detect != NULL && trc_output != NULL)
        Abort("Action detection with trace generation not supported");

    /* Attach an expand callback to each selected group's relation so the
     * relations grow on demand during the fixpoint computation.
     * NOTE(review): each ctx is allocated here and handed to the vset
     * layer; its lifetime appears to be managed there -- confirm. */
    for (int i = 0; i < nGrps; i++) {
        if (bitvector_is_set(reach_groups, i)) {
            struct expand_info *ctx = RTmalloc(sizeof(struct expand_info));
            ctx->group = i;
            ctx->group_explored = group_explored[i];
            ctx->eg_count = eg_count;

            vrel_set_expand(group_next[i], expand_group_next_projected, ctx);
        }
    }

    if (trc_output != NULL) save_level(visited);
    stats_and_progress_report(NULL, visited, 0);

    if (USE_PARALLELISM)
        vset_least_fixpoint_par(visited, visited, group_next, nGrps);
    else
        vset_least_fixpoint(visited, visited, group_next, nGrps);

    stats_and_progress_report(NULL, visited, 1);
    check_invariants(visited, -1);

    if (dlk_detect) {
        /* A state deadlocks if no group has a successor from it:
         * remove all states with predecessors under some group. */
        vset_t deadlocks = vset_create(domain, -1, NULL);
        vset_t dlk_temp = vset_create(domain, -1, NULL);
        vset_copy(deadlocks, visited);
        for (int i = 0; i < nGrps; i++) {
            vset_prev(dlk_temp, visited, group_next[i], deadlocks);
            reduce(i, dlk_temp);
            vset_minus(deadlocks, dlk_temp);
            vset_clear(dlk_temp);
        }
        deadlock_check(deadlocks, reach_groups);
        vset_destroy(deadlocks);
        vset_destroy(dlk_temp);
    }
}
/// Grow the tree by one level: push the current (full) root down as the
/// sole child of a fresh root, then split it. Used when an insert hits a
/// full root node.
void tree::reroot()
{
    assert(root);
    root.get()->check_invariants();

    // New root has no parent; keep a raw handle on the old root since
    // ownership is about to be moved into the new root's child slot.
    auto new_root = unique_ptr<node>(new node(nullptr));
    auto old_root_raw = root.get();

    root->parent = new_root.get();
    new_root->children[0] = move(root);
    // NOTE(review): this re-assigns the same parent pointer already set
    // two lines above -- harmless but redundant.
    new_root->children[0]->parent = new_root.get();
    root = move(new_root);

    // Split the demoted (full) old root, promoting its middle key into
    // the new root.
    node::split(old_root_raw);

    root.get()->check_invariants();
    old_root_raw->check_invariants();
}
/// Destroys all connections in the cache, and resets all counts. /// /// \note Calling this function while connections are still checked out /// of the cache is a bad idea, and will violate this class' /// invariants. void clear() { std::lock_guard<mutex_type> lock(mtx_); key_tracker_.clear(); cache_.clear(); connections_ = 0; insertions_ = 0; evictions_ = 0; hits_ = 0; misses_ = 0; reclaims_ = 0; // FIXME: This should probably throw instead of asserting, as it // can be triggered by caller error. check_invariants(); }
/// Destroys all connections for the give locality in the cache, reset /// all associated counts. /// /// \note Calling this function while connections are still checked out /// of the cache is a bad idea, and will violate this classes /// invariants. void clear(key_type const& l) { mutex_type::scoped_lock lock(mtx_); // Check if this key already exists in the cache. typename cache_type::iterator const it = cache_.find(l); if (it != cache_.end()) { // Remove from LRU meta data. key_tracker_.erase(boost::get<2>(it->second)); // correct counter to avoid assertions later on connections_ -= boost::get<1>(it->second); // Erase entry if key exists in the cache. cache_.erase(it); } // FIXME: This should probably throw instead of asserting, as it // can be triggered by caller error. check_invariants(); }
/// Destroys all connections for the given locality in the cache, reset /// all associated counts. void clear(key_type const& l, connection_type const& conn) { std::lock_guard<mutex_type> lock(mtx_); // Check if this key already exists in the cache. typename cache_type::iterator const it = cache_.find(l); if (it != cache_.end()) { // Adjust the number of existing connections for this key. decrement_connection_count(it->second); // do the accounting ++evictions_; // the connection itself will go out of scope on return #if defined(HPX_TRACK_STATE_OF_OUTGOING_TCP_CONNECTION) conn->set_state(Connection::state_deleting); #endif } check_invariants(); }
/* Load the sample text file "boundaries.utf8" and run check_invariants()
 * over its contents.  Aborts the test if the file cannot be read. */
static void
test_boundaries (void)
{
  gchar *text;
  const gchar *filename;

  /* g_test_get_filename() only exists from GLib 2.37.2 on; fall back to
   * a compile-time source-directory path for older versions. */
#if GLIB_CHECK_VERSION(2, 37, 2)
  filename = g_test_get_filename (G_TEST_DIST, "boundaries.utf8", NULL);
#else
  filename = SRCDIR "/boundaries.utf8";
#endif

  g_print ("sample file: %s\n", filename);

  if (!g_file_get_contents (filename, &text, NULL, NULL))
    fail ("Couldn't open sample text file");

  check_invariants (text);

  g_free (text);

  printf ("testboundaries passed\n");
}
/// Destroys all connections for the given locality in the cache, reset /// all associated counts. /// /// \note Calling this function while connections are still checked out /// of the cache is a bad idea, and will violate this classes /// invariants. void clear(key_type const& l) { std::lock_guard<mutex_type> lock(mtx_); // Check if this key already exists in the cache. typename cache_type::iterator it = cache_.find(l); if (it != cache_.end()) { // Remove from LRU meta data. key_tracker_.erase(lru_reference(it->second)); // correct counter to avoid assertions later on std::size_t num_existing = num_existing_connections(it->second); connections_ -= num_existing; evictions_ += num_existing; // Erase entry if key exists in the cache. cache_.erase(it); } // FIXME: This should probably throw instead of asserting, as it // can be triggered by caller error. check_invariants(); }
// add the given data_buffer to the cache, evict old entries if needed void add(key_type const& size, connection_type const& conn) { boost::lock_guard<mutex_type> lock(mtx_); if (key_tracker_.empty()) { HPX_ASSERT(cache_.empty()); } else { // eviction strategy implemented here ... // If we reached maximum capacity, evict one entry ... // Find the least recently used key entry key_tracker_type::iterator it = key_tracker_.begin(); while (cache_.size() >= max_cache_size_) { // find it ... cache_type::iterator const kt = cache_.find(*it); HPX_ASSERT(kt != cache_.end()); // ... remove it cache_.erase(kt); key_tracker_.erase(it); it = key_tracker_.begin(); HPX_ASSERT(it != key_tracker_.end()); } } // Add a new entry ... key_tracker_type::iterator it = key_tracker_.insert(key_tracker_.end(), size); cache_.insert(std::make_pair(size, boost::make_tuple(conn, it))); check_invariants(); }
/// Try to get a connection to \a l from the cache, or reserve space for /// a new connection to \a l. This function may evict entries from the /// cache. /// /// \returns If a connection was found in the cache, its value is /// assigned to \a conn and this function returns true. If a /// connection was not found but space was reserved, \a conn is /// set such that conn.get() == 0, and this function returns /// true. If a connection could not be found and space could /// not be returned, \a conn is unmodified and this function /// returns false. /// If force_nsert is true, a new connection entry will be /// created even if that means the cache limits will be /// exceeded. /// /// \note The connection must be returned to the cache by calling /// \a reclaim(). bool get_or_reserve(key_type const& l, connection_type& conn, bool force_insert = false) { std::lock_guard<mutex_type> lock(mtx_); typename cache_type::iterator const it = cache_.find(l); // Check if this key already exists in the cache. if (it != cache_.end()) { // Key exists in cache. // Update LRU meta data. key_tracker_.splice( key_tracker_.end() , key_tracker_ , lru_reference(it->second) ); // If connections to the locality are available in the cache, // remove the oldest one and return it. if (!cached_connections(it->second).empty()) { value_type& connections = cached_connections(it->second); conn = connections.front(); connections.pop_front(); #if defined(HPX_TRACK_STATE_OF_OUTGOING_TCP_CONNECTION) conn->set_state(Connection::state_reinitialized); #endif ++hits_; check_invariants(); return true; } // Otherwise, if we have less connections for this locality // than the maximum, try to reserve space in the cache for a new // connection. if (num_existing_connections(it->second) < max_num_connections(it->second) || force_insert) { // See if we have enough space or can make space available. 
// Note that if we don't have any space and there are no // outstanding connections for this locality, we grow the // cache size beyond its limit (hoping that it will be // reduced in size next time some connection is handed back // to the cache). if (!free_space() && num_existing_connections(it->second) != 0 && !force_insert) { // If we can't find or make space, give up. ++misses_; check_invariants(); return false; } // Make sure the input connection shared_ptr doesn't hold // anything. conn.reset(); // Increase the per-locality and overall connection counts. increment_connection_count(it->second); // Statistics ++insertions_; check_invariants(); return true; } // We've reached the maximum number of connections for this // locality, and none of them are checked into the cache, so // we have to give up. ++misses_; check_invariants(); return false; } // Key (locality) isn't in cache. // See if we have enough space or can make space available. // Note that we ignore the outcome of free_space() here as we have // to guarantee to have space for the new connection as there are // no connections outstanding for this locality. If free_space // fails we grow the cache size beyond its limit (hoping that it // will be reduced in size next time some connection is handed back // to the cache). free_space(); // Update LRU meta data. typename key_tracker_type::iterator kt = key_tracker_.insert(key_tracker_.end(), l); cache_.insert(std::make_pair( l, util::make_tuple( value_type(), 1, max_connections_per_locality_, kt )) ); // Make sure the input connection shared_ptr doesn't hold anything. conn.reset(); // Increase the overall connection counts. ++connections_; ++insertions_; check_invariants(); return true; }
/* Verify the structural invariants of LIST's AVL tree; an empty tree is
   trivially valid.  */
void _GL_ATTRIBUTE_CONST
gl_avltree_list_check_invariants (gl_list_t list)
{
  if (list->root != NULL)
    (void) check_invariants (list->root, NULL);
}
/* Verify the structural invariants of LIST's AVL tree; an empty tree is
   trivially valid.  */
void
gl_avltree_list_check_invariants (gl_list_t list)
{
  if (list->root != NULL)
    check_invariants (list->root, NULL);
}
/* Verify the structural invariants of LIST's red-black tree; an empty
   tree is trivially valid.  */
void
gl_rbtreehash_list_check_invariants (gl_list_t list)
{
  if (list->root != NULL)
    check_invariants (list->root, NULL);
}
/// Try to get a connection to \a l from the cache, or reserve space for
/// a new connection to \a l. This function may evict entries from the
/// cache.
///
/// \returns If a connection was found in the cache, its value is
///          assigned to \a conn and this function returns true. If a
///          connection was not found but space was reserved, \a conn is
///          set such that conn.get() == 0, and this function returns
///          true. If a connection could not be found and space could
///          not be returned, \a conn is unmodified and this function
///          returns false.
///
/// \note The connection must be returned to the cache by calling
///       \a reclaim().
bool get_or_reserve(key_type const& l, connection_type& conn)
{
    mutex_type::scoped_lock lock(mtx_);

    typename cache_type::iterator const it = cache_.find(l);

    // Check if this key already exists in the cache.
    if (it != cache_.end())
    {
        // Key exists in cache.

        // Update LRU meta data: mark the key as most recently used.
        key_tracker_.splice(
            key_tracker_.end()
          , key_tracker_
          , boost::get<2>(it->second)
            );

        // If connections to the locality are available in the cache,
        // remove the oldest one and return it.
        if (!boost::get<0>(it->second).empty())
        {
            conn = boost::get<0>(it->second).front();
            boost::get<0>(it->second).pop_front();

            check_invariants();
            return true;
        }

        // Otherwise, if we have less connections for this locality
        // than the maximum, try to reserve space in the cache for a new
        // connection.
        if (boost::get<1>(it->second) < max_connections_per_locality_)
        {
            // See if we have enough space or can make space available.
            // If we can't find or make space, give up.
            if (!free_space())
            {
                check_invariants();
                return false;
            }

            // Make sure the input connection shared_ptr doesn't hold
            // anything.
            conn.reset();

            // Increase the per-locality and overall connection counts.
            ++boost::get<1>(it->second);
            ++connections_;

            check_invariants();
            return true;
        }

        // We've reached the maximum number of connections for this
        // locality, and none of them are checked into the cache, so
        // we have to give up.
        check_invariants();
        return false;
    }

    // Key isn't in cache.

    // See if we have enough space or can make space available.
    // If we can't find or make space, give up.
    if (!free_space())
    {
        check_invariants();
        return false;
    }

    // Update LRU meta data: insert the new key as most recently used.
    typename key_tracker_type::iterator kt =
        key_tracker_.insert(key_tracker_.end(), l);

    cache_.insert(
        std::make_pair(l, boost::make_tuple(value_type(), 1, kt)));

    // Make sure the input connection shared_ptr doesn't hold anything.
    conn.reset();

    // Increase the overall connection counts.
    ++connections_;

    check_invariants();
    return true;
}