/**
 * Return a list of all interval_db's that are entirely contained in
 * (startKey, endKey].  Written for use by precopy when the interval
 * hasn't been merged yet.
 *
 * @param startKey  exclusive lower bound of the query range
 * @param endKey    inclusive upper bound of the query range
 * @param lockDB    WRLOCK: matching dbs are returned with dbLock
 *                  write-held; NOLOCK: returned unlocked; any other
 *                  value: returned with dbLock read-held.  The caller
 *                  is responsible for releasing held locks.
 * @return list of matching interval_db pointers.
 *
 * -jhferris, 2/21/10
 */
list<interval_db*> node_mgr::findAllIntervalDb(const string* startKey,
                                               const string* endKey,
                                               int lockDB) {
    DBID start_id(*startKey);
    DBID end_id(*endKey);
    list<interval_db*> db_list;

    // Lock the interval list so dbs can be indexed safely.
    pthread_rwlock_rdlock(&interval_lock);
    for (uint i = 0; i < dbs.size(); i++) {
        // Cache the element while interval_lock is held: once the list
        // lock is dropped below, other threads may mutate dbs, so
        // re-reading dbs[i] after the unlock would be a race.
        interval_db* db = dbs[i];
        if (lockDB == WRLOCK) {
            pthread_rwlock_wrlock(&(db->dbLock));
        } else {
            // RDLOCK and NOLOCK both take a read lock so the range can
            // be examined safely; NOLOCK drops it again before returning.
            pthread_rwlock_rdlock(&(db->dbLock));
        }
        // Unlock list so others can access it while we test this db.
        pthread_rwlock_unlock(&interval_lock);

        // ownership => (beginID + 1) to endID [both inclusive]
        /*
         * To explain this ugly if: we have a range (a,b] and we want
         * all databases (c,d] such that it resides entirely within
         * (a,b].  Checking the endpoint is easy because it's inclusive,
         * but if c == a then between will return false and it will
         * still be a valid subrange so we check for that condition.
         */
        const DBID *d_end_id = db->h->getEndID();
        const DBID *d_start_id = db->h->getStartID();
        if (between(&start_id, &end_id, d_end_id) &&
            (between(&start_id, &end_id, d_start_id) ||
             (start_id == *d_start_id))) {
            if (lockDB == NOLOCK) {
                pthread_rwlock_unlock(&(db->dbLock));
            }
            db_list.push_back(db);
        } else {
            // Not a match: release the per-db lock taken above.
            pthread_rwlock_unlock(&(db->dbLock));
        }
        // getEndID()/getStartID() hand back heap copies we must free.
        delete d_end_id;
        delete d_start_id;

        // Re-acquire the list lock for the next iteration's size check
        // and element access.
        pthread_rwlock_rdlock(&interval_lock);
    }
    // interval_lock is locked if it gets here, so unlock.
    pthread_rwlock_unlock(&interval_lock);
    return db_list;
}
// Builds the condensed redundancy map from the given uncondensed map.
// A key is considered "main" when it never appears inside any other
// key's value set; main keys seed the condensed map, then remaining
// ids are folded in greedily, largest value set first.
RedundancyMap<Id> Condense(RedundancyMap<Id> uncondensed_map) {
    uncondensed_map_ = uncondensed_map;
    ClearParams();
    TRACE("Start condensing");

    TRACE("Computing of main keys");
    auto all_ids = uncondensed_map_.AllElements();

    // Start by assuming every id is main, then demote any id that
    // occurs in some value set.
    map<Id, bool> is_main;
    for (const auto& id : all_ids)
        is_main[id] = true;
    for (auto& entry : uncondensed_map_)
        for (const auto& child : entry.second)
            is_main[child] = false;

    set<Id> main_keys;
    for (const auto& flag : is_main)
        if (flag.second)
            main_keys.insert(flag.first);

    TRACE("Number of all keys - " << all_ids.size());
    TRACE("Number of main keys - " << main_keys.size());

    TRACE("Condensing starts");
    need_processed_ = all_ids.size();
    number_processed_ = 0;
    for (const auto& id : all_ids)
        is_processed_[id] = false;

    // Seed the condensed map: each main key keeps its full value set.
    for (const auto& key : main_keys) {
        condensed_map_.SetValuesByKey(key, uncondensed_map_.GetValuesByKey(key));
        number_processed_++;
        is_processed_[key] = true;
    }

    // main processing
    ProcessCondensing();

    // processing of non visiting Ids
    while (number_processed_ < need_processed_) {
        // Choose the unprocessed id with the largest value set; ties go
        // to the later entry (>=), matching the original selection rule.
        size_t max_child_setsize = 0;
        Id start_id(0);
        for (const auto& state : is_processed_) {
            if (state.second)
                continue;
            size_t set_size = uncondensed_map_.GetValuesByKey(state.first).size();
            if (set_size >= max_child_setsize) {
                start_id = state.first;
                max_child_setsize = set_size;
            }
        }

        // Attach that id's still-unprocessed children to it.
        auto start_set = uncondensed_map_.GetValuesByKey(start_id);
        for (const auto& child : start_set)
            if (!is_processed_[child])
                condensed_map_.AddNewPair(start_id, child);
        is_processed_[start_id] = true;
        number_processed_++;
        ProcessCondensing();
    }

    VERIFY(number_processed_ == need_processed_);
    return condensed_map_;
}