/**
 * Returns a new metadata object that is a copy of this one, with 'pending' added to the
 * pending-chunk map. Returns NULL (and sets *errMsg, if provided) when 'pending' overlaps
 * a chunk this shard already owns. Caller owns the returned pointer.
 */
CollectionMetadata* CollectionMetadata::clonePlusPending( const ChunkType& pending,
                                                          string* errMsg ) const {

    // The error message string is optional.
    string dummy;
    if ( errMsg == NULL ) {
        errMsg = &dummy;
    }

    // A pending chunk may never overlap a chunk we actually own.
    if ( rangeMapOverlaps( _chunksMap, pending.getMin(), pending.getMax() ) ) {

        RangeVector overlap;
        getRangeMapOverlap( _chunksMap, pending.getMin(), pending.getMax(), &overlap );

        *errMsg = stream() << "cannot add pending chunk "
                           << rangeToString( pending.getMin(), pending.getMax() )
                           << " because the chunk overlaps " << overlapToString( overlap );

        warning() << *errMsg << endl;
        return NULL;
    }

    auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
    // BUGFIX: BSONObj::getOwned() returns an owned copy rather than modifying the object
    // in place, so its result must be assigned. The previous code called it and discarded
    // the result, leaving the clone's key pattern potentially sharing an unowned buffer.
    metadata->_keyPattern = this->_keyPattern.getOwned();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_rangesMap = this->_rangesMap;
    metadata->_shardVersion = _shardVersion;
    metadata->_collVersion = _collVersion;

    // If there are any pending chunks on the interval to be added this is ok, since pending
    // chunks aren't officially tracked yet and something may have changed on servers we do not
    // see yet.
    // We remove any chunks we overlap, the remote request starting a chunk migration must have
    // been authoritative.
    if ( rangeMapOverlaps( _pendingMap, pending.getMin(), pending.getMax() ) ) {

        RangeVector pendingOverlap;
        getRangeMapOverlap( _pendingMap, pending.getMin(), pending.getMax(), &pendingOverlap );

        warning() << "new pending chunk " << rangeToString( pending.getMin(), pending.getMax() )
                  << " overlaps existing pending chunks " << overlapToString( pendingOverlap )
                  << ", a migration may not have completed" << endl;

        for ( RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end();
              ++it ) {
            metadata->_pendingMap.erase( it->first );
        }
    }

    metadata->_pendingMap.insert( make_pair( pending.getMin(), pending.getMax() ) );

    dassert(metadata->isValid());
    return metadata.release();
}
/// Computes the ranges (in post-replacement offsets) that were touched by the given
/// replacements, by tracking the running offset shift introduced by earlier replacements.
/// Assumes \p Replaces is ordered by offset.
RangeVector calculateChangedRanges(
    const std::vector<clang::tooling::Replacement> &Replaces) {
  RangeVector ChangedRanges;

  // Generate the new ranges from the replacements. 'Shift' is the running difference
  // between post- and pre-replacement offsets; it is negative when earlier replacements
  // shrank the text.
  int Shift = 0;
  for (const tooling::Replacement &R : Replaces) {
    unsigned Offset = R.getOffset() + Shift;
    unsigned Length = R.getReplacementText().size();
    // BUGFIX: do the subtraction in signed arithmetic. Both operands are unsigned, so
    // when the replacement text is shorter than the replaced range the difference would
    // wrap around before being folded into the signed accumulator.
    Shift += static_cast<int>(Length) - static_cast<int>(R.getLength());
    ChangedRanges.push_back(tooling::Range(Offset, Length));
  }

  return ChangedRanges;
}
// Add all the section file start address & size to the RangeVector, // recusively adding any children sections. static void AddSectionsToRangeMap(SectionList *sectlist, RangeVector<addr_t, addr_t> §ion_ranges) { const int num_sections = sectlist->GetNumSections(0); for (int i = 0; i < num_sections; i++) { SectionSP sect_sp = sectlist->GetSectionAtIndex(i); if (sect_sp) { SectionList &child_sectlist = sect_sp->GetChildren(); // If this section has children, add the children to the RangeVector. // Else add this section to the RangeVector. if (child_sectlist.GetNumSections(0) > 0) { AddSectionsToRangeMap(&child_sectlist, section_ranges); } else { size_t size = sect_sp->GetByteSize(); if (size > 0) { addr_t base_addr = sect_sp->GetFileAddress(); RangeVector<addr_t, addr_t>::Entry entry; entry.SetRangeBase(base_addr); entry.SetByteSize(size); section_ranges.Append(entry); } } } } }
/// Maps each replacement to the range it occupies after all replacements have
/// been applied, using shiftedCodePosition() to translate the original offset.
RangeVector calculateChangedRanges(
    const std::vector<clang::tooling::Replacement> &Replaces) {
  RangeVector ChangedRanges;

  // Generate the new ranges from the replacements.
  //
  // NOTE: shiftedCodePosition() rescans the whole replacement list for every
  // element, making this O(n^2) in the number of replacements. If this starts
  // to become a problem inline shiftedCodePosition() here and do shifts in a
  // single run through this loop.
  for (const tooling::Replacement &R : Replaces) {
    const unsigned NewOffset = tooling::shiftedCodePosition(Replaces, R.getOffset());
    const unsigned NewLength = R.getReplacementText().size();
    ChangedRanges.push_back(tooling::Range(NewOffset, NewLength));
  }

  return ChangedRanges;
}
/**
 * Returns a copy of this metadata with 'chunk' added to the pending-chunk map.
 * The caller must guarantee that 'chunk' does not overlap any chunk this shard owns
 * (enforced by the invariant below).
 */
std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
    const ChunkType& chunk) const {
    invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax()));

    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
    // getOwned() copies the key pattern so the clone does not share this object's
    // underlying BSON buffer.
    metadata->_keyPattern = _keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = _pendingMap;
    metadata->_chunksMap = _chunksMap;
    metadata->_rangesMap = _rangesMap;
    metadata->_shardVersion = _shardVersion;
    metadata->_collVersion = _collVersion;

    // If there are any pending chunks on the interval to be added this is ok, since pending chunks
    // aren't officially tracked yet and something may have changed on servers we do not see yet.
    //
    // We remove any chunks we overlap because the remote request starting a chunk migration is what
    // is authoritative.
    if (rangeMapOverlaps(_pendingMap, chunk.getMin(), chunk.getMax())) {
        RangeVector pendingOverlap;
        getRangeMapOverlap(_pendingMap, chunk.getMin(), chunk.getMax(), &pendingOverlap);

        // redact() keeps potentially sensitive shard key values out of the log output.
        warning() << "new pending chunk " << redact(rangeToString(chunk.getMin(), chunk.getMax()))
                  << " overlaps existing pending chunks " << redact(overlapToString(pendingOverlap))
                  << ", a migration may not have completed";

        for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) {
            metadata->_pendingMap.erase(it->first);
        }
    }

    // The pending map entry cannot contain a specific chunk version because we don't know what
    // version would be generated for it at commit time. That's why we insert an IGNORED value.
    metadata->_pendingMap.insert(
        make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED())));

    invariant(metadata->isValid());
    return metadata;
}
std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending( const ChunkType& chunk) const { invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())); unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>()); metadata->_keyPattern = _keyPattern.getOwned(); metadata->fillKeyPatternFields(); metadata->_pendingMap = _pendingMap; metadata->_chunksMap = _chunksMap; metadata->_rangesMap = _rangesMap; metadata->_shardVersion = _shardVersion; metadata->_collVersion = _collVersion; // If there are any pending chunks on the interval to be added this is ok, since pending // chunks aren't officially tracked yet and something may have changed on servers we do not // see yet. // We remove any chunks we overlap, the remote request starting a chunk migration must have // been authoritative. if (rangeMapOverlaps(_pendingMap, chunk.getMin(), chunk.getMax())) { RangeVector pendingOverlap; getRangeMapOverlap(_pendingMap, chunk.getMin(), chunk.getMax(), &pendingOverlap); warning() << "new pending chunk " << rangeToString(chunk.getMin(), chunk.getMax()) << " overlaps existing pending chunks " << overlapToString(pendingOverlap) << ", a migration may not have completed"; for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) { metadata->_pendingMap.erase(it->first); } } metadata->_pendingMap.insert(make_pair(chunk.getMin(), chunk.getMax())); invariant(metadata->isValid()); return metadata; }
/**
 * Returns a new metadata object in which all chunks overlapping [minKey, maxKey) have
 * been merged into a single chunk at 'newShardVersion'. Returns NULL (and sets *errMsg,
 * if provided) when the version does not advance or the chunks do not exactly tile the
 * range. Caller owns the returned pointer.
 */
CollectionMetadata* CollectionMetadata::cloneMerge( const BSONObj& minKey,
                                                    const BSONObj& maxKey,
                                                    const ChunkVersion& newShardVersion,
                                                    string* errMsg ) const {

    // The error message string is optional (consistent with clonePlusPending, which
    // previously allowed NULL while this method dereferenced it unconditionally).
    string dummy;
    if ( errMsg == NULL ) {
        errMsg = &dummy;
    }

    // The merged chunk's version must advance the shard version.
    if (newShardVersion <= _shardVersion) {

        *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
                           << ", new shard version " << newShardVersion.toString()
                           << " is not greater than current version "
                           << _shardVersion.toString();

        warning() << *errMsg << endl;
        return NULL;
    }

    RangeVector overlap;
    getRangeMapOverlap( _chunksMap, minKey, maxKey, &overlap );

    // A merge needs at least two chunks in the target range.
    if ( overlap.empty() || overlap.size() == 1 ) {

        *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
                           << ( overlap.empty() ? ", no chunks found in this range" :
                                                  ", only one chunk found in this range" );

        warning() << *errMsg << endl;
        return NULL;
    }

    // Validate that the overlapping chunks exactly tile [minKey, maxKey): the first chunk
    // starts at minKey, the last ends at maxKey, and each chunk begins where the previous
    // one ended.
    bool validStartEnd = true;
    bool validNoHoles = true;
    if ( overlap.begin()->first.woCompare( minKey ) != 0 ) {
        // First chunk doesn't start with minKey
        validStartEnd = false;
    }
    else if ( overlap.rbegin()->second.woCompare( maxKey ) != 0 ) {
        // Last chunk doesn't end with maxKey
        validStartEnd = false;
    }
    else {
        // Check that there are no holes
        BSONObj prevMaxKey = minKey;
        for ( RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it ) {
            if ( it->first.woCompare( prevMaxKey ) != 0 ) {
                validNoHoles = false;
                break;
            }
            prevMaxKey = it->second;
        }
    }

    if ( !validStartEnd || !validNoHoles ) {

        *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
                           << ", overlapping chunks " << overlapToString( overlap )
                           << ( !validStartEnd ? " do not have the same min and max key" :
                                                 " are not all adjacent" );

        warning() << *errMsg << endl;
        return NULL;
    }

    auto_ptr<CollectionMetadata> metadata( new CollectionMetadata );
    // BUGFIX: BSONObj::getOwned() returns an owned copy rather than modifying the object
    // in place, so its result must be assigned. The previous code called it and discarded
    // the result, leaving the clone's key pattern potentially sharing an unowned buffer.
    metadata->_keyPattern = this->_keyPattern.getOwned();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_rangesMap = this->_rangesMap;
    metadata->_shardVersion = newShardVersion;
    // The collection version never moves backwards.
    metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;

    // Replace the merged chunks with the single combined chunk.
    for ( RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it ) {
        metadata->_chunksMap.erase( it->first );
    }
    metadata->_chunksMap.insert( make_pair( minKey, maxKey ) );

    dassert(metadata->isValid());
    return metadata.release();
}
/**
 * Returns a copy of this metadata in which all chunks covering [minKey, maxKey) have been
 * collapsed into a single chunk carrying 'newShardVersion'. Fails with IllegalOperation
 * unless the range is covered by two or more chunks that exactly tile it.
 */
StatusWith<std::unique_ptr<CollectionMetadata>> CollectionMetadata::cloneMerge(
    const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& newShardVersion) const {
    invariant(newShardVersion.epoch() == _shardVersion.epoch());
    invariant(newShardVersion > _shardVersion);

    RangeVector overlap;
    getRangeMapOverlap(_chunksMap, minKey, maxKey, &overlap);

    // A merge needs at least two chunks in the target range.
    if (overlap.empty() || overlap.size() == 1) {
        return {ErrorCodes::IllegalOperation,
                stream() << "cannot merge range " << rangeToString(minKey, maxKey)
                         << (overlap.empty() ? ", no chunks found in this range"
                                             : ", only one chunk found in this range")};
    }

    // Validate that the overlapping chunks exactly tile [minKey, maxKey): correct
    // endpoints and no gaps between consecutive chunks.
    bool validStartEnd = true;
    bool validNoHoles = true;
    if (overlap.begin()->first.woCompare(minKey) != 0) {
        // First chunk doesn't start with minKey
        validStartEnd = false;
    } else if (overlap.rbegin()->second.woCompare(maxKey) != 0) {
        // Last chunk doesn't end with maxKey
        validStartEnd = false;
    } else {
        // Check that there are no holes: each chunk must begin exactly where the
        // previous one ended.
        BSONObj prevMaxKey = minKey;
        for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
            if (it->first.woCompare(prevMaxKey) != 0) {
                validNoHoles = false;
                break;
            }
            prevMaxKey = it->second;
        }
    }

    if (!validStartEnd || !validNoHoles) {
        return {ErrorCodes::IllegalOperation,
                stream() << "cannot merge range " << rangeToString(minKey, maxKey)
                         << ", overlapping chunks " << overlapToString(overlap)
                         << (!validStartEnd ? " do not have the same min and max key"
                                            : " are not all adjacent")};
    }

    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
    // getOwned() copies the key pattern so the clone does not share this object's
    // underlying BSON buffer.
    metadata->_keyPattern = _keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = _pendingMap;
    metadata->_chunksMap = _chunksMap;
    metadata->_rangesMap = _rangesMap;
    metadata->_shardVersion = newShardVersion;
    // The collection version never moves backwards.
    metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;

    // Replace the merged chunks with the single combined chunk.
    for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
        metadata->_chunksMap.erase(it->first);
    }
    metadata->_chunksMap.insert(make_pair(minKey, maxKey));

    invariant(metadata->isValid());
    // std::move is required here to convert the unique_ptr into the StatusWith return type.
    return std::move(metadata);
}
// Sends negative acknowledgements (naks) for sequence numbers this session is missing,
// while deliberately suppressing naks for (a) ranges that were already requested in
// recent nak intervals (rate limiting via nak_delay_intervals / nak_max) and (b) ranges
// other peers have already requested during this interval.
void ReliableSession::send_naks() {
  // Could get data samples before syn control message.
  // No use nak'ing until syn control message is received and session is acked.
  if (!this->acked()) return;

  if (DCPS_debug_level > 5) {
    ACE_DEBUG((LM_DEBUG,
               ACE_TEXT("(%P|%t) ReliableSession::send_naks local %d ")
               ACE_TEXT("remote %d nak request size %d \n"),
               this->link_->local_peer(), this->remote_peer_,
               this->nak_requests_.size()));
  }

  // A non-disjoint sequence has no gaps, i.e. nothing is missing.
  if (!this->nak_sequence_.disjoint()) return; // nothing to send

  ACE_Time_Value now(ACE_OS::gettimeofday());

  // Record high-water mark for this interval; this value will
  // be used to reset the low-water mark in the event the remote
  // peer becomes unresponsive:
  this->nak_requests_[now] = this->nak_sequence_.high();

  typedef std::vector<SequenceRange> RangeVector;
  RangeVector ignored;

  // Each range first..second collected below will be skipped (no naks sent for it).
  SequenceNumber first;
  SequenceNumber second;

  // Walk nak_requests_ newest-to-oldest; entry 0 (rbegin) is the high-water mark
  // just recorded above.
  NakRequestMap::reverse_iterator itr(this->nak_requests_.rbegin());

  if (this->nak_requests_.size() > 1) {
    // The sequences between rbegin - 1 and rbegin will not be ignored for naking.
    ++itr;

    size_t nak_delay_intervals = this->link()->config()->nak_delay_intervals_;
    size_t nak_max = this->link()->config()->nak_max_;
    size_t sz = this->nak_requests_.size();

    // Image i is the index of element in nak_requests_ in reverse order.
    // index 0 sequence is most recent high water mark.
    // e.g index , 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
    //  0 (rbegin) is always skipped because missing sample between 1 and 0 interval
    //  should always be naked.,
    // if nak_delay_intervals=4, nak_max=3, any sequence between 5 - 1, 10 - 6, 15 - 11
    //  are skipped for naking due to nak_delay_intervals and 20 - 16 are skipped for
    //  naking due to nak_max.
    for (size_t i = 1; i < sz; ++i) {
      // Once we are nak_max groups back in history, everything older (down to the
      // oldest recorded request) is suppressed in one final range, then we stop.
      if ((i * 1.0) / (nak_delay_intervals + 1) > nak_max) {
        if (first != SequenceNumber()) {
          // NOTE(review): assigning 'first' immediately before break looks odd, but
          // 'first' is still read by the flush after the loop, so the assignment does
          // affect which range gets pushed. Confirm this is the intended behavior.
          first = this->nak_requests_.begin()->second;
        }
        else {
          ignored.push_back(std::make_pair(this->nak_requests_.begin()->second,
                                           itr->second));
        }
        break;
      }

      // First interval in a group: remember its sequence as the (upper) end of the
      // suppressed range.
      if (i % (nak_delay_intervals + 1) == 1) {
        second = itr->second;
      }
      // While inside a group, keep sliding 'first' (the lower end) backwards.
      if (second != SequenceNumber()) {
        first = itr->second;
      }
      // Last interval in a group: emit the completed first..second range and reset.
      if (i % (nak_delay_intervals + 1) == 0) {
        first = itr->second;

        if (first != SequenceNumber() && second != SequenceNumber()) {
          ignored.push_back(std::make_pair(first, second));
          first = SequenceNumber();
          second = SequenceNumber();
        }
      }

      ++itr;
    }

    // Flush a partially-built range left over when the loop ended mid-group.
    if (first != SequenceNumber() && second != SequenceNumber() && first != second) {
      ignored.push_back(std::make_pair(first, second));
    }
  }

  // Take a copy to facilitate temporary suppression:
  DisjointSequence received(this->nak_sequence_);
  if (DCPS_debug_level > 0) {
    received.dump();
  }

  // Mark each suppressed range as "received" in the copy so no nak is generated for it.
  size_t sz = ignored.size();
  for (size_t i = 0; i < sz; ++i) {

    if (ignored[i].second > received.cumulative_ack()) {
      SequenceNumber high = ignored[i].second;
      SequenceNumber low = ignored[i].first;
      // Clamp the low end to the cumulative ack; everything below it is already received.
      if (low < received.cumulative_ack()) {
        low = received.cumulative_ack();
      }

      if (DCPS_debug_level > 0) {
        ACE_DEBUG((LM_DEBUG,
                   ACE_TEXT("(%P|%t) ReliableSession::send_naks local %d ")
                   ACE_TEXT("remote %d ignore missing [%q - %q]\n"),
                   this->link_->local_peer(), this->remote_peer_,
                   low.getValue(), high.getValue()));
      }

      // Make contiguous between ignored sequences.
      received.insert(SequenceRange(low, high));
    }
  }

  for (NakPeerSet::iterator it(this->nak_peers_.begin());
       it != this->nak_peers_.end(); ++it) {
    // Update sequence to temporarily suppress repair requests for
    // ranges already requested by other peers for this interval:
    received.insert(*it);
  }

  // Anything still missing after all suppression gets naked now.
  if (received.disjoint()) {
    send_naks(received);
  }

  // Clear peer repair requests:
  this->nak_peers_.clear();
}
// Ensure zero-length ranges are produced. Even lines where things are deleted // need reformatting. TEST(CalculateChangedRangesTest, producesZeroLengthRange) { RangeVector Changes = calculateChangedRanges(makeReplacements(0, 4, 0)); EXPECT_EQ(Range(0, 0), Changes.front()); }