// Renders an overlap list as a human-readable, comma-separated sequence of
// range strings, e.g. for inclusion in error/warning messages.
string CollectionMetadata::overlapToString( RangeVector overlap ) const {
    stringstream joined;
    bool firstEntry = true;
    for ( RangeVector::const_iterator range = overlap.begin(); range != overlap.end(); ++range ) {
        if ( !firstEntry ) {
            joined << ", ";
        }
        firstEntry = false;
        joined << rangeToString( range->first, range->second );
    }
    return joined.str();
}
/**
 * Returns a new metadata clone with the range of 'pending' registered as a pending
 * (incoming-migration) chunk, or NULL (with *errMsg filled in) if the pending range
 * overlaps a chunk this shard already owns. Caller owns the returned pointer.
 */
CollectionMetadata* CollectionMetadata::clonePlusPending( const ChunkType& pending,
                                                          string* errMsg ) const {
    // The error message string is optional.
    string dummy;
    if ( errMsg == nullptr ) {
        errMsg = &dummy;
    }

    // A pending chunk must not overlap any chunk we already own.
    if ( rangeMapOverlaps( _chunksMap, pending.getMin(), pending.getMax() ) ) {

        RangeVector overlap;
        getRangeMapOverlap( _chunksMap, pending.getMin(), pending.getMax(), &overlap );

        *errMsg = stream() << "cannot add pending chunk "
                           << rangeToString( pending.getMin(), pending.getMax() )
                           << " because the chunk overlaps " << overlapToString( overlap );

        warning() << *errMsg << endl;
        return nullptr;
    }

    // unique_ptr guards against leaks until we release ownership to the caller
    // (auto_ptr is deprecated; the newer code in this file already uses unique_ptr).
    unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
    // BUGFIX: BSONObj::getOwned() returns an owned copy and does not mutate the
    // object; the previous code assigned first and then discarded getOwned()'s
    // result, so the clone could keep referencing unowned buffer memory.
    metadata->_keyPattern = this->_keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_rangesMap = this->_rangesMap;
    metadata->_shardVersion = _shardVersion;
    metadata->_collVersion = _collVersion;

    // If there are any pending chunks on the interval to be added this is ok, since pending
    // chunks aren't officially tracked yet and something may have changed on servers we do not
    // see yet.
    // We remove any chunks we overlap, the remote request starting a chunk migration must have
    // been authoritative.
    if ( rangeMapOverlaps( _pendingMap, pending.getMin(), pending.getMax() ) ) {

        RangeVector pendingOverlap;
        getRangeMapOverlap( _pendingMap, pending.getMin(), pending.getMax(), &pendingOverlap );

        warning() << "new pending chunk " << rangeToString( pending.getMin(), pending.getMax() )
                  << " overlaps existing pending chunks " << overlapToString( pendingOverlap )
                  << ", a migration may not have completed" << endl;

        for ( RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end();
              ++it ) {
            metadata->_pendingMap.erase( it->first );
        }
    }

    metadata->_pendingMap.insert( make_pair( pending.getMin(), pending.getMax() ) );

    dassert(metadata->isValid());
    return metadata.release();
}
// Linear scan for the first range that strongly contains the cursor position;
// returns ranges.end() when no range qualifies.
static RangeVector::const_iterator strongFindRange(const RangeVector &ranges, const ZLTextWordCursor &cursor) {
	// TODO: binary search
	RangeVector::const_iterator it = ranges.begin();
	const RangeVector::const_iterator end = ranges.end();
	while (it != end && !strongContains(*it, cursor)) {
		++it;
	}
	return it;
}
// Linear scan for the first range containing the given element area;
// returns ranges.end() when no range matches.
static RangeVector::const_iterator findRange(const RangeVector &ranges, const ZLTextElementArea &area) {
	// TODO: binary search
	RangeVector::const_iterator it = ranges.begin();
	const RangeVector::const_iterator end = ranges.end();
	while (it != end && !contains(*it, area)) {
		++it;
	}
	return it;
}
// Basic test to ensure replacements turn into ranges properly. TEST(CalculateChangedRangesTest, calculatesRanges) { ReplacementsVec R; R.push_back(makeReplacement(2, 0, 3)); R.push_back(makeReplacement(5, 2, 4)); RangeVector Changes = calculateChangedRanges(R); Range ExpectedRanges[] = { Range(2, 3), Range(8, 4) }; EXPECT_TRUE(std::equal(Changes.begin(), Changes.end(), ExpectedRanges)); }
// Returns a clone of this metadata with chunk's range recorded in the pending map
// (i.e. a chunk this shard expects to receive via migration). The caller must have
// already verified the range does not collide with an owned chunk.
std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
    const ChunkType& chunk) const {
    // Precondition: the incoming range must not overlap any chunk we already own.
    invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax()));

    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
    // getOwned() gives the clone its own copy of the key pattern's buffer.
    metadata->_keyPattern = _keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = _pendingMap;
    metadata->_chunksMap = _chunksMap;
    metadata->_rangesMap = _rangesMap;
    metadata->_shardVersion = _shardVersion;
    metadata->_collVersion = _collVersion;

    // If there are any pending chunks on the interval to be added this is ok, since pending chunks
    // aren't officially tracked yet and something may have changed on servers we do not see yet.
    //
    // We remove any chunks we overlap because the remote request starting a chunk migration is what
    // is authoritative.
    if (rangeMapOverlaps(_pendingMap, chunk.getMin(), chunk.getMax())) {
        RangeVector pendingOverlap;
        getRangeMapOverlap(_pendingMap, chunk.getMin(), chunk.getMax(), &pendingOverlap);

        warning() << "new pending chunk " << redact(rangeToString(chunk.getMin(), chunk.getMax()))
                  << " overlaps existing pending chunks " << redact(overlapToString(pendingOverlap))
                  << ", a migration may not have completed";

        // Drop the stale pending entries that the new range supersedes.
        for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) {
            metadata->_pendingMap.erase(it->first);
        }
    }

    // The pending map entry cannot contain a specific chunk version because we don't know what
    // version would be generated for it at commit time. That's why we insert an IGNORED value.
    metadata->_pendingMap.insert(
        make_pair(chunk.getMin(), CachedChunkInfo(chunk.getMax(), ChunkVersion::IGNORED())));

    invariant(metadata->isValid());
    return metadata;
}
std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending( const ChunkType& chunk) const { invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax())); unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>()); metadata->_keyPattern = _keyPattern.getOwned(); metadata->fillKeyPatternFields(); metadata->_pendingMap = _pendingMap; metadata->_chunksMap = _chunksMap; metadata->_rangesMap = _rangesMap; metadata->_shardVersion = _shardVersion; metadata->_collVersion = _collVersion; // If there are any pending chunks on the interval to be added this is ok, since pending // chunks aren't officially tracked yet and something may have changed on servers we do not // see yet. // We remove any chunks we overlap, the remote request starting a chunk migration must have // been authoritative. if (rangeMapOverlaps(_pendingMap, chunk.getMin(), chunk.getMax())) { RangeVector pendingOverlap; getRangeMapOverlap(_pendingMap, chunk.getMin(), chunk.getMax(), &pendingOverlap); warning() << "new pending chunk " << rangeToString(chunk.getMin(), chunk.getMax()) << " overlaps existing pending chunks " << overlapToString(pendingOverlap) << ", a migration may not have completed"; for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) { metadata->_pendingMap.erase(it->first); } } metadata->_pendingMap.insert(make_pair(chunk.getMin(), chunk.getMax())); invariant(metadata->isValid()); return metadata; }
/**
 * Returns a new metadata clone in which the contiguous chunks spanning exactly
 * [minKey, maxKey) are merged into a single chunk at 'newShardVersion', or NULL
 * (with *errMsg filled in) when the merge is invalid: stale version, fewer than
 * two chunks in range, boundary mismatch, or holes between the chunks.
 * Caller owns the returned pointer.
 */
CollectionMetadata* CollectionMetadata::cloneMerge( const BSONObj& minKey,
                                                    const BSONObj& maxKey,
                                                    const ChunkVersion& newShardVersion,
                                                    string* errMsg ) const {
    // The merged chunk must carry a strictly newer shard version.
    if (newShardVersion <= _shardVersion) {

        *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
                           << ", new shard version " << newShardVersion.toString()
                           << " is not greater than current version "
                           << _shardVersion.toString();

        warning() << *errMsg << endl;
        return nullptr;
    }

    RangeVector overlap;
    getRangeMapOverlap( _chunksMap, minKey, maxKey, &overlap );

    // A merge needs at least two chunks to combine.
    if ( overlap.empty() || overlap.size() == 1 ) {

        *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
                           << ( overlap.empty() ? ", no chunks found in this range" :
                                                  ", only one chunk found in this range" );

        warning() << *errMsg << endl;
        return nullptr;
    }

    // Validate that the overlapped chunks tile [minKey, maxKey) exactly:
    // correct endpoints and no gaps between consecutive chunks.
    bool validStartEnd = true;
    bool validNoHoles = true;
    if ( overlap.begin()->first.woCompare( minKey ) != 0 ) {
        // First chunk doesn't start with minKey
        validStartEnd = false;
    }
    else if ( overlap.rbegin()->second.woCompare( maxKey ) != 0 ) {
        // Last chunk doesn't end with maxKey
        validStartEnd = false;
    }
    else {
        // Check that there are no holes
        BSONObj prevMaxKey = minKey;
        for ( RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it ) {
            if ( it->first.woCompare( prevMaxKey ) != 0 ) {
                validNoHoles = false;
                break;
            }
            prevMaxKey = it->second;
        }
    }

    if ( !validStartEnd || !validNoHoles ) {

        *errMsg = stream() << "cannot merge range " << rangeToString( minKey, maxKey )
                           << ", overlapping chunks " << overlapToString( overlap )
                           << ( !validStartEnd ? " do not have the same min and max key" :
                                                 " are not all adjacent" );

        warning() << *errMsg << endl;
        return nullptr;
    }

    // unique_ptr guards against leaks until ownership is released to the caller
    // (auto_ptr is deprecated; the newer code in this file already uses unique_ptr).
    unique_ptr<CollectionMetadata> metadata( new CollectionMetadata );
    // BUGFIX: BSONObj::getOwned() returns an owned copy and does not mutate the
    // object; the previous code assigned first and then discarded getOwned()'s
    // result, so the clone could keep referencing unowned buffer memory.
    metadata->_keyPattern = this->_keyPattern.getOwned();
    // Consistency fix: clonePlusPending refreshes the derived key-pattern fields
    // after copying _keyPattern; do the same here (the later StatusWith version
    // of cloneMerge also does this).
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = this->_pendingMap;
    metadata->_chunksMap = this->_chunksMap;
    metadata->_rangesMap = this->_rangesMap;
    metadata->_shardVersion = newShardVersion;
    // Collection version can never trail the shard version.
    metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;

    // Replace the merged chunks with the single combined chunk.
    for ( RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it ) {
        metadata->_chunksMap.erase( it->first );
    }

    metadata->_chunksMap.insert( make_pair( minKey, maxKey ) );

    dassert(metadata->isValid());
    return metadata.release();
}
// Returns a clone of this metadata in which the contiguous chunks spanning exactly
// [minKey, maxKey) are merged into one chunk at 'newShardVersion'. Fails with
// IllegalOperation when fewer than two chunks lie in the range, the endpoints do
// not line up, or there are holes between the chunks.
StatusWith<std::unique_ptr<CollectionMetadata>> CollectionMetadata::cloneMerge(
    const BSONObj& minKey, const BSONObj& maxKey, const ChunkVersion& newShardVersion) const {
    // Preconditions: same epoch and a strictly newer shard version for the merged chunk.
    invariant(newShardVersion.epoch() == _shardVersion.epoch());
    invariant(newShardVersion > _shardVersion);

    RangeVector overlap;
    getRangeMapOverlap(_chunksMap, minKey, maxKey, &overlap);

    // A merge needs at least two chunks to combine.
    if (overlap.empty() || overlap.size() == 1) {
        return {ErrorCodes::IllegalOperation,
                stream() << "cannot merge range " << rangeToString(minKey, maxKey)
                         << (overlap.empty() ? ", no chunks found in this range"
                                             : ", only one chunk found in this range")};
    }

    // Validate that the overlapped chunks tile [minKey, maxKey) exactly:
    // correct endpoints and no gaps between consecutive chunks.
    bool validStartEnd = true;
    bool validNoHoles = true;
    if (overlap.begin()->first.woCompare(minKey) != 0) {
        // First chunk doesn't start with minKey
        validStartEnd = false;
    } else if (overlap.rbegin()->second.woCompare(maxKey) != 0) {
        // Last chunk doesn't end with maxKey
        validStartEnd = false;
    } else {
        // Check that there are no holes
        BSONObj prevMaxKey = minKey;
        for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
            if (it->first.woCompare(prevMaxKey) != 0) {
                validNoHoles = false;
                break;
            }
            prevMaxKey = it->second;
        }
    }

    if (!validStartEnd || !validNoHoles) {
        return {ErrorCodes::IllegalOperation,
                stream() << "cannot merge range " << rangeToString(minKey, maxKey)
                         << ", overlapping chunks " << overlapToString(overlap)
                         << (!validStartEnd ? " do not have the same min and max key"
                                            : " are not all adjacent")};
    }

    unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
    // getOwned() gives the clone its own copy of the key pattern's buffer.
    metadata->_keyPattern = _keyPattern.getOwned();
    metadata->fillKeyPatternFields();
    metadata->_pendingMap = _pendingMap;
    metadata->_chunksMap = _chunksMap;
    metadata->_rangesMap = _rangesMap;
    metadata->_shardVersion = newShardVersion;
    // Collection version can never trail the shard version.
    metadata->_collVersion = newShardVersion > _collVersion ? newShardVersion : this->_collVersion;

    // Replace the merged chunks with the single combined chunk.
    for (RangeVector::iterator it = overlap.begin(); it != overlap.end(); ++it) {
        metadata->_chunksMap.erase(it->first);
    }

    metadata->_chunksMap.insert(make_pair(minKey, maxKey));

    invariant(metadata->isValid());
    return std::move(metadata);
}