void MetadataManager::_addRangeToClean_inlock(const ChunkRange& range) {
    invariant(!rangeMapOverlaps(_rangesToClean, range.getMin(), range.getMax()));
    invariant(!rangeMapOverlaps(_receivingChunks, range.getMin(), range.getMax()));

    _rangesToClean.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));

    // If _rangesToClean was previously empty, we need to start the collection range deleter
    if (_rangesToClean.size() == 1UL) {
        ShardingState::get(_serviceContext)->scheduleCleanup(_nss);
    }
}
std::shared_ptr<Notification<Status>> MetadataManager::_addRangeToClean_inlock(
    const ChunkRange& range) {
    // This first invariant currently makes an unnecessary copy, to reuse the
    // rangeMapOverlaps helper function.
    invariant(!rangeMapOverlaps(_getCopyOfRangesToClean_inlock(), range.getMin(), range.getMax()));
    invariant(!rangeMapOverlaps(_receivingChunks, range.getMin(), range.getMax()));

    RangeToCleanDescriptor descriptor(range.getMax().getOwned());
    _rangesToClean.insert(std::make_pair(range.getMin().getOwned(), descriptor));

    // If _rangesToClean was previously empty, we need to start the collection range deleter
    if (_rangesToClean.size() == 1UL) {
        ShardingState::get(_serviceContext)->scheduleCleanup(_nss);
    }

    return descriptor.getNotification();
}
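// A minimal caller-side sketch, not part of the original source: it shows one
// way the notification returned above might be consumed. The public
// `addRangeToClean` wrapper is a hypothetical name, assumed to take
// _managerLock and forward to _addRangeToClean_inlock; Notification<Status>'s
// get() blocks until the range deleter signals completion for this range.
Status waitForRangeCleanup(OperationContext* opCtx,
                           MetadataManager* metadataManager,
                           const ChunkRange& range) {
    // Hypothetical public wrapper around _addRangeToClean_inlock
    auto notification = metadataManager->addRangeToClean(range);

    // Block until the collection range deleter has processed the range (or the
    // operation is interrupted), then surface the outcome to the caller.
    return notification->get(opCtx);
}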
Status MetadataLoader::promotePendingChunks( const CollectionMetadata* afterMetadata,
                                             CollectionMetadata* remoteMetadata ) const {

    // Ensure pending chunks are applicable
    bool notApplicable = ( NULL == afterMetadata || NULL == remoteMetadata ) ||
                         ( afterMetadata->getShardVersion() > remoteMetadata->getShardVersion() ) ||
                         ( afterMetadata->getShardVersion().epoch() !=
                           remoteMetadata->getShardVersion().epoch() );
    if ( notApplicable )
        return Status::OK();

    // The chunks from remoteMetadata are the latest version, and the pending chunks
    // from afterMetadata are the latest version. If no trickery is afoot, pending chunks
    // should match exactly zero or one loaded chunk.
    remoteMetadata->_pendingMap = afterMetadata->_pendingMap;

    // Resolve our pending chunks against the chunks we've loaded
    for ( RangeMap::iterator it = remoteMetadata->_pendingMap.begin();
          it != remoteMetadata->_pendingMap.end(); ) {

        if ( !rangeMapOverlaps( remoteMetadata->_chunksMap, it->first, it->second ) ) {
            ++it;
            continue;
        }

        // Our pending range overlaps at least one chunk

        if ( rangeMapContains( remoteMetadata->_chunksMap, it->first, it->second ) ) {
            // Chunk was promoted from pending, successful migration
            LOG( 2 ) << "verified chunk " << rangeToString( it->first, it->second )
                     << " was migrated earlier to this shard" << endl;

            remoteMetadata->_pendingMap.erase( it++ );
        }
        else {
            // Something strange happened, maybe manual editing of config?
            RangeVector overlap;
            getRangeMapOverlap( remoteMetadata->_chunksMap, it->first, it->second, &overlap );

            string errMsg = str::stream()
                << "the remote metadata changed unexpectedly, pending range "
                << rangeToString( it->first, it->second )
                << " does not exactly overlap loaded chunks "
                << overlapToString( overlap );

            return Status( ErrorCodes::RemoteChangeDetected, errMsg );
        }
    }

    return Status::OK();
}
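// A self-contained illustration (a simplification, not mongo's real RangeMap
// helpers): the promotion loop above distinguishes three cases using overlap
// vs. exact containment. Bounds are reduced to ints to make the decision
// concrete; toyContains treats "contains" as an exact single-chunk match.
#include <cassert>
#include <map>

using ToyRangeMap = std::map<int, int>;  // min -> max, half-open [min, max)

bool toyOverlaps(const ToyRangeMap& ranges, int min, int max) {
    for (const auto& r : ranges)
        if (r.first < max && min < r.second)
            return true;
    return false;
}

bool toyContains(const ToyRangeMap& ranges, int min, int max) {
    auto it = ranges.find(min);
    return it != ranges.end() && it->second == max;  // exact single-chunk match
}

int main() {
    ToyRangeMap chunks{{0, 10}, {10, 20}};  // loaded chunks

    assert(!toyOverlaps(chunks, 30, 40));  // no overlap: range stays pending (++it)
    assert(toyContains(chunks, 10, 20));   // exact match: promoted, erased from _pendingMap
    assert(toyOverlaps(chunks, 5, 15) &&
           !toyContains(chunks, 5, 15));   // partial overlap only: RemoteChangeDetected
}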
void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata> remoteMetadata) {
    LOG(1) << "Refreshing the active metadata from "
           << (_activeMetadataTracker->metadata ? _activeMetadataTracker->metadata->toStringBasic()
                                                : "(empty)")
           << ", to " << (remoteMetadata ? remoteMetadata->toStringBasic() : "(empty)");

    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);

    // Collection is not sharded anymore
    if (!remoteMetadata) {
        log() << "Marking collection as not sharded.";

        _receivingChunks.clear();
        _rangesToClean.clear();

        _setActiveMetadata_inlock(nullptr);
        return;
    }

    invariant(!remoteMetadata->getCollVersion().isWriteCompatibleWith(ChunkVersion::UNSHARDED()));
    invariant(!remoteMetadata->getShardVersion().isWriteCompatibleWith(ChunkVersion::UNSHARDED()));

    // Collection is not sharded currently
    if (!_activeMetadataTracker->metadata) {
        log() << "Marking collection as sharded with version " << remoteMetadata->toStringBasic();

        invariant(_receivingChunks.empty());
        invariant(_rangesToClean.empty());

        _setActiveMetadata_inlock(std::move(remoteMetadata));
        return;
    }

    // If the metadata being installed has a different epoch from ours, this means the collection
    // was dropped and recreated, so we must entirely reset the metadata state
    if (_activeMetadataTracker->metadata->getCollVersion().epoch() !=
        remoteMetadata->getCollVersion().epoch()) {
        log() << "Overwriting collection metadata due to epoch change.";

        _receivingChunks.clear();
        _rangesToClean.clear();

        _setActiveMetadata_inlock(std::move(remoteMetadata));
        return;
    }

    // We already have a newer version
    if (_activeMetadataTracker->metadata->getCollVersion() >= remoteMetadata->getCollVersion()) {
        LOG(1) << "Attempted to refresh active metadata "
               << _activeMetadataTracker->metadata->toStringBasic() << " with an older version "
               << remoteMetadata->toStringBasic();
        return;
    }

    // Resolve any receiving chunks, which might have completed by now
    for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
        const BSONObj min = it->first;
        const BSONObj max = it->second;

        // Our pending range overlaps at least one chunk
        if (rangeMapContains(remoteMetadata->getChunks(), min, max)) {
            // The remote metadata contains a chunk we were earlier in the process of receiving,
            // so we deem it successfully received.
            LOG(2) << "Verified chunk " << ChunkRange(min, max).toString()
                   << " was migrated earlier to this shard";

            _receivingChunks.erase(it++);
            continue;
        } else if (!rangeMapOverlaps(remoteMetadata->getChunks(), min, max)) {
            ++it;
            continue;
        }

        // Partial overlap indicates that the earlier migration has failed, but the chunk being
        // migrated underwent some splits and other migrations and ended up here again. In this
        // case, we will request a full reload of the metadata. Currently this cannot happen,
        // because all migrations are done with the explicit knowledge of the recipient shard.
        // However, we leave the option open so that chunk splits can do empty chunk moves without
        // having to notify the recipient.
        RangeVector overlappedChunks;
        getRangeMapOverlap(remoteMetadata->getChunks(), min, max, &overlappedChunks);

        for (const auto& overlapChunkMin : overlappedChunks) {
            auto itRecv = _receivingChunks.find(overlapChunkMin.first);
            invariant(itRecv != _receivingChunks.end());

            const ChunkRange receivingRange(itRecv->first, itRecv->second);

            _receivingChunks.erase(itRecv);

            // Make sure any potentially partially copied chunks are scheduled to be cleaned up
            _addRangeToClean_inlock(receivingRange);
        }

        // Need to reset the iterator
        it = _receivingChunks.begin();
    }

    // For compatibility with the current range deleter, which is driven entirely by the contents
    // of the CollectionMetadata, update the pending chunks
    for (const auto& receivingChunk : _receivingChunks) {
        ChunkType chunk;
        chunk.setMin(receivingChunk.first);
        chunk.setMax(receivingChunk.second);
        remoteMetadata = remoteMetadata->clonePlusPending(chunk);
    }

    _setActiveMetadata_inlock(std::move(remoteMetadata));
}
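// Why the loop above resets `it` to begin() after draining overlapped chunks:
// the inner loop erases _receivingChunks entries through a second iterator
// (itRecv), and one of those entries may be the very element `it` points to,
// invalidating it. A self-contained sketch of the same std::map erase pattern
// (toy int keys standing in for the real BSONObj bounds):
#include <map>

int main() {
    std::map<int, int> receiving{{0, 10}, {10, 20}, {20, 30}};

    for (auto it = receiving.begin(); it != receiving.end();) {
        if (it->first == 10) {
            // Erase through a separate lookup, as the inner loop above does.
            // `it` may now dangle, so restart the scan from the beginning.
            receiving.erase(receiving.find(10));
            it = receiving.begin();
            continue;
        }
        ++it;
    }
}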
void MetadataManager::_addRangeToClean_inlock(const ChunkRange& range) {
    invariant(!rangeMapOverlaps(_rangesToClean, range.getMin(), range.getMax()));
    invariant(!rangeMapOverlaps(_receivingChunks, range.getMin(), range.getMax()));

    _rangesToClean.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));
}