Example No. 1
void MetadataManager::beginReceive(const ChunkRange& range) {
    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);

    // Collection is not known to be sharded if the active metadata tracker is null
    invariant(_activeMetadataTracker);

    // If the range overlaps any pending chunks, a previous migration must have failed and we
    // need to clean up all overlaps
    RangeVector overlappedChunks;
    getRangeMapOverlap(_receivingChunks, range.getMin(), range.getMax(), &overlappedChunks);

    for (const auto& overlapChunkMin : overlappedChunks) {
        auto itRecv = _receivingChunks.find(overlapChunkMin.first);
        invariant(itRecv != _receivingChunks.end());

        const ChunkRange receivingRange(itRecv->first, itRecv->second);

        _receivingChunks.erase(itRecv);

        // Make sure any potentially partially copied chunks are scheduled to be cleaned up
        _addRangeToClean_inlock(receivingRange);
    }

    // Need to ensure that the background range deleter task won't delete the range we are about to
    // receive
    _removeRangeToClean_inlock(range);
    _receivingChunks.insert(std::make_pair(range.getMin().getOwned(), range.getMax().getOwned()));

    // For compatibility with the current range deleter, update the pending chunks on the collection
    // metadata to include the chunk being received
    ChunkType chunk;
    chunk.setMin(range.getMin());
    chunk.setMax(range.getMax());
    _setActiveMetadata_inlock(_activeMetadataTracker->metadata->clonePlusPending(chunk));
}
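
The same pattern, stripped of MongoDB's types, is sketched below: pending ranges overlapping the incoming one are treated as leftovers of a failed migration and queued for cleanup before the new range is registered. This is a minimal standalone illustration, not MongoDB's API: the integer-keyed RangeMap stands in for the BSONObj-keyed one, ToyMetadataManager is a hypothetical name, and the single-key erase in the cleaner map simplifies what _removeRangeToClean_inlock actually does with partial overlaps.

#include <cassert>
#include <map>
#include <mutex>
#include <vector>

// Stand-in for MongoDB's RangeMap: min -> max, ranges half-open as [min, max).
using RangeMap = std::map<int, int>;

class ToyMetadataManager {
public:
    // Mirrors the shape of MetadataManager::beginReceive: any pending range that
    // overlaps the incoming one is assumed to be a failed earlier migration, so
    // it is erased and scheduled for cleanup before the new range is registered.
    void beginReceive(int min, int max) {
        std::lock_guard<std::mutex> scopedLock(_managerLock);

        // Collect the keys of all pending ranges overlapping [min, max)
        std::vector<int> overlapMins;
        for (const auto& r : _receivingChunks) {
            if (r.first < max && r.second > min)
                overlapMins.push_back(r.first);
        }

        // Erase each overlap and make sure the partially copied data is cleaned up
        for (int m : overlapMins) {
            auto itRecv = _receivingChunks.find(m);
            assert(itRecv != _receivingChunks.end());
            _rangesToClean.insert(*itRecv);
            _receivingChunks.erase(itRecv);
        }

        // Ensure the background deleter won't remove the range we are about to
        // receive (simplified: the real code removes any overlapping portion,
        // not just an exact key match)
        _rangesToClean.erase(min);
        _receivingChunks.emplace(min, max);
    }

private:
    std::mutex _managerLock;
    RangeMap _receivingChunks;
    RangeMap _rangesToClean;
};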
Example No. 2
    Status MetadataLoader::promotePendingChunks( const CollectionMetadata* afterMetadata,
                                                 CollectionMetadata* remoteMetadata ) const {

        // Ensure pending chunks are applicable
        bool notApplicable =
            ( NULL == afterMetadata || NULL == remoteMetadata ) ||
            ( afterMetadata->getShardVersion() > remoteMetadata->getShardVersion() ) ||
            ( afterMetadata->getShardVersion().epoch() != 
                  remoteMetadata->getShardVersion().epoch() );
        if ( notApplicable ) return Status::OK();

        // The chunks from remoteMetadata are the latest loaded version, and the pending
        // chunks from afterMetadata are the latest pending version.  If no trickery is
        // afoot, each pending range should match exactly zero or one loaded chunk.

        remoteMetadata->_pendingMap = afterMetadata->_pendingMap;

        // Resolve our pending chunks against the chunks we've loaded
        for ( RangeMap::iterator it = remoteMetadata->_pendingMap.begin();
                it != remoteMetadata->_pendingMap.end(); ) {

            if ( !rangeMapOverlaps( remoteMetadata->_chunksMap, it->first, it->second ) ) {
                ++it;
                continue;
            }

            // Our pending range overlaps at least one chunk

            if ( rangeMapContains( remoteMetadata->_chunksMap, it->first, it->second ) ) {

                // Chunk was promoted from pending, successful migration
                LOG( 2 ) << "verified chunk " << rangeToString( it->first, it->second )
                         << " was migrated earlier to this shard" << endl;

                remoteMetadata->_pendingMap.erase( it++ );
            }
            else {

                // Something strange happened, maybe manual editing of config?
                RangeVector overlap;
                getRangeMapOverlap( remoteMetadata->_chunksMap,
                                    it->first,
                                    it->second,
                                    &overlap );

                string errMsg = str::stream()
                    << "the remote metadata changed unexpectedly, pending range "
                    << rangeToString( it->first, it->second )
                    << " does not exactly overlap loaded chunks "
                    << overlapToString( overlap );

                return Status( ErrorCodes::RemoteChangeDetected, errMsg );
            }
        }

        return Status::OK();
    }
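
The per-range decision in promotePendingChunks reduces to three outcomes, made explicit in the small sketch below. It is a simplified model over integer ranges: resolvePending and PendingOutcome are illustrative names, and the two helpers approximate what rangeMapOverlaps and rangeMapContains do in the real range arithmetic code (containment here means an exact map entry, which matches how successful migrations are detected above).

#include <map>

// Stand-in for MongoDB's RangeMap: min -> max, ranges half-open as [min, max).
using RangeMap = std::map<int, int>;

// True if any loaded range intersects [min, max)
bool rangeMapOverlaps(const RangeMap& ranges, int min, int max) {
    for (const auto& r : ranges) {
        if (r.first < max && r.second > min)
            return true;
    }
    return false;
}

// True if the map holds an entry exactly equal to [min, max)
bool rangeMapContains(const RangeMap& ranges, int min, int max) {
    auto it = ranges.find(min);
    return it != ranges.end() && it->second == max;
}

enum class PendingOutcome { StillPending, Promoted, Inconsistent };

// Mirrors the per-range branches of promotePendingChunks: no overlap keeps the
// range pending, an exact match promotes it, and a partial overlap signals that
// the remote metadata changed unexpectedly (RemoteChangeDetected above).
PendingOutcome resolvePending(const RangeMap& loadedChunks, int min, int max) {
    if (!rangeMapOverlaps(loadedChunks, min, max))
        return PendingOutcome::StillPending;
    if (rangeMapContains(loadedChunks, min, max))
        return PendingOutcome::Promoted;
    return PendingOutcome::Inconsistent;
}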
Example No. 3
void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata> remoteMetadata) {
    LOG(1) << "Refreshing the active metadata from "
           << (_activeMetadataTracker->metadata ? _activeMetadataTracker->metadata->toStringBasic()
                                                : "(empty)")
           << ", to " << (remoteMetadata ? remoteMetadata->toStringBasic() : "(empty)");

    stdx::lock_guard<stdx::mutex> scopedLock(_managerLock);

    // Collection is no longer sharded
    if (!remoteMetadata) {
        log() << "Marking collection as not sharded.";

        _receivingChunks.clear();
        _rangesToClean.clear();

        _setActiveMetadata_inlock(nullptr);
        return;
    }

    invariant(!remoteMetadata->getCollVersion().isWriteCompatibleWith(ChunkVersion::UNSHARDED()));
    invariant(!remoteMetadata->getShardVersion().isWriteCompatibleWith(ChunkVersion::UNSHARDED()));

    // Collection is not currently sharded
    if (!_activeMetadataTracker->metadata) {
        log() << "Marking collection as sharded with version " << remoteMetadata->toStringBasic();

        invariant(_receivingChunks.empty());
        invariant(_rangesToClean.empty());

        _setActiveMetadata_inlock(std::move(remoteMetadata));
        return;
    }

    // If the metadata being installed has a different epoch from ours, this means the collection
    // was dropped and recreated, so we must entirely reset the metadata state
    if (_activeMetadataTracker->metadata->getCollVersion().epoch() !=
        remoteMetadata->getCollVersion().epoch()) {
        log() << "Overwriting collection metadata due to epoch change.";

        _receivingChunks.clear();
        _rangesToClean.clear();

        _setActiveMetadata_inlock(std::move(remoteMetadata));
        return;
    }

    // We already have a newer version
    if (_activeMetadataTracker->metadata->getCollVersion() >= remoteMetadata->getCollVersion()) {
        LOG(1) << "Attempted to refresh active metadata "
               << _activeMetadataTracker->metadata->toStringBasic() << " with an older version "
               << remoteMetadata->toStringBasic();

        return;
    }

    // Resolve any receiving chunks, which might have completed by now
    for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
        const BSONObj min = it->first;
        const BSONObj max = it->second;

        // Check how the receiving range relates to the chunks in the remote metadata
        if (rangeMapContains(remoteMetadata->getChunks(), min, max)) {
            // The remote metadata contains a chunk we were earlier in the process of receiving, so
            // we deem it successfully received.
            LOG(2) << "Verified chunk " << ChunkRange(min, max).toString()
                   << " was migrated earlier to this shard";

            _receivingChunks.erase(it++);
            continue;
        } else if (!rangeMapOverlaps(remoteMetadata->getChunks(), min, max)) {
            ++it;
            continue;
        }

        // Partial overlap indicates that the earlier migration has failed, but the chunk being
        // migrated underwent some splits and other migrations and ended up here again. In this
        // case, we will request full reload of the metadata. Currently this cannot happen, because
        // all migrations are with the explicit knowledge of the recipient shard. However, we leave
        // the option open so that chunk splits can do empty chunk move without having to notify the
        // recipient.
        RangeVector overlappedChunks;
        getRangeMapOverlap(remoteMetadata->getChunks(), min, max, &overlappedChunks);

        for (const auto& overlapChunkMin : overlappedChunks) {
            auto itRecv = _receivingChunks.find(overlapChunkMin.first);
            invariant(itRecv != _receivingChunks.end());

            const ChunkRange receivingRange(itRecv->first, itRecv->second);

            _receivingChunks.erase(itRecv);

            // Make sure any potentially partially copied chunks are scheduled to be cleaned up
            _addRangeToClean_inlock(receivingRange);
        }

        // Need to reset the iterator, because the erasures above may have invalidated it
        it = _receivingChunks.begin();
    }

    // For compatibility with the current range deleter, which is driven entirely by the contents
    // of the CollectionMetadata, update the pending chunks
    for (const auto& receivingChunk : _receivingChunks) {
        ChunkType chunk;
        chunk.setMin(receivingChunk.first);
        chunk.setMax(receivingChunk.second);
        remoteMetadata = remoteMetadata->clonePlusPending(chunk);
    }

    _setActiveMetadata_inlock(std::move(remoteMetadata));
}
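
The receiving-chunk resolution loop above is the subtlest part, because entries are erased mid-iteration. The sketch below reproduces its three branches over integer ranges; it is an illustration under simplifying assumptions, not the real implementation: the cross-lookup through getRangeMapOverlap is collapsed into a single erase-and-restart, and all names are hypothetical.

#include <map>

// Stand-in for MongoDB's RangeMap: min -> max, ranges half-open as [min, max).
using RangeMap = std::map<int, int>;

// Sketch of the resolution loop in refreshActiveMetadata: an exact match means
// the migration completed, no overlap means the migration is still in flight,
// and a partial overlap means a failed migration left partially copied data.
void resolveReceivingChunks(RangeMap& receiving,
                            const RangeMap& remoteChunks,
                            RangeMap& rangesToClean) {
    for (auto it = receiving.begin(); it != receiving.end();) {
        const int min = it->first;
        const int max = it->second;

        auto exact = remoteChunks.find(min);
        if (exact != remoteChunks.end() && exact->second == max) {
            // The remote metadata contains the whole chunk: successfully received
            receiving.erase(it++);
            continue;
        }

        bool overlaps = false;
        for (const auto& c : remoteChunks) {
            if (c.first < max && c.second > min) {
                overlaps = true;
                break;
            }
        }

        if (!overlaps) {
            // No trace of the chunk in the remote metadata yet: keep waiting
            ++it;
            continue;
        }

        // Partial overlap: discard the partially copied range, schedule it for
        // cleanup, and restart the scan since erase() invalidated the iterator
        rangesToClean.emplace(min, max);
        receiving.erase(it);
        it = receiving.begin();
    }
}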