Example #1
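This example appears to come from MongoDB's shard-side chunk-merge code. mergeChunks() collapses a contiguous run of chunks in [minKey, maxKey) into a single chunk: it validates the config server connection string, takes a distributed lock on the collection, refreshes and sanity-checks the shard's metadata, collects the chunks covering the range into an OwnedPointerVector<ChunkType>, verifies that the range starts and ends on chunk boundaries with no holes, applies the merge on the config server via an applyOps command, and finally installs the merged metadata locally under a database-exclusive lock and logs the change. On any failure it fills in *errMsg, logs a warning, and returns false.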
    bool mergeChunks( OperationContext* txn,
                      const NamespaceString& nss,
                      const BSONObj& minKey,
                      const BSONObj& maxKey,
                      const OID& epoch,
                      string* errMsg ) {

        //
        // Get sharding state up-to-date
        //

        ConnectionString configLoc = ConnectionString::parse( shardingState.getConfigServer(),
                                                              *errMsg );
        if ( !configLoc.isValid() ){
            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get the distributed lock
        //

        ScopedDistributedLock collLock( configLoc, nss.ns() );
        collLock.setLockMessage( stream() << "merging chunks in " << nss.ns() << " from "
                                          << minKey << " to " << maxKey );

        Status acquisitionStatus = collLock.tryAcquire();
        if (!acquisitionStatus.isOK()) {
            *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                               << " to merge chunks in [" << minKey << "," << maxKey << ")"
                               << causedBy(acquisitionStatus);

            warning() << *errMsg << endl;
            return false;
        }

        //
        // We now have the collection lock; refresh metadata to the latest version and sanity check it
        //

        ChunkVersion shardVersion;
        Status status = shardingState.refreshMetadataNow(txn, nss.ns(), &shardVersion);

        if ( !status.isOK() ) {

            *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                    << nss.ns() << causedBy( status.reason() );

            warning() << *errMsg << endl;
            return false;
        }

        if ( epoch.isSet() && shardVersion.epoch() != epoch ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has changed" << " since merge was sent" << "(sent epoch : "
                               << epoch.toString()
                               << ", current epoch : " << shardVersion.epoch().toString() << ")";

            warning() << *errMsg << endl;
            return false;
        }

        CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss.ns() );

        if ( !metadata || metadata->getKeyPattern().isEmpty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " is not sharded";

            warning() << *errMsg << endl;
            return false;
        }

        dassert( metadata->getShardVersion().equals( shardVersion ) );

        if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {

            *errMsg = stream() << "could not merge chunks, the range "
                               << rangeToString( minKey, maxKey ) << " is not valid"
                               << " for collection " << nss.ns() << " with key pattern "
                               << metadata->getKeyPattern();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get merged chunk information
        //

        ChunkVersion mergeVersion = metadata->getCollVersion();
        mergeVersion.incMinor();

        OwnedPointerVector<ChunkType> chunksToMerge;

        ChunkType itChunk;
        itChunk.setMin( minKey );
        itChunk.setMax( minKey );
        itChunk.setNS( nss.ns() );
        itChunk.setShard( shardingState.getShardName() );

        while ( itChunk.getMax().woCompare( maxKey ) < 0 &&
                metadata->getNextChunk( itChunk.getMax(), &itChunk ) ) {
            auto_ptr<ChunkType> saved( new ChunkType );
            itChunk.cloneTo( saved.get() );
            chunksToMerge.mutableVector().push_back( saved.release() );
        }

        if ( chunksToMerge.empty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " and ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Validate that the range starts and ends at chunk boundaries and has no holes; error if not valid
        //

        BSONObj firstDocMin = ( *chunksToMerge.begin() )->getMin();
        BSONObj firstDocMax = ( *chunksToMerge.begin() )->getMax();
        // minKey is inclusive
        bool minKeyInRange = rangeContains( firstDocMin, firstDocMax, minKey );

        if ( !minKeyInRange ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        BSONObj lastDocMin = ( *chunksToMerge.rbegin() )->getMin();
        BSONObj lastDocMax = ( *chunksToMerge.rbegin() )->getMax();
        // maxKey is exclusive
        bool maxKeyInRange = lastDocMin.woCompare( maxKey ) < 0 &&
                lastDocMax.woCompare( maxKey ) >= 0;

        if ( !maxKeyInRange ) {
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        bool validRangeStartKey = firstDocMin.woCompare( minKey ) == 0;
        bool validRangeEndKey = lastDocMax.woCompare( maxKey ) == 0;

        if ( !validRangeStartKey || !validRangeEndKey ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " does not contain a chunk "
                               << ( !validRangeStartKey ? "starting at " + minKey.toString() : "" )
                               << ( !validRangeStartKey && !validRangeEndKey ? " or " : "" )
                               << ( !validRangeEndKey ? "ending at " + maxKey.toString() : "" );

            warning() << *errMsg << endl;
            return false;
        }

        if ( chunksToMerge.size() == 1 ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " already contains chunk for " << rangeToString( minKey, maxKey );

            warning() << *errMsg << endl;
            return false;
        }

        bool holeInRange = false;

        // Look for hole in range
        ChunkType* prevChunk = *chunksToMerge.begin();
        ChunkType* nextChunk = NULL;
        for ( OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
                it != chunksToMerge.end(); ++it ) {
            if ( it == chunksToMerge.begin() ) continue;

            nextChunk = *it;
            if ( prevChunk->getMax().woCompare( nextChunk->getMin() ) != 0 ) {
                holeInRange = true;
                break;
            }
            prevChunk = nextChunk;
        }

        if ( holeInRange ) {

            dassert( NULL != nextChunk );
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has a hole in the range " << rangeToString( minKey, maxKey )
                               << " at " << rangeToString( prevChunk->getMax(),
                                                           nextChunk->getMin() );

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Run apply ops command
        //

        BSONObj applyOpsCmd = buildApplyOpsCmd( chunksToMerge,
                                                shardVersion,
                                                mergeVersion );

        bool ok;
        BSONObj result;
        try {
            ScopedDbConnection conn( configLoc, 30.0 );
            ok = conn->runCommand( "config", applyOpsCmd, result );
            if ( !ok ) *errMsg = result.toString();
            conn.done();
        }
        catch( const DBException& ex ) {
            ok = false;
            *errMsg = ex.toString();
        }

        if ( !ok ) {
            *errMsg = stream() << "could not merge chunks for " << nss.ns()
                               << ", writing to config failed" << causedBy( errMsg );

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Install merged chunk metadata
        //

        {
            Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X);
            shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
        }

        //
        // Log change
        //

        BSONObj mergeLogEntry = buildMergeLogEntry( chunksToMerge,
                                                    shardVersion,
                                                    mergeVersion );

        configServer.logChange( "merge", nss.ns(), mergeLogEntry );

        return true;
    }
Example #2
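This example appears to come from MongoDB's GeoJSON parsing code. parseGeoJSONPolygonCoordinates() turns the coordinates array of a GeoJSON Polygon into an S2Polygon: each loop's vertex array is parsed, checked for closure, stripped of its duplicated last point, required to have at least three distinct vertices, validated as an S2Loop, and normalized; every loop after the first must be a hole contained by the first (exterior) loop; and the assembled polygon must pass S2Polygon::IsValid(), be normalized, and nest its loops at most one level deep.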
static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem, S2Polygon* out) {
    if (Array != elem.type()) {
        return BAD_VALUE("Polygon coordinates must be an array");
    }

    OwnedPointerVector<S2Loop> loops;
    Status status = Status::OK();
    string err;

    BSONObjIterator it(elem.Obj());
    // Iterate all loops of the polygon.
    while (it.more()) {
        // Parse the array of vertices of a loop.
        BSONElement coordinateElt = it.next();
        vector<S2Point> points;
        status = parseArrayOfCoordinates(coordinateElt, &points);
        if (!status.isOK())
            return status;

        // Check if the loop is closed.
        status = isLoopClosed(points, coordinateElt);
        if (!status.isOK())
            return status;

        eraseDuplicatePoints(&points);
        // Drop the duplicated last point.
        points.resize(points.size() - 1);

        // At least 3 vertices.
        if (points.size() < 3) {
            return BAD_VALUE(
                "Loop must have at least 3 different vertices: " << coordinateElt.toString(false));
        }

        S2Loop* loop = new S2Loop(points);
        loops.push_back(loop);

        // Check whether this loop is valid.
        // 1. At least 3 vertices.
        // 2. All vertices must be unit length. Guaranteed by parsePoints().
        // 3. Loops are not allowed to have any duplicate vertices.
        // 4. Non-adjacent edges are not allowed to intersect.
        if (!loop->IsValid(&err)) {
            return BAD_VALUE("Loop is not valid: " << coordinateElt.toString(false) << " " << err);
        }
        // If the loop is more than one hemisphere, invert it.
        loop->Normalize();

        // The first loop must be the exterior ring; any others must be
        // interior rings (holes).
        if (loops.size() > 1 && !loops[0]->Contains(loop)) {
            return BAD_VALUE(
                "Secondary loops not contained by first exterior loop - "
                "secondary loops must be holes: "
                << coordinateElt.toString(false)
                << " first loop: " << elem.Obj().firstElement().toString(false));
        }
    }

    if (loops.empty()) {
        return BAD_VALUE("Polygon has no loops.");
    }

    // Check if the given loops form a valid polygon.
    // 1. If a loop contains an edge AB, then no other loop may contain AB or BA.
    // 2. No loop covers more than half of the sphere.
    // 3. No two loops cross.
    if (!S2Polygon::IsValid(loops.vector(), &err))
        return BAD_VALUE("Polygon isn't valid: " << err << " " << elem.toString(false));

    // Given that all loops are valid / normalized and S2Polygon::IsValid() above returned
    // true, the polygon must be valid. See the S2Polygon member function IsValid().

    // Transfer ownership of the loops to the polygon; this clears the loop vector.
    out->Init(&loops.mutableVector());

    // Check if every loop of this polygon shares at most one vertex with
    // its parent loop.
    if (!out->IsNormalized(&err))
        // "err" looks like "Loop 1 shares more than one vertex with its parent loop 0"
        return BAD_VALUE(err << ": " << elem.toString(false));

    // Reject a polygon with more than one exterior ring: allowed by S2, but not by GeoJSON.
    //
    // Loops are indexed according to a preorder traversal of the nesting hierarchy.
    // GetLastDescendant() returns the index of the last loop that is contained within
    // a given loop. We guarantee that the first loop is the exterior ring.
    if (out->GetLastDescendant(0) < out->num_loops() - 1) {
        return BAD_VALUE("Only one exterior polygon loop is allowed: " << elem.toString(false));
    }

    // In GeoJSON, only one level of nesting is allowed.
    // S2Polygon assigns each loop a depth according to the nesting hierarchy:
    // the exterior ring has depth 0, a hole in it has depth 1, and so on.
    for (int i = 0; i < out->num_loops(); i++) {
        if (out->loop(i)->depth() > 1) {
            return BAD_VALUE("Polygon interior loops cannot be nested: " << elem.toString(false));
        }
    }
    return Status::OK();
}
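
Both examples rely on OwnedPointerVector<T> to keep their many early error returns leak-free: heap-allocated ChunkType and S2Loop objects are pushed into the container, which deletes whatever it still owns when it goes out of scope. The sketch below is a minimal stand-in for such a container, written only to illustrate the pattern (it is not MongoDB's actual class) and using only the operations the examples above call: push_back(), empty(), size(), operator[], vector(), and mutableVector().

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Simplified illustration of an owning pointer vector (not MongoDB's implementation):
// it owns every raw pointer pushed into it and deletes whatever is still held when it
// goes out of scope, which is what makes the early "return false" paths above leak-free.
template <typename T>
class SimpleOwnedPointerVector {
public:
    SimpleOwnedPointerVector() {}

    ~SimpleOwnedPointerVector() {
        // Delete only what is still in the vector; pointers that callers removed
        // through mutableVector() (e.g. after transferring ownership elsewhere,
        // as S2Polygon::Init() does above) are not touched.
        for (std::size_t i = 0; i < _vec.size(); ++i) {
            delete _vec[i];
        }
    }

    void push_back(T* p) { _vec.push_back(p); }

    bool empty() const { return _vec.empty(); }
    std::size_t size() const { return _vec.size(); }
    T* operator[](std::size_t i) const { return _vec[i]; }

    // Read-only and mutable views of the underlying vector, mirroring the
    // vector()/mutableVector() calls in the examples.
    const std::vector<T*>& vector() const { return _vec; }
    std::vector<T*>& mutableVector() { return _vec; }

private:
    // Non-copyable (C++03 style, matching the era of the code above):
    // copying would lead to double deletes.
    SimpleOwnedPointerVector(const SimpleOwnedPointerVector&);
    SimpleOwnedPointerVector& operator=(const SimpleOwnedPointerVector&);

    std::vector<T*> _vec;
};

int main() {
    SimpleOwnedPointerVector<std::string> owned;
    owned.mutableVector().push_back(new std::string("chunk-a"));
    owned.mutableVector().push_back(new std::string("chunk-b"));

    if (owned.empty()) {
        // An early return here would still free the strings pushed above.
        return 1;
    }

    std::cout << owned.size() << " owned strings, first: " << *owned[0] << std::endl;
    return 0;  // destructor deletes both strings
}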