Example #1: Strategy::getMore taking an OperationContext, with the new ClusterClientCursor code path
void Strategy::getMore(OperationContext* txn, Request& request) {
    Timer getMoreTimer;

    const char* ns = request.getns();
    const int ntoreturn = request.d().pullInt();
    const long long id = request.d().pullInt64();

    // TODO: Handle stale config exceptions here from the collection being dropped or sharded
    // during the op; for now this has the same semantics as a legacy request.
    const NamespaceString nss(ns);
    auto statusGetDb = grid.catalogCache()->getDatabase(txn, nss.db().toString());
    if (statusGetDb == ErrorCodes::DatabaseNotFound) {
        cursorCache.remove(id);
        replyToQuery(ResultFlag_CursorNotFound, request.p(), request.m(), 0, 0, 0);
        return;
    }

    uassertStatusOK(statusGetDb);

    // Spigot which controls whether this OP_GET_MORE request on mongos is handled by the new
    // ClusterClientCursor code path.
    //
    // TODO: Delete the spigot and always use the new code.
    if (useClusterClientCursor) {
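        // In the legacy OP_GET_MORE message an ntoreturn of zero means "no batch size specified",
        // so only pass an explicit batchSize through when the client supplied one.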
        boost::optional<long long> batchSize;
        if (ntoreturn) {
            batchSize = ntoreturn;
        }
        GetMoreRequest getMoreRequest(NamespaceString(ns), id, batchSize, boost::none);

        auto cursorResponse = ClusterFind::runGetMore(txn, getMoreRequest);
        if (cursorResponse == ErrorCodes::CursorNotFound) {
            replyToQuery(ResultFlag_CursorNotFound, request.p(), request.m(), 0, 0, 0);
            return;
        }
        uassertStatusOK(cursorResponse.getStatus());

        // Build the response document.
        //
        // TODO: this constant should be shared between mongos and mongod, and should not be inside
        // ShardedClientCursor.
        BufBuilder buffer(ShardedClientCursor::INIT_REPLY_BUFFER_SIZE);

        int numResults = 0;
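        // Append each document from the returned batch to the reply buffer, counting the results.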
        for (const auto& obj : cursorResponse.getValue().batch) {
            buffer.appendBuf((void*)obj.objdata(), obj.objsize());
            ++numResults;
        }

        replyToQuery(0,
                     request.p(),
                     request.m(),
                     buffer.buf(),
                     buffer.len(),
                     numResults,
                     cursorResponse.getValue().numReturnedSoFar.value_or(0),
                     cursorResponse.getValue().cursorId);
        return;
    }
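
    // The spigot is off: fall back to the legacy ShardedClientCursor / cursor cache code path.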

    shared_ptr<DBConfig> config = statusGetDb.getValue();

    ShardPtr primary;
    ChunkManagerPtr info;
    config->getChunkManagerOrPrimary(txn, ns, info, primary);

    //
    // TODO: Cleanup cursor cache, consolidate into single codepath
    //
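    // An unsharded cursor is tracked by the host it lives on (getRef), while a sharded cursor is
    // tracked as a ShardedClientCursor; a given cursor id should appear in only one of the two.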
    const string host = cursorCache.getRef(id);
    ShardedClientCursorPtr cursor = cursorCache.get(id);
    int cursorMaxTimeMS = cursorCache.getMaxTimeMS(id);

    // Cursor ids should not overlap between sharded and unsharded cursors
    massert(17012,
            str::stream() << "duplicate sharded and unsharded cursor id " << id << " detected for "
                          << ns << ", duplicated on host " << host,
            NULL == cursorCache.get(id).get() || host.empty());

    ClientBasic* client = ClientBasic::getCurrent();
    NamespaceString nsString(ns);
    AuthorizationSession* authSession = AuthorizationSession::get(client);
    Status status = authSession->checkAuthForGetMore(nsString, id, false);
    audit::logGetMoreAuthzCheck(client, nsString, id, status.code());
    uassertStatusOK(status);

    if (!host.empty()) {
        LOG(3) << "single getmore: " << ns;

        // We use ScopedDbConnection because we don't care about config versions here;
        // not deleting data is handled elsewhere, and we don't want to call setShardVersion.
        ScopedDbConnection conn(host);

        Message response;
        bool ok = conn->callRead(request.m(), response);
        uassert(10204, "dbgrid: getmore: error calling db", ok);

        // A cursor id of zero in the response means the remote cursor is exhausted.
        bool hasMore = (response.singleData().getCursor() != 0);

        if (!hasMore) {
            cursorCache.removeRef(id);
        }

        request.reply(response, "" /*conn->getServerAddress() */);
        conn.done();
        return;
    } else if (cursor) {
        if (cursorMaxTimeMS == kMaxTimeCursorTimeLimitExpired) {
            cursorCache.remove(id);
            uasserted(ErrorCodes::ExceededTimeLimit, "operation exceeded time limit");
        }

        // TODO: Try to match logic of mongod, where on subsequent getMore() we pull lots more data?
        BufBuilder buffer(ShardedClientCursor::INIT_REPLY_BUFFER_SIZE);
        int docCount = 0;
        const int startFrom = cursor->getTotalSent();
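        // sendNextBatch fills the buffer with the next batch of documents (honoring ntoreturn) and
        // reports whether the cursor still has more results.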
        bool hasMore = cursor->sendNextBatch(ntoreturn, buffer, docCount);

        if (hasMore) {
            // still more data
            cursor->accessed();

            if (cursorMaxTimeMS != kMaxTimeCursorNoTimeLimit) {
                // Update remaining amount of time in cursor cache.
                int cursorLeftoverMillis = cursorMaxTimeMS - getMoreTimer.millis();
                if (cursorLeftoverMillis <= 0) {
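                    // Out of time: mark the cursor so that the next getMore on it fails with
                    // ExceededTimeLimit (see the check at the top of this branch).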
                    cursorLeftoverMillis = kMaxTimeCursorTimeLimitExpired;
                }
                cursorCache.updateMaxTimeMS(id, cursorLeftoverMillis);
            }
        } else {
            // we've exhausted the cursor
            cursorCache.remove(id);
        }

        replyToQuery(0,
                     request.p(),
                     request.m(),
                     buffer.buf(),
                     buffer.len(),
                     docCount,
                     startFrom,
                     hasMore ? cursor->getId() : 0);
        return;
    } else {
        LOG(3) << "could not find cursor " << id << " in cache for " << ns;

        replyToQuery(ResultFlag_CursorNotFound, request.p(), request.m(), 0, 0, 0);
        return;
    }
}
Example #2: an earlier Strategy::getMore taking only a Request, without the ClusterClientCursor path
    void Strategy::getMore( Request& r ) {
        Timer getMoreTimer;

        const char* ns = r.getns();
        const int ntoreturn = r.d().pullInt();
        const long long id = r.d().pullInt64();

        // TODO: Handle stale config exceptions here from the collection being dropped or sharded
        // during the op; for now this has the same semantics as a legacy request.
        const NamespaceString nss(ns);
        auto statusGetDb = grid.catalogCache()->getDatabase(nss.db().toString());
        if (statusGetDb == ErrorCodes::DatabaseNotFound) {
            cursorCache.remove(id);
            replyToQuery(ResultFlag_CursorNotFound, r.p(), r.m(), 0, 0, 0);
            return;
        }

        uassertStatusOK(statusGetDb);

        shared_ptr<DBConfig> config = statusGetDb.getValue();

        ShardPtr primary;
        ChunkManagerPtr info;
        config->getChunkManagerOrPrimary( ns, info, primary );

        //
        // TODO: Cleanup cursor cache, consolidate into single codepath
        //
        const string host = cursorCache.getRef(id);
        ShardedClientCursorPtr cursor = cursorCache.get( id );
        int cursorMaxTimeMS = cursorCache.getMaxTimeMS( id );

        // Cursor ids should not overlap between sharded and unsharded cursors
        massert( 17012, str::stream() << "duplicate sharded and unsharded cursor id "
                                      << id << " detected for " << ns
                                      << ", duplicated on host " << host,
                 NULL == cursorCache.get( id ).get() || host.empty() );

        ClientBasic* client = ClientBasic::getCurrent();
        NamespaceString nsString(ns);
        AuthorizationSession* authSession = client->getAuthorizationSession();
        Status status = authSession->checkAuthForGetMore( nsString, id );
        audit::logGetMoreAuthzCheck( client, nsString, id, status.code() );
        uassertStatusOK(status);

        if( !host.empty() ){

            LOG(3) << "single getmore: " << ns << endl;

            // We use ScopedDbConnection because we don't care about config versions here;
            // not deleting data is handled elsewhere, and we don't want to call setShardVersion.
            ScopedDbConnection conn(host);

            Message response;
            bool ok = conn->callRead( r.m() , response);
            uassert( 10204 , "dbgrid: getmore: error calling db", ok);

            bool hasMore = (response.singleData().getCursor() != 0);

            if ( !hasMore ) {
                cursorCache.removeRef( id );
            }

            r.reply( response , "" /*conn->getServerAddress() */ );
            conn.done();
            return;
        }
        else if ( cursor ) {

            if ( cursorMaxTimeMS == kMaxTimeCursorTimeLimitExpired ) {
                cursorCache.remove( id );
                uasserted( ErrorCodes::ExceededTimeLimit, "operation exceeded time limit" );
            }

            // TODO: Try to match logic of mongod, where on subsequent getMore() we pull lots more data?
            BufBuilder buffer( ShardedClientCursor::INIT_REPLY_BUFFER_SIZE );
            int docCount = 0;
            const int startFrom = cursor->getTotalSent();
            bool hasMore = cursor->sendNextBatch(ntoreturn, buffer, docCount);

            if ( hasMore ) {
                // still more data
                cursor->accessed();

                if ( cursorMaxTimeMS != kMaxTimeCursorNoTimeLimit ) {
                    // Update remaining amount of time in cursor cache.
                    int cursorLeftoverMillis = cursorMaxTimeMS - getMoreTimer.millis();
                    if ( cursorLeftoverMillis <= 0 ) {
                        cursorLeftoverMillis = kMaxTimeCursorTimeLimitExpired;
                    }
                    cursorCache.updateMaxTimeMS( id, cursorLeftoverMillis );
                }
            }
            else {
                // we've exhausted the cursor
                cursorCache.remove( id );
            }

            replyToQuery( 0, r.p(), r.m(), buffer.buf(), buffer.len(), docCount,
                    startFrom, hasMore ? cursor->getId() : 0 );
            return;
        }
        else {

            LOG( 3 ) << "could not find cursor " << id << " in cache for " << ns << endl;

            replyToQuery( ResultFlag_CursorNotFound , r.p() , r.m() , 0 , 0 , 0 );
            return;
        }
    }