Code example #1
File: dbclientcursor.cpp  Project: mihail812/mongo
void DBClientCursor::dataReceived(bool& retry, string& host) {
    // If this is a reply to our initial command request.
    if (_isCommand && cursorId == 0) {
        commandDataReceived();
        return;
    }

    QueryResult::View qr = batch.m.singleData().view2ptr();
    resultFlags = qr.getResultFlags();

    if (qr.getResultFlags() & ResultFlag_ErrSet) {
        wasError = true;
    }

    if (qr.getResultFlags() & ResultFlag_CursorNotFound) {
        // cursor id no longer valid at the server.
        invariant(qr.getCursorId() == 0);

        if (!(opts & QueryOption_CursorTailable)) {
            uasserted(13127,
                      str::stream() << "cursor id " << cursorId << " didn't exist on server.");
        }

        // 0 indicates no longer valid (dead)
        cursorId = 0;
    }

    if (cursorId == 0 || !(opts & QueryOption_CursorTailable)) {
        // only set initially: we don't want to kill it on end of data
        // if it's a tailable cursor
        cursorId = qr.getCursorId();
    }

    batch.nReturned = qr.getNReturned();
    batch.pos = 0;
    batch.data = qr.data();
    batch.remainingBytes = qr.dataLen();

    _client->checkResponse(batch.data, batch.nReturned, &retry, &host);  // watches for "not master"

    if (qr.getResultFlags() & ResultFlag_ShardConfigStale) {
        BSONObj error;
        verify(peekError(&error));
        throw RecvStaleConfigException(
            (string) "stale config on lazy receive" + causedBy(getErrField(error)), error);
    }

    /* this assert would fire the way we currently work:
        verify( nReturned || cursorId == 0 );
    */
}
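The branching above hinges on the OP_REPLY result-flag bitmask: a dead cursor, an $err document, and stale shard configuration each take a different path before the batch is consumed. Below is a minimal, self-contained sketch of that flag-handling pattern. The bit values follow the legacy wire protocol, but the enum and function names (LegacyResultFlags, interpretReplyFlags) are illustrative, not identifiers from the mongo source.

#include <cstdint>
#include <stdexcept>
#include <string>

// Legacy OP_REPLY responseFlags bits (values per the legacy wire protocol;
// the names here are illustrative stand-ins for the mongo constants).
enum LegacyResultFlags : std::uint32_t {
    kCursorNotFound   = 1 << 0,  // getMore used a cursor id the server no longer knows
    kErrSet           = 1 << 1,  // the single returned document is an $err object
    kShardConfigStale = 1 << 2,  // mongos routing data is out of date
};

// Hypothetical helper mirroring the branching in dataReceived(): decide whether
// the reply means a dead cursor, an error document, or ordinary batch data.
void interpretReplyFlags(std::uint32_t resultFlags, bool tailable, std::int64_t& cursorId) {
    if (resultFlags & kCursorNotFound) {
        if (!tailable)
            throw std::runtime_error("cursor id " + std::to_string(cursorId) +
                                     " didn't exist on server.");
        cursorId = 0;  // mark the cursor dead; a tailable caller may retry later
    }
    if (resultFlags & kErrSet) {
        // caller should surface the $err document instead of iterating the batch
    }
    if (resultFlags & kShardConfigStale) {
        // mongos must refresh its routing table and retry the operation
    }
}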
Code example #2
File: strategy.cpp  Project: yaochang/mongo
/**
 * Returns true if request is a query for sharded indexes.
 */
static bool doShardedIndexQuery(OperationContext* txn, Request& r, const QuerySpec& qSpec) {
    // Extract the ns field from the query, which may be embedded within the "query" or
    // "$query" field.
    auto nsField = qSpec.filter()["ns"];
    if (nsField.eoo()) {
        return false;
    }
    const NamespaceString indexNSSQuery(nsField.str());

    auto status = grid.catalogCache()->getDatabase(txn, indexNSSQuery.db().toString());
    if (!status.isOK()) {
        return false;
    }

    shared_ptr<DBConfig> config = status.getValue();
    if (!config->isSharded(indexNSSQuery.ns())) {
        return false;
    }

    // if you are querying on system.indexes, we need to make sure we go to a shard
    // that actually has chunks. This is not a perfect solution (what if you just
    // look at all indexes), but better than doing nothing.

    ShardPtr shard;
    ChunkManagerPtr cm;
    config->getChunkManagerOrPrimary(indexNSSQuery.ns(), cm, shard);
    if (cm) {
        set<ShardId> shardIds;
        cm->getAllShardIds(&shardIds);
        verify(shardIds.size() > 0);
        shard = grid.shardRegistry()->getShard(*shardIds.begin());
    }

    ShardConnection dbcon(shard->getConnString(), r.getns());
    DBClientBase& c = dbcon.conn();

    string actualServer;

    Message response;
    bool ok = c.call(r.m(), response, true, &actualServer);
    uassert(10200, "mongos: error calling db", ok);

    {
        QueryResult::View qr = response.singleData().view2ptr();
        if (qr.getResultFlags() & ResultFlag_ShardConfigStale) {
            dbcon.done();
            // Version is zero b/c this is deprecated codepath
            throw RecvStaleConfigException(r.getns(),
                                           "Strategy::doQuery",
                                           ChunkVersion(0, 0, OID()),
                                           ChunkVersion(0, 0, OID()));
        }
    }

    r.reply(response, actualServer.size() ? actualServer : c.getServerAddress());
    dbcon.done();

    return true;
}
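The routing decision in the middle of doShardedIndexQuery is the interesting part: a system.indexes query against a sharded collection is sent to one shard that actually owns chunks, while an unsharded collection falls back to the primary shard. A small sketch of just that selection logic, using stand-in types (ShardId as a string, pickIndexQueryTarget as a hypothetical helper, not mongo source identifiers):

#include <optional>
#include <set>
#include <string>

using ShardId = std::string;  // illustrative stand-in for the mongo ShardId type

// Mirrors the selection in doShardedIndexQuery(): if the collection is sharded,
// target the first shard that owns chunks; otherwise use the primary shard.
ShardId pickIndexQueryTarget(const std::optional<std::set<ShardId>>& chunkShards,
                             const ShardId& primaryShard) {
    if (chunkShards && !chunkShards->empty())
        return *chunkShards->begin();  // any chunk-owning shard can answer the index query
    return primaryShard;               // unsharded collection: data lives on the primary
}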
Code example #3
File: instance.cpp  Project: lianggx/mongo
void generateLegacyQueryErrorResponse(const AssertionException* exception,
                                      const QueryMessage& queryMessage,
                                      CurOp* curop,
                                      Message* response) {
    curop->debug().exceptionInfo = exception->getInfo();

    log(LogComponent::kQuery) << "assertion " << exception->toString() << " ns:" << queryMessage.ns
                              << " query:" << (queryMessage.query.valid()
                                                   ? queryMessage.query.toString()
                                                   : "query object is corrupt");
    if (queryMessage.ntoskip || queryMessage.ntoreturn) {
        log(LogComponent::kQuery) << " ntoskip:" << queryMessage.ntoskip
                                  << " ntoreturn:" << queryMessage.ntoreturn;
    }

    const SendStaleConfigException* scex = (exception->getCode() == ErrorCodes::SendStaleConfig)
        ? static_cast<const SendStaleConfigException*>(exception)
        : NULL;

    BSONObjBuilder err;
    exception->getInfo().append(err);
    if (scex) {
        err.append("ok", 0.0);
        err.append("ns", scex->getns());
        scex->getVersionReceived().addToBSON(err, "vReceived");
        scex->getVersionWanted().addToBSON(err, "vWanted");
    }
    BSONObj errObj = err.done();

    if (scex) {
        log(LogComponent::kQuery) << "stale version detected during query over " << queryMessage.ns
                                  << " : " << errObj;
    }

    BufBuilder bb;
    bb.skip(sizeof(QueryResult::Value));
    bb.appendBuf((void*)errObj.objdata(), errObj.objsize());

    // TODO: call replyToQuery() from here instead of this!!! see dbmessage.h
    QueryResult::View msgdata = bb.buf();
    bb.decouple();
    QueryResult::View qr = msgdata;
    qr.setResultFlags(ResultFlag_ErrSet);
    if (scex)
        qr.setResultFlags(qr.getResultFlags() | ResultFlag_ShardConfigStale);
    qr.msgdata().setLen(bb.len());
    qr.msgdata().setOperation(opReply);
    qr.setCursorId(0);
    qr.setStartingFrom(0);
    qr.setNReturned(1);
    response->setData(msgdata.view2ptr(), true);
}
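What this function ultimately writes into response is a legacy OP_REPLY carrying a single BSON error document, with ResultFlag_ErrSet (plus ResultFlag_ShardConfigStale for stale-config errors) in the response flags. A rough sketch of that layout, assuming the standard legacy reply header; the struct and helper names here are illustrative rather than mongo source identifiers:

#include <cstdint>
#include <cstring>
#include <vector>

#pragma pack(push, 1)
struct LegacyReplyHeader {          // legacy OP_REPLY wire layout
    std::int32_t messageLength;     // total message size, header included
    std::int32_t requestID;
    std::int32_t responseTo;
    std::int32_t opCode;            // 1 == opReply
    std::int32_t responseFlags;     // ErrSet, ShardConfigStale, ...
    std::int64_t cursorID;          // 0: nothing to getMore
    std::int32_t startingFrom;
    std::int32_t numberReturned;    // 1: the single error document
};
#pragma pack(pop)

// Sketch of the message generateLegacyQueryErrorResponse() produces:
// a reply header followed by one serialized BSON error document.
std::vector<char> buildErrorReply(const std::vector<char>& errBson, std::int32_t flags) {
    LegacyReplyHeader hdr{};
    hdr.messageLength  = static_cast<std::int32_t>(sizeof(hdr) + errBson.size());
    hdr.opCode         = 1;      // opReply
    hdr.responseFlags  = flags;  // at least the ErrSet bit
    hdr.numberReturned = 1;

    std::vector<char> msg(sizeof(hdr) + errBson.size());
    std::memcpy(msg.data(), &hdr, sizeof(hdr));
    if (!errBson.empty())
        std::memcpy(msg.data() + sizeof(hdr), errBson.data(), errBson.size());
    return msg;
}

A driver reading such a reply sees numberReturned == 1 with the ErrSet flag and surfaces the embedded document as the error.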
Code example #4
File: dbclientcursor.cpp  Project: DieterLutz/mongo
    void DBClientCursor::dataReceived( bool& retry, string& host ) {

        QueryResult::View qr = batch.m->singleData().view2ptr();
        resultFlags = qr.getResultFlags();

        if ( qr.getResultFlags() & ResultFlag_ErrSet ) {
            wasError = true;
        }

        if ( qr.getResultFlags() & ResultFlag_CursorNotFound ) {
            // cursor id no longer valid at the server.
            verify( qr.getCursorId() == 0 );
            cursorId = 0; // 0 indicates no longer valid (dead)
            if ( ! ( opts & QueryOption_CursorTailable ) )
                throw UserException( 13127 , "getMore: cursor didn't exist on server, possible restart or timeout?" );
        }

        if ( cursorId == 0 || ! ( opts & QueryOption_CursorTailable ) ) {
            // only set initially: we don't want to kill it on end of data
            // if it's a tailable cursor
            cursorId = qr.getCursorId();
        }

        batch.nReturned = qr.getNReturned();
        batch.pos = 0;
        batch.data = qr.data();

        _client->checkResponse( batch.data, batch.nReturned, &retry, &host ); // watches for "not master"

        if( qr.getResultFlags() & ResultFlag_ShardConfigStale ) {
            BSONObj error;
            verify( peekError( &error ) );
            throw RecvStaleConfigException( (string)"stale config on lazy receive" + causedBy( getErrField( error ) ), error );
        }

        /* this assert would fire the way we currently work:
            verify( nReturned || cursorId == 0 );
        */
    }
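This is an older revision of the same DBClientCursor::dataReceived shown in code example #1. The later version adds an early return for command replies (_isCommand && cursorId == 0), tracks batch.remainingBytes, and includes the missing cursor id in the uasserted message, whereas this revision throws a bare UserException 13127 and does not record how many batch bytes remain.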