Example #1
 bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
     // Disable LastError tracking while this command runs, then report the
     // previously recorded error and "nPrev" (how many operations ago it
     // happened), or -1 when there is no valid previous error.
     LastError *le = lastError.disableForCommand();
     le->appendSelf( result );
     if ( le->valid )
         result.append( "nPrev", le->nPrev );
     else
         result.append( "nPrev", -1 );
     return true;
 }
Example #2
 bool run(OperationContext* txn,
          const string& db,
          BSONObj& cmdObj,
          int,
          string& errmsg,
          BSONObjBuilder& result,
          bool fromRepl) {
     // Fetch this client's LastError record and clear any stored error state.
     LastError* le = lastError.get();
     verify(le);
     le->reset();
     return true;
 }
Example #3
            virtual bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
                LastError *le = lastError.disableForCommand();
                {
                    assert( le );
                    // Report the stored error only if the immediately preceding
                    // operation (nPrev == 1) recorded a message.
                    if ( le->msg.size() && le->nPrev == 1 ) {
                        le->appendSelf( result );
                        return true;
                    }
                }

                // Otherwise fall back to the per-client ClientInfo bookkeeping.
                ClientInfo * client = ClientInfo::get();
                return client->getLastError( cmdObj , result );
            }
Example #4
 bool run(OperationContext* txn,
          const string& dbname,
          BSONObj& cmdObj,
          int,
          string& errmsg,
          BSONObjBuilder& result) {
     // Fetch this client's LastError, disable further tracking for the duration
     // of this command, and report the stored error plus nPrev (-1 when none).
     LastError *le = &LastError::get(txn->getClient());
     le->disable();
     le->appendSelf(result, true);
     if (le->isValid())
         result.append("nPrev", le->getNPrev());
     else
         result.append("nPrev", -1);
     return true;
 }
Example #5
        bool run(const string& dbname, BSONObj& _cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            LastError *le = lastError.disableForCommand();

            // Only the error from the immediately preceding operation (nPrev == 1)
            // is reported; otherwise report "no error" plus the stored status.
            if ( le->nPrev != 1 ) {
                LastError::noError.appendSelf( result , false );
                le->appendSelfStatus( result );
            }
            else {
                le->appendSelf( result , false );
            }

            Client& c = cc();
            c.appendLastOp( result );

            result.appendNumber( "connectionId" , c.getConnectionId() ); // for sharding; also useful in general for debugging

            BSONObj cmdObj = _cmdObj;
            {
                BSONObj::iterator i(_cmdObj);
                i.next();
                if( !i.more() ) {
                    /* empty, use default */
                    BSONObj *def = getLastErrorDefault;
                    if( def )
                        cmdObj = *def;
                }
            }

            WriteConcernOptions writeConcern;
            Status status = writeConcern.parse( cmdObj );
            if ( !status.isOK() ) {
                result.append( "badGLE", cmdObj );
                errmsg = status.toString();
                return false;
            }

            WriteConcernResult res;
            status = waitForWriteConcern( cc(), writeConcern, &res );
            res.appendTo( &result );
            if ( status.code() == ErrorCodes::WriteConcernLegacyOK ) {
                result.append( "wnote", status.toString() );
                return true;
            }
            return appendCommandStatus( result, status );
        }
Example #6
    bool ClusterWriteCmd::run( const string& dbName,
                               BSONObj& cmdObj,
                               int options,
                               string& errMsg,
                               BSONObjBuilder& result,
                               bool ) {

        BatchedCommandRequest request( _writeType );
        BatchedCommandResponse response;

        // TODO: if we do namespace parsing, push this to the type
        if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {

            // Batch parse failure
            response.setOk( false );
            response.setErrCode( ErrorCodes::FailedToParse );
            response.setErrMessage( errMsg );

            dassert( response.isValid( &errMsg ) );
        }
        else {

            // Fixup the namespace to be a full ns internally
            NamespaceString nss( dbName, request.getNS() );
            request.setNS( nss.ns() );
            clusterWrite( request, &response, true /* autosplit */ );
        }

        // Populate the lastError object based on the write
        dassert( response.isValid( NULL ) );
        LastError* lastErrorForRequest = lastError.get( true /* create */ );
        dassert( lastErrorForRequest );
        lastErrorForRequest->reset();
        batchErrorToLastError( request, response, lastErrorForRequest );

        // TODO
        // There's a pending issue about how to report response here. If we use
        // the command infrastructure, we should reuse the 'errmsg' field. But
        // we have already filled that message inside the BatchCommandResponse.
        // return response.getOk();
        result.appendElements( response.toBSON() );
        return true;
    }
Example #7
        virtual void process( Message& m , AbstractMessagingPort* p ){
            assert( p );
            Request r( m , p );

            // Associate a LastError record with this client's request so any
            // exception raised below can be recorded in it.
            LastError * le = lastError.startRequest( m , r.getClientId() );
            assert( le );
            
            if ( logLevel > 5 ){
                log(5) << "client id: " << hex << r.getClientId() << "\t" << r.getns() << "\t" << dec << r.op() << endl;
            }
            try {
                r.init();
                setClientId( r.getClientId() );
                r.process();
            }
            catch ( AssertionException & e ){
                log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException in process: " << e.what() << endl;

                le->raiseError( e.getCode() , e.what() );
                
                m.header()->id = r.id();
                
                if ( r.expectResponse() ){
                    BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
                    replyToQuery( ResultFlag_ErrSet, p , m , err );
                }
            }
            catch ( DBException& e ){
                log() << "DBException in process: " << e.what() << endl;
                
                le->raiseError( e.getCode() , e.what() );
                
                m.header()->id = r.id();
                
                if ( r.expectResponse() ){
                    BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
                    replyToQuery( ResultFlag_ErrSet, p , m , err );
                }
            }
        }
Example #8
            virtual bool run(const string& dbName, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
                LastError *le = lastError.disableForCommand();
                {
                    assert( le );
                    if ( le->msg.size() && le->nPrev == 1 ){
                        le->appendSelf( result );
                        return true;
                    }
                }
                
                ClientInfo * client = ClientInfo::get();
                set<string> * shards = client->getPrev();
                
                if ( shards->size() == 0 ){
                    result.appendNull( "err" );
                    return true;
                }

                //log() << "getlasterror enter: " << shards->size() << endl;


                vector<OID> writebacks;
                
                // handle single server
                if ( shards->size() == 1 ){
                    string theShard = *(shards->begin() );
                    result.append( "theshard" , theShard.c_str() );
                    ShardConnection conn( theShard , "" );
                    BSONObj res;
                    bool ok = conn->runCommand( dbName , cmdObj , res );
                    //log() << "\t" << res << endl;
                    result.appendElements( res );
                    conn.done();
                    result.append( "singleShard" , theShard );
                    addWriteBack( writebacks , res );
                    
                    // hit other machines just to block
                    for ( set<string>::const_iterator i=client->sinceLastGetError().begin(); i!=client->sinceLastGetError().end(); ++i ){
                        string temp = *i;
                        if ( temp == theShard )
                            continue;
                        
                        ShardConnection conn( temp , "" );
                        addWriteBack( writebacks , conn->getLastErrorDetailed() );
                        conn.done();
                    }
                    client->clearSinceLastGetError();
                    handleWriteBacks( writebacks );
                    return ok;
                }
                
                BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );

                long long n = 0;

                // hit each shard
                vector<string> errors;
                vector<BSONObj> errorObjects;
                for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ){
                    string theShard = *i;
                    bbb.append( theShard );
                    ShardConnection conn( theShard , "" );
                    BSONObj res;
                    bool ok = conn->runCommand( dbName , cmdObj , res );
                    addWriteBack( writebacks, res );
                    string temp = DBClientWithCommands::getLastErrorString( res );
                    if ( ok == false || temp.size() ){
                        errors.push_back( temp );
                        errorObjects.push_back( res );
                    }
                    n += res["n"].numberLong();
                    conn.done();
                }
                
                bbb.done();
                
                result.appendNumber( "n" , n );

                // hit other machines just to block
                for ( set<string>::const_iterator i=client->sinceLastGetError().begin(); i!=client->sinceLastGetError().end(); ++i ){
                    string temp = *i;
                    if ( shards->count( temp ) )
                        continue;
                    
                    ShardConnection conn( temp , "" );
                    addWriteBack( writebacks, conn->getLastErrorDetailed() );
                    conn.done();
                }
                client->clearSinceLastGetError();

                if ( errors.size() == 0 ){
                    result.appendNull( "err" );
                    handleWriteBacks( writebacks );
                    return true;
                }
                
                result.append( "err" , errors[0].c_str() );
                
                { // errs
                    BSONArrayBuilder all( result.subarrayStart( "errs" ) );
                    for ( unsigned i=0; i<errors.size(); i++ ){
                        all.append( errors[i].c_str() );
                    }
                    all.done();
                }

                { // errObjects
                    BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
                    for ( unsigned i=0; i<errorObjects.size(); i++ ){
                        all.append( errorObjects[i] );
                    }
                    all.done();
                }
                handleWriteBacks( writebacks );
                return true;
            }
Example #9
        bool run(OperationContext* txn, const string& dbname,
                  BSONObj& cmdObj,
                  int,
                  string& errmsg,
                  BSONObjBuilder& result,
                  bool fromRepl ) {

            //
            // Correct behavior here is very finicky.
            //
            // 1.  The first step is to append the error that occurred on the previous operation.
            // This adds an "err" field to the command, which is *not* the command failing.
            //
            // 2.  Next we parse and validate write concern options.  If these options are invalid
            // the command fails no matter what, even if we actually had an error earlier.  The
            // reason for checking here is to match legacy behavior on these kind of failures -
            // we'll still get an "err" field for the write error.
            //
            // 3.  If we had an error on the previous operation, we then return immediately.
            //
            // 4.  Finally, we actually enforce the write concern.  All errors *except* timeout are
            // reported with ok : 0.0, to match legacy behavior.
            //
            // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by 
            // the client (mongos) - in this case we *only* enforce the write concern if it is 
            // valid.
            //
            // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
            // err is null.
            //

            LastError *le = lastError.disableForCommand();

            // Always append lastOp and connectionId
            Client& c = cc();
            c.appendLastOp( result );

            // for sharding; also useful in general for debugging
            result.appendNumber( "connectionId" , c.getConnectionId() );

            OpTime lastOpTime;
            BSONField<OpTime> wOpTimeField("wOpTime");
            FieldParser::FieldState extracted = FieldParser::extract(cmdObj, wOpTimeField, 
                                                                     &lastOpTime, &errmsg);
            if (!extracted) {
                result.append("badGLE", cmdObj);
                appendCommandStatus(result, false, errmsg);
                return false;
            }
            bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
            if (!lastOpTimePresent) {
                // Use the client opTime if no wOpTime is specified
                lastOpTime = cc().getLastOp();
            }
            
            OID electionId;
            BSONField<OID> wElectionIdField("wElectionId");
            extracted = FieldParser::extract(cmdObj, wElectionIdField, 
                                             &electionId, &errmsg);
            if (!extracted) {
                result.append("badGLE", cmdObj);
                appendCommandStatus(result, false, errmsg);
                return false;
            }

            bool electionIdPresent = extracted != FieldParser::FIELD_NONE;
            bool errorOccurred = false;

            // Errors aren't reported when wOpTime is used
            if ( !lastOpTimePresent ) {
                if ( le->nPrev != 1 ) {
                    errorOccurred = LastError::noError.appendSelf( result, false );
                    le->appendSelfStatus( result );
                }
                else {
                    errorOccurred = le->appendSelf( result, false );
                }
            }

            BSONObj writeConcernDoc = cmdObj;
            // Use the default options if we have no gle options aside from wOpTime/wElectionId
            const int nFields = cmdObj.nFields();
            bool useDefaultGLEOptions = (nFields == 1) || 
                (nFields == 2 && lastOpTimePresent) ||
                (nFields == 3 && lastOpTimePresent && electionIdPresent);

            if ( useDefaultGLEOptions && getLastErrorDefault ) {
                writeConcernDoc = *getLastErrorDefault;
            }

            //
            // Validate write concern no matter what, this matches 2.4 behavior
            //

            WriteConcernOptions writeConcern;
            Status status = writeConcern.parse( writeConcernDoc );

            if ( status.isOK() ) {
                // Ensure options are valid for this host
                status = validateWriteConcern( writeConcern );
            }

            if ( !status.isOK() ) {
                result.append( "badGLE", writeConcernDoc );
                return appendCommandStatus( result, status );
            }

            // Don't wait for replication if there was an error reported - this matches 2.4 behavior
            if ( errorOccurred ) {
                dassert( !lastOpTimePresent );
                return true;
            }

            // No error occurred, so we won't duplicate these fields with write concern errors
            dassert( result.asTempObj()["err"].eoo() );
            dassert( result.asTempObj()["code"].eoo() );

            // If we got an electionId, make sure it matches
            if (electionIdPresent) {
                if (!theReplSet) {
                    // Ignore electionIds of 0 from mongos.
                    if (electionId != OID()) {
                        errmsg = "wElectionId passed but no replication active";
                        result.append("code", ErrorCodes::BadValue);
                        return false;
                    }
                } 
                else {
                    if (electionId != theReplSet->getElectionId()) {
                        LOG(3) << "oid passed in is " << electionId
                               << ", but our id is " << theReplSet->getElectionId();
                        errmsg = "election occurred after write";
                        result.append("code", ErrorCodes::WriteConcernFailed);
                        return false;
                    }
                }
            }

            cc().curop()->setMessage( "waiting for write concern" );

            WriteConcernResult wcResult;
            status = waitForWriteConcern( txn, writeConcern, lastOpTime, &wcResult );
            wcResult.appendTo( writeConcern, &result );

            // For backward compatibility with 2.4, wtimeout returns ok : 1.0
            if ( wcResult.wTimedOut ) {
                dassert( !wcResult.err.empty() ); // so we always report err
                dassert( !status.isOK() );
                result.append( "errmsg", "timed out waiting for slaves" );
                result.append( "code", status.code() );
                return true;
            }

            return appendCommandStatus( result, status );
        }
Example #10
    bool ClusterWriteCmd::run(OperationContext* txn, const string& dbName,
                               BSONObj& cmdObj,
                               int options,
                               string& errMsg,
                               BSONObjBuilder& result,
                               bool ) {

        BatchedCommandRequest request( _writeType );
        BatchedCommandResponse response;
        ClusterWriter writer( true /* autosplit */, 0 /* timeout */ );

        // NOTE: Sometimes this command is invoked with LE disabled for legacy writes
        LastError* cmdLastError = lastError.get( false );

        {
            // Disable the last error object for the duration of the write
            LastError::Disabled disableLastError( cmdLastError );

            // TODO: if we do namespace parsing, push this to the type
            if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {

                // Batch parse failure
                response.setOk( false );
                response.setErrCode( ErrorCodes::FailedToParse );
                response.setErrMessage( errMsg );
            }
            else {

                // Fixup the namespace to be a full ns internally
                NamespaceString nss( dbName, request.getNS() );
                request.setNS( nss.ns() );

                writer.write( request, &response );
            }

            dassert( response.isValid( NULL ) );
        }

        if ( cmdLastError ) {
            // Populate the lastError object based on the write response
            cmdLastError->reset();
            batchErrorToLastError( request, response, cmdLastError );
        }

        size_t numAttempts;
        if ( !response.getOk() ) {
            numAttempts = 0;
        } else if ( request.getOrdered() && response.isErrDetailsSet() ) {
            numAttempts = response.getErrDetailsAt(0)->getIndex() + 1; // Add one failed attempt
        } else {
            numAttempts = request.sizeWriteOps();
        }

        // TODO: increase opcounters by more than one
        if ( _writeType == BatchedCommandRequest::BatchType_Insert ) {
            for( size_t i = 0; i < numAttempts; ++i ) {
                globalOpCounters.gotInsert();
            }
        } else if ( _writeType == BatchedCommandRequest::BatchType_Update ) {
            for( size_t i = 0; i < numAttempts; ++i ) {
                globalOpCounters.gotUpdate();
            }
        } else if ( _writeType == BatchedCommandRequest::BatchType_Delete ) {
            for( size_t i = 0; i < numAttempts; ++i ) {
                globalOpCounters.gotDelete();
            }
        }

        // Save the last opTimes written on each shard for this client, to allow GLE to work
        if ( ClientInfo::exists() && writer.getStats().hasShardStats() ) {
            ClientInfo* clientInfo = ClientInfo::get( NULL );
            clientInfo->addHostOpTimes( writer.getStats().getShardStats().getWriteOpTimes() );
        }

        // TODO
        // There's a pending issue about how to report response here. If we use
        // the command infrastructure, we should reuse the 'errmsg' field. But
        // we have already filled that message inside the BatchCommandResponse.
        // return response.getOk();
        result.appendElements( response.toBSON() );
        return true;
    }
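
All ten handlers above revolve around the same idea: each client connection carries a mutable "last error" record that write operations fill in and that getLastError-style commands read back, disabling it for themselves first so the read does not clobber the state. Below is a minimal, self-contained sketch of that pattern in plain C++. The names (ErrorRecord, reportLastError, and so on) are invented for illustration and are not MongoDB's LastError API.

    // Illustrative sketch only: a simplified analogue of the per-connection
    // LastError pattern above. Not MongoDB code; names are hypothetical.
    #include <iostream>
    #include <optional>
    #include <string>

    struct ErrorRecord {
        std::optional<std::string> msg;   // last recorded error message, if any
        int code = 0;
        int nPrev = 0;                    // operations since the error was recorded

        void raise(int c, std::string m) { code = c; msg = std::move(m); nPrev = 1; }
        void ageOneOp()                  { if (msg) ++nPrev; }  // a later op succeeded
        void reset()                     { msg.reset(); code = 0; nPrev = 0; }
    };

    // One record per client connection; one thread per connection keeps it simple.
    thread_local ErrorRecord lastErrorRecord;

    // Analogue of a getLastError-style handler: report the error recorded by the
    // immediately preceding operation (nPrev == 1), otherwise a null "err".
    void reportLastError(std::ostream& out) {
        if (lastErrorRecord.msg && lastErrorRecord.nPrev == 1)
            out << "{ err: \"" << *lastErrorRecord.msg
                << "\", code: " << lastErrorRecord.code << ", ok: 1 }\n";
        else
            out << "{ err: null, ok: 1 }\n";
    }

    int main() {
        lastErrorRecord.raise(11000, "duplicate key");  // a write just failed
        reportLastError(std::cout);                     // reports the error

        lastErrorRecord.ageOneOp();                     // a later write succeeded
        reportLastError(std::cout);                     // err: null
        return 0;
    }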