Code example #1
File: namespace_string.cpp  Project: bjori/mongo
NamespaceString NamespaceString::makeDropPendingNamespace(const repl::OpTime& opTime) const {
    mongo::StringBuilder ss;
    ss << db() << "." << dropPendingNSPrefix;
    ss << opTime.getSecs() << "i" << opTime.getTimestamp().getInc() << "t" << opTime.getTerm();
    ss << "." << coll();
    return NamespaceString(ss.stringData().substr(0, MaxNsCollectionLen));
}
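For reference, a minimal usage sketch (not part of the original file; the literal value of dropPendingNSPrefix is an assumption) showing the shape of the generated name:

// Hypothetical usage sketch: assuming dropPendingNSPrefix is "system.drop.",
// calling this on "test.foo" with an optime whose timestamp is (secs=100,
// inc=2) and whose term is 1 yields "test.system.drop.100i2t1.foo",
// truncated to MaxNsCollectionLen if necessary.
repl::OpTime opTime(Timestamp(100, 2), 1);
NamespaceString dropPendingNss = NamespaceString("test.foo").makeDropPendingNamespace(opTime);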
Code example #2
/**
 * Updates the session state with the last write timestamp and transaction for that session.
 *
 * In the case of writes with transaction/statement id, this method will be recursively entered a
 * second time for the actual write to the transactions table. Since this write does not generate an
 * oplog entry, the recursion will stop at this point.
 */
void onWriteOpCompleted(OperationContext* opCtx,
                        const NamespaceString& nss,
                        Session* session,
                        std::vector<StmtId> stmtIdsWritten,
                        const repl::OpTime& lastStmtIdWriteOpTime,
                        Date_t lastStmtIdWriteDate) {
    if (lastStmtIdWriteOpTime.isNull())
        return;

    if (session) {
        session->onWriteOpCompletedOnPrimary(opCtx,
                                             *opCtx->getTxnNumber(),
                                             std::move(stmtIdsWritten),
                                             lastStmtIdWriteOpTime,
                                             lastStmtIdWriteDate);
    }
}
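A hedged call-site sketch (the surrounding variables and context are assumptions, not taken from this project): after the oplog entries for a retryable write have been logged, the caller passes the statement ids together with the optime and wall-clock time of the last entry; a null optime makes the helper a no-op.

// Hypothetical caller: "session" is the session checked out for this
// operation; lastOpTime and lastWriteDate describe the last oplog entry
// that was just written for statements 0 and 1.
std::vector<StmtId> stmtIdsWritten{0, 1};
onWriteOpCompleted(opCtx,
                   NamespaceString("test.foo"),
                   session,
                   std::move(stmtIdsWritten),
                   lastOpTime,
                   lastWriteDate);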
Code example #3
/**
 * Updates the session state with the last write timestamp and transaction for that session.
 *
 * In the case of writes with transaction/statement id, this method will be recursively entered a
 * second time for the actual write to the transactions table. Since this write does not generate an
 * oplog entry, the recursion will stop at this point.
 */
void onWriteOpCompleted(OperationContext* opCtx,
                        const NamespaceString& nss,
                        std::vector<StmtId> stmtIdsWritten,
                        const repl::OpTime& lastStmtIdWriteOpTime,
                        Date_t lastStmtIdWriteDate,
                        boost::optional<DurableTxnStateEnum> txnState) {
    if (lastStmtIdWriteOpTime.isNull())
        return;

    const auto txnParticipant = TransactionParticipant::get(opCtx);
    if (!txnParticipant)
        return;

    txnParticipant->onWriteOpCompletedOnPrimary(opCtx,
                                                *opCtx->getTxnNumber(),
                                                std::move(stmtIdsWritten),
                                                lastStmtIdWriteOpTime,
                                                lastStmtIdWriteDate,
                                                txnState);
}
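This later variant resolves the session through TransactionParticipant and threads an optional durable transaction state through to the update. A hedged illustration of the extra argument (the values are assumptions about typical callers, not from this file):

// Hypothetical uses of txnState:
//   - a plain retryable write outside a multi-document transaction passes
//     boost::none, leaving the recorded state untouched;
//   - a commit handler could pass DurableTxnStateEnum::kCommitted so the
//     config.transactions entry records the committed state.
onWriteOpCompleted(opCtx, nss, stmtIdsWritten, lastOpTime, lastWriteDate, boost::none);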
Code example #4
// Adds a wOpTime and a wElectionId field to a set of gle options
static BSONObj buildGLECmdWithOpTime(const BSONObj& gleOptions,
                                     const repl::OpTime& opTime,
                                     const OID& electionId) {
    BSONObjBuilder builder;
    BSONObjIterator it(gleOptions);

    for (int i = 0; it.more(); ++i) {
        BSONElement el = it.next();

        // Make sure first element is getLastError : 1
        if (i == 0) {
            StringData elName(el.fieldName());
            if (!elName.equalCaseInsensitive("getLastError")) {
                builder.append("getLastError", 1);
            }
        }

        builder.append(el);
    }
    opTime.append(&builder, "wOpTime");
    builder.appendOID("wElectionId", const_cast<OID*>(&electionId));
    return builder.obj();
}
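A hedged illustration of the transformation (the option values are invented for the example, and the exact encoding of wOpTime depends on how repl::OpTime::append serializes it): given gleOptions of { getLastError: 1, w: "majority", wtimeout: 5000 }, the call

BSONObj gleCmd = buildGLECmdWithOpTime(gleOptions, opTime, electionId);

returns roughly

{ getLastError: 1, w: "majority", wtimeout: 5000, wOpTime: <serialized opTime>, wElectionId: <electionId> }

If the first element of gleOptions is not (case-insensitively) named "getLastError", the helper prepends getLastError: 1 before copying the remaining fields.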
Code example #5
File: get_last_error.cpp  Project: ArgusTek/mongo
        bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result) {

            //
            // Correct behavior here is very finicky.
            //
            // 1.  The first step is to append the error that occurred on the previous operation.
            // This adds an "err" field to the command, which is *not* the command failing.
            //
            // 2.  Next we parse and validate write concern options.  If these options are invalid
            // the command fails no matter what, even if we actually had an error earlier.  The
            // reason for checking here is to match legacy behavior on these kinds of failures -
            // we'll still get an "err" field for the write error.
            //
            // 3.  If we had an error on the previous operation, we then return immediately.
            //
            // 4.  Finally, we actually enforce the write concern.  All errors *except* timeout are
            // reported with ok : 0.0, to match legacy behavior.
            //
            // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by 
            // the client (mongos) - in this case we *only* enforce the write concern if it is 
            // valid.
            //
            // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
            // err is null.
            //

            LastError *le = &LastError::get(txn->getClient());
            le->disable();

            // Always append lastOp and connectionId
            Client& c = *txn->getClient();
            if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
                repl::ReplicationCoordinator::modeReplSet) {
                const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
                if (!lastOp.isNull()) {
                    result.append("lastOp", lastOp.getTimestamp());
                    // TODO(siyuan) Add "lastOpTerm"
                }
            }

            // for sharding; also useful in general for debugging
            result.appendNumber( "connectionId" , c.getConnectionId() );

            Timestamp lastTimestamp;
            BSONField<Timestamp> wOpTimeField("wOpTime");
            FieldParser::FieldState extracted = FieldParser::extract(cmdObj, wOpTimeField, 
                                                                     &lastTimestamp, &errmsg);
            if (!extracted) {
                result.append("badGLE", cmdObj);
                appendCommandStatus(result, false, errmsg);
                return false;
            }

            repl::OpTime lastOpTime;
            bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
            if (!lastOpTimePresent) {
                // Use the client opTime if no wOpTime is specified
                lastOpTime = repl::ReplClientInfo::forClient(c).getLastOp();
                // TODO(siyuan) Fix mongos to supply wOpTimeTerm, then parse out that value here
            } else {
                // TODO(siyuan) Don't use the default term after fixing mongos.
                lastOpTime = repl::OpTime(lastTimestamp, repl::OpTime::kDefaultTerm);
            }
            
            OID electionId;
            BSONField<OID> wElectionIdField("wElectionId");
            extracted = FieldParser::extract(cmdObj, wElectionIdField, 
                                             &electionId, &errmsg);
            if (!extracted) {
                result.append("badGLE", cmdObj);
                appendCommandStatus(result, false, errmsg);
                return false;
            }

            bool electionIdPresent = extracted != FieldParser::FIELD_NONE;
            bool errorOccurred = false;

            // Errors aren't reported when wOpTime is used
            if ( !lastOpTimePresent ) {
                if ( le->getNPrev() != 1 ) {
                    errorOccurred = LastError::noError.appendSelf( result, false );
                }
                else {
                    errorOccurred = le->appendSelf( result, false );
                }
            }

            BSONObj writeConcernDoc = cmdObj;
            // Use the default options if we have no gle options aside from wOpTime/wElectionId
            const int nFields = cmdObj.nFields();
            bool useDefaultGLEOptions = (nFields == 1) || 
                (nFields == 2 && lastOpTimePresent) ||
                (nFields == 3 && lastOpTimePresent && electionIdPresent);

            WriteConcernOptions writeConcern;

            if (useDefaultGLEOptions) {
                writeConcern = repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
            }

            Status status = writeConcern.parse( writeConcernDoc );

            //
            // Validate write concern no matter what, this matches 2.4 behavior
            //


            if ( status.isOK() ) {
                // Ensure options are valid for this host
                status = validateWriteConcern( writeConcern );
            }

            if ( !status.isOK() ) {
                result.append( "badGLE", writeConcernDoc );
                return appendCommandStatus( result, status );
            }

            // Don't wait for replication if there was an error reported - this matches 2.4 behavior
            if ( errorOccurred ) {
                dassert( !lastOpTimePresent );
                return true;
            }

            // No error occurred, so we won't duplicate these fields with write concern errors
            dassert( result.asTempObj()["err"].eoo() );
            dassert( result.asTempObj()["code"].eoo() );

            // If we got an electionId, make sure it matches
            if (electionIdPresent) {
                if (repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
                        repl::ReplicationCoordinator::modeReplSet) {
                    // Ignore electionIds of 0 from mongos.
                    if (electionId != OID()) {
                        errmsg = "wElectionId passed but no replication active";
                        result.append("code", ErrorCodes::BadValue);
                        return false;
                    }
                } 
                else {
                    if (electionId != repl::getGlobalReplicationCoordinator()->getElectionId()) {
                        LOG(3) << "oid passed in is " << electionId
                               << ", but our id is "
                               << repl::getGlobalReplicationCoordinator()->getElectionId();
                        errmsg = "election occurred after write";
                        result.append("code", ErrorCodes::WriteConcernFailed);
                        return false;
                    }
                }
            }

            txn->setWriteConcern(writeConcern);
            txn->setMessage( "waiting for write concern" );

            WriteConcernResult wcResult;
            status = waitForWriteConcern( txn, lastOpTime, &wcResult );
            wcResult.appendTo( writeConcern, &result );

            // For backward compatibility with 2.4, wtimeout returns ok : 1.0
            if ( wcResult.wTimedOut ) {
                dassert( !wcResult.err.empty() ); // so we always report err
                dassert( !status.isOK() );
                result.append( "errmsg", "timed out waiting for slaves" );
                result.append( "code", status.code() );
                return true;
            }

            return appendCommandStatus( result, status );
        }
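Tying examples #4 and #5 together, a hedged sketch of a command that exercises the wOpTime/wElectionId path (the values are invented, and the wOpTime encoding differs between server versions):

// Hypothetical command forwarded by mongos:
// { getLastError: 1, wOpTime: Timestamp(1500000000, 3), wElectionId: <election id OID> }

With this document, nFields is 3 and both wOpTime and wElectionId are present, so useDefaultGLEOptions is true and the command falls back to the replica set's getLastError defaults rather than treating those fields as user-supplied write concern options; because lastOpTimePresent is true, no "err" from the previous operation is reported.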