StatusWith<long long> ShardingCatalogManager::_runCountCommandOnConfig(OperationContext* opCtx,
                                                                       const NamespaceString& nss,
                                                                       BSONObj query) {
    // Build a {count: <coll>, query: <filter>} command to run against the config server.
    BSONObjBuilder cmdBuilder;
    cmdBuilder.append("count", nss.coll());
    cmdBuilder.append("query", query);

    auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
    auto swResponse =
        configShard->runCommandWithFixedRetryAttempts(opCtx,
                                                      kConfigReadSelector,
                                                      nss.db().toString(),
                                                      cmdBuilder.done(),
                                                      Shard::kDefaultConfigCommandTimeout,
                                                      Shard::RetryPolicy::kIdempotent);
    // Propagate either a failure to reach the config server or a failed command.
    if (!swResponse.isOK()) {
        return swResponse.getStatus();
    }
    if (!swResponse.getValue().commandStatus.isOK()) {
        return swResponse.getValue().commandStatus;
    }

    auto responseObj = std::move(swResponse.getValue().response);

    // The count command reports the matching-document count in the "n" field.
    long long count;
    auto extractStatus = bsonExtractIntegerField(responseObj, "n", &count);
    if (!extractStatus.isOK()) {
        return extractStatus;
    }

    return count;
}
// Example #2
Shard::CommandResponse TransactionRouter::_commitWithRecoveryToken(
    OperationContext* opCtx, const TxnRecoveryToken& recoveryToken) {
    const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
    const auto& coordinatorId = recoveryToken.getShardId();

    // Assemble the coordinateCommitTransaction command (empty participant list for the
    // recovery path) with the transaction fields for the coordinator attached.
    auto attachedCoordinateCommitCmd = [&] {
        CoordinateCommitTransaction cmd;
        cmd.setDbName("admin");
        cmd.setParticipants({});

        auto rawCmd = cmd.toBSON(
            BSON(WriteConcernOptions::kWriteConcernField << opCtx->getWriteConcern().toBSON()));

        // Reuse the tracked participant for the coordinator if present, otherwise create one.
        auto tracked = getParticipant(coordinatorId);
        return (tracked ? tracked : &_createParticipant(coordinatorId))
            ->attachTxnFieldsIfNeeded(rawCmd, false);
    }();

    _initiatedTwoPhaseCommit = true;

    auto coordinatorShard = uassertStatusOK(shardRegistry->getShard(opCtx, coordinatorId));
    return uassertStatusOK(coordinatorShard->runCommandWithFixedRetryAttempts(
        opCtx,
        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
        "admin",
        attachedCoordinateCommitCmd,
        Shard::RetryPolicy::kIdempotent));
}
// Example #3
Shard::CommandResponse TransactionRouter::_commitSingleShardTransaction(OperationContext* opCtx) {
    auto shardRegistry = Grid::get(opCtx)->shardRegistry();

    // Exactly one participant is expected here: commit directly against it, with no
    // two-phase commit coordinator involved.
    const auto participantIt = _participants.cbegin();
    const auto& shardId = participantIt->first;
    const auto& participant = participantIt->second;

    auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardId));

    LOG(0) << _txnIdToString()
           << " Committing single shard transaction, single participant: " << shardId;

    CommitTransaction commitCmd;
    commitCmd.setDbName(NamespaceString::kAdminDb);

    // Serialize the command with the caller's writeConcern, then attach the transaction
    // fields for this participant before dispatching.
    auto commitCmdObj = commitCmd.toBSON(
        BSON(WriteConcernOptions::kWriteConcernField << opCtx->getWriteConcern().toBSON()));

    return uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
        opCtx,
        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
        "admin",
        participant.attachTxnFieldsIfNeeded(commitCmdObj, false),
        Shard::RetryPolicy::kIdempotent));
}
// Example #4
StatusWith<CachedDatabaseInfo> createShardDatabase(OperationContext* opCtx, StringData dbName) {
    auto catalogCache = Grid::get(opCtx)->catalogCache();
    auto dbStatus = catalogCache->getDatabase(opCtx, dbName);

    if (dbStatus == ErrorCodes::NamespaceNotFound) {
        // The database is not known yet: ask the config server primary to create it,
        // then look it up again through the catalog cache.
        ConfigsvrCreateDatabase createRequest(dbName.toString());
        createRequest.setDbName(NamespaceString::kAdminDb);

        auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();

        auto createDbStatus =
            uassertStatusOK(
                configShard->runCommandWithFixedRetryAttempts(
                    opCtx,
                    ReadPreferenceSetting(ReadPreference::PrimaryOnly),
                    "admin",
                    CommandHelpers::appendMajorityWriteConcern(createRequest.toBSON({})),
                    Shard::RetryPolicy::kIdempotent))
                .commandStatus;

        if (createDbStatus.isOK() || createDbStatus == ErrorCodes::NamespaceExists) {
            // Created (or lost a race to another creator, which is equivalent): retry lookup.
            dbStatus = catalogCache->getDatabase(opCtx, dbName);
        } else {
            dbStatus = createDbStatus;
        }
    }

    if (!dbStatus.isOK()) {
        return dbStatus.getStatus().withContext(str::stream()
                                                << "Database " << dbName << " not found");
    }
    return dbStatus;
}
// Example #5
Status ShardingCatalogManager::setFeatureCompatibilityVersionOnShards(OperationContext* opCtx,
                                                                      const BSONObj& cmdObj) {

    // No shards should be added until we have forwarded featureCompatibilityVersion to all shards.
    Lock::SharedLock lk(opCtx->lockState(), _kShardMembershipLock);

    // We do a direct read of the shards collection with local readConcern so no shards are missed,
    // but don't go through the ShardRegistry to prevent it from caching data that may be rolled
    // back.
    const auto opTimeWithShards = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getAllShards(
        opCtx, repl::ReadConcernLevel::kLocalReadConcern));

    for (const auto& shardType : opTimeWithShards.value) {
        const auto shardStatus =
            Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardType.getName());
        if (!shardStatus.isOK()) {
            // The shard vanished between the catalog read and the registry lookup; skip it.
            continue;
        }
        const auto shard = shardStatus.getValue();

        auto response = shard->runCommandWithFixedRetryAttempts(
            opCtx,
            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
            "admin",
            cmdObj,
            Shard::RetryPolicy::kIdempotent);

        // Collapse remote-invocation status, command status and writeConcern status into a
        // single Status, checked in that order — same semantics as the previous explicit
        // three-way check, and consistent with the other call sites in this file.
        auto effectiveStatus = Shard::CommandResponse::getEffectiveStatus(response);
        if (!effectiveStatus.isOK()) {
            return effectiveStatus;
        }
    }

    return Status::OK();
}
// Example #6
// Commits a transaction that has multiple participants: builds the full participant
// list and sends coordinateCommitTransaction to the coordinator shard, which drives
// the commit protocol across all participants.
Shard::CommandResponse TransactionRouter::_commitMultiShardTransaction(OperationContext* opCtx) {
    invariant(_coordinatorId);
    auto coordinatorIter = _participants.find(*_coordinatorId);
    invariant(coordinatorIter != _participants.end());

    // Every tracked participant (including the coordinator itself) must appear in the
    // coordinateCommit participant list.
    std::vector<CommitParticipant> participantList;
    for (const auto& participantEntry : _participants) {
        CommitParticipant commitParticipant;
        commitParticipant.setShardId(participantEntry.first);
        participantList.push_back(std::move(commitParticipant));
    }

    auto coordinatorShard =
        uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, *_coordinatorId));

    // Test-only failpoint: redirect coordinateCommit to the config server primary instead
    // of the real coordinator shard.
    if (MONGO_FAIL_POINT(sendCoordinateCommitToConfigServer)) {
        LOG(0) << "Sending coordinateCommit for transaction " << *opCtx->getTxnNumber()
               << " on session " << opCtx->getLogicalSessionId()->toBSON()
               << " to config server rather than actual coordinator because failpoint is active";

        coordinatorShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();

        // Only needed the first time commit is attempted (i.e. before two-phase commit
        // was initiated on a prior attempt).
        if (!_initiatedTwoPhaseCommit) {
            // Send a fake transaction statement to the config server primary so that the config
            // server primary sets up state in memory to receive coordinateCommit.
            auto cmdResponse = coordinatorShard->runCommandWithFixedRetryAttempts(
                opCtx,
                ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                "dummy",
                coordinatorIter->second.attachTxnFieldsIfNeeded(BSON("distinct"
                                                                     << "dummy"
                                                                     << "key"
                                                                     << "dummy"),
                                                                true),
                Shard::RetryPolicy::kIdempotent);
            uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponse));

            // Abort the fake transaction on the config server to release the actual transaction's
            // resources.
            cmdResponse = coordinatorShard->runCommandWithFixedRetryAttempts(
                opCtx,
                ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                "admin",
                coordinatorIter->second.attachTxnFieldsIfNeeded(BSON("abortTransaction" << 1),
                                                                false),
                Shard::RetryPolicy::kIdempotent);
            uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponse));
        }
    }

    CoordinateCommitTransaction coordinateCommitCmd;
    coordinateCommitCmd.setDbName("admin");
    coordinateCommitCmd.setParticipants(participantList);

    // Mark before dispatch so a retry of this commit takes the already-initiated path.
    _initiatedTwoPhaseCommit = true;

    LOG(0) << _txnIdToString()
           << " Committing multi shard transaction, coordinator: " << *_coordinatorId;

    // The command carries the caller's writeConcern and the coordinator's transaction
    // fields; the response (or a failure) is surfaced to the caller via uassertStatusOK.
    return uassertStatusOK(coordinatorShard->runCommandWithFixedRetryAttempts(
        opCtx,
        ReadPreferenceSetting{ReadPreference::PrimaryOnly},
        "admin",
        coordinatorIter->second.attachTxnFieldsIfNeeded(
            coordinateCommitCmd.toBSON(
                BSON(WriteConcernOptions::kWriteConcernField << opCtx->getWriteConcern().toBSON())),
            false),
        Shard::RetryPolicy::kIdempotent));
}