Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* txn,
                                                                         const BSONObj& config) {
    try {
        createOplog(txn);

        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            ScopedTransaction scopedXact(txn, MODE_X);
            Lock::GlobalWrite globalWrite(txn->lockState());

            WriteUnitOfWork wuow(txn);
            Helpers::putSingleton(txn, configCollectionName, config);
            const auto msgObj = BSON("msg"
                                     << "initiating set");
            getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, msgObj);
            wuow.commit();
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");

        // This initializes the minvalid document with a null "ts" because older versions (<=3.2)
        // get angry if the minValid document is present but doesn't have a "ts" field.
        // Consider removing this once we no longer need to support downgrading to 3.2.
        _storageInterface->setMinValidToAtLeast(txn, {});

        FeatureCompatibilityVersion::setIfCleanStartup(txn, _storageInterface);
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
    return Status::OK();
}
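Code example #2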
    void ReplicationCoordinatorExternalStateImpl::initiateOplog(OperationContext* txn) {
        createOplog(txn);

        ScopedTransaction scopedXact(txn, MODE_X);
        Lock::GlobalWrite globalWrite(txn->lockState());

        WriteUnitOfWork wuow(txn);
        getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSON("msg" << "initiating set"));
        wuow.commit();
    }
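This earlier variant of initiateOplog writes the oplog seed message with no
write-conflict protection: a WriteConflictException thrown by the storage engine
propagates straight to the caller. Code example #6 below shows the same body
wrapped in the MONGO_WRITE_CONFLICT_RETRY_LOOP macros.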
Code example #3
Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* opCtx,
                                                                         const BSONObj& config) {
    try {
        createOplog(opCtx);

        writeConflictRetry(opCtx,
                           "initiate oplog entry",
                           NamespaceString::kRsOplogNamespace.toString(),
                           [this, &opCtx, &config] {
                               Lock::GlobalWrite globalWrite(opCtx);

                               WriteUnitOfWork wuow(opCtx);
                               Helpers::putSingleton(opCtx, configCollectionName, config);
                               const auto msgObj = BSON("msg"
                                                        << "initiating set");
                               _service->getOpObserver()->onOpMessage(opCtx, msgObj);
                               wuow.commit();
                               // ReplSetTest assumes that immediately after the replSetInitiate
                               // command returns, it can allow other nodes to initial sync with no
                               // retries and they will succeed.  Unfortunately, initial sync will
                               // fail if it finds its sync source has an empty oplog.  Thus, we
                               // need to wait here until the seed document is visible in our oplog.
                               AutoGetCollection oplog(
                                   opCtx, NamespaceString::kRsOplogNamespace, MODE_IS);
                               waitForAllEarlierOplogWritesToBeVisible(opCtx);
                           });

        // Set UUIDs for all non-replicated collections. This is necessary for independent replica
        // sets and config server replica sets started with no data files because collections in
        // local are created prior to the featureCompatibilityVersion being set to 3.6, so the
        // collections are not created with UUIDs. We exclude ShardServers when adding UUIDs to
        // non-replicated collections on the primary because ShardServers are started up by default
        // with featureCompatibilityVersion 3.4, so we don't want to assign UUIDs to them until the
        // cluster's featureCompatibilityVersion is explicitly set to 3.6 by the config server. The
        // below UUID addition for non-replicated collections only occurs on the primary; UUIDs are
        // added to non-replicated collections on secondaries during InitialSync. When the config
        // server sets the featureCompatibilityVersion to 3.6, the shard primary will add UUIDs to
        // all the collections that need them. One special case here is if a shard is already in
        // featureCompatibilityVersion 3.6 and a new node is started up with --shardsvr and added to
        // that shard, the new node will still start up with featureCompatibilityVersion 3.4 and
        // need to have UUIDs added to each collection. These UUIDs are added during InitialSync,
        // because the new node is a secondary.
        if (serverGlobalParams.clusterRole != ClusterRole::ShardServer &&
            FeatureCompatibilityVersion::isCleanStartUp()) {
            auto schemaStatus = updateUUIDSchemaVersionNonReplicated(opCtx, true);
            if (!schemaStatus.isOK()) {
                return schemaStatus;
            }
        }
        FeatureCompatibilityVersion::setIfCleanStartup(opCtx, _storageInterface);
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
    return Status::OK();
}
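Code example #3 is the later form of the same initialization: the BEGIN/END retry
macros are replaced by the writeConflictRetry helper, which takes the operation
description, the namespace, and the critical section as a lambda. In essence the
helper reruns the lambda whenever the storage engine throws WriteConflictException.
A simplified sketch of that shape (the real helper in
mongo/db/concurrency/write_conflict_exception.h also skips retrying when already
inside a WriteUnitOfWork and records the conflict on CurOp):

// Simplified sketch of the writeConflictRetry shape; not the verbatim helper.
template <typename F>
auto writeConflictRetrySketch(OperationContext* opCtx, StringData opStr, StringData ns, F&& f) {
    int attempts = 0;
    while (true) {
        try {
            return f();  // success: the result (or void) propagates out of the loop
        } catch (const WriteConflictException&) {
            WriteConflictException::logAndBackoff(attempts++, opStr, ns);
            opCtx->recoveryUnit()->abandonSnapshot();  // drop the conflicting snapshot before retrying
        }
    }
}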
Code example #4
    TEST(DConcurrency, writelocktryTimeout) {
        LockerImpl ls;
        writelocktry globalWrite(&ls, 0);
        ASSERT(globalWrite.got());

        {
            LockerImpl lsTry;
            writelocktry lockTry(&lsTry, 1);
            ASSERT(!lockTry.got());
        }
    }
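writelocktry is the try-lock counterpart exercised above: the second constructor
argument is a timeout in milliseconds, and got() reports whether the global write
lock was acquired within it. A hypothetical call site (tryExclusiveSection and
doExclusiveWork are illustrative names, not from the original code):

// Hypothetical call site for writelocktry; doExclusiveWork() is illustrative.
void doExclusiveWork();

bool tryExclusiveSection(Locker* locker) {
    writelocktry lk(locker, 1000);  // wait at most 1000 ms for the global W lock
    if (!lk.got()) {
        return false;  // timed out while another locker held the global lock
    }
    doExclusiveWork();  // runs under the global write lock
    return true;        // the lock is released when 'lk' leaves scope
}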
Code example #5
    TEST(DConcurrency, TempReleaseGlobalWrite) {
        LockerImpl ls;
        Lock::GlobalWrite globalWrite(&ls);

        {
            Lock::TempRelease tempRelease(&ls);
            ASSERT(!ls.isLocked());
        }

        ASSERT(ls.isW());
    }
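Lock::TempRelease is what the test exercises: its constructor releases the locks
the Locker currently holds (when they are not held recursively), so isLocked() is
false inside the inner scope, and its destructor reacquires them, which is why
isW() holds again after the block ends.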
Code example #6
    void ReplicationCoordinatorExternalStateImpl::initiateOplog(OperationContext* txn) {
        createOplog(txn);

        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            ScopedTransaction scopedXact(txn, MODE_X);
            Lock::GlobalWrite globalWrite(txn->lockState());

            WriteUnitOfWork wuow(txn);
            getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, BSON("msg" << "initiating set"));
            wuow.commit();
        } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
    }
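This is the retry-macro era of the initiateOplog shown near the top of the page.
Expanded, the BEGIN/END pair is roughly an infinite loop in which the END macro
supplies the catch handler, along the lines of this simplified sketch (the real
macros in write_conflict_exception.h also bump CurOp's write-conflict counter):

// Rough, simplified expansion of the retry macro pair; not the verbatim macros.
int attempts = 0;
while (true) {
    try {
        // ... the body between BEGIN and END runs here ...
    } catch (const WriteConflictException&) {
        WriteConflictException::logAndBackoff(attempts++, "initiate oplog entry", "local.oplog.rs");
        continue;  // re-run the body after backing off
    }
    break;  // the body completed without a write conflict
}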
Code example #7
    TEST(DConcurrency, GlobalWriteAndGlobalRead) {
        LockerImpl ls;

        Lock::GlobalWrite globalWrite(&ls);
        ASSERT(ls.isW());

        {
            Lock::GlobalRead globalRead(&ls);
            ASSERT(ls.isW());
        }

        ASSERT(ls.isW());
    }
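The point of this test is that a GlobalRead nested inside a GlobalWrite does not
downgrade the lock: isW() stays true throughout. For contrast, here is a sketch of
a GlobalRead taken on its own, modeled on the neighboring DConcurrency tests
(isR() checks for global shared mode):

    TEST(DConcurrency, GlobalReadAloneSketch) {
        LockerImpl ls;
        Lock::GlobalRead globalRead(&ls);
        ASSERT(ls.isR());  // shared mode, since no stronger lock is already held
    }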
Code example #8
Status ReplicationCoordinatorExternalStateImpl::runRepairOnLocalDB(OperationContext* opCtx) {
    try {
        Lock::GlobalWrite globalWrite(opCtx);
        StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();

        if (!engine->isMmapV1()) {
            return Status::OK();
        }

        UnreplicatedWritesBlock uwb(opCtx);
        Status status = repairDatabase(opCtx, engine, localDbName, false, false);
        if (!status.isOK()) {
            return status;  // propagate a repair failure instead of dropping it
        }

        // Open the database before returning; repair leaves it closed.
        dbHolder().openDb(opCtx, localDbName);
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
    return Status::OK();
}
Code example #9
Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* txn,
                                                                         const BSONObj& config,
                                                                         bool updateReplOpTime) {
    try {
        createOplog(txn, rsOplogName, true);

        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            ScopedTransaction scopedXact(txn, MODE_X);
            Lock::GlobalWrite globalWrite(txn->lockState());

            WriteUnitOfWork wuow(txn);
            Helpers::putSingleton(txn, configCollectionName, config);
            const auto msgObj = BSON("msg"
                                     << "initiating set");
            if (updateReplOpTime) {
                getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, msgObj);
            } else {
                // 'updateReplOpTime' is false when called from the replSetInitiate command when the
                // server is running with replication disabled. We bypass onOpMessage to invoke
                // _logOp directly so that we can override the replication mode and keep _logOp from
                // updating the replication coordinator's op time (illegal operation when
                // replication is not enabled).
                repl::oplogCheckCloseDatabase(txn, nullptr);
                repl::_logOp(txn,
                             "n",
                             "",
                             msgObj,
                             nullptr,
                             false,
                             rsOplogName,
                             ReplicationCoordinator::modeReplSet,
                             updateReplOpTime);
                repl::oplogCheckCloseDatabase(txn, nullptr);
            }
            wuow.commit();
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
    return Status::OK();
}
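Code example #10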
Status ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(OperationContext* txn,
                                                                         const BSONObj& config) {
    try {
        createOplog(txn);

        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            ScopedTransaction scopedXact(txn, MODE_X);
            Lock::GlobalWrite globalWrite(txn->lockState());

            WriteUnitOfWork wuow(txn);
            Helpers::putSingleton(txn, configCollectionName, config);
            const auto msgObj = BSON("msg"
                                     << "initiating set");
            getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, msgObj);
            wuow.commit();
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "initiate oplog entry", "local.oplog.rs");
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
    return Status::OK();
}
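Code examples #9 and #10 show the same function with and without the
updateReplOpTime parameter: when the parameter is false, #9 bypasses onOpMessage
and calls repl::_logOp directly so that initiating a set with replication disabled
does not advance the replication coordinator's op time; #10 has no such escape
hatch and always records the seed message through onOpMessage.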
Code example #11
Status RollbackImpl::_transitionToRollback(OperationContext* opCtx) {
    invariant(opCtx);
    if (_isInShutdown()) {
        return Status(ErrorCodes::ShutdownInProgress, "rollback shutting down");
    }

    log() << "transition to ROLLBACK";
    {
        Lock::GlobalWrite globalWrite(opCtx);

        auto status = _replicationCoordinator->setFollowerMode(MemberState::RS_ROLLBACK);
        if (!status.isOK()) {
            status.addContext(str::stream() << "Cannot transition from "
                                            << _replicationCoordinator->getMemberState().toString()
                                            << " to "
                                            << MemberState(MemberState::RS_ROLLBACK).toString());
            log() << status;
            return status;
        }
    }
    return Status::OK();
}
Code example #12
    TEST(DConcurrency, GlobalWrite) {
        LockerImpl ls;
        Lock::GlobalWrite globalWrite(&ls);
        ASSERT(ls.isW());
    }
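Common to all of these examples is the RAII contract of Lock::GlobalWrite: the
constructor blocks until the global lock is granted in exclusive (W) mode, and the
destructor releases it, so the critical section is exactly the object's lexical
scope even when the body throws. Schematically (an illustrative sketch, not a
specific call site from the MongoDB tree):

// Illustrative sketch of the shared RAII shape.
void doGloballyExclusiveWork(OperationContext* opCtx) {
    Lock::GlobalWrite globalWrite(opCtx);  // blocks until global MODE_X (W) is granted
    // ... work that must not run concurrently with any other global-lock holder ...
}  // the destructor releases the lock, including on exceptions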