Beispiel #1
0
// Determines whether the error in 'batchStatus' was caused by the collection being
// dropped on the sync source. Issues a 'find' by UUID (batchSize 0) against the source;
// if the namespace is gone (NamespaceNotFound) or the find returns a drop-pending
// namespace, the clone is completed with Status::OK(), otherwise the original
// 'batchStatus' is reported. 'lk' is the caller's lock on '_mutex' and must be held.
void CollectionCloner::_verifyCollectionWasDropped(
    const stdx::unique_lock<stdx::mutex>& lk,
    Status batchStatus,
    std::shared_ptr<OnCompletionGuard> onCompletionGuard,
    OperationContext* opCtx) {
    // If we already have a _verifyCollectionDroppedScheduler, just return; the existing
    // scheduler will take care of cleaning up.
    if (_verifyCollectionDroppedScheduler) {
        return;
    }
    // 'find' by UUID with batchSize 0: establishes a cursor without fetching any
    // documents — enough to learn whether the namespace still exists on the source.
    BSONObjBuilder cmdObj;
    _options.uuid->appendToBuilder(&cmdObj, "find");
    cmdObj.append("batchSize", 0);
    _verifyCollectionDroppedScheduler = stdx::make_unique<RemoteCommandRetryScheduler>(
        _executor,
        RemoteCommandRequest(_source,
                             _sourceNss.db().toString(),
                             cmdObj.obj(),
                             ReadPreferenceSetting::secondaryPreferredMetadata(),
                             opCtx,
                             RemoteCommandRequest::kNoTimeout),
        [this, batchStatus, onCompletionGuard](const RemoteCommandCallbackArgs& args) {
            // If the attempt to determine if the collection was dropped fails for any reason other
            // than NamespaceNotFound, return the original error code.
            //
            // Otherwise, if the collection was dropped, either the error will be NamespaceNotFound,
            // or it will be a drop-pending collection and the find will succeed and give us a
            // collection with a drop-pending name.
            UniqueLock lk(_mutex);
            Status finalStatus(batchStatus);
            if (args.response.isOK()) {
                auto response = CursorResponse::parseFromBSON(args.response.data);
                if (response.getStatus().code() == ErrorCodes::NamespaceNotFound ||
                    (response.isOK() && response.getValue().getNSS().isDropPendingNamespace())) {
                    log() << "CollectionCloner ns: '" << _sourceNss.ns() << "' uuid: UUID(\""
                          << *_options.uuid << "\") stopped because collection was dropped.";
                    // Drop confirmed: treat the clone as complete rather than failed.
                    finalStatus = Status::OK();
                } else if (!response.isOK()) {
                    log() << "CollectionCloner received an unexpected error when verifying drop of "
                             "ns: '"
                          << _sourceNss.ns() << "' uuid: UUID(\"" << *_options.uuid
                          << "\"), status " << response.getStatus();
                }
            } else {
                // The verification command itself failed; keep the original 'batchStatus'.
                log() << "CollectionCloner is unable to verify drop of ns: '" << _sourceNss.ns()
                      << "' uuid: UUID(\"" << *_options.uuid << "\"), status "
                      << args.response.status;
            }
            onCompletionGuard->setResultAndCancelRemainingWork_inlock(lk, finalStatus);
        },
        RemoteCommandRetryScheduler::makeNoRetryPolicy());

    auto status = _verifyCollectionDroppedScheduler->startup();
    if (!status.isOK()) {
        log() << "CollectionCloner is unable to start verification of ns: '" << _sourceNss.ns()
              << "' uuid: UUID(\"" << *_options.uuid << "\"), status " << status;
        // If we can't run the command, assume this wasn't a drop and just use the original error.
        onCompletionGuard->setResultAndCancelRemainingWork_inlock(lk, batchStatus);
    }
}
Beispiel #2
0
    // Schedules a single replSetUpdatePosition report to '_target', unless one is
    // already outstanding (in which case a re-run is queued via '_willRunAgain').
    // Caller must hold the Reporter's lock ("_inlock" convention — confirm at call sites).
    Status Reporter::_schedule_inlock() {
        // A previously recorded failure is sticky; surface it without scheduling.
        if (!_status.isOK()) {
            return _status;
        }

        // A report is already in flight — remember to run again once it completes
        // rather than issuing a concurrent request.
        if (_active) {
            _willRunAgain = true;
            return _status;
        }

        LOG(2) << "Reporter scheduling report to : " << _target;

        _willRunAgain = false;

        // Build the update-position command from the current position source.
        BSONObjBuilder commandBuilder;
        _updatePositionSource->prepareReplSetUpdatePositionCommand(&commandBuilder);

        auto scheduleResult = _executor->scheduleRemoteCommand(
            RemoteCommandRequest(_target, "admin", commandBuilder.obj()),
            stdx::bind(&Reporter::_callback, this, stdx::placeholders::_1));
        if (!scheduleResult.isOK()) {
            // Record the failure so subsequent calls short-circuit.
            _status = scheduleResult.getStatus();
            LOG(2) << "Reporter failed to schedule with status: " << _status;
            return _status;
        }

        // Remember the handle so the in-flight command can be tracked/cancelled.
        _active = true;
        _remoteCommandCallbackHandle = scheduleResult.getValue();
        return Status::OK();
    }
Beispiel #3
0
    // Builds one replSetRequestVotes request per target node, describing this
    // candidate (term, id, config version) and its last committed oplog entry.
    // Returns the list of requests to be scheduled by the caller.
    std::vector<RemoteCommandRequest>
    VoteRequester::Algorithm::getRequests() const {
        BSONObjBuilder requestVotesCmdBuilder;
        requestVotesCmdBuilder.append("replSetRequestVotes", 1);
        requestVotesCmdBuilder.append("setName", _rsConfig.getReplSetName());
        requestVotesCmdBuilder.append("term", _term);
        requestVotesCmdBuilder.append("candidateId", _candidateId);
        requestVotesCmdBuilder.append("configVersion", _rsConfig.getConfigVersion());

        // Embed the candidate's last committed optime as {ts, term}.
        BSONObjBuilder lastCommittedOp(requestVotesCmdBuilder.subobjStart("lastCommittedOp"));
        lastCommittedOp.append("ts", _lastOplogEntry.getTimestamp());
        lastCommittedOp.append("term", _lastOplogEntry.getTerm());
        lastCommittedOp.done();

        const BSONObj requestVotesCmd = requestVotesCmdBuilder.obj();

        std::vector<RemoteCommandRequest> requests;
        // Exactly one request per target: reserve up front and construct in place
        // instead of copy-constructing a temporary for push_back.
        requests.reserve(_targets.size());
        for (const auto& target : _targets) {
            requests.emplace_back(target,
                                  "admin",
                                  requestVotesCmd,
                                  Milliseconds(30 * 1000));  // trying to match current Socket timeout
        }

        return requests;
    }
Beispiel #4
0
    // Builds one replSetElect request per target node, identifying this node
    // (who/whoid), the replica set, the config version, and the election round.
    // Returns the list of requests to be scheduled by the caller.
    std::vector<RemoteCommandRequest>
    ElectCmdRunner::Algorithm::getRequests() const {

        const MemberConfig& selfConfig = _rsConfig.getMemberAt(_selfIndex);
        std::vector<RemoteCommandRequest> requests;
        // Exactly one request per target: reserve up front to avoid reallocations.
        requests.reserve(_targets.size());

        BSONObjBuilder electCmdBuilder;
        electCmdBuilder.append("replSetElect", 1);
        electCmdBuilder.append("set", _rsConfig.getReplSetName());
        electCmdBuilder.append("who", selfConfig.getHostAndPort().toString());
        electCmdBuilder.append("whoid", selfConfig.getId());
        electCmdBuilder.appendIntOrLL("cfgver", _rsConfig.getConfigVersion());
        electCmdBuilder.append("round", _round);
        const BSONObj replSetElectCmd = electCmdBuilder.obj();

        // Schedule a RemoteCommandRequest for each non-DOWN node
        for (const auto& target : _targets) {
            // We must never target ourselves.
            invariant(target != selfConfig.getHostAndPort());
            requests.emplace_back(target,
                                  "admin",
                                  replSetElectCmd,
                                  Milliseconds(30 * 1000));  // trying to match current Socket timeout
        }

        return requests;
    }
Beispiel #5
0
// Constructs a Fetcher that runs 'findCmdObj' against 'dbname' on 'source' via
// 'executor', invoking 'work' for results. The first remote command is issued
// through a RemoteCommandRetryScheduler governed by 'firstCommandRetryPolicy'.
// Throws (uassert, ErrorCodes::BadValue) if 'work' is null.
Fetcher::Fetcher(executor::TaskExecutor* executor,
                 const HostAndPort& source,
                 const std::string& dbname,
                 const BSONObj& findCmdObj,
                 const CallbackFn& work,
                 const BSONObj& metadata,
                 Milliseconds timeout,
                 std::unique_ptr<RemoteCommandRetryScheduler::RetryPolicy> firstCommandRetryPolicy)
    : _executor(executor),
      _source(source),
      _dbname(dbname),
      // getOwned(): keep private copies so the Fetcher does not depend on the
      // lifetime of the caller's BSON buffers.
      _cmdObj(findCmdObj.getOwned()),
      _metadata(metadata.getOwned()),
      _work(work),
      _timeout(timeout),
      _firstRemoteCommandScheduler(
          _executor,
          // nullptr: no OperationContext is associated with this request here.
          RemoteCommandRequest(_source, _dbname, _cmdObj, _metadata, nullptr, _timeout),
          stdx::bind(&Fetcher::_callback, this, stdx::placeholders::_1, kFirstBatchFieldName),
          std::move(firstCommandRetryPolicy)) {
    uassert(ErrorCodes::BadValue, "callback function cannot be null", work);
}
    // Builds one heartbeat request per replica-set member other than ourselves,
    // used to run the quorum check for a config (checkEmpty is set for the very
    // first config, version 1). Returns an empty list once enough responses
    // have already been received.
    std::vector<RemoteCommandRequest> QuorumChecker::getRequests() const {
        const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
        const MemberConfig& myConfig = _rsConfig->getMemberAt(_myIndex);

        std::vector<RemoteCommandRequest> requests;
        if (hasReceivedSufficientResponses()) {
            // Quorum already determined; nothing more to send.
            return requests;
        }

        // Compose the heartbeat every other member will receive.
        ReplSetHeartbeatArgs heartbeatArgs;
        heartbeatArgs.setSetName(_rsConfig->getReplSetName());
        heartbeatArgs.setProtocolVersion(1);
        heartbeatArgs.setConfigVersion(_rsConfig->getConfigVersion());
        heartbeatArgs.setCheckEmpty(isInitialConfig);
        heartbeatArgs.setSenderHost(myConfig.getHostAndPort());
        heartbeatArgs.setSenderId(myConfig.getId());
        const BSONObj heartbeatCmd = heartbeatArgs.toBSON();

        // Send a bunch of heartbeat requests.
        // Schedule an operation when a "sufficient" number of them have completed, and use that
        // to compute the quorum check results.
        // Wait for the "completion" callback to finish, and then it's OK to return the results.
        const int memberCount = _rsConfig->getNumMembers();
        for (int memberIndex = 0; memberIndex < memberCount; ++memberIndex) {
            if (memberIndex == _myIndex) {
                // No need to check self for liveness or unreadiness.
                continue;
            }
            requests.push_back(
                RemoteCommandRequest(_rsConfig->getMemberAt(memberIndex).getHostAndPort(),
                                     "admin",
                                     heartbeatCmd,
                                     _rsConfig->getHeartbeatTimeoutPeriodMillis()));
        }

        return requests;
    }
Beispiel #7
0
// Executor callback that creates the local destination collection (via a bulk
// loader) and schedules the remote command that establishes the cloning
// cursor(s): a single 'find' when _maxNumClonerCursors == 1, otherwise
// 'parallelCollectionScan'. On any failure, finishes the clone with that status.
void CollectionCloner::_beginCollectionCallback(const executor::TaskExecutor::CallbackArgs& cbd) {
    // The executor reports cancellation/shutdown through cbd.status.
    if (!cbd.status.isOK()) {
        _finishCallback(cbd.status);
        return;
    }
    // Test-only fail point: optionally block here (for a specific namespace, or
    // all namespaces if none is given) until the fail point is disabled.
    MONGO_FAIL_POINT_BLOCK(initialSyncHangCollectionClonerBeforeEstablishingCursor, nssData) {
        const BSONObj& data = nssData.getData();
        auto nss = data["nss"].str();
        // Only hang when cloning the specified collection, or if no collection was specified.
        if (nss.empty() || _destNss.toString() == nss) {
            while (MONGO_FAIL_POINT(initialSyncHangCollectionClonerBeforeEstablishingCursor) &&
                   !_isShuttingDown()) {
                log() << "initialSyncHangCollectionClonerBeforeEstablishingCursor fail point "
                         "enabled for "
                      << _destNss.toString() << ". Blocking until fail point is disabled.";
                mongo::sleepsecs(1);
            }
        }
    }
    // An _id index spec alongside autoIndexId == NO is contradictory; warn but proceed.
    if (!_idIndexSpec.isEmpty() && _options.autoIndexId == CollectionOptions::NO) {
        warning()
            << "Found the _id_ index spec but the collection specified autoIndexId of false on ns:"
            << this->_sourceNss;
    }

    // Create the destination collection and obtain the loader used to insert documents.
    auto collectionBulkLoader = _storageInterface->createCollectionForBulkLoading(
        _destNss, _options, _idIndexSpec, _indexSpecs);

    if (!collectionBulkLoader.isOK()) {
        _finishCallback(collectionBulkLoader.getStatus());
        return;
    }

    // Track how many indexes will be built (secondary specs plus _id if present).
    _stats.indexes = _indexSpecs.size();
    if (!_idIndexSpec.isEmpty()) {
        ++_stats.indexes;
    }

    _collLoader = std::move(collectionBulkLoader.getValue());

    BSONObjBuilder cmdObj;
    EstablishCursorsCommand cursorCommand;
    // The 'find' command is used when the number of cloning cursors is 1 to ensure
    // the correctness of the collection cloning process until 'parallelCollectionScan'
    // can be tested more extensively in context of initial sync.
    if (_maxNumClonerCursors == 1) {
        cmdObj.appendElements(
            makeCommandWithUUIDorCollectionName("find", _options.uuid, _sourceNss));
        cmdObj.append("noCursorTimeout", true);
        // Set batchSize to be 0 to establish the cursor without fetching any documents,
        // similar to the response format of 'parallelCollectionScan'.
        cmdObj.append("batchSize", 0);
        cursorCommand = Find;
    } else {
        cmdObj.appendElements(makeCommandWithUUIDorCollectionName(
            "parallelCollectionScan", _options.uuid, _sourceNss));
        cmdObj.append("numCursors", _maxNumClonerCursors);
        cursorCommand = ParallelCollScan;
    }

    // Ensure this executor thread has a Client before fetching its OperationContext.
    Client::initThreadIfNotAlready();
    auto opCtx = cc().getOperationContext();

    // Test-only fail point: block before issuing the cursor-establishing command
    // when the configured namespace matches the destination.
    MONGO_FAIL_POINT_BLOCK(initialSyncHangBeforeCollectionClone, options) {
        const BSONObj& data = options.getData();
        if (data["namespace"].String() == _destNss.ns()) {
            log() << "initial sync - initialSyncHangBeforeCollectionClone fail point "
                     "enabled. Blocking until fail point is disabled.";
            while (MONGO_FAIL_POINT(initialSyncHangBeforeCollectionClone) && !_isShuttingDown()) {
                mongo::sleepsecs(1);
            }
        }
    }

    // Schedule the cursor-establishing command with retries; the callback continues
    // the clone with knowledge of which command variant was used.
    _establishCollectionCursorsScheduler = stdx::make_unique<RemoteCommandRetryScheduler>(
        _executor,
        RemoteCommandRequest(_source,
                             _sourceNss.db().toString(),
                             cmdObj.obj(),
                             ReadPreferenceSetting::secondaryPreferredMetadata(),
                             opCtx,
                             RemoteCommandRequest::kNoTimeout),
        [=](const RemoteCommandCallbackArgs& rcbd) {
            _establishCollectionCursorsCallback(rcbd, cursorCommand);
        },
        RemoteCommandRetryScheduler::makeRetryPolicy(
            numInitialSyncCollectionFindAttempts.load(),
            executor::RemoteCommandRequest::kNoTimeout,
            RemoteCommandRetryScheduler::kAllRetriableErrors));
    auto scheduleStatus = _establishCollectionCursorsScheduler->startup();
    LOG(1) << "Attempting to establish cursors with maxNumClonerCursors: " << _maxNumClonerCursors;

    if (!scheduleStatus.isOK()) {
        // Startup failed: discard the scheduler and finish the clone with the error.
        _establishCollectionCursorsScheduler.reset();
        _finishCallback(scheduleStatus);
        return;
    }
}