Example #1
ServiceStateMachine::State ServiceStateMachineFixture::runPingTest() {
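    // Stage a {ping: 1} command as the next message the state machine will read.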
    _tl->setNextMessage(buildRequest(BSON("ping" << 1)));

    ASSERT_FALSE(haveClient());
    ASSERT_EQ(_ssm->state(), ServiceStateMachine::State::Created);
    log() << "run next";
    _ssm->runNext();
    auto ret = _ssm->state();
    ASSERT_FALSE(haveClient());

    return ret;
}
Example #2
void MobileSessionPool::shutDown() {
    stdx::unique_lock<stdx::mutex> lk(_mutex);
    _shuttingDown = true;

    // Retrieve the operation context from the thread's client if the client exists.
    if (haveClient()) {
        OperationContext* opCtx = cc().getOperationContext();

        // Locks if the operation context still exists.
        if (opCtx) {
            opCtx->waitForConditionOrInterrupt(
                _releasedSessionNotifier, lk, [&] { return _sessions.size() == _curPoolSize; });
        }
    } else {
        _releasedSessionNotifier.wait(lk, [&] { return _sessions.size() == _curPoolSize; });
    }

    // Retry all the drops that have been queued on failure.
    // Create a new sqlite session to do so; all other sessions might have been closed already.
    if (!failedDropsQueue.isEmpty()) {
        sqlite3* session;

        int status = sqlite3_open(_path.c_str(), &session);
        embedded::checkStatus(status, SQLITE_OK, "sqlite3_open");
        std::unique_ptr<MobileSession> mobSession = stdx::make_unique<MobileSession>(session, this);
        LOG(MOBILE_LOG_LEVEL_LOW) << "MobileSE: Executing queued drops at shutdown";
        failedDropsQueue.execAndDequeueAllOps(mobSession.get());
        sqlite3_close(session);
    }

    for (auto&& session : _sessions) {
        sqlite3_close(session);
    }
}
Example #3
    StatusWithMatchExpression expressionParserWhereCallbackReal(const BSONElement& where) {
        if ( !haveClient() )
            return StatusWithMatchExpression( ErrorCodes::BadValue, "no current client needed for $where" );

        Client::Context* context = cc().getContext();
        if ( !context )
            return StatusWithMatchExpression( ErrorCodes::BadValue, "no context in $where parsing" );

        const char* ns = context->ns();
        if ( !ns )
            return StatusWithMatchExpression( ErrorCodes::BadValue, "no ns in $where parsing" );

        if ( !globalScriptEngine )
            return StatusWithMatchExpression( ErrorCodes::BadValue, "no globalScriptEngine in $where parsing" );

        auto_ptr<WhereMatchExpression> exp( new WhereMatchExpression() );
        if ( where.type() == String || where.type() == Code ) {
            Status s = exp->init( ns, where.valuestr(), BSONObj() );
            if ( !s.isOK() )
                return StatusWithMatchExpression( s );
            return StatusWithMatchExpression( exp.release() );
        }

        if ( where.type() == CodeWScope ) {
            Status s = exp->init( ns,
                                  where.codeWScopeCode(),
                                  BSONObj( where.codeWScopeScopeDataUnsafe() ) );
            if ( !s.isOK() )
                return StatusWithMatchExpression( s );
            return StatusWithMatchExpression( exp.release() );
        }

        return StatusWithMatchExpression( ErrorCodes::BadValue, "$where got bad type" );
    }
Example #4
void KillCurrentOp::notifyAllWaiters() {
    boost::unique_lock<boost::mutex> lck(_mtx);
    if (!haveClient())
        return;
    cc().curop()->setKillWaiterFlags();
    _condvar.notify_all();
}
Example #5
void ShardingEgressMetadataHookForMongos::_saveGLEStats(const BSONObj& metadata,
                                                        StringData hostString) {
    if (!haveClient()) {
        // Client will be present only when write commands are used.
        return;
    }

    auto swShardingMetadata = rpc::ShardingMetadata::readFromMetadata(metadata);
    if (swShardingMetadata.getStatus() == ErrorCodes::NoSuchKey) {
        return;
    } else if (!swShardingMetadata.isOK()) {
        warning() << "Got invalid sharding metadata " << redact(swShardingMetadata.getStatus())
                  << " metadata object was '" << redact(metadata) << "'";
        return;
    }

    auto shardConn = ConnectionString::parse(hostString.toString());

    // If we got the reply from this host, we expect that its 'hostString' must be valid.
    if (!shardConn.isOK()) {
        severe() << "got bad host string in saveGLEStats: " << hostString;
    }
    invariantOK(shardConn.getStatus());

    auto shardingMetadata = std::move(swShardingMetadata.getValue());

    auto& clientInfo = cc();
    LOG(4) << "saveGLEStats lastOpTime:" << shardingMetadata.getLastOpTime()
           << " electionId:" << shardingMetadata.getLastElectionId();

    ClusterLastErrorInfo::get(clientInfo)
        ->addHostOpTime(
            shardConn.getValue(),
            HostOpTime(shardingMetadata.getLastOpTime(), shardingMetadata.getLastElectionId()));
}
Example #6
void Client::initThread(StringData desc,
                        ServiceContext* service,
                        transport::SessionHandle session) {
    invariant(!haveClient());

    std::string fullDesc;
    if (session) {
        fullDesc = str::stream() << desc << session->id();
    } else {
        fullDesc = desc.toString();
    }

    setThreadName(fullDesc);

    // Create the client obj, attach to thread
    currentClient = service->makeClient(fullDesc, std::move(session));
}
Example #7
    /**
     * Outline of the delete process:
     * 1. Initialize the client for this thread if there is no client. This is for the worker
     *    threads that are attached to any of the threads servicing client requests.
     * 2. Grant this thread authorization to perform deletes.
     * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
     * 4. Setup callback to save deletes to moveChunk directory (only if moveParanoia is true).
     * 5. Delete range.
     * 6. Wait until the majority of the secondaries catch up.
     */
    bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
                                        const RangeDeleteEntry& taskDetails,
                                        long long int* deletedDocs,
                                        std::string* errMsg) {
        const string ns(taskDetails.options.range.ns);
        const BSONObj inclusiveLower(taskDetails.options.range.minKey);
        const BSONObj exclusiveUpper(taskDetails.options.range.maxKey);
        const BSONObj keyPattern(taskDetails.options.range.keyPattern);
        const WriteConcernOptions writeConcern(taskDetails.options.writeConcern);
        const bool fromMigrate = taskDetails.options.fromMigrate;
        const bool onlyRemoveOrphans = taskDetails.options.onlyRemoveOrphanedDocs;

        const bool initiallyHaveClient = haveClient();

        if (!initiallyHaveClient) {
            Client::initThread("RangeDeleter");
        }

        *deletedDocs = 0;
        ShardForceVersionOkModeBlock forceVersion;
        {
            Helpers::RemoveSaver removeSaver("moveChunk",
                                             ns,
                                             taskDetails.options.removeSaverReason);
            Helpers::RemoveSaver* removeSaverPtr = NULL;
            if (serverGlobalParams.moveParanoia &&
                    !taskDetails.options.removeSaverReason.empty()) {
                removeSaverPtr = &removeSaver;
            }

            // log the opId so the user can use it to cancel the delete using killOp.
            unsigned int opId = txn->getCurOp()->opNum();
            log() << "Deleter starting delete for: " << ns
                  << " from " << inclusiveLower
                  << " -> " << exclusiveUpper
                  << ", with opId: " << opId
                  << endl;

            try {
                *deletedDocs =
                        Helpers::removeRange(txn,
                                             KeyRange(ns,
                                                      inclusiveLower,
                                                      exclusiveUpper,
                                                      keyPattern),
                                             false, /*maxInclusive*/
                                             writeConcern,
                                             removeSaverPtr,
                                             fromMigrate,
                                             onlyRemoveOrphans);

                if (*deletedDocs < 0) {
                    *errMsg = "collection or index dropped before data could be cleaned";
                    warning() << *errMsg << endl;

                    if (!initiallyHaveClient) {
                        txn->getClient()->shutdown();
                    }

                    return false;
                }

                log() << "rangeDeleter deleted " << *deletedDocs
                      << " documents for " << ns
                      << " from " << inclusiveLower
                      << " -> " << exclusiveUpper
                      << endl;
            }
            catch (const DBException& ex) {
                *errMsg = str::stream() << "Error encountered while deleting range: "
                                        << "ns" << ns
                                        << " from " << inclusiveLower
                                        << " -> " << exclusiveUpper
                                        << ", cause by:" << causedBy(ex);

                if (!initiallyHaveClient) {
                    txn->getClient()->shutdown();
                }

                return false;
            }
        }

        if (!initiallyHaveClient) {
            txn->getClient()->shutdown();
        }

        return true;
    }
Example #8
    /**
     * Outline of the delete process:
     * 1. Initialize the client for this thread if there is no client. This is for the worker
     *    threads that are attached to any of the threads servicing client requests.
     * 2. Grant this thread authorization to perform deletes.
     * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
     * 4. Setup callback to save deletes to moveChunk directory (only if moveParanoia is true).
     * 5. Delete range.
     * 6. Wait until the majority of the secondaries catch up.
     */
    bool RangeDeleterDBEnv::deleteRange(const StringData& ns,
                                        const BSONObj& inclusiveLower,
                                        const BSONObj& exclusiveUpper,
                                        const BSONObj& keyPattern,
                                        bool secondaryThrottle,
                                        std::string* errMsg) {
        const bool initiallyHaveClient = haveClient();

        if (!initiallyHaveClient) {
            Client::initThread("RangeDeleter");
        }

        ShardForceVersionOkModeBlock forceVersion;
        {
            Helpers::RemoveSaver removeSaver("moveChunk", ns.toString(), "post-cleanup");

            // log the opId so the user can use it to cancel the delete using killOp.
            unsigned int opId = cc().curop()->opNum();
            log() << "Deleter starting delete for: " << ns
                  << " from " << inclusiveLower
                  << " -> " << exclusiveUpper
                  << ", with opId: " << opId
                  << endl;

            try {
                long long numDeleted =
                        Helpers::removeRange(KeyRange(ns.toString(),
                                                      inclusiveLower,
                                                      exclusiveUpper,
                                                      keyPattern),
                                             false, /*maxInclusive*/
                                             replSet? secondaryThrottle : false,
                                             serverGlobalParams.moveParanoia ? &removeSaver : NULL,
                                             true, /*fromMigrate*/
                                             true); /*onlyRemoveOrphans*/

                if (numDeleted < 0) {
                    warning() << "collection or index dropped "
                              << "before data could be cleaned" << endl;

                    if (!initiallyHaveClient) {
                        cc().shutdown();
                    }

                    return false;
                }

                log() << "rangeDeleter deleted " << numDeleted
                      << " documents for " << ns
                      << " from " << inclusiveLower
                      << " -> " << exclusiveUpper
                      << endl;
            }
            catch (const DBException& ex) {
                *errMsg = str::stream() << "Error encountered while deleting range: "
                                        << "ns" << ns
                                        << " from " << inclusiveLower
                                        << " -> " << exclusiveUpper
                                        << ", cause by:" << causedBy(ex);

                if (!initiallyHaveClient) {
                    cc().shutdown();
                }

                return false;
            }
        }

        if (replSet) {
            Timer elapsedTime;
            ReplTime lastOpApplied = cc().getLastOp().asDate();
            while (!opReplicatedEnough(lastOpApplied,
                                       BSON("w" << "majority").firstElement())) {
                if (elapsedTime.seconds() >= 3600) {
                    *errMsg = str::stream() << "moveChunk repl sync timed out after "
                                            << elapsedTime.seconds() << " seconds";

                    if (!initiallyHaveClient) {
                        cc().shutdown();
                    }

                    return false;
                }

                sleepsecs(1);
            }

            LOG(elapsedTime.seconds() < 30 ? 1 : 0)
                << "moveChunk repl sync took "
                << elapsedTime.seconds() << " seconds" << endl;
        }

        if (!initiallyHaveClient) {
            cc().shutdown();
        }

        return true;
    }
Example #9
~TrackLockAcquireTime() {
    if (haveClient()) {
        cc().curop()->lockStat().recordAcquireTimeMicros(_type, _timer.micros());
    }
}
Example #10
void Lock::ScopedLock::recordTime() {
    if (haveClient()) {
        cc().curop()->lockStat().recordLockTimeMicros(_type, _timer.micros());
    }
}
Example #11
void Client::setCurrent(ServiceContext::UniqueClient client) {
    invariant(!haveClient());
    currentClient = std::move(client);
}
Example #12
ServiceContext::UniqueClient Client::releaseCurrent() {
    invariant(haveClient());
    return std::move(currentClient);
}
Example #13
Client& cc() {
    invariant(haveClient());
    return *Client::getCurrent();
}
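Taken together, Examples #11-#13 show the shape of the API: currentClient holds the per-thread Client, haveClient() reports whether it is set, and cc() asserts that it is before dereferencing it. The sketch below is a minimal, self-contained model of that pattern, not MongoDB's actual implementation (the Client type, the storage, and initThread here are simplified stand-ins); it only illustrates why callers such as Examples #9 and #10 guard cc() behind haveClient().

#include <cassert>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>

// Simplified stand-in for MongoDB's Client: one instance per service thread.
struct Client {
    std::string desc;
};

// Per-thread slot, analogous to 'currentClient' in Examples #6, #11 and #12.
thread_local std::unique_ptr<Client> currentClient;

bool haveClient() {
    return static_cast<bool>(currentClient);
}

Client& cc() {
    assert(haveClient());  // Example #13 enforces the same precondition with invariant().
    return *currentClient;
}

void initThread(std::string desc) {
    assert(!haveClient());  // Example #6: a thread must not already have a client.
    currentClient.reset(new Client{std::move(desc)});
}

int main() {
    // A worker thread that may or may not have a client attached (cf. Examples #9 and #10):
    std::thread worker([] {
        if (haveClient()) {  // guard before touching per-thread client state
            std::cout << cc().desc << '\n';
        } else {
            std::cout << "no client on this thread\n";
        }
    });
    worker.join();

    initThread("conn1");
    std::cout << cc().desc << '\n';  // safe: haveClient() is now true on this thread
    return 0;
}

In the examples above the per-thread slot is a ServiceContext::UniqueClient rather than a std::unique_ptr, but the checked-access discipline is the same: check haveClient() (or assert it with invariant) before calling cc().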