bool SSLManager::_setSubjectName(const std::string& keyFile, std::string& subjectName) {
    // Read the certificate subject name and store it
    BIO* in = BIO_new(BIO_s_file_internal());
    if (NULL == in) {
        error() << "failed to allocate BIO object: " << getSSLErrorMessage(ERR_get_error()) << endl;
        return false;
    }
    ON_BLOCK_EXIT(BIO_free, in);

    if (BIO_read_filename(in, keyFile.c_str()) <= 0) {
        error() << "cannot read key file when setting subject name: " << keyFile << ' '
                << getSSLErrorMessage(ERR_get_error()) << endl;
        return false;
    }

    X509* x509 = PEM_read_bio_X509(in, NULL, &SSLManager::password_cb, this);
    if (NULL == x509) {
        error() << "cannot retrieve certificate from keyfile: " << keyFile << ' '
                << getSSLErrorMessage(ERR_get_error()) << endl;
        return false;
    }
    ON_BLOCK_EXIT(X509_free, x509);

    subjectName = getCertificateSubjectName(x509);
    return true;
}
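Every snippet in this collection leans on the same scope-guard idiom: ON_BLOCK_EXIT registers a cleanup action that runs when the enclosing scope is left, whether by a normal return or an exception, which is why it pairs naturally with C-style resources such as the BIO and X509 handles above. Below is a minimal sketch of the idea; the ScopeGuard class and the example() function are hypothetical illustrations of the pattern, not the implementation any of these codebases actually uses.

#include <cstdio>
#include <utility>

// Hypothetical minimal scope guard, shown only to illustrate the ON_BLOCK_EXIT idiom:
// the stored callable runs when the guard is destroyed, on every exit path.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : _f(std::move(f)) {}
    ~ScopeGuard() {
        _f();  // cleanup runs on return, on exception unwinding, and on fall-through
    }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F _f;
};

// Hypothetical usage: the file handle is released no matter how example() exits.
bool example(const char* path) {
    std::FILE* fp = std::fopen(path, "rb");
    if (!fp) {
        return false;
    }
    ScopeGuard closeFile([fp] { std::fclose(fp); });
    // ... work with fp; early returns and exceptions still close the file ...
    return true;
}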
bool ImpersonateCurrentUser() {
    SetLastError(0);

    HANDLE process = 0;
    HANDLE Token = NULL;
    GetCurrentUserToken(process, Token);

    // Auto close process and token when leaving scope
    ON_BLOCK_EXIT(CloseHandle, process);
    ON_BLOCK_EXIT(CloseHandle, Token);

    bool test = (FALSE != ImpersonateLoggedOnUser(Token));
    return test;
}
void DBClientCursor::requestMore() {
    verify(cursorId && batch.pos == batch.nReturned);

    if (haveLimit) {
        nToReturn -= batch.nReturned;
        verify(nToReturn > 0);
    }

    BufBuilder b;
    b.appendNum(opts);
    b.appendStr(ns);
    b.appendNum(nextBatchSize());
    b.appendNum(cursorId);

    Message toSend;
    toSend.setData(dbGetMore, b.buf(), b.len());

    Message response;
    if (_client) {
        _client->call(toSend, response);
        this->batch.m = std::move(response);
        dataReceived();
    } else {
        verify(_scopedHost.size());
        ScopedDbConnection conn(_scopedHost);
        conn->call(toSend, response);
        _client = conn.get();
        ON_BLOCK_EXIT([this] { _client = nullptr; });
        this->batch.m = std::move(response);
        dataReceived();
        conn.done();
    }
}
void BackgroundSync::producerThread() {
    Client::initThread("rsBackgroundSync");
    AuthorizationSession::get(cc())->grantInternalAuthorization();

    _threadPoolTaskExecutor.startup();
    ON_BLOCK_EXIT([this]() {
        _threadPoolTaskExecutor.shutdown();
        _threadPoolTaskExecutor.join();
    });

    while (!inShutdown()) {
        try {
            _producerThread();
        } catch (const DBException& e) {
            std::string msg(str::stream() << "sync producer problem: " << e.toString());
            error() << msg;
            _replCoord->setMyHeartbeatMessage(msg);
            sleepmillis(100);  // sleep a bit to keep from hammering this thread with temp. errors.
        } catch (const std::exception& e2) {
            severe() << "sync producer exception: " << e2.what();
            fassertFailed(28546);
        }
    }

    stop();
}
Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
                                                const ReadPreferenceSetting& unused,
                                                const std::string& dbName,
                                                Milliseconds maxTimeMSOverrideUnused,
                                                const BSONObj& cmdObj) {
    repl::OpTime currentOpTimeFromClient =
        repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
    ON_BLOCK_EXIT([this, &txn, &currentOpTimeFromClient] {
        _updateLastOpTimeFromClient(txn, currentOpTimeFromClient);
    });

    try {
        DBDirectClient client(txn);
        rpc::UniqueReply commandResponse = client.runCommandWithMetadata(
            dbName, cmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), cmdObj);
        BSONObj responseReply = commandResponse->getCommandReply().getOwned();
        BSONObj responseMetadata = commandResponse->getMetadata().getOwned();

        Status commandStatus = getStatusFromCommandResult(responseReply);
        Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseReply);

        return Shard::HostWithResponse(boost::none,
                                       Shard::CommandResponse{std::move(responseReply),
                                                              std::move(responseMetadata),
                                                              std::move(commandStatus),
                                                              std::move(writeConcernStatus)});
    } catch (const DBException& ex) {
        return Shard::HostWithResponse(boost::none, ex.toStatus());
    }
}
void WiredTigerOperationStats::fetchStats(WT_SESSION* session,
                                          const std::string& uri,
                                          const std::string& config) {
    invariant(session);
    WT_CURSOR* c = nullptr;
    const char* cursorConfig = config.empty() ? nullptr : config.c_str();
    int ret = session->open_cursor(session, uri.c_str(), nullptr, cursorConfig, &c);
    uassert(ErrorCodes::CursorNotFound, "Unable to open statistics cursor", ret == 0);

    invariant(c);
    ON_BLOCK_EXIT([&] { c->close(c); });

    const char* desc;
    uint64_t value;
    uint64_t key;
    while (c->next(c) == 0 && c->get_key(c, &key) == 0) {
        fassert(51035, c->get_value(c, &desc, nullptr, &value) == 0);
#if defined(__s390x__)
        _stats[key >> 32] = WiredTigerUtil::castStatisticsValue<long long>(value);
#else
        _stats[key] = WiredTigerUtil::castStatisticsValue<long long>(value);
#endif  // __s390x__
    }

    // Reset the statistics so that the next fetch gives the recent values.
    invariantWTOK(c->reset(c));
}
std::string SSLManager::validatePeerCertificate(const SSLConnection* conn) {
    if (!_validateCertificates)
        return "";

    X509* peerCert = SSL_get_peer_certificate(conn->ssl);

    if (NULL == peerCert) {  // no certificate presented by peer
        if (_weakValidation) {
            warning() << "no SSL certificate provided by peer" << endl;
        } else {
            error() << "no SSL certificate provided by peer; connection rejected" << endl;
            throw SocketException(SocketException::CONNECT_ERROR, "");
        }
        return "";
    }
    ON_BLOCK_EXIT(X509_free, peerCert);

    long result = SSL_get_verify_result(conn->ssl);

    if (result != X509_V_OK) {
        error() << "SSL peer certificate validation failed:"
                << X509_verify_cert_error_string(result) << endl;
        throw SocketException(SocketException::CONNECT_ERROR, "");
    }

    // TODO: check optional cipher restriction, using cert.
    return getCertificateSubjectName(peerCert);
}
void BackgroundSync::producerThread(executor::TaskExecutor* taskExecutor) {
    Client::initThread("rsBackgroundSync");
    AuthorizationSession::get(cc())->grantInternalAuthorization();

    // Disregard task executor passed into this function and use a local thread pool task executor
    // instead. This ensures that potential blocking operations inside the fetcher callback will
    // not affect other coordinators (such as the replication coordinator) that might be dependent
    // on the shared task executor.
    ThreadPool::Options threadPoolOptions;
    threadPoolOptions.poolName = "rsBackgroundSync";
    executor::ThreadPoolTaskExecutor threadPoolTaskExecutor(
        stdx::make_unique<ThreadPool>(threadPoolOptions), executor::makeNetworkInterface());
    taskExecutor = &threadPoolTaskExecutor;
    taskExecutor->startup();
    ON_BLOCK_EXIT([taskExecutor]() {
        taskExecutor->shutdown();
        taskExecutor->join();
    });

    while (!inShutdown()) {
        try {
            _producerThread(taskExecutor);
        } catch (const DBException& e) {
            std::string msg(str::stream() << "sync producer problem: " << e.toString());
            error() << msg;
            _replCoord->setMyHeartbeatMessage(msg);
        } catch (const std::exception& e2) {
            severe() << "sync producer exception: " << e2.what();
            fassertFailed(28546);
        }
    }
}
void eventProcessingThread() {
    std::string eventName = getShutdownSignalName(ProcessId::getCurrent().asUInt32());

    HANDLE event = CreateEventA(NULL, TRUE, FALSE, eventName.c_str());
    if (event == NULL) {
        warning() << "eventProcessingThread CreateEvent failed: " << errnoWithDescription();
        return;
    }

    ON_BLOCK_EXIT(CloseHandle, event);

    int returnCode = WaitForSingleObject(event, INFINITE);
    if (returnCode != WAIT_OBJECT_0) {
        if (returnCode == WAIT_FAILED) {
            warning() << "eventProcessingThread WaitForSingleObject failed: "
                      << errnoWithDescription();
            return;
        } else {
            warning() << "eventProcessingThread WaitForSingleObject failed: "
                      << errnoWithDescription(returnCode);
            return;
        }
    }

    Client::initThread("eventTerminate");
    log() << "shutdown event signaled, will terminate after current cmd ends";
    exitCleanly(EXIT_CLEAN);
}
void syncDoInitialSync(ReplicationCoordinatorExternalState* replicationCoordinatorExternalState) {
    stdx::unique_lock<stdx::mutex> lk(_initialSyncMutex, stdx::defer_lock);
    if (!lk.try_lock()) {
        uasserted(34474, "Initial Sync Already Active.");
    }

    std::unique_ptr<BackgroundSync> bgsync;
    {
        log() << "Starting replication fetcher thread for initial sync";
        auto txn = cc().makeOperationContext();
        bgsync = stdx::make_unique<BackgroundSync>(
            replicationCoordinatorExternalState,
            replicationCoordinatorExternalState->makeInitialSyncOplogBuffer(txn.get()));
        bgsync->startup(txn.get());
        createOplog(txn.get());
    }
    ON_BLOCK_EXIT([&bgsync]() {
        log() << "Stopping replication fetcher thread for initial sync";
        auto txn = cc().makeOperationContext();
        bgsync->shutdown(txn.get());
        bgsync->join(txn.get());
    });

    int failedAttempts = 0;
    while (failedAttempts < kMaxFailedAttempts) {
        try {
            // leave loop when successful
            Status status = _initialSync(bgsync.get());
            if (status.isOK()) {
                break;
            } else {
                error() << status;
            }
        } catch (const DBException& e) {
            error() << e;
            // Return if in shutdown
            if (inShutdown()) {
                return;
            }
        }

        if (inShutdown()) {
            return;
        }

        error() << "initial sync attempt failed, " << (kMaxFailedAttempts - ++failedAttempts)
                << " attempts remaining";
        sleepmillis(durationCount<Milliseconds>(kInitialSyncRetrySleepDuration));
    }

    // No need to print a stack
    if (failedAttempts >= kMaxFailedAttempts) {
        severe() << "The maximum number of retries have been exhausted for initial sync.";
        fassertFailedNoTrace(16233);
    }
}
Status PlanYieldPolicy::yield(stdx::function<void()> beforeYieldingFn,
                              stdx::function<void()> whileYieldingFn) {
    invariant(_planYielding);
    invariant(canAutoYield());

    // After we finish yielding (or in any early return), call resetTimer() to prevent yielding
    // again right away. We delay the resetTimer() call so that the clock doesn't start ticking
    // until after we return from the yield.
    ON_BLOCK_EXIT([this]() { resetTimer(); });

    _forceYield = false;

    OperationContext* opCtx = _planYielding->getOpCtx();
    invariant(opCtx);
    invariant(!opCtx->lockState()->inAWriteUnitOfWork());

    // Can't use writeConflictRetry since we need to call saveState before resetting the
    // transaction.
    for (int attempt = 1; true; attempt++) {
        try {
            // All YIELD_AUTO plans will get here eventually when the elapsed tracker triggers
            // that it's time to yield. Whether or not we will actually yield, we need to check
            // if this operation has been interrupted.
            if (_policy == PlanExecutor::YIELD_AUTO) {
                MONGO_FAIL_POINT_PAUSE_WHILE_SET(setCheckForInterruptHang);

                auto interruptStatus = opCtx->checkForInterruptNoAssert();
                if (!interruptStatus.isOK()) {
                    return interruptStatus;
                }
            }

            try {
                _planYielding->saveState();
            } catch (const WriteConflictException&) {
                invariant(!"WriteConflictException not allowed in saveState");
            }

            if (_policy == PlanExecutor::WRITE_CONFLICT_RETRY_ONLY) {
                // Just reset the snapshot. Leave all LockManager locks alone.
                opCtx->recoveryUnit()->abandonSnapshot();
            } else {
                // Release and reacquire locks.
                if (beforeYieldingFn)
                    beforeYieldingFn();
                QueryYield::yieldAllLocks(opCtx, whileYieldingFn, _planYielding->nss());
            }

            return _planYielding->restoreStateWithoutRetrying();
        } catch (const WriteConflictException&) {
            CurOp::get(opCtx)->debug().writeConflicts++;
            WriteConflictException::logAndBackoff(
                attempt, "plan execution restoreState", _planYielding->nss().ns());
            // retry
        }
    }
}
std::string errnoWithDescription(int errNumber) {
#if defined(_WIN32)
    if (errNumber == -1)
        errNumber = GetLastError();
#else
    if (errNumber < 0)
        errNumber = errno;
#endif

    char buf[kBuflen];
    char* msg{nullptr};

#if defined(__GNUC__) && defined(_GNU_SOURCE) && !(__ANDROID_API__ <= 22) && !defined(EMSCRIPTEN)
    msg = strerror_r(errNumber, buf, kBuflen);
#elif defined(_WIN32)

    LPWSTR errorText = nullptr;
    FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
                       FORMAT_MESSAGE_IGNORE_INSERTS,
                   nullptr,
                   errNumber,
                   0,
                   reinterpret_cast<LPWSTR>(&errorText),  // output
                   0,                                     // minimum size for output buffer
                   nullptr);

    if (errorText) {
        ON_BLOCK_EXIT([&errorText] { LocalFree(errorText); });
        std::string utf8ErrorText = toUtf8String(errorText);
        auto size = utf8ErrorText.find_first_of("\r\n");
        if (size == std::string::npos) {  // not found
            size = utf8ErrorText.length();
        }
        if (size >= kBuflen) {
            size = kBuflen - 1;
        }
        memcpy(buf, utf8ErrorText.c_str(), size);
        buf[size] = '\0';
        msg = buf;
    } else if (strerror_s(buf, kBuflen, errNumber) != 0) {
        msg = buf;
    }
#else /* XSI strerror_r */
    if (strerror_r(errNumber, buf, kBuflen) == 0) {
        msg = buf;
    }
#endif

    if (!msg) {
        return str::stream() << kUnknownMsg << errNumber;
    }

    return {msg};
}
void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint) {
    const int shuttingDown = _shuttingDown.fetchAndAdd(1);
    ON_BLOCK_EXIT([this] { _shuttingDown.fetchAndSubtract(1); });

    uassert(ErrorCodes::ShutdownInProgress,
            "Cannot wait for durability because a shutdown is in progress",
            !(shuttingDown & kShuttingDownMask));

    // When forcing a checkpoint with journaling enabled, don't synchronize with other
    // waiters, as a log flush is much cheaper than a full checkpoint.
    if (forceCheckpoint && _engine->isDurable()) {
        UniqueWiredTigerSession session = getSession();
        WT_SESSION* s = session->getSession();
        {
            stdx::unique_lock<stdx::mutex> lk(_journalListenerMutex);
            JournalListener::Token token = _journalListener->getToken();
            invariantWTOK(s->checkpoint(s, NULL));
            _journalListener->onDurable(token);
        }
        LOG(4) << "created checkpoint (forced)";
        return;
    }

    uint32_t start = _lastSyncTime.load();
    // Do the remainder in a critical section that ensures only a single thread at a time
    // will attempt to synchronize.
    stdx::unique_lock<stdx::mutex> lk(_lastSyncMutex);
    uint32_t current = _lastSyncTime.loadRelaxed();  // synchronized with writes through mutex
    if (current != start) {
        // Someone else synced already since we read lastSyncTime, so we're done!
        return;
    }
    _lastSyncTime.store(current + 1);

    // Nobody has synched yet, so we have to sync ourselves.
    auto session = getSession();
    WT_SESSION* s = session->getSession();

    // This gets the token (OpTime) from the last write, before flushing (either the journal, or a
    // checkpoint), and then reports that token (OpTime) as a durable write.
    stdx::unique_lock<stdx::mutex> jlk(_journalListenerMutex);
    JournalListener::Token token = _journalListener->getToken();

    // Use the journal when available, or a checkpoint otherwise.
    if (_engine->isDurable()) {
        invariantWTOK(s->log_flush(s, "sync=on"));
        LOG(4) << "flushed journal";
    } else {
        invariantWTOK(s->checkpoint(s, NULL));
        LOG(4) << "created checkpoint";
    }
    _journalListener->onDurable(token);
}
int BOOST_TEST_CALL_DECL main(int argc, char* argv[]) {
    google::InitGoogleLogging(argv[0]);
    google::InstallFailureSignalHandler();
#ifdef DEBUG
    google::LogToStderr();
#endif
    ON_BLOCK_EXIT(&google::ShutdownGoogleLogging);

    return ::boost::unit_test::unit_test_main(&init_unit_test, argc, argv);
}
Status PlanYieldPolicy::yieldOrInterrupt(stdx::function<void()> beforeYieldingFn,
                                         stdx::function<void()> whileYieldingFn) {
    if (_policy == PlanExecutor::INTERRUPT_ONLY) {
        ON_BLOCK_EXIT([this]() { resetTimer(); });
        OperationContext* opCtx = _planYielding->getOpCtx();
        invariant(opCtx);
        MONGO_FAIL_POINT_PAUSE_WHILE_SET(setCheckForInterruptHang);
        return opCtx->checkForInterruptNoAssert();
    }
    return yield(beforeYieldingFn, whileYieldingFn);
}
void run() { // Create a new collection. Database* db = _ctx.db(); Collection* coll; { WriteUnitOfWork wunit(&_opCtx); ASSERT_OK(db->dropCollection(&_opCtx, _nss)); coll = db->createCollection(&_opCtx, _nss); OpDebug* const nullOpDebug = nullptr; ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << "dup")), nullOpDebug, true)); ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 2 << "a" << "dup")), nullOpDebug, true)); wunit.commit(); } MultiIndexBlock indexer; const BSONObj spec = BSON("name" << "a" << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v" << static_cast<int>(kIndexVersion) << "unique" << true << "background" << background); ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); }); ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus()); auto desc = coll->getIndexCatalog()->findIndexByName(&_opCtx, "a", true /* includeUnfinished */); ASSERT(desc); // Hybrid index builds check duplicates explicitly. ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll)); auto status = indexer.checkConstraints(&_opCtx); ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey); }
void run() { // Create a new collection. Database* db = _ctx.db(); Collection* coll; { WriteUnitOfWork wunit(&_opCtx); ASSERT_OK(db->dropCollection(&_opCtx, _nss)); coll = db->createCollection(&_opCtx, _nss); OpDebug* const nullOpDebug = nullptr; ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 1 << "a" << "dup")), nullOpDebug, true)); ASSERT_OK(coll->insertDocument(&_opCtx, InsertStatement(BSON("_id" << 2 << "a" << "dup")), nullOpDebug, true)); wunit.commit(); } MultiIndexBlock indexer; indexer.ignoreUniqueConstraint(); const BSONObj spec = BSON("name" << "a" << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v" << static_cast<int>(kIndexVersion) << "unique" << true << "background" << background); ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); }); ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus()); ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll)); WriteUnitOfWork wunit(&_opCtx); ASSERT_OK(indexer.commit( &_opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn)); wunit.commit(); }
bool processStillActive(DWORD pid) {
    HANDLE proc = OpenProcess(SYNCHRONIZE, FALSE, pid);
    ON_BLOCK_EXIT([proc]() { ::CloseHandle(proc); });

    DWORD exitCode;
    if (!GetExitCodeProcess(proc, &exitCode)) {
        spdlog::get("usvfs")->warn("failed to query exit code on process {}", pid);
        return false;
    } else {
        return exitCode == STILL_ACTIVE;
    }
}
void WiredTigerSessionCache::releaseSession(WiredTigerSession* session) {
    invariant(session);
    invariant(session->cursorsOut() == 0);

    const int shuttingDown = _shuttingDown.fetchAndAdd(1);
    ON_BLOCK_EXIT([this] { _shuttingDown.fetchAndSubtract(1); });

    if (shuttingDown & kShuttingDownMask) {
        // Leak the session in order to avoid race condition with clean shutdown, where the
        // storage engine is ripped from underneath transactions, which are not "active"
        // (i.e., do not have any locks), but are just about to delete the recovery unit.
        // See SERVER-16031 for more information.
        return;
    }

    // This checks that we are only caching idle sessions and not something which might hold
    // locks or otherwise prevent truncation.
    {
        WT_SESSION* ss = session->getSession();
        uint64_t range;
        invariantWTOK(ss->transaction_pinned_range(ss, &range));
        invariant(range == 0);
    }

    // If the cursor epoch has moved on, close all cursors in the session.
    uint64_t cursorEpoch = _cursorEpoch.load();
    if (session->_getCursorEpoch() != cursorEpoch)
        session->closeAllCursors();

    bool returnedToCache = false;
    uint64_t currentEpoch = _epoch.load();

    if (session->_getEpoch() == currentEpoch) {  // check outside of lock to reduce contention
        stdx::lock_guard<stdx::mutex> lock(_cacheLock);
        if (session->_getEpoch() == _epoch.load()) {  // recheck inside the lock for correctness
            returnedToCache = true;
            _sessions.push_back(session);
        }
    } else
        invariant(session->_getEpoch() < currentEpoch);

    if (!returnedToCache)
        delete session;

    if (_engine && _engine->haveDropsQueued())
        _engine->dropSomeQueuedIdents();
}
bool run(OperationContext* opCtx,
         const std::string& dbname,
         const BSONObj& cmdObj,
         BSONObjBuilder& result) override {
    uassert(ErrorCodes::IllegalOperation,
            "_configsvrDropCollection can only be run on config servers",
            serverGlobalParams.clusterRole == ClusterRole::ConfigServer);

    // Set the operation context read concern level to local for reads into the config database.
    repl::ReadConcernArgs::get(opCtx) =
        repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);

    const NamespaceString nss(parseNs(dbname, cmdObj));

    uassert(ErrorCodes::InvalidOptions,
            str::stream() << "dropCollection must be called with majority writeConcern, got "
                          << cmdObj,
            opCtx->getWriteConcern().wMode == WriteConcernOptions::kMajority);

    Seconds waitFor(DistLockManager::kDefaultLockTimeout);
    MONGO_FAIL_POINT_BLOCK(setDropCollDistLockWait, customWait) {
        const BSONObj& data = customWait.getData();
        waitFor = Seconds(data["waitForSecs"].numberInt());
    }

    auto const catalogClient = Grid::get(opCtx)->catalogClient();

    auto scopedDbLock =
        ShardingCatalogManager::get(opCtx)->serializeCreateOrDropDatabase(opCtx, nss.db());
    auto scopedCollLock =
        ShardingCatalogManager::get(opCtx)->serializeCreateOrDropCollection(opCtx, nss);

    auto dbDistLock = uassertStatusOK(
        catalogClient->getDistLockManager()->lock(opCtx, nss.db(), "dropCollection", waitFor));
    auto collDistLock = uassertStatusOK(
        catalogClient->getDistLockManager()->lock(opCtx, nss.ns(), "dropCollection", waitFor));

    ON_BLOCK_EXIT(
        [opCtx, nss] { Grid::get(opCtx)->catalogCache()->invalidateShardedCollection(nss); });

    staleExceptionRetry(opCtx, "_configsvrDropCollection", [&] { _dropCollection(opCtx, nss); });

    return true;
}
/*
 * Computes a SHA-1 hash of 'input'.
 */
bool sha1(const unsigned char* input, const size_t inputLen, unsigned char* output) {
    EVP_MD_CTX digestCtx;
    EVP_MD_CTX_init(&digestCtx);
    ON_BLOCK_EXIT(EVP_MD_CTX_cleanup, &digestCtx);

    if (1 != EVP_DigestInit_ex(&digestCtx, EVP_sha1(), NULL)) {
        return false;
    }

    if (1 != EVP_DigestUpdate(&digestCtx, input, inputLen)) {
        return false;
    }

    return (1 == EVP_DigestFinal_ex(&digestCtx, output, NULL));
}
void OperationContext::markKilled(ErrorCodes::Error killCode) {
    invariant(killCode != ErrorCodes::OK);
    stdx::unique_lock<stdx::mutex> lkWaitMutex;
    if (_waitMutex) {
        invariant(++_numKillers > 0);
        getClient()->unlock();
        ON_BLOCK_EXIT([this]() noexcept {
            getClient()->lock();
            invariant(--_numKillers >= 0);
        });
        lkWaitMutex = stdx::unique_lock<stdx::mutex>{*_waitMutex};
    }
    _killCode.compareAndSwap(ErrorCodes::OK, killCode);
    if (lkWaitMutex && _numKillers == 0) {
        invariant(_waitCV);
        _waitCV->notify_all();
    }
}
// Applies a batch of oplog entries, by using a set of threads to apply the operations and then
// writes the oplog entries to the local oplog.
OpTime SyncTail::multiApply(OperationContext* txn, const OpQueue& ops) {
    invariant(_applyFunc);

    if (getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
        // Use a ThreadPool to prefetch all the operations in a batch.
        prefetchOps(ops.getDeque(), &_prefetcherPool);
    }

    std::vector<std::vector<BSONObj>> writerVectors(replWriterThreadCount);

    fillWriterVectors(txn, ops.getDeque(), &writerVectors);
    LOG(2) << "replication batch size is " << ops.getDeque().size() << endl;

    // We must grab this because we're going to grab write locks later.
    // We hold this mutex the entire time we're writing; it doesn't matter
    // because all readers are blocked anyway.
    stdx::lock_guard<SimpleMutex> fsynclk(filesLockedFsync);

    // stop all readers until we're done
    Lock::ParallelBatchWriterMode pbwm(txn->lockState());

    ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
    if (replCoord->getMemberState().primary() && !replCoord->isWaitingForApplierToDrain()) {
        severe() << "attempting to replicate ops while primary";
        fassertFailed(28527);
    }

    applyOps(writerVectors, &_writerPool, _applyFunc, this);

    OpTime lastOpTime;
    {
        ON_BLOCK_EXIT([&] { _writerPool.join(); });
        std::vector<BSONObj> raws;
        raws.reserve(ops.getDeque().size());
        for (auto&& op : ops.getDeque()) {
            raws.emplace_back(op.raw);
        }
        lastOpTime = writeOpsToOplog(txn, raws);
        if (inShutdown()) {
            return OpTime();
        }
    }
    // We have now written all database writes and updated the oplog to match.
    return lastOpTime;
}
inline void kill_wrapper(ProcessId pid, int sig, int port, const BSONObj& opt) {
#ifdef _WIN32
    if (sig == SIGKILL || port == 0) {
        verify(registry._handles.count(pid));
        TerminateProcess(registry._handles[pid], 1);  // returns failure for "zombie" processes.
        return;
    }

    std::string eventName = getShutdownSignalName(pid.asUInt32());

    HANDLE event = OpenEventA(EVENT_MODIFY_STATE, FALSE, eventName.c_str());
    if (event == NULL) {
        int gle = GetLastError();
        if (gle != ERROR_FILE_NOT_FOUND) {
            warning() << "kill_wrapper OpenEvent failed: " << errnoWithDescription();
        } else {
            log() << "kill_wrapper OpenEvent failed to open event to the process "
                  << pid.asUInt32() << ". It has likely died already";
        }
        return;
    }

    ON_BLOCK_EXIT(CloseHandle, event);

    bool result = SetEvent(event);
    if (!result) {
        error() << "kill_wrapper SetEvent failed: " << errnoWithDescription();
        return;
    }
#else
    int x = kill(pid.toNative(), sig);
    if (x) {
        if (errno == ESRCH) {
        } else {
            log() << "killFailed: " << errnoWithDescription() << endl;
            verify(x == 0);
        }
    }
#endif
}
// class method
int HttpSvc::Ccd::Dispatcher::Dispatch(HttpStream *hs) {
    const std::string &uri = hs->GetUri();
    if (uri.empty()) {
        // error
        return CCD_ERROR_PARSE_CONTENT;
    }

    std::vector<std::string> uri_parts;
    VPLHttp_SplitUri(hs->GetUri(), uri_parts);
    if (uri_parts.size() < 1 || uri_parts[0].empty() || *uri.rbegin() == '/') {
        LOG_ERROR("Dispatcher: Unexpected URI %s", hs->GetUri().c_str());
        Utils::SetCompleteResponse(hs, 400);
        return 0;
    }
    const std::string &serviceName = uri_parts[0];

    if (handlerCreatorJumpTable.find(serviceName) == handlerCreatorJumpTable.end()) {
        LOG_ERROR("Dispatcher: No handler for service %s", serviceName.c_str());
        Utils::SetCompleteResponse(hs, 400);
        return 0;
    }

    Handler *handler = (handlerCreatorJumpTable[serviceName])(hs);
    if (handler == NULL) {
        LOG_ERROR("Dispatcher: No memory to create handler for service %s", serviceName.c_str());
        Utils::SetCompleteResponse(hs, 500);
        return 0;
    }
    ON_BLOCK_EXIT(deleteObj<HttpSvc::Ccd::Handler>, handler);

    LOG_INFO("Dispatcher: Dispatching HttpStream[%p]: %s %s",
             hs, hs->GetMethod().c_str(), uri.c_str());
    int err = handler->Run();
    if (err) {
        LOG_ERROR("Dispatcher: Handler failed: err %d", err);
    } else {
        // response set by Handler::Run()
        LOG_INFO("Dispatcher: HttpStream[%p] outcome: status %d", hs, hs->GetStatusCode());
    }

    return err;
}
bool ProcessInfo::checkNumaEnabled() {
    lgrp_cookie_t cookie = lgrp_init(LGRP_VIEW_OS);

    if (cookie == LGRP_COOKIE_NONE) {
        warning() << "lgrp_init failed: " << errnoWithDescription();
        return false;
    }

    ON_BLOCK_EXIT(lgrp_fini, cookie);

    int groups = lgrp_nlgrps(cookie);

    if (groups == -1) {
        warning() << "lgrp_nlgrps failed: " << errnoWithDescription();
        return false;
    }

    // NUMA machines have more than 1 locality group
    return groups > 1;
}
FromUTF8::FromUTF8(Char const* src, UInt length) {
    UErrorCode err = U_ZERO_ERROR;
    UConverter *conv = ucnv_open("utf8", &err);
    CHECK_ICU(err);
    ON_BLOCK_EXIT(ucnv_close, conv);

    if (!length)
        length = static_cast<UInt>(strlen(src));

    int32_t utf16_len = ucnv_toUChars(conv, 0, 0, src, length, &err);
    if (err == U_BUFFER_OVERFLOW_ERROR) {
        err = U_ZERO_ERROR;
        m_utf16.reset(new UChar[utf16_len + 1]);
        ucnv_toUChars(conv, m_utf16.get(), utf16_len, src, length, &err);
        CHECK_ICU(err);
        m_utf16[utf16_len] = 0;
    }
}
void IndexRebuilder::run() {
    // Disable record access timer warnings
    ON_BLOCK_EXIT(resetMemoryTracking, Record::MemoryTrackingEnabled);
    Record::MemoryTrackingEnabled = false;

    Client::GodScope gs;
    Lock::GlobalWrite lk;

    bool firstTime = true;
    std::vector<std::string> dbNames;
    getDatabaseNames(dbNames);

    for (std::vector<std::string>::const_iterator it = dbNames.begin(); it < dbNames.end(); it++) {
        checkDB(*it, &firstTime);
    }

    cc().shutdown();
}
int runDbTests(int argc, char** argv) {
    frameworkGlobalParams.perfHist = 1;
    frameworkGlobalParams.seed = time(0);
    frameworkGlobalParams.runsPerTest = 1;

    Client::initThread("testsuite");

    srand((unsigned)frameworkGlobalParams.seed);
    printBuildInfo();

    getGlobalServiceContext()->initializeGlobalStorageEngine();

    {
        auto txn = cc().makeOperationContext();

        // Initialize the sharding state so we can run sharding tests in isolation
        auto connectHook = stdx::make_unique<CustomConnectHook>(txn.get());
        ConnectionString::setConnectionHook(connectHook.get());
        ON_BLOCK_EXIT([] { ConnectionString::setConnectionHook(nullptr); });
        ShardingState::get(txn.get())->initialize(txn.get(), "$dummy:10000");
    }

    // Note: ShardingState::initialize also initializes the distLockMgr.
    {
        auto txn = cc().makeOperationContext();
        auto distLockMgr = dynamic_cast<LegacyDistLockManager*>(
            grid.forwardingCatalogManager()->getDistLockManager());
        if (distLockMgr) {
            distLockMgr->enablePinger(false);
        }
    }

    int ret = unittest::Suite::run(frameworkGlobalParams.suites,
                                   frameworkGlobalParams.filter,
                                   frameworkGlobalParams.runsPerTest);

    // So everything shuts down cleanly
    exitCleanly((ExitCode)ret);
    return ret;
}
TEST_F(QueryStageCachedPlan, EntriesAreNotDeactivatedWhenInactiveEntriesDisabled) {
    // Set the global flag for disabling active entries.
    internalQueryCacheDisableInactiveEntries.store(true);
    ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });

    AutoGetCollectionForReadCommand ctx(&_opCtx, nss);
    Collection* collection = ctx.getCollection();
    ASSERT(collection);

    // Never run - just used as a key for the cache's get() functions, since all of the other
    // CanonicalQueries created in this test will have this shape.
    const auto shapeCq =
        canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 123}, b: {$gte: 123}}"));

    // Query can be answered by either index on "a" or index on "b".
    const auto noResultsCq =
        canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 11}, b: {$gte: 11}}"));

    // We shouldn't have anything in the plan cache for this shape yet.
    PlanCache* cache = collection->infoCache()->getPlanCache();
    ASSERT(cache);
    ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kNotPresent);

    // Run the CachedPlanStage with a long-running child plan. Replanning should be
    // triggered and an _active_ entry will be added (since the disableInactiveEntries flag is on).
    forceReplanning(collection, noResultsCq.get());

    // Check for an active cache entry.
    ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);

    // Run the plan again. The entry should still be active.
    forceReplanning(collection, noResultsCq.get());
    ASSERT_EQ(cache->get(*noResultsCq.get()).state, PlanCache::CacheEntryState::kPresentActive);

    // Run another query which takes long enough to evict the active cache entry. After replanning
    // is triggered, be sure that the cache entry is still active.
    auto highWorksCq =
        canonicalQueryFromFilterObj(opCtx(), nss, fromjson("{a: {$gte: 0}, b: {$gte:0}}"));
    forceReplanning(collection, highWorksCq.get());
    ASSERT_EQ(cache->get(*shapeCq).state, PlanCache::CacheEntryState::kPresentActive);
}