/**
 * Returns a database name that collides with 'name' case-insensitively
 * (while differing case-sensitively), or "" when there is none.
 *
 * If 'duplicates' is non-null it is cleared and then filled with every such
 * colliding name, and the first one (set order) is returned; otherwise the
 * first collision found during the scan is returned immediately.
 */
/*static*/ string Database::duplicateUncasedName(const string& name, set<string>* duplicates) {
    // Always hand the caller a fresh result set.
    if (duplicates) {
        duplicates->clear();
    }

    // Candidate names come from two places: databases on disk and databases
    // currently open in memory.
    vector<string> candidates;
    StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
    storageEngine->listDatabases(&candidates);

    set<string> openNames;
    dbHolder().getAllShortNames(openNames);
    candidates.insert(candidates.end(), openNames.begin(), openNames.end());

    for (vector<string>::const_iterator it = candidates.begin(); it != candidates.end(); ++it) {
        const string& candidate = *it;

        // Skip names that do not even match case-insensitively.
        if (strcasecmp(candidate.c_str(), name.c_str()) != 0)
            continue;
        // Skip the exact (case-sensitive) same name: it is not a duplicate.
        if (strcmp(candidate.c_str(), name.c_str()) == 0)
            continue;

        if (!duplicates) {
            // Caller only wants one example; stop at the first collision.
            return candidate;
        }
        duplicates->insert(candidate);
    }

    if (duplicates) {
        return duplicates->empty() ? "" : *duplicates->begin();
    }
    return "";
}
// Scans every database for index builds that were interrupted by the last
// shutdown and retries them (via checkNS, defined elsewhere). Runs at startup
// on a locally-created operation context; any DBException during the scan is
// fatal (fassert), after advising the user about --noIndexBuildRetry.
void restartInProgressIndexesFromLastShutdown() {
    OperationContextImpl txn;

    // Startup code runs with internal privileges so all namespaces are readable.
    cc().getAuthorizationSession()->grantInternalAuthorization();

    std::vector<std::string> dbNames;

    StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    try {
        // Accumulate every collection namespace across all databases, then hand
        // the full list to checkNS() — presumably the routine that restarts the
        // interrupted builds (defined elsewhere; confirm against its definition).
        std::list<std::string> collNames;
        for (std::vector<std::string>::const_iterator dbName = dbNames.begin();
             dbName < dbNames.end();
             ++dbName) {
            // ReadContext opens the database (read lock) for the catalog lookup.
            Client::ReadContext ctx(&txn, *dbName);
            Database* db = ctx.ctx().db();
            db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
        }
        checkNS(&txn, collNames);
    } catch (const DBException& e) {
        error() << "Index rebuilding did not complete: " << e.toString();
        log() << "note: restart the server with --noIndexBuildRetry to skip index rebuilds";
        // Failure here is unrecoverable at startup: abort without stack trace.
        fassertFailedNoTrace(18643);
    }
    LOG(1) << "checking complete" << endl;
}
/**
 * Drops every database except "local" (which holds the oplog and other
 * node-private data). Used during replica-set initial sync / resync.
 *
 * Holds the global exclusive lock for the whole operation. Each drop runs
 * inside a write-conflict retry loop; a database that disappears between
 * listDatabases() and the drop is logged and skipped.
 *
 * Fix: the function's closing brace was missing in the original text
 * (the body ended after the for-loop), leaving the braces unbalanced.
 */
void dropAllDatabasesExceptLocal(OperationContext* txn) {
    ScopedTransaction transaction(txn, MODE_X);
    Lock::GlobalWrite lk(txn->lockState());

    vector<string> n;
    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&n);

    if (n.size() == 0)
        return;
    log() << "dropAllDatabasesExceptLocal " << n.size();

    // Committed snapshots reference data we are about to delete.
    repl::getGlobalReplicationCoordinator()->dropAllSnapshots();
    for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
        if (*i != "local") {
            MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                Database* db = dbHolder().get(txn, *i);
                // This is needed since dropDatabase can't be rolled back.
                // This can safely be replaced by "invariant(db); dropDatabase(txn, db);"
                // once that is fixed.
                if (db == nullptr) {
                    log() << "database disappeared after listDatabases but before drop: " << *i;
                } else {
                    Database::dropDatabase(txn, db);
                }
            }
            MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropAllDatabasesExceptLocal", *i);
        }
    }
}
/**
 * Scans every database for index builds interrupted by the last shutdown and
 * hands the collected collection namespaces to checkNS() (defined elsewhere)
 * for verification/restart. Any DBException during the scan is fatal.
 *
 * Fixes: replaced the unidiomatic `dbName < dbNames.end()` iterator loop with
 * a range-for, and guarded against AutoGetDb returning null (the database may
 * be dropped between listDatabases() and lock acquisition), which previously
 * would have dereferenced a null pointer.
 */
void restartInProgressIndexesFromLastShutdown(OperationContext* txn) {
    // Startup runs as the internal user so every namespace can be inspected.
    AuthorizationSession::get(txn->getClient())->grantInternalAuthorization();

    std::vector<std::string> dbNames;

    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    try {
        std::list<std::string> collNames;
        for (const std::string& dbName : dbNames) {
            ScopedTransaction scopedXact(txn, MODE_IS);
            AutoGetDb autoDb(txn, dbName, MODE_S);

            Database* db = autoDb.getDb();
            // The database may have been dropped since listDatabases(); skip it
            // instead of dereferencing a null pointer.
            if (!db) {
                continue;
            }
            db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collNames);
        }
        checkNS(txn, collNames);
    } catch (const DBException& e) {
        error() << "Index verification did not complete: " << redact(e);
        // Unrecoverable at startup: abort without a stack trace.
        fassertFailedNoTrace(18643);
    }
    LOG(1) << "checking complete";
}
// On a clean startup (no databases other than "local" exist on disk), records
// featureCompatibilityVersion 3.4 by inserting the version document into
// admin.system.version via DBDirectClient and caching it in the server
// parameter. No-op on shard servers and on any non-clean startup.
void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn) {
    std::vector<std::string> dbNames;
    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    // Any database other than "local" means user data already exists — this is
    // not a clean startup, so leave the FCV alone.
    for (auto&& dbName : dbNames) {
        if (dbName != "local") {
            return;
        }
    }

    if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
        // Insert featureCompatibilityDocument into admin.system.version.
        // Do not use writeConcern majority, because we may be holding locks.
        NamespaceString nss(FeatureCompatibilityVersion::kCollection);
        DBDirectClient client(txn);
        BSONObj result;
        client.runCommand(
            nss.db().toString(),
            BSON("insert" << nss.coll() << "documents"
                          << BSON_ARRAY(BSON("_id" << FeatureCompatibilityVersion::kParameterName
                                                   << FeatureCompatibilityVersion::kVersionField
                                                   << FeatureCompatibilityVersion::kVersion34))),
            result);
        auto status = getStatusFromCommandResult(result);
        // A shutdown racing with startup is tolerated; anything else is fatal
        // via uassert.
        if (!status.isOK() && status != ErrorCodes::InterruptedAtShutdown) {
            uassertStatusOK(status);
        }

        // Update server parameter.
        serverGlobalParams.featureCompatibilityVersion.store(
            ServerGlobalParams::FeatureCompatibilityVersion_34);
    }
}
// Overload of setIfCleanStartup that writes the FCV document through the
// replication StorageInterface (creating admin.system.version directly)
// instead of DBDirectClient. Shard servers are excluded up front; the clean-
// startup check (only "local" exists) otherwise matches the other overload.
void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn,
                                                    repl::StorageInterface* storageInterface) {
    if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
        std::vector<std::string> dbNames;
        StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
        storageEngine->listDatabases(&dbNames);

        // Any database other than "local" means this is not a clean startup.
        for (auto&& dbName : dbNames) {
            if (dbName != "local") {
                return;
            }
        }

        // Insert featureCompatibilityDocument into admin.system.version.
        // This write is deliberately not replicated.
        txn->setReplicatedWrites(false);
        NamespaceString nss(FeatureCompatibilityVersion::kCollection);
        CollectionOptions options;
        uassertStatusOK(storageInterface->createCollection(txn, nss, options));
        uassertStatusOK(storageInterface->insertDocument(
            txn,
            nss,
            BSON("_id" << FeatureCompatibilityVersion::kParameterName
                       << FeatureCompatibilityVersion::kVersionField
                       << FeatureCompatibilityVersion::kVersion34)));

        // Update server parameter.
        serverGlobalParams.featureCompatibilityVersion.store(
            ServerGlobalParams::FeatureCompatibilityVersion_34);
    }
}
// Blocks until every in-progress background operation (e.g. index build) has
// completed, database by database, so rollback can proceed safely.
// Returns ShutdownInProgress if rollback shutdown is detected before or
// between waits; OK otherwise.
Status RollbackImpl::_awaitBgIndexCompletion(OperationContext* opCtx) {
    invariant(opCtx);
    if (_isInShutdown()) {
        return Status(ErrorCodes::ShutdownInProgress, "rollback shutting down");
    }

    // Get a list of all databases.
    StorageEngine* storageEngine = opCtx->getServiceContext()->getGlobalStorageEngine();
    std::vector<std::string> dbs;
    {
        // Listing databases only needs a global intent-shared lock; release it
        // before the potentially long waits below.
        Lock::GlobalLock lk(opCtx, MODE_IS, Date_t::max());
        storageEngine->listDatabases(&dbs);
    }

    // Wait for all background operations to complete by waiting on each database.
    // Note: dbNames holds StringData views into 'dbs', which stays alive here.
    std::vector<StringData> dbNames(dbs.begin(), dbs.end());
    log() << "Waiting for all background operations to complete before starting rollback";
    for (auto db : dbNames) {
        LOG(1) << "Waiting for " << BackgroundOperation::numInProgForDb(db)
               << " background operations to complete on database '" << db << "'";
        BackgroundOperation::awaitNoBgOpInProgForDb(db);
        // Check for shutdown again.
        if (_isInShutdown()) {
            return Status(ErrorCodes::ShutdownInProgress, "rollback shutting down");
        }
    }

    log() << "Finished waiting for background operations to complete before rollback";
    return Status::OK();
}
bool FeatureCompatibilityVersion::isCleanStartUp() { std::vector<std::string> dbNames; StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); storageEngine->listDatabases(&dbNames); for (auto&& dbName : dbNames) { if (dbName != "local") { return false; } } return true; }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result) { vector<string> dbNames; StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine(); { ScopedTransaction transaction(txn, MODE_IS); Lock::GlobalLock lk(txn->lockState(), MODE_IS, UINT_MAX); storageEngine->listDatabases(&dbNames); } vector<BSONObj> dbInfos; set<string> seen; intmax_t totalSize = 0; for (vector<string>::iterator i = dbNames.begin(); i != dbNames.end(); ++i) { const string& dbname = *i; BSONObjBuilder b; b.append("name", dbname); { ScopedTransaction transaction(txn, MODE_IS); Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS); Database* db = dbHolder().get(txn, dbname); if (!db) continue; const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry(); invariant(entry); int64_t size = entry->sizeOnDisk(txn); b.append("sizeOnDisk", static_cast<double>(size)); totalSize += size; b.appendBool("empty", entry->isEmpty()); } dbInfos.push_back(b.obj()); seen.insert(i->c_str()); } result.append("databases", dbInfos); result.append("totalSize", double(totalSize)); return true; }
/**
 * Removes temporary collections from every database except "local".
 * Caller must hold the global lock (see invariant note below), which is why
 * every database returned by listDatabases() is expected to still be open.
 */
void ReplicationCoordinatorExternalStateImpl::dropAllTempCollections(OperationContext* txn) {
    std::vector<std::string> dbNames;
    StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
    engine->listDatabases(&dbNames);

    for (const std::string& name : dbNames) {
        // The local db is special because it isn't replicated. It is cleared at startup even on
        // replica set members.
        if (name == "local")
            continue;

        LOG(2) << "Removing temporary collections from " << name;
        Database* db = dbHolder().get(txn, name);
        // Since we must be holding the global lock during this function, if listDatabases
        // returned this dbname, we should be able to get a reference to it - it can't have
        // been dropped.
        invariant(db);
        db->clearTmpCollections(txn);
    }
}
/**
 * Drops every database on this node except "local".
 * Takes the global write lock for the duration; every listed database is
 * expected to still be open (invariant) since nothing can drop it under the
 * exclusive lock.
 */
void dropAllDatabasesExceptLocal(OperationContext* txn) {
    Lock::GlobalWrite lk(txn->lockState());

    vector<string> dbNames;
    StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);
    if (dbNames.empty())
        return;

    log() << "dropAllDatabasesExceptLocal " << dbNames.size() << endl;
    for (vector<string>::iterator it = dbNames.begin(); it != dbNames.end(); ++it) {
        // "local" holds node-private data (e.g. the oplog); never drop it.
        if (*it == "local")
            continue;
        Database* db = dbHolder().get(txn, *it);
        invariant(db);
        dropDatabase(txn, db);
    }
}
// Startup pass over every database: optionally repairs them (--repair), then
// opens each one to verify on-disk file compatibility, cache the
// featureCompatibilityVersion from admin.system.version, warn about
// invalid/deprecated indexes, and clear temp collections where appropriate.
// Exits the process (quickExit/fassert) on unrecoverable incompatibilities.
void repairDatabasesAndCheckVersion(OperationContext* txn) {
    LOG(1) << "enter repairDatabases (to check pdfile version #)";

    // Exclusive global lock: nothing else may touch the databases during
    // startup recovery.
    ScopedTransaction transaction(txn, MODE_X);
    Lock::GlobalWrite lk(txn->lockState());

    vector<string> dbNames;

    StorageEngine* storageEngine = txn->getServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    // Repair all databases first, so that we do not try to open them if they are in bad shape
    if (storageGlobalParams.repair) {
        invariant(!storageGlobalParams.readOnly);
        for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
            const string dbName = *i;
            LOG(1) << " Repairing database: " << dbName;

            fassert(18506, repairDatabase(txn, storageEngine, dbName));
        }
    }

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    // On replica set members we only clear temp collections on DBs other than "local" during
    // promotion to primary. On pure slaves, they are only cleared when the oplog tells them
    // to. The local DB is special because it is not replicated. See SERVER-10927 for more
    // details.
    const bool shouldClearNonLocalTmpCollections =
        !(checkIfReplMissingFromCommandLine(txn) || replSettings.usingReplSets() ||
          replSettings.isSlave());

    const bool shouldDoCleanupForSERVER23299 = isSubjectToSERVER23299(txn);

    for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
        const string dbName = *i;
        LOG(1) << " Recovering database: " << dbName;

        Database* db = dbHolder().openDb(txn, dbName);
        invariant(db);

        // First thing after opening the database is to check for file compatibility,
        // otherwise we might crash if this is a deprecated format.
        auto status = db->getDatabaseCatalogEntry()->currentFilesCompatible(txn);
        if (!status.isOK()) {
            if (status.code() == ErrorCodes::CanRepairToDowngrade) {
                // Convert CanRepairToDowngrade statuses to MustUpgrade statuses to avoid logging a
                // potentially confusing and inaccurate message.
                //
                // TODO SERVER-24097: Log a message informing the user that they can start the
                // current version of mongod with --repair and then proceed with normal startup.
                status = {ErrorCodes::MustUpgrade, status.reason()};
            }
            severe() << "Unable to start mongod due to an incompatibility with the data files and"
                        " this version of mongod: "
                     << redact(status);
            severe() << "Please consult our documentation when trying to downgrade to a previous"
                        " major release";
            quickExit(EXIT_NEED_UPGRADE);
            return;
        }

        // Check if admin.system.version contains an invalid featureCompatibilityVersion.
        // If a valid featureCompatibilityVersion is present, cache it as a server parameter.
        if (dbName == "admin") {
            if (Collection* versionColl =
                    db->getCollection(FeatureCompatibilityVersion::kCollection)) {
                BSONObj featureCompatibilityVersion;
                if (Helpers::findOne(txn,
                                     versionColl,
                                     BSON("_id" << FeatureCompatibilityVersion::kParameterName),
                                     featureCompatibilityVersion)) {
                    auto version = FeatureCompatibilityVersion::parse(featureCompatibilityVersion);
                    if (!version.isOK()) {
                        // An unparseable FCV document is fatal: refuse to start.
                        severe() << version.getStatus();
                        fassertFailedNoTrace(40283);
                    }
                    serverGlobalParams.featureCompatibilityVersion.store(version.getValue());
                }
            }
        }

        // Major versions match, check indexes
        const string systemIndexes = db->name() + ".system.indexes";

        Collection* coll = db->getCollection(systemIndexes);
        // Manual-yield scan: see the invariant below — it can never error.
        unique_ptr<PlanExecutor> exec(
            InternalPlanner::collectionScan(txn, systemIndexes, coll, PlanExecutor::YIELD_MANUAL));

        BSONObj index;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
            const BSONObj key = index.getObjectField("key");
            const string plugin = IndexNames::findPluginName(key);

            if (db->getDatabaseCatalogEntry()->isOlderThan24(txn)) {
                if (IndexNames::existedBefore24(plugin)) {
                    continue;
                }

                log() << "Index " << index << " claims to be of type '" << plugin << "', "
                      << "which is either invalid or did not exist before v2.4. "
                      << "See the upgrade section: "
                      << "http://dochub.mongodb.org/core/upgrade-2.4" << startupWarningsLog;
            }

            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                log() << "Problem with index " << index << ": " << redact(keyStatus)
                      << " This index can still be used however it cannot be rebuilt."
                      << " For more info see"
                      << " http://dochub.mongodb.org/core/index-validation" << startupWarningsLog;
            }

            if (index["v"].isNumber() && index["v"].numberInt() == 0) {
                log() << "WARNING: The index: " << index << " was created with the deprecated"
                      << " v:0 format. This format will not be supported in a future release."
                      << startupWarningsLog;
                log() << "\t To fix this, you need to rebuild this index."
                      << " For instructions, see http://dochub.mongodb.org/core/rebuild-v0-indexes"
                      << startupWarningsLog;
            }
        }

        // Non-yielding collection scans from InternalPlanner will never error.
        invariant(PlanExecutor::IS_EOF == state);

        if (replSettings.usingReplSets()) {
            // We only care about the _id index if we are in a replset
            checkForIdIndexes(txn, db);
            // Ensure oplog is capped (mmap does not guarantee order of inserts on noncapped
            // collections)
            if (db->name() == "local") {
                checkForCappedOplog(txn, db);
            }
        }

        if (shouldDoCleanupForSERVER23299) {
            handleSERVER23299ForDb(txn, db);
        }

        if (!storageGlobalParams.readOnly &&
            (shouldClearNonLocalTmpCollections || dbName == "local")) {
            db->clearTmpCollections(txn);
        }
    }

    LOG(1) << "done repairDatabases";
}
// Startup pass over all databases (older variant, creates its own operation
// context): optionally repairs them (--repair), verifies on-disk file
// compatibility, warns about invalid/pre-2.4 indexes, and clears temp
// collections where appropriate. Exits the process on incompatible files.
static void repairDatabasesAndCheckVersion() {
    LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;

    OperationContextImpl txn;
    // Exclusive global lock for the whole startup recovery pass.
    ScopedTransaction transaction(&txn, MODE_X);
    Lock::GlobalWrite lk(txn.lockState());

    vector<string> dbNames;

    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    // Repair all databases first, so that we do not try to open them if they are in bad shape
    if (storageGlobalParams.repair) {
        for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
            const string dbName = *i;
            LOG(1) << " Repairing database: " << dbName << endl;

            fassert(18506, repairDatabase(&txn, storageEngine, dbName));
        }
    }

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    // On replica set members we only clear temp collections on DBs other than "local" during
    // promotion to primary. On pure slaves, they are only cleared when the oplog tells them
    // to. The local DB is special because it is not replicated. See SERVER-10927 for more
    // details.
    const bool shouldClearNonLocalTmpCollections =
        !(checkIfReplMissingFromCommandLine(&txn) || replSettings.usingReplSets() ||
          replSettings.slave == repl::SimpleSlave);

    for (vector<string>::const_iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
        const string dbName = *i;
        LOG(1) << " Recovering database: " << dbName << endl;

        Database* db = dbHolder().openDb(&txn, dbName);
        invariant(db);

        // First thing after opening the database is to check for file compatibility,
        // otherwise we might crash if this is a deprecated format.
        if (!db->getDatabaseCatalogEntry()->currentFilesCompatible(&txn)) {
            log() << "****";
            log() << "cannot do this upgrade without an upgrade in the middle";
            log() << "please do a --repair with 2.6 and then start this version";
            dbexit(EXIT_NEED_UPGRADE);
            return;
        }

        // Major versions match, check indexes
        const string systemIndexes = db->name() + ".system.indexes";

        Collection* coll = db->getCollection(systemIndexes);
        unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(&txn, systemIndexes, coll));

        BSONObj index;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
            const BSONObj key = index.getObjectField("key");
            const string plugin = IndexNames::findPluginName(key);

            if (db->getDatabaseCatalogEntry()->isOlderThan24(&txn)) {
                if (IndexNames::existedBefore24(plugin)) {
                    continue;
                }

                log() << "Index " << index << " claims to be of type '" << plugin << "', "
                      << "which is either invalid or did not exist before v2.4. "
                      << "See the upgrade section: "
                      << "http://dochub.mongodb.org/core/upgrade-2.4" << startupWarningsLog;
            }

            const Status keyStatus = validateKeyPattern(key);
            if (!keyStatus.isOK()) {
                log() << "Problem with index " << index << ": " << keyStatus.reason()
                      << " This index can still be used however it cannot be rebuilt."
                      << " For more info see"
                      << " http://dochub.mongodb.org/core/index-validation" << startupWarningsLog;
            }
        }

        // A scan that stopped for any reason other than end-of-collection is
        // reported but not fatal here.
        if (PlanExecutor::IS_EOF != state) {
            warning() << "Internal error while reading collection " << systemIndexes;
        }

        if (replSettings.usingReplSets()) {
            // We only care about the _id index if we are in a replset
            checkForIdIndexes(&txn, db);
        }

        if (shouldClearNonLocalTmpCollections || dbName == "local") {
            db->clearTmpCollections(&txn);
        }
    }

    LOG(1) << "done repairDatabases" << endl;
}
/**
 * Fills 'dbnames' with the names of all databases known to the global storage
 * engine. Always succeeds.
 */
Status AuthzManagerExternalStateMongod::getAllDatabaseNames(
    OperationContext* txn, std::vector<std::string>* dbnames) {
    // Straight delegation to the storage engine's database listing.
    getGlobalEnvironment()->getGlobalStorageEngine()->listDatabases(dbnames);
    return Status::OK();
}
// listDatabases command (IDL-based): reports databases visible to the caller,
// honoring {nameOnly}, {authorizedDatabases} and an optional {filter} match
// expression. Sizes are gathered per database under MODE_IS locks; totalSize
// is only reported when nameOnly is false.
bool run(OperationContext* opCtx,
         const string& dbname,
         const BSONObj& cmdObj,
         BSONObjBuilder& result) final {
    CommandHelpers::handleMarkKillOnClientDisconnect(opCtx);
    IDLParserErrorContext ctx("listDatabases");
    auto cmd = ListDatabasesCommand::parse(ctx, cmdObj);
    auto* as = AuthorizationSession::get(opCtx->getClient());

    // {nameOnly: bool} - default false.
    const bool nameOnly = cmd.getNameOnly();

    // {authorizedDatabases: bool} - Dynamic default based on permissions.
    const bool authorizedDatabases = ([as](const boost::optional<bool>& authDB) {
        const bool mayListAllDatabases = as->isAuthorizedForActionsOnResource(
            ResourcePattern::forClusterResource(), ActionType::listDatabases);
        if (authDB) {
            // An explicit authorizedDatabases:false is only allowed for callers
            // holding the cluster-wide listDatabases privilege.
            uassert(ErrorCodes::Unauthorized,
                    "Insufficient permissions to list all databases",
                    authDB.get() || mayListAllDatabases);
            return authDB.get();
        }

        // By default, list all databases if we can, otherwise
        // only those we're allowed to find on.
        return !mayListAllDatabases;
    })(cmd.getAuthorizedDatabases());

    // {filter: matchExpression}.
    std::unique_ptr<MatchExpression> filter;
    if (auto filterObj = cmd.getFilter()) {
        // The collator is null because database metadata objects are compared using simple
        // binary comparison.
        const CollatorInterface* collator = nullptr;
        boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator));
        auto matcher =
            uassertStatusOK(MatchExpressionParser::parse(filterObj.get(), std::move(expCtx)));
        filter = std::move(matcher);
    }

    vector<string> dbNames;
    StorageEngine* storageEngine = getGlobalServiceContext()->getStorageEngine();
    {
        // Snapshot the names under the global intent-shared lock; the failpoint
        // hook allows tests to block here.
        Lock::GlobalLock lk(opCtx, MODE_IS);
        CurOpFailpointHelpers::waitWhileFailPointEnabled(
            &hangBeforeListDatabases, opCtx, "hangBeforeListDatabases", []() {});
        dbNames = storageEngine->listDatabases();
    }
    vector<BSONObj> dbInfos;

    // A filter that is a single leaf predicate on "name" can be applied before
    // taking any per-database locks.
    const bool filterNameOnly = filter &&
        filter->getCategory() == MatchExpression::MatchCategory::kLeaf &&
        filter->path() == kNameField;
    intmax_t totalSize = 0;
    for (const auto& dbname : dbNames) {
        if (authorizedDatabases && !as->isAuthorizedForAnyActionOnAnyResourceInDB(dbname)) {
            // We don't have listDatabases on the cluster or find on this database.
            continue;
        }

        BSONObjBuilder b;
        b.append("name", dbname);

        int64_t size = 0;
        if (!nameOnly) {
            // Filtering on name only should not require taking locks on filtered-out names.
            if (filterNameOnly && !filter->matchesBSON(b.asTempObj()))
                continue;

            AutoGetDb autoDb(opCtx, dbname, MODE_IS);
            Database* const db = autoDb.getDb();
            if (!db)
                continue;

            writeConflictRetry(opCtx, "sizeOnDisk", dbname, [&] {
                size = storageEngine->sizeOnDiskForDb(opCtx, dbname);
            });
            b.append("sizeOnDisk", static_cast<double>(size));

            b.appendBool(
                "empty", CollectionCatalog::get(opCtx).getAllCollectionUUIDsFromDb(dbname).empty());
        }
        BSONObj curDbObj = b.obj();

        if (!filter || filter->matchesBSON(curDbObj)) {
            totalSize += size;
            dbInfos.push_back(curDbObj);
        }
    }

    result.append("databases", dbInfos);

    if (!nameOnly) {
        result.append("totalSize", double(totalSize));
    }

    return true;
}
// Ran at startup: for every database, checks _id indexes (replsets), clears
// temp collections where requested, then either repairs the database
// (--repair), aborts on incompatible data files, or scans system.indexes to
// warn about invalid/pre-2.4 index types, closing the database afterwards.
static void repairDatabasesAndCheckVersion(bool shouldClearNonLocalTmpCollections) {
    LOG(1) << "enter repairDatabases (to check pdfile version #)" << endl;

    OperationContextImpl txn;
    Lock::GlobalWrite lk(txn.lockState());

    vector<string> dbNames;

    StorageEngine* storageEngine = getGlobalEnvironment()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    for (vector<string>::iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
        string dbName = *i;
        LOG(1) << "\t" << dbName << endl;

        Client::Context ctx(&txn, dbName);

        if (repl::getGlobalReplicationCoordinator()->getSettings().usingReplSets()) {
            // we only care about the _id index if we are in a replset
            checkForIdIndexes(&txn, ctx.db());
        }

        if (shouldClearNonLocalTmpCollections || dbName == "local")
            ctx.db()->clearTmpCollections(&txn);

        if (storageGlobalParams.repair) {
            fassert(18506, storageEngine->repairDatabase(&txn, dbName));
        } else if (!ctx.db()->getDatabaseCatalogEntry()->currentFilesCompatible(&txn)) {
            // Data files are from an incompatible (too-old) version: stop now.
            log() << "****";
            log() << "cannot do this upgrade without an upgrade in the middle";
            log() << "please do a --repair with 2.6 and then start this version";
            dbexit(EXIT_NEED_UPGRADE);
            invariant(false);
            return;
        } else {
            // major versions match, check indexes
            const string systemIndexes = ctx.db()->name() + ".system.indexes";

            Collection* coll = ctx.db()->getCollection(&txn, systemIndexes);
            auto_ptr<PlanExecutor> exec(
                InternalPlanner::collectionScan(&txn, systemIndexes, coll));

            BSONObj index;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&index, NULL))) {
                const BSONObj key = index.getObjectField("key");
                const string plugin = IndexNames::findPluginName(key);

                if (ctx.db()->getDatabaseCatalogEntry()->isOlderThan24(&txn)) {
                    if (IndexNames::existedBefore24(plugin))
                        continue;

                    log() << "Index " << index << " claims to be of type '" << plugin << "', "
                          << "which is either invalid or did not exist before v2.4. "
                          << "See the upgrade section: "
                          << "http://dochub.mongodb.org/core/upgrade-2.4" << startupWarningsLog;
                }

                const Status keyStatus = validateKeyPattern(key);
                if (!keyStatus.isOK()) {
                    log() << "Problem with index " << index << ": " << keyStatus.reason()
                          << " This index can still be used however it cannot be rebuilt."
                          << " For more info see"
                          << " http://dochub.mongodb.org/core/index-validation"
                          << startupWarningsLog;
                }
            }

            // A scan ending for any reason other than end-of-collection is
            // logged but not fatal here.
            if (PlanExecutor::IS_EOF != state) {
                warning() << "Internal error while reading collection " << systemIndexes;
            }

            dbHolder().close(&txn, dbName);
        }
    }

    LOG(1) << "done repairDatabases" << endl;
}