void Database::clearTmpCollections(OperationContext* txn) {
    txn->lockState()->assertWriteLocked( _name );

    list<string> collections;
    _dbEntry->getCollectionNamespaces( &collections );

    for ( list<string>::iterator i = collections.begin(); i != collections.end(); ++i ) {
        string ns = *i;
        invariant( NamespaceString::normal( ns ) );

        CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry( txn, ns );

        CollectionOptions options = coll->getCollectionOptions( txn );
        if ( !options.temp )
            continue;

        WriteUnitOfWork wunit(txn);
        Status status = dropCollection( txn, ns );
        if ( !status.isOK() ) {
            warning() << "could not drop temp collection '" << ns << "': " << status;
            continue;
        }

        string cmdNs = _name + ".$cmd";
        repl::logOp( txn,
                     "c",
                     cmdNs.c_str(),
                     BSON( "drop" << nsToCollectionSubstring( ns ) ) );
        wunit.commit();
    }
}
void Database::clearTmpCollections(OperationContext* txn) {
    invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));

    list<string> collections;
    _dbEntry->getCollectionNamespaces(&collections);

    for (list<string>::iterator i = collections.begin(); i != collections.end(); ++i) {
        string ns = *i;
        invariant(NamespaceString::normal(ns));

        CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry(ns);

        CollectionOptions options = coll->getCollectionOptions(txn);
        if (!options.temp)
            continue;

        try {
            WriteUnitOfWork wunit(txn);
            Status status = dropCollection(txn, ns);

            if (!status.isOK()) {
                warning() << "could not drop temp collection '" << ns << "': " << redact(status);
                continue;
            }

            wunit.commit();
        } catch (const WriteConflictException& exp) {
            warning() << "could not drop temp collection '" << ns << "' due to "
                         "WriteConflictException";
            txn->recoveryUnit()->abandonSnapshot();
        }
    }
}
TEST_F(KVCollectionCatalogEntryTest, CanSetMultipleFieldsAndComponentsAsMultikey) {
    std::string indexName = createIndex(BSON("a.b.c" << 1 << "a.b.d" << 1));
    CollectionCatalogEntry* collEntry = getCollectionCatalogEntry();

    auto opCtx = newOperationContext();
    ASSERT(collEntry->setIndexIsMultikey(opCtx.get(), indexName, {{0U, 1U}, {0U, 1U}}));

    {
        MultikeyPaths multikeyPaths;
        ASSERT(collEntry->isIndexMultikey(opCtx.get(), indexName, &multikeyPaths));
        assertMultikeyPathsAreEqual(multikeyPaths, {{0U, 1U}, {0U, 1U}});
    }
}
TEST_F(KVStorageEngineTest, RecreateIndexes) {
    repl::setGlobalReplicationCoordinator(
        new repl::ReplicationCoordinatorMock(getGlobalServiceContext(), repl::ReplSettings()));

    auto opCtx = cc().makeOperationContext();

    // Create two indexes for `db.coll1` in the catalog named `foo` and `bar`. Verify the indexes
    // appear as idents in the KVEngine.
    ASSERT_OK(createCollection(opCtx.get(), NamespaceString("db.coll1")).getStatus());
    ASSERT_OK(createIndex(opCtx.get(), NamespaceString("db.coll1"), "foo"));
    ASSERT_OK(createIndex(opCtx.get(), NamespaceString("db.coll1"), "bar"));
    auto kvIdents = getAllKVEngineIdents(opCtx.get());
    ASSERT_EQUALS(2, std::count_if(kvIdents.begin(), kvIdents.end(), [](const std::string& str) {
                      return str.find("index-") == 0;
                  }));

    // Use the `getIndexNameObjs` to find the `foo` index in the IndexCatalog.
    DatabaseCatalogEntry* dbce = _storageEngine->getDatabaseCatalogEntry(opCtx.get(), "db");
    CollectionCatalogEntry* cce = dbce->getCollectionCatalogEntry("db.coll1");
    auto swIndexNameObjs = getIndexNameObjs(
        opCtx.get(), dbce, cce, [](const std::string& indexName) { return indexName == "foo"; });
    ASSERT_OK(swIndexNameObjs.getStatus());
    auto& indexNameObjs = swIndexNameObjs.getValue();
    // There's one index that matched the name `foo`.
    ASSERT_EQUALS(static_cast<const unsigned long>(1), indexNameObjs.first.size());
    // Assert the parallel vectors have matching sizes.
    ASSERT_EQUALS(static_cast<const unsigned long>(1), indexNameObjs.second.size());
    // The index that matched should be named `foo`.
    ASSERT_EQUALS("foo", indexNameObjs.first[0]);
    ASSERT_EQUALS("db.coll1"_sd, indexNameObjs.second[0].getStringField("ns"));
    ASSERT_EQUALS("foo"_sd, indexNameObjs.second[0].getStringField("name"));
    ASSERT_EQUALS(2, indexNameObjs.second[0].getIntField("v"));
    ASSERT_EQUALS(1, indexNameObjs.second[0].getObjectField("key").getIntField("foo"));

    // Drop the `foo` index table. Count one remaining index ident according to the KVEngine.
    ASSERT_OK(dropIndexTable(opCtx.get(), NamespaceString("db.coll1"), "foo"));
    kvIdents = getAllKVEngineIdents(opCtx.get());
    ASSERT_EQUALS(1, std::count_if(kvIdents.begin(), kvIdents.end(), [](const std::string& str) {
                      return str.find("index-") == 0;
                  }));

    AutoGetCollection coll(opCtx.get(), NamespaceString("db.coll1"), LockMode::MODE_X);
    // Find the `foo` index in the catalog. Rebuild it. Count two indexes in the KVEngine.
    ASSERT_OK(rebuildIndexesOnCollection(opCtx.get(), dbce, cce, indexNameObjs));
    ASSERT_TRUE(cce->isIndexReady(opCtx.get(), "foo"));
    kvIdents = getAllKVEngineIdents(opCtx.get());
    ASSERT_EQUALS(2, std::count_if(kvIdents.begin(), kvIdents.end(), [](const std::string& str) {
                      return str.find("index-") == 0;
                  }));
}
bool run(OperationContext* opCtx,
         const string& dbname,
         const BSONObj& jsobj,
         BSONObjBuilder& result) {
    const NamespaceString nss("local", "oplog.rs");
    Lock::GlobalWrite global(opCtx);
    Database* database = dbHolder().get(opCtx, nss.db());
    if (!database) {
        return CommandHelpers::appendCommandStatus(
            result, Status(ErrorCodes::NamespaceNotFound, "database local does not exist"));
    }

    Collection* coll = database->getCollection(opCtx, nss);
    if (!coll) {
        return CommandHelpers::appendCommandStatus(
            result, Status(ErrorCodes::NamespaceNotFound, "oplog does not exist"));
    }

    if (!coll->isCapped()) {
        return CommandHelpers::appendCommandStatus(
            result, Status(ErrorCodes::IllegalOperation, "oplog isn't capped"));
    }

    if (!jsobj["size"].isNumber()) {
        return CommandHelpers::appendCommandStatus(
            result,
            Status(ErrorCodes::InvalidOptions, "invalid size field, size should be a number"));
    }

    long long sizeMb = jsobj["size"].numberLong();
    long long size = sizeMb * 1024 * 1024;
    if (sizeMb < 990L) {
        return CommandHelpers::appendCommandStatus(
            result, Status(ErrorCodes::InvalidOptions, "oplog size should be 990MB at least"));
    }

    WriteUnitOfWork wunit(opCtx);
    Status status = coll->getRecordStore()->updateCappedSize(opCtx, size);
    if (!status.isOK()) {
        return CommandHelpers::appendCommandStatus(result, status);
    }
    CollectionCatalogEntry* entry = coll->getCatalogEntry();
    entry->updateCappedSize(opCtx, size);
    wunit.commit();

    LOG(0) << "replSetResizeOplog success, currentSize:" << size;
    return CommandHelpers::appendCommandStatus(result, Status::OK());
}
TEST_F(KVCollectionCatalogEntryTest, NoOpWhenSpecifiedPathComponentsAlreadySetAsMultikey) {
    std::string indexName = createIndex(BSON("a" << 1));
    CollectionCatalogEntry* collEntry = getCollectionCatalogEntry();

    auto opCtx = newOperationContext();
    ASSERT(collEntry->setIndexIsMultikey(opCtx.get(), indexName, {{0U}}));

    {
        MultikeyPaths multikeyPaths;
        ASSERT(collEntry->isIndexMultikey(opCtx.get(), indexName, &multikeyPaths));
        assertMultikeyPathsAreEqual(multikeyPaths, {{0U}});
    }

    ASSERT(!collEntry->setIndexIsMultikey(opCtx.get(), indexName, {{0U}}));

    {
        MultikeyPaths multikeyPaths;
        ASSERT(collEntry->isIndexMultikey(opCtx.get(), indexName, &multikeyPaths));
        assertMultikeyPathsAreEqual(multikeyPaths, {{0U}});
    }
}
TEST_F(KVCollectionCatalogEntryTest, MultikeyPathsAccumulateOnDifferentComponentsOfTheSameField) {
    std::string indexName = createIndex(BSON("a.b" << 1));
    CollectionCatalogEntry* collEntry = getCollectionCatalogEntry();

    auto opCtx = newOperationContext();
    ASSERT(collEntry->setIndexIsMultikey(opCtx.get(), indexName, {{0U}}));

    {
        MultikeyPaths multikeyPaths;
        ASSERT(collEntry->isIndexMultikey(opCtx.get(), indexName, &multikeyPaths));
        assertMultikeyPathsAreEqual(multikeyPaths, {{0U}});
    }

    ASSERT(collEntry->setIndexIsMultikey(opCtx.get(), indexName, {{1U}}));

    {
        MultikeyPaths multikeyPaths;
        ASSERT(collEntry->isIndexMultikey(opCtx.get(), indexName, &multikeyPaths));
        assertMultikeyPathsAreEqual(multikeyPaths, {{0U, 1U}});
    }
}
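// A standalone sketch (not from the MongoDB tree; types and names here are
// illustrative assumptions) of the union semantics the two multikey tests above
// assert: multikey path components accumulate per indexed field, and
// re-reporting an already-recorded component is a no-op.
#include <cassert>
#include <cstddef>
#include <set>
#include <vector>

using MultikeyPaths = std::vector<std::set<std::size_t>>;

// Merge `added` into `existing`; return true iff anything changed, mirroring
// the bool that setIndexIsMultikey returns in the tests above.
bool unionMultikeyPaths(MultikeyPaths& existing, const MultikeyPaths& added) {
    bool changed = false;
    for (std::size_t field = 0; field < added.size() && field < existing.size(); ++field) {
        for (std::size_t component : added[field]) {
            changed |= existing[field].insert(component).second;
        }
    }
    return changed;
}

int main() {
    MultikeyPaths paths = {{0U}};                // index on "a.b": component 0 ("a") is multikey
    assert(unionMultikeyPaths(paths, {{1U}}));   // component 1 ("b") becomes multikey too
    assert((paths == MultikeyPaths{{0U, 1U}}));  // components accumulate, they are not replaced
    assert(!unionMultikeyPaths(paths, {{0U}}));  // already recorded: no-op
    return 0;
}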
/**
 * Create an index with a key of `{<key>: 1}` and a `name` of <key>.
 */
Status createIndex(OperationContext* opCtx, NamespaceString collNs, std::string key) {
    Collection* coll = nullptr;
    BSONObjBuilder builder;
    {
        BSONObjBuilder keyObj;
        builder.append("key", keyObj.append(key, 1).done());
    }
    BSONObj spec = builder.append("name", key).append("ns", collNs.ns()).append("v", 2).done();

    auto descriptor =
        stdx::make_unique<IndexDescriptor>(coll, IndexNames::findPluginName(spec), spec);

    DatabaseCatalogEntry* dbce = _storageEngine->getDatabaseCatalogEntry(opCtx, collNs.db());
    CollectionCatalogEntry* cce = dbce->getCollectionCatalogEntry(collNs.ns());
    auto ret = cce->prepareForIndexBuild(opCtx, descriptor.get());
    if (!ret.isOK()) {
        return ret;
    }

    cce->indexBuildSuccess(opCtx, key);
    return Status::OK();
}
void Database::clearTmpCollections(OperationContext* txn) {
    invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));

    list<string> collections;
    _dbEntry->getCollectionNamespaces( &collections );

    for ( list<string>::iterator i = collections.begin(); i != collections.end(); ++i ) {
        string ns = *i;
        invariant( NamespaceString::normal( ns ) );

        CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry( ns );

        CollectionOptions options = coll->getCollectionOptions( txn );
        if ( !options.temp )
            continue;

        try {
            WriteUnitOfWork wunit(txn);
            Status status = dropCollection( txn, ns );

            if ( !status.isOK() ) {
                warning() << "could not drop temp collection '" << ns << "': " << status;
                continue;
            }

            string cmdNs = _name + ".$cmd";
            repl::logOp( txn,
                         "c",
                         cmdNs.c_str(),
                         BSON( "drop" << nsToCollectionSubstring( ns ) ) );

            wunit.commit();
        } catch (const WriteConflictException& exp) {
            warning() << "could not drop temp collection '" << ns << "' due to "
                         "WriteConflictException";
            txn->recoveryUnit()->commitAndRestart();
        }
    }
}
/**
 * Set a collection option flag for 'UsePowerOf2Sizes' or 'NoPadding'. Appends both the new and
 * old flag setting to the given 'result' builder.
 */
void setCollectionOptionFlag(OperationContext* opCtx,
                             Collection* coll,
                             BSONElement& collOptionElement,
                             BSONObjBuilder* result) {
    const StringData flagName = collOptionElement.fieldNameStringData();

    int flag;

    if (flagName == "usePowerOf2Sizes") {
        flag = CollectionOptions::Flag_UsePowerOf2Sizes;
    } else if (flagName == "noPadding") {
        flag = CollectionOptions::Flag_NoPadding;
    } else {
        flag = 0;
    }

    CollectionCatalogEntry* cce = coll->getCatalogEntry();

    const int oldFlags = cce->getCollectionOptions(opCtx).flags;
    const bool oldSetting = oldFlags & flag;
    const bool newSetting = collOptionElement.trueValue();

    result->appendBool(flagName.toString() + "_old", oldSetting);
    result->appendBool(flagName.toString() + "_new", newSetting);

    const int newFlags = newSetting ? (oldFlags | flag)    // set flag
                                    : (oldFlags & ~flag);  // clear flag

    // NOTE we do this unconditionally to ensure that we note that the user has
    // explicitly set flags, even if they are just setting the default.
    cce->updateFlags(opCtx, newFlags);

    const CollectionOptions newOptions = cce->getCollectionOptions(opCtx);
    invariant(newOptions.flags == newFlags);
    invariant(newOptions.flagsSet);
}
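// A minimal standalone sketch (not MongoDB code; constants and function names
// are illustrative) of the set/clear bitmask pattern setCollectionOptionFlag
// uses above to compute `newFlags` from `oldFlags`.
#include <cassert>

namespace {
constexpr int kFlagUsePowerOf2Sizes = 1 << 0;
constexpr int kFlagNoPadding = 1 << 1;

int applyFlag(int oldFlags, int flag, bool enable) {
    return enable ? (oldFlags | flag)    // set the bit
                  : (oldFlags & ~flag);  // clear the bit
}
}  // namespace

int main() {
    int flags = 0;
    flags = applyFlag(flags, kFlagUsePowerOf2Sizes, true);   // usePowerOf2Sizes: true
    assert(flags == kFlagUsePowerOf2Sizes);
    flags = applyFlag(flags, kFlagNoPadding, true);          // noPadding: true
    assert(flags == (kFlagUsePowerOf2Sizes | kFlagNoPadding));
    flags = applyFlag(flags, kFlagUsePowerOf2Sizes, false);  // clear usePowerOf2Sizes
    assert(flags == kFlagNoPadding);
    return 0;
}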
bool run(OperationContext* opCtx,
         const string& dbname,
         const BSONObj& cmdObj,
         BSONObjBuilder& result) {
    if (MONGO_FAIL_POINT(validateCmdCollectionNotValid)) {
        result.appendBool("valid", false);
        return true;
    }

    const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));

    const bool full = cmdObj["full"].trueValue();
    const bool scanData = cmdObj["scandata"].trueValue();

    ValidateCmdLevel level = kValidateIndex;

    if (full) {
        level = kValidateFull;
    } else if (scanData) {
        level = kValidateRecordStore;
    }

    if (!nss.isNormal() && full) {
        CommandHelpers::appendCommandStatus(
            result,
            {ErrorCodes::CommandFailed, "Can only run full validate on a regular collection"});
        return false;
    }

    if (!serverGlobalParams.quiet.load()) {
        LOG(0) << "CMD: validate " << nss.ns();
    }

    AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
    auto collLk = stdx::make_unique<Lock::CollectionLock>(opCtx->lockState(), nss.ns(), MODE_X);
    Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
    if (!collection) {
        if (ctx.getDb() && ctx.getDb()->getViewCatalog()->lookup(opCtx, nss.ns())) {
            return CommandHelpers::appendCommandStatus(
                result, {ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view"});
        }

        CommandHelpers::appendCommandStatus(result,
                                            {ErrorCodes::NamespaceNotFound, "ns not found"});
        return false;
    }

    // Omit background validation logic until it is fully implemented and vetted.
    const bool background = false;

    /*
    bool isInRecordIdOrder = collection->getRecordStore()->isInRecordIdOrder();
    if (isInRecordIdOrder && !full) {
        background = true;
    }

    if (cmdObj.hasElement("background")) {
        background = cmdObj["background"].trueValue();
    }

    if (!isInRecordIdOrder && background) {
        appendCommandStatus(result,
                            {ErrorCodes::CommandFailed,
                             "This storage engine does not support the background option, use "
                             "background:false"});
        return false;
    }

    if (full && background) {
        appendCommandStatus(result,
                            {ErrorCodes::CommandFailed,
                             "A full validate cannot run in the background, use full:false"});
        return false;
    }
    */

    result.append("ns", nss.ns());

    // Only one validation per collection can be in progress, the rest wait in order.
    {
        stdx::unique_lock<stdx::mutex> lock(_validationMutex);
        try {
            while (_validationsInProgress.find(nss.ns()) != _validationsInProgress.end()) {
                opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
            }
        } catch (AssertionException& e) {
            CommandHelpers::appendCommandStatus(
                result,
                {ErrorCodes::CommandFailed,
                 str::stream() << "Exception during validation: " << e.toString()});
            return false;
        }

        _validationsInProgress.insert(nss.ns());
    }

    ON_BLOCK_EXIT([&] {
        stdx::lock_guard<stdx::mutex> lock(_validationMutex);
        _validationsInProgress.erase(nss.ns());
        _validationNotifier.notify_all();
    });

    ValidateResults results;
    Status status =
        collection->validate(opCtx, level, background, std::move(collLk), &results, &result);
    if (!status.isOK()) {
        return CommandHelpers::appendCommandStatus(result, status);
    }

    CollectionCatalogEntry* catalogEntry = collection->getCatalogEntry();
    CollectionOptions opts = catalogEntry->getCollectionOptions(opCtx);

    // Skip checking UUID on system.indexes and system.namespaces until SERVER-30095 and
    // SERVER-29926 are resolved.
    bool skipUUIDCheck = nss.coll() == "system.indexes" || nss.coll() == "system.namespaces";

    if (!skipUUIDCheck) {
        // All collections must have a UUID.
        if (!opts.uuid) {
            results.errors.push_back(str::stream() << "UUID missing on collection " << nss.ns()
                                                   << " but SchemaVersion=3.6");
            results.valid = false;
        }
    }

    if (!full) {
        results.warnings.push_back(
            "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
    }

    result.appendBool("valid", results.valid);
    result.append("warnings", results.warnings);
    result.append("errors", results.errors);

    if (!results.valid) {
        result.append("advice",
                      "A corrupt namespace has been detected. See "
                      "http://dochub.mongodb.org/core/data-recovery for recovery steps.");
    }

    return true;
}
bool run(OperationContext* opCtx,
         const string& dbname,
         const BSONObj& cmdObj,
         BSONObjBuilder& result) {
    if (MONGO_FAIL_POINT(validateCmdCollectionNotValid)) {
        result.appendBool("valid", false);
        return true;
    }

    const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));

    const bool full = cmdObj["full"].trueValue();
    const bool scanData = cmdObj["scandata"].trueValue();

    ValidateCmdLevel level = kValidateIndex;

    if (full) {
        level = kValidateFull;
    } else if (scanData) {
        level = kValidateRecordStore;
    }

    if (!nss.isNormal() && full) {
        uasserted(ErrorCodes::CommandFailed,
                  "Can only run full validate on a regular collection");
    }

    if (!serverGlobalParams.quiet.load()) {
        LOG(0) << "CMD: validate " << nss.ns();
    }

    AutoGetDb ctx(opCtx, nss.db(), MODE_IX);
    Lock::CollectionLock collLk(opCtx, nss, MODE_X);
    Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : NULL;
    if (!collection) {
        if (ctx.getDb() && ViewCatalog::get(ctx.getDb())->lookup(opCtx, nss.ns())) {
            uasserted(ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view");
        }

        uasserted(ErrorCodes::NamespaceNotFound, "ns not found");
    }

    result.append("ns", nss.ns());

    // Only one validation per collection can be in progress, the rest wait in order.
    {
        stdx::unique_lock<stdx::mutex> lock(_validationMutex);
        try {
            while (_validationsInProgress.find(nss.ns()) != _validationsInProgress.end()) {
                opCtx->waitForConditionOrInterrupt(_validationNotifier, lock);
            }
        } catch (AssertionException& e) {
            CommandHelpers::appendCommandStatusNoThrow(
                result,
                {ErrorCodes::CommandFailed,
                 str::stream() << "Exception during validation: " << e.toString()});
            return false;
        }

        _validationsInProgress.insert(nss.ns());
    }

    ON_BLOCK_EXIT([&] {
        stdx::lock_guard<stdx::mutex> lock(_validationMutex);
        _validationsInProgress.erase(nss.ns());
        _validationNotifier.notify_all();
    });

    // TODO SERVER-30357: Add support for background validation.
    const bool background = false;

    ValidateResults results;
    Status status = collection->validate(opCtx, level, background, &results, &result);
    if (!status.isOK()) {
        return CommandHelpers::appendCommandStatusNoThrow(result, status);
    }

    CollectionCatalogEntry* catalogEntry = collection->getCatalogEntry();
    CollectionOptions opts = catalogEntry->getCollectionOptions(opCtx);

    // All collections must have a UUID.
    if (!opts.uuid) {
        results.errors.push_back(str::stream() << "UUID missing on collection " << nss.ns()
                                               << " but SchemaVersion=3.6");
        results.valid = false;
    }

    if (!full) {
        results.warnings.push_back(
            "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
    }

    result.appendBool("valid", results.valid);
    result.append("warnings", results.warnings);
    result.append("errors", results.errors);
    result.append("extraIndexEntries", results.extraIndexEntries);
    result.append("missingIndexEntries", results.missingIndexEntries);

    if (!results.valid) {
        result.append("advice",
                      "A corrupt namespace has been detected. See "
                      "http://dochub.mongodb.org/core/data-recovery for recovery steps.");
    }

    return true;
}
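// A standalone sketch (illustrative class and names, not MongoDB code) of the
// per-namespace serialization pattern both validate implementations above use:
// a mutex-protected set of in-progress keys plus a condition variable lets at
// most one validation run per collection while later callers wait their turn.
#include <condition_variable>
#include <mutex>
#include <set>
#include <string>

class PerKeyGate {
public:
    // Blocks until no other caller holds `key`, then claims it.
    void acquire(const std::string& key) {
        std::unique_lock<std::mutex> lock(_mutex);
        _cv.wait(lock, [&] { return _inProgress.find(key) == _inProgress.end(); });
        _inProgress.insert(key);
    }

    // Releases `key` and wakes any waiters (mirrors the ON_BLOCK_EXIT cleanup above).
    void release(const std::string& key) {
        {
            std::lock_guard<std::mutex> lock(_mutex);
            _inProgress.erase(key);
        }
        _cv.notify_all();
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    std::set<std::string> _inProgress;
};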