CollectionOptions MMAPV1DatabaseCatalogEntry::getCollectionOptions( OperationContext* txn,
                                                                    const StringData& ns ) const {
    // The system.namespaces collection itself never carries stored options.
    if ( nsToCollectionSubstring( ns ) == "system.namespaces" ) {
        return CollectionOptions();
    }

    RecordStoreV1Base* namespacesStore = _getNamespaceRecordStore();
    invariant( namespacesStore );

    // Linear scan of the namespace metadata records looking for a matching "name".
    scoped_ptr<RecordIterator> iter( namespacesStore->getIterator(txn) );
    while ( !iter->isEOF() ) {
        const DiskLoc recordLoc = iter->getNext();
        const BSONObj metaEntry = iter->dataFor( recordLoc ).toBson();
        const BSONElement nameElt = metaEntry["name"];
        if ( nameElt.type() != String || nameElt.String() != ns ) {
            continue;
        }

        CollectionOptions options;
        if ( metaEntry["options"].isABSONObj() ) {
            // Stored options were validated on the way in; a parse failure here is fatal.
            Status status = options.parse( metaEntry["options"].Obj() );
            fassert( 18523, status );
        }
        return options;
    }

    // No metadata entry found: report default options.
    return CollectionOptions();
}
TEST( KVEngineTestHarness, SimpleRS1 ) {
    scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
    KVEngine* engine = helper->getEngine();
    ASSERT( engine );

    // Create a record store for namespace "a.b".
    string ns = "a.b";
    scoped_ptr<RecordStore> rs;
    {
        MyOperationContext opCtx( engine );
        ASSERT_OK( engine->createRecordStore( &opCtx, ns, CollectionOptions() ) );
        rs.reset( engine->getRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
        ASSERT( rs );
    }

    // Insert a single record ("abc" plus its NUL terminator) and remember its location.
    DiskLoc insertedLoc;
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        StatusWith<DiskLoc> insertRes = rs->insertRecord( &opCtx, "abc", 4, false );
        ASSERT_OK( insertRes.getStatus() );
        insertedLoc = insertRes.getValue();
        uow.commit();
    }

    // The committed record must be readable from a fresh operation context.
    {
        MyOperationContext opCtx( engine );
        ASSERT_EQUALS( string("abc"), rs->dataFor( &opCtx, insertedLoc ).data() );
    }
}
virtual RecordStore* newNonCappedRecordStore() {
    // std::auto_ptr is deprecated (and removed in C++17); std::unique_ptr has the same
    // ownership semantics and is already used elsewhere in this codebase.
    std::unique_ptr<OperationContext> opCtx(new OperationContextNoop(newRecoveryUnit()));
    // Generate a unique ident per store so repeated calls never collide.
    const std::string ident = mongoutils::str::stream() << "TokuFTRecordStore-" << _seq++;
    Status status = _engine->createRecordStore(opCtx.get(), "ns", ident, CollectionOptions());
    invariant(status.isOK());
    return _engine->getRecordStore(opCtx.get(), "ns", ident, CollectionOptions());
}
TEST(CollectionOptions, ErrorBadSize) {
    // Negative sizes are rejected regardless of the capped flag.
    ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{capped: true, size: -1}")));
    ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{capped: false, size: -1}")));

    // Extreme 64-bit values are rejected as well.
    const long long kMinSize = std::numeric_limits<long long>::min();
    const long long kMaxSize = std::numeric_limits<long long>::max();
    ASSERT_NOT_OK(CollectionOptions().parse(BSON("capped" << true << "size" << kMinSize)));
    ASSERT_NOT_OK(CollectionOptions().parse(BSON("capped" << true << "size" << (1LL << 62))));
    ASSERT_NOT_OK(CollectionOptions().parse(BSON("capped" << true << "size" << kMaxSize)));
}
TEST(CollectionOptions, InvalidStorageEngineField) {
    // "storageEngine" field has to be an object if present.
    ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: 1}")));

    // Every field under "storageEngine" has to be an object.
    ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: {storageEngine1: 1}}")));

    // An empty "storageEngine" object IS accepted (the original comment claimed the
    // opposite, contradicting the ASSERT_OK below).
    ASSERT_OK(CollectionOptions().parse(fromjson("{storageEngine: {}}")));
}
TEST_F(KVCatalogTest, Coll1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;

    // Bootstrap a catalog backed by a fresh record store.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        uow.commit();
    }

    // A new collection must be assigned an ident distinct from its namespace.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(newCollection(
            &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed, catalog.get()));
        ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
        uow.commit();
    }

    string collIdent = catalog->getCollectionIdent("a.b");

    // Reloading the catalog from the same record store must preserve the ident.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        catalog->init(&opCtx);
        uow.commit();
    }
    ASSERT_EQUALS(collIdent, catalog->getCollectionIdent("a.b"));

    // Dropping and recreating the collection must mint a brand-new ident.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        dropCollection(&opCtx, "a.b", catalog.get()).transitional_ignore();
        newCollection(&opCtx,
                      NamespaceString("a.b"),
                      CollectionOptions(),
                      KVPrefix::kNotPrefixed,
                      catalog.get())
            .transitional_ignore();
        uow.commit();
    }
    ASSERT_NOT_EQUALS(collIdent, catalog->getCollectionIdent("a.b"));
}
TEST_F(KVCatalogTest, DirectoryPerAndSplit1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;

    // Catalog configured with both directory-per-db and directory-for-indexes enabled.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), true, true, nullptr));
        uow.commit();
    }

    // Collection idents must land under "<db>/collection/".
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(newCollection(
            &opCtx, NamespaceString("a.b"), CollectionOptions(), KVPrefix::kNotPrefixed, catalog.get()));
        ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    // Index idents must land under "<db>/index/".
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";

        BSONCollectionCatalogEntry::IndexMetaData imd;
        imd.spec = BSON("name"
                        << "foo");
        imd.ready = false;
        imd.head = RecordId();
        imd.multikey = false;
        imd.prefix = KVPrefix::kNotPrefixed;
        imd.isBackgroundSecondaryBuild = false;
        md.indexes.push_back(imd);

        catalog->putMetaData(&opCtx, "a.b", md);
        ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/index/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
        uow.commit();
    }
}
TEST( RocksEngineTest, CreateDirect1 ) {
    // Work in a throwaway on-disk directory.
    std::string dbPath = "/tmp/mongo-rocks-engine-test";
    boost::filesystem::remove_all( dbPath );
    RocksEngine engine( dbPath );

    // Create the collection directly through the engine.
    {
        MyOperationContext opCtx( &engine );
        Status createStatus = engine.createCollection( &opCtx, "test.foo", CollectionOptions() );
        ASSERT_OK( createStatus );
    }

    RocksRecordStore* rs = engine.getEntry( "test.foo" )->recordStore.get();
    string payload = "eliot was here";

    {
        MyOperationContext opCtx( &engine );
        DiskLoc insertedLoc;
        {
            WriteUnitOfWork uow( opCtx.recoveryUnit() );
            // +1 so the trailing NUL is stored alongside the string bytes.
            StatusWith<DiskLoc> insertRes =
                rs->insertRecord( &opCtx, payload.c_str(), payload.size() + 1, -1 );
            ASSERT_OK( insertRes.getStatus() );
            insertedLoc = insertRes.getValue();
        }
        ASSERT_EQUALS( payload, rs->dataFor( insertedLoc ).data() );
    }
}
TEST(KVCatalogTest, DirectoryPerAndSplit1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;

    // Catalog with directory-per-db and split-index directories both on.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), true, true));
        uow.commit();
    }

    // Collection idents must be placed under "<db>/collection/".
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(
            catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
        ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    // Index idents must be placed under "<db>/index/".
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";
        md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
                                                                            << "foo"),
                                                                       false,
                                                                       RecordId(),
                                                                       false,
                                                                       KVPrefix::kNotPrefixed,
                                                                       false));
        catalog->putMetaData(&opCtx, "a.b", md);

        ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/index/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
        uow.commit();
    }
}
/**
 * Create a collection in the catalog and in the KVEngine. Return the storage engine's `ident`.
 */
StatusWith<std::string> createCollection(OperationContext* opCtx, NamespaceString ns) {
    // Creating a collection requires exclusive access to its database.
    AutoGetDb autoDb(opCtx, ns.db(), LockMode::MODE_X);
    DatabaseCatalogEntry* dbEntry = _storageEngine->getDatabaseCatalogEntry(opCtx, ns.db());

    auto createStatus = dbEntry->createCollection(opCtx, ns.ns(), CollectionOptions(), false);
    if (!createStatus.isOK()) {
        return createStatus;
    }

    // On success, surface the ident the engine assigned to this collection.
    return _storageEngine->getCatalog()->getCollectionIdent(ns.ns());
}
TEST( KVCatalogTest, Coll1 ) {
    scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
    KVEngine* engine = helper->getEngine();

    scoped_ptr<RecordStore> rs;
    scoped_ptr<KVCatalog> catalog;

    // Bootstrap a catalog backed by a fresh record store.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", CollectionOptions() ) );
        rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
        catalog.reset( new KVCatalog( rs.get() ) );
        uow.commit();
    }

    // The assigned ident must differ from the namespace itself.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
        ASSERT_NOT_EQUALS( "a.b", catalog->getCollectionIdent( "a.b" ) );
        uow.commit();
    }

    string collIdent = catalog->getCollectionIdent( "a.b" );

    // Re-opening the catalog from the same store must yield the same ident.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        catalog.reset( new KVCatalog( rs.get() ) );
        catalog->init( &opCtx );
        uow.commit();
    }
    ASSERT_EQUALS( collIdent, catalog->getCollectionIdent( "a.b" ) );

    // Drop + recreate must mint a different ident.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        catalog->dropCollection( &opCtx, "a.b" );
        catalog->newCollection( &opCtx, "a.b", CollectionOptions() );
        uow.commit();
    }
    ASSERT_NOT_EQUALS( collIdent, catalog->getCollectionIdent( "a.b" ) );
}
// Second-phase initialization: creates/opens the engine-wide catalog record store and
// rebuilds the per-database in-memory entries for every collection the catalog knows.
// Idempotent via the _initialized flag.
void KVStorageEngine::finishInit() {
    if ( _initialized )
        return;

    OperationContextNoop opCtx( _engine->newRecoveryUnit() );
    WriteUnitOfWork uow( &opCtx );

    // The catalog's backing record store must exist before it can be opened below.
    Status status = _engine->createRecordStore( &opCtx, catalogInfo, catalogInfo, CollectionOptions() );
    fassert( 28520, status );

    _catalogRecordStore.reset( _engine->getRecordStore( &opCtx, catalogInfo, catalogInfo, CollectionOptions() ) );
    _catalog.reset( new KVCatalog( _catalogRecordStore.get(), _supportsDocLocking ) );
    // init() must run before getAllCollections() so the catalog's state is loaded.
    _catalog->init( &opCtx );

    std::vector<std::string> collections;
    _catalog->getAllCollections( &collections );

    for ( size_t i = 0; i < collections.size(); i++ ) {
        std::string coll = collections[i];
        NamespaceString nss( coll );
        string dbName = nss.db().toString();

        // No rollback since this is only for committed dbs.
        KVDatabaseCatalogEntry*& db = _dbs[dbName];
        if ( !db ) {
            db = new KVDatabaseCatalogEntry( dbName, this );
        }
        db->initCollection( &opCtx, coll );
    }

    uow.commit();
    _initialized = true;
}
// Executes an update request against 'db', creating the target collection first when the
// update is an upsert against a collection that does not exist yet. Returns the stats of
// the executed plan. Not used for explain (asserted below).
UpdateResult update(OperationContext* txn,
                    Database* db,
                    const UpdateRequest& request,
                    OpDebug* opDebug) {
    invariant(db);

    // Explain should never use this helper.
    invariant(!request.isExplain());

    const NamespaceString& nsString = request.getNamespaceString();
    Collection* collection = db->getCollection(nsString.ns());

    // The update stage does not create its own collection. As such, if the update is
    // an upsert, create the collection that the update stage inserts into beforehand.
    if (!collection && request.isUpsert()) {
        // We have to have an exclusive lock on the db to be allowed to create the collection.
        // Callers should either get an X or create the collection.
        const Locker* locker = txn->lockState();
        invariant(locker->isW() ||
                  locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()), MODE_X));

        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);

        // Refuse to implicitly create a collection for a user write on a non-primary.
        bool userInitiatedWritesAndNotPrimary =
            txn->writesAreReplicated() &&
            !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db());
        if (userInitiatedWritesAndNotPrimary) {
            uassertStatusOK(Status(ErrorCodes::NotMaster,
                                   str::stream() << "Not primary while creating collection "
                                                 << nsString.ns() << " during upsert"));
        }

        WriteUnitOfWork wuow(txn);
        collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
        invariant(collection);
        wuow.commit();
    }

    // Parse the update, get an executor for it, run the executor, get stats out.
    ParsedUpdate parsedUpdate(txn, &request);
    uassertStatusOK(parsedUpdate.parseRequest());

    PlanExecutor* rawExec;
    uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec));
    boost::scoped_ptr<PlanExecutor> exec(rawExec);

    uassertStatusOK(exec->executePlan());
    return UpdateStage::makeUpdateResult(exec.get(), opDebug);
}
TEST(KVCatalogTest, Idx1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;

    // Bootstrap a catalog backed by a fresh record store.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), false, false));
        uow.commit();
    }

    // Register a collection; its ident must differ from the namespace and be user data.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(
            catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
        ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    // Attach metadata for an index named "foo".
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";

        BSONCollectionCatalogEntry::IndexMetaData imd;
        imd.spec = BSON("name"
                        << "foo");
        imd.ready = false;
        imd.head = RecordId();
        imd.multikey = false;
        imd.prefix = KVPrefix::kNotPrefixed;
        imd.isBackgroundSecondaryBuild = false;
        md.indexes.push_back(imd);

        catalog->putMetaData(&opCtx, "a.b", md);
        uow.commit();
    }

    string idxIdent;
    {
        MyOperationContext opCtx(engine);
        idxIdent = catalog->getIndexIdent(&opCtx, "a.b", "foo");
    }

    // The index ident is stable across lookups and counts as user data.
    {
        MyOperationContext opCtx(engine);
        ASSERT_EQUALS(idxIdent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
    }

    // Removing the index and re-adding it must mint a brand-new ident.
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";
        catalog->putMetaData(&opCtx, "a.b", md);  // remove index

        BSONCollectionCatalogEntry::IndexMetaData imd;
        imd.spec = BSON("name"
                        << "foo");
        imd.ready = false;
        imd.head = RecordId();
        imd.multikey = false;
        imd.prefix = KVPrefix::kNotPrefixed;
        imd.isBackgroundSecondaryBuild = false;
        md.indexes.push_back(imd);

        catalog->putMetaData(&opCtx, "a.b", md);
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        ASSERT_NOT_EQUALS(idxIdent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
    }
}
TEST(CollectionOptions, ErrorBadMax) {
    // A "max" document count that does not fit in 32 bits must be rejected.
    const long long kTooManyDocs = 1LL << 31;
    ASSERT_NOT_OK(CollectionOptions().parse(BSON("capped" << true << "max" << kTooManyDocs)));
}
// Hands back a throwaway record store; the dev-null engine discards all writes, so the
// namespace is irrelevant and default options suffice. 'opCtx' and 'ident' are unused.
std::unique_ptr<RecordStore> DevNullKVEngine::makeTemporaryRecordStore(OperationContext* opCtx,
                                                                       StringData ident) {
    return stdx::make_unique<DevNullRecordStore>("", CollectionOptions());
}
// Constructs the storage engine wrapper: creates/opens the catalog record store,
// rebuilds in-memory database entries for all known collections, then drops any
// user-data idents the engine holds that the catalog no longer references.
KVStorageEngine::KVStorageEngine( KVEngine* engine )
    : _engine( engine )
    , _supportsDocLocking(_engine->supportsDocLocking()) {

    OperationContextNoop opCtx( _engine->newRecoveryUnit() );

    {
        WriteUnitOfWork uow( &opCtx );

        Status status = _engine->createRecordStore( &opCtx, catalogInfo, catalogInfo, CollectionOptions() );
        // BadValue is usually caused by invalid configuration string.
        // We still fassert() but without a stack trace.
        if (status.code() == ErrorCodes::BadValue) {
            fassertFailedNoTrace(28562);
        }
        fassert( 28520, status );

        _catalogRecordStore.reset( _engine->getRecordStore( &opCtx, catalogInfo, catalogInfo, CollectionOptions() ) );
        _catalog.reset( new KVCatalog( _catalogRecordStore.get(), _supportsDocLocking ) );
        // init() must run before getAllCollections() so the catalog state is loaded.
        _catalog->init( &opCtx );

        std::vector<std::string> collections;
        _catalog->getAllCollections( &collections );

        for ( size_t i = 0; i < collections.size(); i++ ) {
            std::string coll = collections[i];
            NamespaceString nss( coll );
            string dbName = nss.db().toString();

            // No rollback since this is only for committed dbs.
            KVDatabaseCatalogEntry*& db = _dbs[dbName];
            if ( !db ) {
                db = new KVDatabaseCatalogEntry( dbName, this );
            }
            db->initCollection( &opCtx, coll );
        }

        uow.commit();
    }

    opCtx.recoveryUnit()->commitAndRestart();

    // now clean up orphaned idents
    {
        // get all idents
        std::set<std::string> allIdents;
        {
            std::vector<std::string> v = _engine->getAllIdents( &opCtx );
            allIdents.insert( v.begin(), v.end() );
            // The catalog's own store is never an orphan.
            allIdents.erase( catalogInfo );
        }

        // remove ones still in use
        {
            vector<string> idents = _catalog->getAllIdents( &opCtx );
            for ( size_t i = 0; i < idents.size(); i++ ) {
                allIdents.erase( idents[i] );
            }
        }

        // Whatever remains is unreferenced; drop user-data idents one WUOW at a time.
        for ( std::set<std::string>::const_iterator it = allIdents.begin(); it != allIdents.end(); ++it ) {
            const std::string& toRemove = *it;
            if ( !_catalog->isUserDataIdent( toRemove ) )
                continue;
            log() << "dropping unused ident: " << toRemove;
            WriteUnitOfWork wuow( &opCtx );
            _engine->dropIdent( &opCtx, toRemove );
            wuow.commit();
        }
    }
}
// createIndexes command entry point: validates the index specs, implicitly creates the
// target collection when missing, then builds the indexes (possibly in the background,
// downgrading the exclusive DB lock to an intent lock during the bulk-insert phase).
virtual bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int options,
                 string& errmsg,
                 BSONObjBuilder& result) {
    const NamespaceString ns(parseNs(dbname, cmdObj));

    Status status = userAllowedWriteNS(ns);
    if (!status.isOK())
        return appendCommandStatus(result, status);

    // "indexes" must be an array of index-spec objects.
    if (cmdObj["indexes"].type() != Array) {
        errmsg = "indexes has to be an array";
        result.append("cmdObj", cmdObj);
        return false;
    }

    std::vector<BSONObj> specs;
    {
        BSONObjIterator i(cmdObj["indexes"].Obj());
        while (i.more()) {
            BSONElement e = i.next();
            if (e.type() != Object) {
                errmsg = "everything in indexes has to be an Object";
                result.append("cmdObj", cmdObj);
                return false;
            }
            specs.push_back(e.Obj());
        }
    }

    if (specs.size() == 0) {
        errmsg = "no indexes to add";
        return false;
    }

    // check specs
    for (size_t i = 0; i < specs.size(); i++) {
        BSONObj spec = specs[i];
        // Fill in a missing "ns" field from the command's namespace.
        if (spec["ns"].eoo()) {
            spec = _addNsToSpec(ns, spec);
            specs[i] = spec;
        }

        if (spec["ns"].type() != String) {
            errmsg = "ns field must be a string";
            result.append("spec", spec);
            return false;
        }

        std::string nsFromUser = spec["ns"].String();
        if (nsFromUser.empty()) {
            errmsg = "ns field cannot be an empty string";
            result.append("spec", spec);
            return false;
        }

        if (ns != nsFromUser) {
            errmsg = str::stream() << "value of ns field '" << nsFromUser
                                   << "' doesn't match namespace " << ns.ns();
            result.append("spec", spec);
            return false;
        }
    }

    // now we know we have to create index(es)
    // Note: createIndexes command does not currently respect shard versioning.
    ScopedTransaction transaction(txn, MODE_IX);
    Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
    if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
        return appendCommandStatus(
            result,
            Status(ErrorCodes::NotMaster,
                   str::stream() << "Not primary while creating indexes in " << ns.ns()));
    }

    Database* db = dbHolder().get(txn, ns.db());
    if (!db) {
        db = dbHolder().openDb(txn, ns.db());
    }

    // Implicitly create the collection when it does not exist yet.
    Collection* collection = db->getCollection(ns.ns());
    if (collection) {
        result.appendBool("createdCollectionAutomatically", false);
    } else {
        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            WriteUnitOfWork wunit(txn);
            collection = db->createCollection(txn, ns.ns(), CollectionOptions());
            invariant(collection);
            wunit.commit();
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
        result.appendBool("createdCollectionAutomatically", true);
    }

    const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
    result.append("numIndexesBefore", numIndexesBefore);

    auto client = txn->getClient();
    ScopeGuard lastOpSetterGuard =
        MakeObjGuard(repl::ReplClientInfo::forClient(client),
                     &repl::ReplClientInfo::setLastOpToSystemLastOpTime,
                     txn);

    MultiIndexBlock indexer(txn, collection);
    indexer.allowBackgroundBuilding();
    indexer.allowInterruption();

    // Drop specs for indexes that already exist; short-circuit if nothing remains.
    const size_t origSpecsSize = specs.size();
    indexer.removeExistingIndexes(&specs);

    if (specs.size() == 0) {
        result.append("numIndexesAfter", numIndexesBefore);
        result.append("note", "all indexes already exist");
        return true;
    }

    if (specs.size() != origSpecsSize) {
        result.append("note", "index already exists");
    }

    for (size_t i = 0; i < specs.size(); i++) {
        const BSONObj& spec = specs[i];
        if (spec["unique"].trueValue()) {
            status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
            if (!status.isOK()) {
                return appendCommandStatus(result, status);
            }
        }
        if (spec["v"].isNumber() && spec["v"].numberInt() == 0) {
            return appendCommandStatus(
                result,
                Status(ErrorCodes::CannotCreateIndex,
                       str::stream() << "illegal index specification: " << spec << ". "
                                     << "The option v:0 cannot be passed explicitly"));
        }
    }

    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        uassertStatusOK(indexer.init(specs));
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());

    // If we're a background index, replace exclusive db lock with an intent lock, so that
    // other readers and writers can proceed during this phase.
    if (indexer.getBuildInBackground()) {
        txn->recoveryUnit()->abandonSnapshot();
        dbLock.relockWithMode(MODE_IX);
        // Primary-ship may have been lost while the lock was released; re-check.
        if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
            return appendCommandStatus(
                result,
                Status(ErrorCodes::NotMaster,
                       str::stream() << "Not primary while creating background indexes in "
                                     << ns.ns()));
        }
    }

    try {
        Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
        uassertStatusOK(indexer.insertAllDocumentsInCollection());
    } catch (const DBException& e) {
        invariant(e.getCode() != ErrorCodes::WriteConflict);
        // Must have exclusive DB lock before we clean up the index build via the
        // destructor of 'indexer'.
        if (indexer.getBuildInBackground()) {
            try {
                // This function cannot throw today, but we will preemptively prepare for
                // that day, to avoid data corruption due to lack of index cleanup.
                txn->recoveryUnit()->abandonSnapshot();
                dbLock.relockWithMode(MODE_X);
                if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
                    return appendCommandStatus(
                        result,
                        Status(ErrorCodes::NotMaster,
                               str::stream()
                                   << "Not primary while creating background indexes in "
                                   << ns.ns() << ": cleaning up index build failure due to "
                                   << e.toString()));
                }
            } catch (...) {
                std::terminate();
            }
        }
        throw;
    }

    // Need to return db lock back to exclusive, to complete the index build.
    if (indexer.getBuildInBackground()) {
        txn->recoveryUnit()->abandonSnapshot();
        dbLock.relockWithMode(MODE_X);
        uassert(ErrorCodes::NotMaster,
                str::stream() << "Not primary while completing index build in " << dbname,
                repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));

        // The db/collection may have been dropped while the lock was relinquished.
        Database* db = dbHolder().get(txn, ns.db());
        uassert(28551, "database dropped during index build", db);
        uassert(28552, "collection dropped during index build", db->getCollection(ns.ns()));
    }

    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        WriteUnitOfWork wunit(txn);

        indexer.commit();

        for (size_t i = 0; i < specs.size(); i++) {
            std::string systemIndexes = ns.getSystemIndexesCollection();
            getGlobalServiceContext()->getOpObserver()->onCreateIndex(
                txn, systemIndexes, specs[i]);
        }

        wunit.commit();
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());

    result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn));

    lastOpSetterGuard.Dismiss();
    return true;
}
// Older createIndexes entry point (takes a fromRepl flag): validates the specs and
// implicitly creates the collection. NOTE(review): this excerpt appears truncated at the
// chunk boundary — the build phase that would normally follow is not visible here.
virtual bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int options,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool fromRepl = false ) {
    // --- parse

    NamespaceString ns( dbname, cmdObj[name].String() );
    Status status = userAllowedWriteNS( ns );
    if ( !status.isOK() )
        return appendCommandStatus( result, status );

    // "indexes" must be an array of index-spec objects.
    if ( cmdObj["indexes"].type() != Array ) {
        errmsg = "indexes has to be an array";
        result.append( "cmdObj", cmdObj );
        return false;
    }

    std::vector<BSONObj> specs;
    {
        BSONObjIterator i( cmdObj["indexes"].Obj() );
        while ( i.more() ) {
            BSONElement e = i.next();
            if ( e.type() != Object ) {
                errmsg = "everything in indexes has to be an Object";
                result.append( "cmdObj", cmdObj );
                return false;
            }
            specs.push_back( e.Obj() );
        }
    }

    if ( specs.size() == 0 ) {
        errmsg = "no indexes to add";
        return false;
    }

    // check specs
    for ( size_t i = 0; i < specs.size(); i++ ) {
        BSONObj spec = specs[i];
        // Fill in a missing "ns" field from the command's namespace.
        if ( spec["ns"].eoo() ) {
            spec = _addNsToSpec( ns, spec );
            specs[i] = spec;
        }

        if ( spec["ns"].type() != String ) {
            errmsg = "spec has no ns";
            result.append( "spec", spec );
            return false;
        }
        if ( ns != spec["ns"].String() ) {
            errmsg = "namespace mismatch";
            result.append( "spec", spec );
            return false;
        }
    }

    // now we know we have to create index(es)
    // Note: createIndexes command does not currently respect shard versioning.
    ScopedTransaction transaction(txn, MODE_IX);
    Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
    // Replication applies are exempt from the primary check.
    if (!fromRepl &&
        !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
        return appendCommandStatus(result,
                                   Status(ErrorCodes::NotMaster,
                                          str::stream()
                                              << "Not primary while creating indexes in "
                                              << ns.ns()));
    }

    Database* db = dbHolder().get(txn, ns.db());
    if (!db) {
        db = dbHolder().openDb(txn, ns.db());
    }

    Collection* collection = db->getCollection( ns.ns() );
    result.appendBool( "createdCollectionAutomatically", collection == NULL );
    if ( !collection ) {
        // Implicitly create the collection; log an oplog entry unless this came from repl.
        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            WriteUnitOfWork wunit(txn);
            collection = db->createCollection( txn, ns.ns() );
            invariant( collection );
            if (!fromRepl) {
                getGlobalEnvironment()->getOpObserver()->onCreateCollection(
                    txn, ns, CollectionOptions());
            }
            wunit.commit();
        } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
    }
/**
 * Create a collection table in the KVEngine not reflected in the KVCatalog.
 */
Status createCollTable(OperationContext* opCtx, NamespaceString collName) {
    // The ident is derived from the namespace but is never registered with the catalog.
    const std::string ident = "collection-" + collName.ns();
    KVEngine* kvEngine = _storageEngine->getEngine();
    return kvEngine->createGroupedRecordStore(
        opCtx, collName.ns(), ident, CollectionOptions(), KVPrefix::kNotPrefixed);
}
TEST( RocksEngineTest, DropDirect1 ) {
    // Work in a throwaway on-disk directory.
    std::string dbPath = "/tmp/mongo-rocks-engine-test";
    boost::filesystem::remove_all( dbPath );
    RocksEngine engine( dbPath );

    // Create two collections in "test" and one in "silly".
    {
        MyOperationContext opCtx( &engine );
        Status createStatus = engine.createCollection( &opCtx, "test.foo", CollectionOptions() );
        ASSERT_OK( createStatus );
    }
    {
        MyOperationContext opCtx( &engine );
        Status createStatus = engine.createCollection( &opCtx, "test.bar", CollectionOptions() );
        ASSERT_OK( createStatus );
    }
    {
        MyOperationContext opCtx( &engine );
        Status createStatus = engine.createCollection( &opCtx, "silly.bar", CollectionOptions() );
        ASSERT_OK( createStatus );
    }

    // Per-database namespace listings reflect what was created.
    {
        std::list<std::string> namespaces;
        engine.getCollectionNamespaces( "test", &namespaces );
        ASSERT_EQUALS( 2U, namespaces.size() );
    }
    {
        std::list<std::string> namespaces;
        engine.getCollectionNamespaces( "silly", &namespaces );
        ASSERT_EQUALS( 1U, namespaces.size() );
    }

    // Dropping "test.foo" leaves only "test.bar" behind.
    {
        MyOperationContext opCtx( &engine );
        Status dropStatus = engine.dropCollection( &opCtx, "test.foo" );
        ASSERT_OK( dropStatus );
    }
    {
        std::list<std::string> namespaces;
        engine.getCollectionNamespaces( "test", &namespaces );
        ASSERT_EQUALS( 1U, namespaces.size() );
        ASSERT_EQUALS( namespaces.front(), "test.bar" );
    }

    // Dropping an already-dropped collection must fail.
    {
        MyOperationContext opCtx( &engine );
        Status dropStatus = engine.dropCollection( &opCtx, "test.foo" );
        ASSERT_NOT_OK( dropStatus );
    }
}
TEST( KVCatalogTest, Idx1 ) {
    scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
    KVEngine* engine = helper->getEngine();

    scoped_ptr<RecordStore> rs;
    scoped_ptr<KVCatalog> catalog;

    // Bootstrap a catalog backed by a fresh record store.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", CollectionOptions() ) );
        rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
        catalog.reset( new KVCatalog( rs.get() ) );
        uow.commit();
    }

    // Register a collection; its ident must differ from the namespace.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
        ASSERT_NOT_EQUALS( "a.b", catalog->getCollectionIdent( "a.b" ) );
        uow.commit();
    }

    // Attach metadata for an index named "foo".
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        BSONCollectionCatalogEntry::MetaData md;
        md.ns ="a.b";
        md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
                                                                         false,
                                                                         DiskLoc(),
                                                                         false ) );
        catalog->putMetaData( &opCtx, "a.b", md );
        uow.commit();
    }

    string idxIdent;
    {
        MyOperationContext opCtx( engine );
        idxIdent = catalog->getIndexIdent( &opCtx, "a.b", "foo" );
    }

    // The index ident is stable across lookups.
    {
        MyOperationContext opCtx( engine );
        ASSERT_EQUALS( idxIdent, catalog->getIndexIdent( &opCtx, "a.b", "foo" ) );
    }

    // Removing the index and re-adding it must mint a brand-new ident.
    {
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        BSONCollectionCatalogEntry::MetaData md;
        md.ns ="a.b";
        catalog->putMetaData( &opCtx, "a.b", md ); // remove index
        md.indexes.push_back( BSONCollectionCatalogEntry::IndexMetaData( BSON( "name" << "foo" ),
                                                                         false,
                                                                         DiskLoc(),
                                                                         false ) );
        catalog->putMetaData( &opCtx, "a.b", md );
        uow.commit();
    }

    {
        MyOperationContext opCtx( engine );
        ASSERT_NOT_EQUALS( idxIdent, catalog->getIndexIdent( &opCtx, "a.b", "foo" ) );
    }
}
TEST(CollectionOptions, ErrorBadSize) {
    // A negative "size" is invalid whether or not the collection is capped.
    const char* badSizeDocs[] = {"{capped: true, size: -1}", "{capped: false, size: -1}"};
    for (const char* doc : badSizeDocs) {
        ASSERT_NOT_OK(CollectionOptions().parse(fromjson(doc)));
    }
}
TEST(CollectionOptions, IgnoreUnregisteredFields) {
    // Unknown top-level fields must be silently ignored by the options parser.
    const BSONObj createCmd = BSON("create"
                                   << "c");
    const BSONObj unknownField = BSON("foo"
                                      << "bar");
    ASSERT_OK(CollectionOptions().parse(createCmd));
    ASSERT_OK(CollectionOptions().parse(unknownField));
}
TEST_F(KVCatalogTest, RestartForPrefixes) {
    // Force prefix assignment on for the duration of the test.
    storageGlobalParams.groupCollections = true;
    ON_BLOCK_EXIT([&] { storageGlobalParams.groupCollections = false; });

    KVPrefix collPrefix = KVPrefix::getNextPrefix(NamespaceString("a.b"));
    KVPrefix indexPrefix = KVPrefix::getNextPrefix(NamespaceString("a.b"));

    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    {
        unique_ptr<RecordStore> catalogRs;
        unique_ptr<KVCatalog> catalog;

        // Bootstrap a catalog backed by a fresh record store.
        {
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(
                engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
            catalogRs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
            catalog.reset(new KVCatalog(catalogRs.get(), false, false, nullptr));
            uow.commit();
        }

        // Create "a.b" with an explicit collection prefix.
        {
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(newCollection(
                &opCtx, NamespaceString("a.b"), CollectionOptions(), collPrefix, catalog.get()));
            ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
            ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
            uow.commit();
        }

        // Persist metadata carrying both the collection prefix and an index prefix.
        {
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);

            BSONCollectionCatalogEntry::MetaData md;
            md.ns = "a.b";

            BSONCollectionCatalogEntry::IndexMetaData imd;
            imd.spec = BSON("name"
                            << "foo");
            imd.ready = false;
            imd.head = RecordId();
            imd.multikey = false;
            imd.prefix = indexPrefix;
            imd.isBackgroundSecondaryBuild = false;
            md.indexes.push_back(imd);
            md.prefix = collPrefix;

            catalog->putMetaData(&opCtx, "a.b", md);
            uow.commit();
        }
    }

    // After an engine restart the persisted prefixes must survive intact.
    engine = helper->restartEngine();

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        unique_ptr<RecordStore> catalogRs =
            engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        unique_ptr<KVCatalog> catalog =
            stdx::make_unique<KVCatalog>(catalogRs.get(), false, false, nullptr);
        catalog->init(&opCtx);

        const BSONCollectionCatalogEntry::MetaData md = catalog->getMetaData(&opCtx, "a.b");
        ASSERT_EQ("a.b", md.ns);
        ASSERT_EQ(collPrefix, md.prefix);
        ASSERT_EQ(indexPrefix, md.indexes[md.findIndexOffset("foo")].prefix);
    }
}
TEST_F(
    SyncTailTest,
    MultiApplyDoesNotSetOplogEntryIsForCappedCollectionWhenProcessingNonCappedCollectionInsertOperation) {
    // Build a test-unique namespace in the "local" database from the suite/test names.
    NamespaceString nss("local." + _agent.getSuiteName() + "_" + _agent.getTestName());
    // Default-constructed CollectionOptions means a non-capped collection, so the
    // "is for capped collection" flag must come back false.
    ASSERT_FALSE(_testOplogEntryIsForCappedCollection(_txn.get(), nss, CollectionOptions()));
}