// Insert a record and try to perform an in-place update on it.
    TEST( RecordStoreTestHarness, UpdateWithDamages ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        string data = "00010111";
        DiskLoc loc;
        const RecordData rec(data.c_str(), data.size() + 1);
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                            rec.data(),
                                                            rec.size(),
                                                            false );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                mutablebson::DamageVector dv( 3 );
                dv[0].sourceOffset = 5;
                dv[0].targetOffset = 0;
                dv[0].size = 2;
                dv[1].sourceOffset = 3;
                dv[1].targetOffset = 2;
                dv[1].size = 3;
                dv[2].sourceOffset = 0;
                dv[2].targetOffset = 5;
                dv[2].size = 3;

                WriteUnitOfWork uow( opCtx.get() );
                ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) );
                uow.commit();
            }
        }

        data = "11101000";
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                RecordData record = rs->dataFor( opCtx.get(), loc );
                ASSERT_EQUALS( data, record.data() );
            }
        }
    }
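
// A minimal standalone sketch (not the RecordStore implementation) of the byte-copy
// semantics the test above assumes for updateWithDamages(): each damage event copies
// `size` bytes from the damage source at `sourceOffset` into the stored record at
// `targetOffset`. The local `Damage` struct is a stand-in for mutablebson::DamageEvent.
#include <cassert>
#include <cstring>
#include <string>
#include <vector>

struct Damage {
    size_t sourceOffset;
    size_t targetOffset;
    size_t size;
};

// Apply each damage event by copying bytes from `source` into `record`.
void applyDamages(std::string& record, const char* source, const std::vector<Damage>& dv) {
    for (const Damage& d : dv) {
        std::memcpy(&record[d.targetOffset], source + d.sourceOffset, d.size);
    }
}

int main() {
    const std::string source = "00010111";  // damage source (the test reuses the old record bytes)
    std::string record = source;            // stored record contents before the update

    const std::vector<Damage> dv = {
        {5, 0, 2},  // copy source[5..6] ("11")  to record[0..1]
        {3, 2, 3},  // copy source[3..5] ("101") to record[2..4]
        {0, 5, 3},  // copy source[0..2] ("000") to record[5..7]
    };
    applyDamages(record, source.c_str(), dv);
    assert(record == "11101000");  // matches the expected value checked by the test above
    return 0;
}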
// Example #2
    void run() {
        string ns = "unittests.rollback_set_index_head";
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        NamespaceString nss(ns);
        dropDatabase(&opCtx, nss);
        createCollection(&opCtx, nss);

        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);

        Collection* coll = autoDb.getDb()->getCollection(&opCtx, nss);
        IndexCatalog* catalog = coll->getIndexCatalog();

        string idxName = "a";
        BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName << "v"
                                 << static_cast<int>(kIndexVersion));

        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
            uow.commit();
        }

        IndexDescriptor* indexDesc = catalog->findIndexByName(&opCtx, idxName);
        invariant(indexDesc);
        const IndexCatalogEntry* ice = catalog->getEntry(indexDesc);
        invariant(ice);
        HeadManager* headManager = ice->headManager();

        const RecordId oldHead = headManager->getHead(&opCtx);
        ASSERT_EQ(oldHead, ice->head(&opCtx));

        const RecordId dummyHead(123, 456);
        ASSERT_NE(oldHead, dummyHead);

        // END SETUP / START TEST

        {
            WriteUnitOfWork uow(&opCtx);

            headManager->setHead(&opCtx, dummyHead);

            ASSERT_EQ(ice->head(&opCtx), dummyHead);
            ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);

            if (!rollback) {
                uow.commit();
            }
        }

        if (rollback) {
            ASSERT_EQ(ice->head(&opCtx), oldHead);
            ASSERT_EQ(headManager->getHead(&opCtx), oldHead);
        } else {
            ASSERT_EQ(ice->head(&opCtx), dummyHead);
            ASSERT_EQ(headManager->getHead(&opCtx), dummyHead);
        }
    }
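
// The rollback variants above and below all rely on the same WriteUnitOfWork contract:
// if the unit of work goes out of scope without commit(), every change made inside it
// is undone. A toy RAII sketch of that contract (illustrative only, not the MongoDB
// implementation):
#include <cassert>
#include <functional>
#include <vector>

class ToyUnitOfWork {
public:
    // Register an undo action to run if the unit of work is abandoned without commit().
    void onRollback(std::function<void()> undo) {
        _undo.push_back(std::move(undo));
    }
    void commit() {
        _committed = true;
    }
    ~ToyUnitOfWork() {
        if (!_committed) {
            // Run undo actions in reverse registration order.
            for (auto it = _undo.rbegin(); it != _undo.rend(); ++it) {
                (*it)();
            }
        }
    }

private:
    std::vector<std::function<void()>> _undo;
    bool _committed = false;
};

int main() {
    int head = 1;                // stands in for the index head RecordId in the test above
    const bool rollback = true;  // mirrors the test's rollback flag
    {
        ToyUnitOfWork uow;
        const int oldHead = head;
        head = 456;  // the "setHead" performed inside the unit of work
        uow.onRollback([&head, oldHead] { head = oldHead; });
        if (!rollback) {
            uow.commit();
        }
    }  // no commit() => the change is undone here
    assert(head == (rollback ? 1 : 456));
    return 0;
}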
    TEST( RecordStoreTestHarness, Simple1 ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        string s = "eliot was here";

        DiskLoc loc1;

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
                ASSERT_OK( res.getStatus() );
                loc1 = res.getValue();
                uow.commit();
            }

            ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc1 ).data() );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( s, rs->dataFor( opCtx.get(), loc1 ).data() );
            ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );

            RecordData rd;
            ASSERT( !rs->findRecord( opCtx.get(), DiskLoc(111,17), &rd ) );
            ASSERT( rd.data() == NULL );

            ASSERT( rs->findRecord( opCtx.get(), loc1, &rd ) );
            ASSERT_EQUALS( s, rd.data() );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false );
                ASSERT_OK( res.getStatus() );
                uow.commit();
            }

        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 2, rs->numRecords( opCtx.get() ) );
        }
    }
    TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        string s1 = "aaa111bbb";
        string s2 = "aaa222bbb";

        DiskLoc loc;
        const RecordData s1Rec(s1.c_str(), s1.size() + 1);
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                            s1Rec.data(),
                                                            s1Rec.size(),
                                                            -1 );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }

        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                const char* damageSource = "222";
                mutablebson::DamageVector dv;
                dv.push_back( mutablebson::DamageEvent() );
                dv[0].sourceOffset = 0;
                dv[0].targetOffset = 3;
                dv[0].size = 3;
                Status res = rs->updateWithDamages( opCtx.get(),
                                                   loc,
                                                   s1Rec,
                                                   damageSource,
                                                   dv );
                ASSERT_OK( res );
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
        }
    }
    // Insert a record and try to call updateWithDamages() with an empty DamageVector.
    TEST( RecordStoreTestHarness, UpdateWithNoDamages ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        if (!rs->updateWithDamagesSupported())
            return;

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        string data = "my record";
        RecordId loc;
        const RecordData rec(data.c_str(), data.size() + 1);
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
                                                            rec.data(),
                                                            rec.size(),
                                                            false );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                mutablebson::DamageVector dv;

                WriteUnitOfWork uow( opCtx.get() );
                ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) );
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                RecordData record = rs->dataFor( opCtx.get(), loc );
                ASSERT_EQUALS( data, record.data() );
            }
        }
    }
    TEST( RecordStoreTestHarness, Update1 ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        string s1 = "eliot was here";
        string s2 = "eliot was here again";

        DiskLoc loc;
        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                           s1.c_str(), s1.size() + 1,
                                                           false );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }

        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                StatusWith<DiskLoc> res = rs->updateRecord( opCtx.get(), loc,
                                                           s2.c_str(), s2.size() + 1,
                                                           false, NULL );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
                uow.commit();
            }

        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
            ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
        }

    }
TEST_F(KVCatalogTest, Coll1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(newCollection(&opCtx,
                                NamespaceString("a.b"),
                                CollectionOptions(),
                                KVPrefix::kNotPrefixed,
                                catalog.get()));
        ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
        uow.commit();
    }

    string ident = catalog->getCollectionIdent("a.b");
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        catalog->init(&opCtx);
        uow.commit();
    }
    ASSERT_EQUALS(ident, catalog->getCollectionIdent("a.b"));

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        dropCollection(&opCtx, "a.b", catalog.get()).transitional_ignore();
        newCollection(&opCtx,
                      NamespaceString("a.b"),
                      CollectionOptions(),
                      KVPrefix::kNotPrefixed,
                      catalog.get())
            .transitional_ignore();
        uow.commit();
    }
    ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
}
TEST_F(KVCatalogTest, DirectoryPerAndSplit1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), true, true, nullptr));
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(newCollection(&opCtx,
                                NamespaceString("a.b"),
                                CollectionOptions(),
                                KVPrefix::kNotPrefixed,
                                catalog.get()));
        ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    {  // index
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";

        BSONCollectionCatalogEntry::IndexMetaData imd;
        imd.spec = BSON("name"
                        << "foo");
        imd.ready = false;
        imd.head = RecordId();
        imd.multikey = false;
        imd.prefix = KVPrefix::kNotPrefixed;
        imd.isBackgroundSecondaryBuild = false;
        md.indexes.push_back(imd);
        catalog->putMetaData(&opCtx, "a.b", md);
        ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/index/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
        uow.commit();
    }
}
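
// A hypothetical sketch of the ident layout implied by the two `true` flags passed to
// KVCatalog above (directory-per-db and directory-for-indexes): "<db>/<kind>/<suffix>".
// Only the "a/collection/" and "a/index/" prefixes are actually asserted by the test;
// the suffix used here is illustrative.
#include <cassert>
#include <string>

std::string makeIdent(const std::string& db, const std::string& kind, long long counter) {
    return db + "/" + kind + "/" + std::to_string(counter) + "-someUniqueSuffix";
}

int main() {
    const std::string collIdent = makeIdent("a", "collection", 0);
    const std::string indexIdent = makeIdent("a", "index", 1);
    assert(collIdent.find("a/collection/") != std::string::npos);
    assert(indexIdent.find("a/index/") != std::string::npos);
    return 0;
}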
// Example #9
    void run() {
        NamespaceString nss("unittests.rollback_truncate_collection");
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        dropDatabase(&opCtx, nss);

        Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
        OldClientContext ctx(&opCtx, nss.ns());

        BSONObj doc = BSON("_id"
                           << "foo");

        ASSERT(!collectionExists(&ctx, nss.ns()));
        {
            WriteUnitOfWork uow(&opCtx);

            ASSERT_OK(userCreateNS(&opCtx,
                                   ctx.db(),
                                   nss.ns(),
                                   BSONObj(),
                                   CollectionOptions::parseForCommand,
                                   defaultIndexes));
            ASSERT(collectionExists(&ctx, nss.ns()));
            insertRecord(&opCtx, nss, doc);
            assertOnlyRecord(&opCtx, nss, doc);
            uow.commit();
        }
        assertOnlyRecord(&opCtx, nss, doc);

        // END OF SETUP / START OF TEST

        {
            WriteUnitOfWork uow(&opCtx);

            ASSERT_OK(truncateCollection(&opCtx, nss));
            ASSERT(collectionExists(&ctx, nss.ns()));
            assertEmpty(&opCtx, nss);

            if (!rollback) {
                uow.commit();
            }
        }
        ASSERT(collectionExists(&ctx, nss.ns()));
        if (rollback) {
            assertOnlyRecord(&opCtx, nss, doc);
        } else {
            assertEmpty(&opCtx, nss);
        }
    }
    TEST( RocksRecordStoreTest, Truncate1 ) {
        unittest::TempDir td( _rocksRecordStoreTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

        {
            rocksdb::ColumnFamilyHandle* cf1;
            rocksdb::ColumnFamilyHandle* cf1_m;

            rocksdb::Status status;

            status = db->CreateColumnFamily( getColumnFamilyOptions(), "foo.bar1", &cf1 );
            ASSERT_OK( toMongoStatus( status ) );
            status = db->CreateColumnFamily( rocksdb::ColumnFamilyOptions(), "foo.bar1&", &cf1_m );
            ASSERT_OK( toMongoStatus( status ) );

            RocksRecordStore rs( "foo.bar", db.get(), cf1, cf1_m );
            string s = "antonio was here";

            {
                MyOperationContext opCtx( db.get() );
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
                ASSERT_OK( res.getStatus() );
                res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
                ASSERT_OK( res.getStatus() );
            }

            {
                MyOperationContext opCtx( db.get() );
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                Status stat = rs.truncate( &opCtx );
                ASSERT_OK( stat );

                ASSERT_EQUALS( 0, rs.numRecords() );
                ASSERT_EQUALS( 0, rs.dataSize() );
            }

            // Test that truncate does not fail on an empty collection
            {
                MyOperationContext opCtx( db.get() );
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                Status stat = rs.truncate( &opCtx );
                ASSERT_OK( stat );

                ASSERT_EQUALS( 0, rs.numRecords() );
                ASSERT_EQUALS( 0, rs.dataSize() );
            }
        }
    }
    TEST( RocksRecordStoreTest, UpdateInPlace1 ) {
        unittest::TempDir td( _rocksRecordStoreTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

        {
            RocksRecordStore rs( "foo.bar", db.get(),
                                 db->DefaultColumnFamily(),
                                 db->DefaultColumnFamily() );
            string s1 = "aaa111bbb";
            string s2 = "aaa222bbb";

            DiskLoc loc;
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    StatusWith<DiskLoc> res = rs.insertRecord( &opCtx,
                                                               s1.c_str(),
                                                               s1.size() + 1,
                                                               -1 );
                    ASSERT_OK( res.getStatus() );
                    loc = res.getValue();
                }

                ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
            }

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    const char* damageSource = "222";
                    mutablebson::DamageVector dv;
                    dv.push_back( mutablebson::DamageEvent() );
                    dv[0].sourceOffset = 0;
                    dv[0].targetOffset = 3;
                    dv[0].size = 3;
                    Status res = rs.updateWithDamages( &opCtx,
                                                       loc,
                                                       damageSource,
                                                       dv );
                    ASSERT_OK( res );
                }
                ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
            }

        }
    }
// Example #12
    void run() {
        string ns = "unittests.rollback_drop_index";
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        NamespaceString nss(ns);
        dropDatabase(&opCtx, nss);
        createCollection(&opCtx, nss);

        AutoGetDb autoDb(&opCtx, nss.db(), MODE_X);

        Collection* coll = autoDb.getDb()->getCollection(&opCtx, nss);
        IndexCatalog* catalog = coll->getIndexCatalog();

        string idxName = "a";
        BSONObj spec = BSON("ns" << ns << "key" << BSON("a" << 1) << "name" << idxName << "v"
                                 << static_cast<int>(kIndexVersion));

        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(catalog->createIndexOnEmptyCollection(&opCtx, spec));
            insertRecord(&opCtx, nss, BSON("a" << 1));
            insertRecord(&opCtx, nss, BSON("a" << 2));
            insertRecord(&opCtx, nss, BSON("a" << 3));
            uow.commit();
        }
        ASSERT(indexReady(&opCtx, nss, idxName));
        ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));

        // END SETUP / START TEST

        {
            WriteUnitOfWork uow(&opCtx);

            dropIndex(&opCtx, nss, idxName);
            ASSERT(!indexExists(&opCtx, nss, idxName));

            if (!rollback) {
                uow.commit();
            }
        }
        if (rollback) {
            ASSERT(indexExists(&opCtx, nss, idxName));
            ASSERT(indexReady(&opCtx, nss, idxName));
            ASSERT_EQ(3u, getNumIndexEntries(&opCtx, nss, idxName));
        } else {
            ASSERT(!indexExists(&opCtx, nss, idxName));
        }
    }
// Example #13
 long long BackgroundSync::_readLastAppliedHash(OperationContext* txn) {
     BSONObj oplogEntry;
     try {
         // Uses a WriteUnitOfWork (WUOW) because there is no way to demarcate a read transaction boundary.
         Lock::DBLock lk(txn->lockState(), "local", MODE_X);
         WriteUnitOfWork uow(txn);
         bool success = Helpers::getLast(txn, rsoplog, oplogEntry);
         uow.commit();
         if (!success) {
             // This can happen when we are to do an initial sync.  lastHash will be set
             // after the initial sync is complete.
             return 0;
         }
     }
     catch (const DBException& ex) {
         severe() << "Problem reading " << rsoplog << ": " << ex.toStatus();
         fassertFailed(18904);
     }
     BSONElement hashElement = oplogEntry[hashFieldName];
     if (hashElement.eoo()) {
         severe() << "Most recent entry in " << rsoplog << " missing \"" << hashFieldName <<
             "\" field";
         fassertFailed(18902);
     }
     if (hashElement.type() != NumberLong) {
         severe() << "Expected type of \"" << hashFieldName << "\" in most recent " << 
             rsoplog << " entry to have type NumberLong, but found " << 
             typeName(hashElement.type());
         fassertFailed(18903);
     }
     return hashElement.safeNumberLong();
 }
    // Call savePosition() on a reverse cursor without ever calling restorePosition().
    // May be useful to run this test under valgrind to verify there are no leaks.
    TEST( SortedDataInterface, SavePositionWithoutRestoreReversed ) {
        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT( sorted->isEmpty( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) );
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) );
            cursor->savePosition();
        }
    }
    TEST( KVEngineTestHarness, SimpleRS1 ) {
        scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
        KVEngine* engine = helper->getEngine();
        ASSERT( engine );

        string ns = "a.b";
        scoped_ptr<RecordStore> rs;
        {
            MyOperationContext opCtx( engine );
            ASSERT_OK( engine->createRecordStore( &opCtx, ns, CollectionOptions() ) );
            rs.reset( engine->getRecordStore( &opCtx, ns, ns, CollectionOptions() ) );
            ASSERT( rs );
        }


        DiskLoc loc;
        {
            MyOperationContext opCtx( engine );
            WriteUnitOfWork uow( &opCtx );
            StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, "abc", 4, false );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }

        {
            MyOperationContext opCtx( engine );
            ASSERT_EQUALS( string("abc"), rs->dataFor( &opCtx, loc ).data() );
        }

    }
    TEST( RocksRecordStoreTest, Stats1 ) {
        unittest::TempDir td( _rocksRecordStoreTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

        RocksRecordStore rs( "foo.bar", db.get(),
                             db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );
        string s = "eliot was here";

        {
            MyOperationContext opCtx( db.get() );
            DiskLoc loc;
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
            }

            ASSERT_EQUALS( s, rs.dataFor( loc ).data() );
        }

        {
            MyOperationContext opCtx( db.get() );
            BSONObjBuilder b;
            rs.appendCustomStats( &opCtx, &b, 1 );
            BSONObj obj = b.obj();
            ASSERT( obj["stats"].String().find( "WAL" ) != string::npos );
        }
    }
    TEST( KVEngineTestHarness, SimpleSorted1 ) {
        scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
        KVEngine* engine = helper->getEngine();
        ASSERT( engine );

        string ident = "abc";
        IndexDescriptor desc( NULL, "", BSON( "key" << BSON( "a" << 1 ) ) );
        scoped_ptr<SortedDataInterface> sorted;
        {
            MyOperationContext opCtx( engine );
            ASSERT_OK( engine->createSortedDataInterface( &opCtx, ident, &desc ) );
            sorted.reset( engine->getSortedDataInterface( &opCtx, ident, &desc ) );
            ASSERT( sorted );
        }

        {
            MyOperationContext opCtx( engine );
            WriteUnitOfWork uow( &opCtx );
            ASSERT_OK( sorted->insert( &opCtx, BSON( "" << 5 ), DiskLoc( 6, 4 ), true ) );
            uow.commit();
        }

        {
            MyOperationContext opCtx( engine );
            ASSERT_EQUALS( 1, sorted->numEntries( &opCtx ) );
        }

    }
// Insert multiple keys and verify that fullValidate() sets `numKeysOut` either to the
// number of entries in the index or to -1.
TEST(SortedDataInterface, FullValidate) {
    const std::unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    const std::unique_ptr<SortedDataInterface> sorted(harnessHelper->newSortedDataInterface(false));

    {
        const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT(sorted->isEmpty(opCtx.get()));
    }

    int nToInsert = 10;
    for (int i = 0; i < nToInsert; i++) {
        const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            BSONObj key = BSON("" << i);
            RecordId loc(42, i * 2);
            ASSERT_OK(sorted->insert(opCtx.get(), key, loc, true));
            uow.commit();
        }
    }

    {
        const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get()));
    }

    {
        long long numKeysOut;
        const std::unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        sorted->fullValidate(opCtx.get(), &numKeysOut, NULL);
        // fullValidate() can set numKeysOut as the number of existing keys or -1.
        ASSERT(numKeysOut == nToInsert || numKeysOut == -1);
    }
}
// Verify that a nonempty collection may take up some space on disk.
TEST(RecordStoreTestHarness, StorageSizeNonEmpty) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    int nToInsert = 10;
    for (int i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT(rs->storageSize(opCtx.get(), NULL) >= 0);
    }
}
// Example #20
    TEST( RocksEngineTest, CreateDirect1 ) {
        std::string path = "/tmp/mongo-rocks-engine-test";
        boost::filesystem::remove_all( path );
        RocksEngine engine( path );

        {
            MyOperationContext opCtx( &engine );
            Status status = engine.createCollection( &opCtx,
                                                     "test.foo",
                                                     CollectionOptions() );
            ASSERT_OK( status );
        }

        RocksRecordStore* rs = engine.getEntry( "test.foo" )->recordStore.get();
        string s = "eliot was here";

        {
            MyOperationContext opCtx( &engine );
            DiskLoc loc;
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs->insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
            }

            ASSERT_EQUALS( s, rs->dataFor( loc ).data() );
        }
    }
// Example #21
    void run() {
        string ns = "unittests.rollback_create_collection";
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;
        NamespaceString nss(ns);
        dropDatabase(&opCtx, nss);

        Lock::DBLock dbXLock(&opCtx, nss.db(), MODE_X);
        OldClientContext ctx(&opCtx, ns);
        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT(!collectionExists(&ctx, ns));
            auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
            ASSERT_OK(userCreateNS(
                &opCtx, ctx.db(), ns, options, CollectionOptions::parseForCommand, defaultIndexes));
            ASSERT(collectionExists(&ctx, ns));
            if (!rollback) {
                uow.commit();
            }
        }
        if (rollback) {
            ASSERT(!collectionExists(&ctx, ns));
        } else {
            ASSERT(collectionExists(&ctx, ns));
        }
    }
    TEST( RocksRecordStoreTest, Insert1 ) {
        unittest::TempDir td( _rocksRecordStoreTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );
        int size;

        {
            RocksRecordStore rs( "foo.bar", db.get(),
                                 db->DefaultColumnFamily(),
                                 db->DefaultColumnFamily() );
            string s = "eliot was here";
            size = s.length() + 1;

            MyOperationContext opCtx( db.get() );
            DiskLoc loc;
            {
                WriteUnitOfWork uow( opCtx.recoveryUnit() );
                StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
                ASSERT_OK( res.getStatus() );
                loc = res.getValue();
            }

            ASSERT_EQUALS( s, rs.dataFor( loc ).data() );
        }

        {
            RocksRecordStore rs( "foo.bar", db.get(),
                                 db->DefaultColumnFamily(),
                                 db->DefaultColumnFamily() );
            ASSERT_EQUALS( 1, rs.numRecords() );
            ASSERT_EQUALS( size, rs.dataSize() );
        }
    }
TEST(KVEngineTestHarness, SimpleSorted1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();
    ASSERT(engine);

    string ident = "abc";
    IndexDescriptor desc(nullptr,
                         "",
                         BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns"
                                  << "mydb.mycoll"
                                  << "key"
                                  << BSON("a" << 1)));
    unique_ptr<SortedDataInterface> sorted;
    {
        MyOperationContext opCtx(engine);
        ASSERT_OK(engine->createSortedDataInterface(&opCtx, ident, &desc));
        sorted.reset(engine->getSortedDataInterface(&opCtx, ident, &desc));
        ASSERT(sorted);
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(sorted->insert(&opCtx, BSON("" << 5), RecordId(6, 4), true));
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        ASSERT_EQUALS(1, sorted->numEntries(&opCtx));
    }
}
// Example #24
    void run() {
        NamespaceString source("unittests.rollback_rename_collection_src");
        NamespaceString target("unittests.rollback_rename_collection_dest");
        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
        OperationContext& opCtx = *opCtxPtr;

        dropDatabase(&opCtx, source);
        dropDatabase(&opCtx, target);

        Lock::GlobalWrite globalWriteLock(&opCtx);
        OldClientContext ctx(&opCtx, source.ns());

        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT(!collectionExists(&ctx, source.ns()));
            ASSERT(!collectionExists(&ctx, target.ns()));
            auto options = capped ? BSON("capped" << true << "size" << 1000) : BSONObj();
            ASSERT_OK(userCreateNS(&opCtx,
                                   ctx.db(),
                                   source.ns(),
                                   options,
                                   CollectionOptions::parseForCommand,
                                   defaultIndexes));
            uow.commit();
        }
        ASSERT(collectionExists(&ctx, source.ns()));
        ASSERT(!collectionExists(&ctx, target.ns()));

        // END OF SETUP / START OF TEST

        {
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(renameCollection(&opCtx, source, target));
            ASSERT(!collectionExists(&ctx, source.ns()));
            ASSERT(collectionExists(&ctx, target.ns()));
            if (!rollback) {
                uow.commit();
            }
        }
        if (rollback) {
            ASSERT(collectionExists(&ctx, source.ns()));
            ASSERT(!collectionExists(&ctx, target.ns()));
        } else {
            ASSERT(!collectionExists(&ctx, source.ns()));
            ASSERT(collectionExists(&ctx, target.ns()));
        }
    }
    TEST( RocksRecordStoreTest, ForwardIterator ) {
        {
            unittest::TempDir td( _rocksRecordStoreTestDir );
            scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

            rocksdb::ColumnFamilyHandle* cf1;
            rocksdb::ColumnFamilyHandle* cf1_m;

            rocksdb::Status status;

            status = db->CreateColumnFamily( getColumnFamilyOptions(), "foo.bar1", &cf1 );
            ASSERT_OK( toMongoStatus( status ) );
            status = db->CreateColumnFamily( rocksdb::ColumnFamilyOptions(), "foo.bar1&", &cf1_m );
            ASSERT_OK( toMongoStatus( status ) );

            RocksRecordStore rs( "foo.bar", db.get(), cf1, cf1_m );
            string s1 = "eliot was here";
            string s2 = "antonio was here";
            string s3 = "eliot and antonio were here";
            DiskLoc loc1;
            DiskLoc loc2;
            DiskLoc loc3;

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s1.c_str(), s1.size() + 1, -1 );
                    ASSERT_OK( res.getStatus() );
                    loc1 = res.getValue();
                    res = rs.insertRecord( &opCtx, s2.c_str(), s2.size() + 1, -1 );
                    ASSERT_OK( res.getStatus() );
                    loc2 = res.getValue();
                    res = rs.insertRecord( &opCtx, s3.c_str(), s3.size() + 1, -1 );
                    ASSERT_OK( res.getStatus() );
                    loc3 = res.getValue();
                }
            }

            OperationContextNoop txn;

            scoped_ptr<RecordIterator> iter( rs.getIterator( &txn ) );

            ASSERT_EQUALS( false, iter->isEOF() );
            ASSERT_EQUALS( loc1, iter->getNext() );
            ASSERT_EQUALS( s1, iter->dataFor( loc1 ).data() );

            ASSERT_EQUALS( false, iter->isEOF() );
            ASSERT_EQUALS( loc2, iter->getNext() );
            ASSERT_EQUALS( s2, iter->dataFor( loc2 ).data() );

            ASSERT_EQUALS( false, iter->isEOF() );
            ASSERT_EQUALS( loc3, iter->getNext() );
            ASSERT_EQUALS( s3, iter->dataFor( loc3 ).data() );

            ASSERT_EQUALS( true, iter->isEOF() );
            ASSERT_EQUALS( DiskLoc(), iter->getNext() );
        }
    }
    TEST( RocksRecordStoreTest, Update1 ) {
        unittest::TempDir td( _rocksRecordStoreTestDir );
        scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

        {
            RocksRecordStore rs( "foo.bar", db.get(),
                                 db->DefaultColumnFamily(),
                                 db->DefaultColumnFamily() );
            string s1 = "eliot1";
            string s2 = "eliot2 and more";

            DiskLoc loc;
            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    StatusWith<DiskLoc> res = rs.insertRecord( &opCtx,
                                                               s1.c_str(),
                                                               s1.size() + 1,
                                                               -1 );
                    ASSERT_OK( res.getStatus() );
                    loc = res.getValue();
                }

                ASSERT_EQUALS( s1, rs.dataFor( loc ).data() );
            }

            {
                MyOperationContext opCtx( db.get() );
                {
                    WriteUnitOfWork uow( opCtx.recoveryUnit() );
                    StatusWith<DiskLoc> res = rs.updateRecord( &opCtx,
                                                               loc,
                                                               s2.c_str(),
                                                               s2.size() + 1,
                                                               -1,
                                                               NULL );
                    ASSERT_OK( res.getStatus() );
                    ASSERT( loc == res.getValue() );
                }

                ASSERT_EQUALS( s2, rs.dataFor( loc ).data() );
            }

        }
    }
    TEST( RecordStoreTestHarness, Cursor1 ) {
        const int N = 10;

        scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
        scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            {
                WriteUnitOfWork uow( opCtx.get() );
                for ( int i = 0; i < N; i++ ) {
                    string s = str::stream() << "eliot" << i;
                    ASSERT_OK( rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1, false ).getStatus() );
                }
                uow.commit();
            }
        }

        {
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            ASSERT_EQUALS( N, rs->numRecords( opCtx.get() ) );
        }

        {
            int x = 0;
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get() ) );
            while ( !it->isEOF() ) {
                DiskLoc loc = it->getNext();
                RecordData data = it->dataFor( loc );
                string s = str::stream() << "eliot" << x++;
                ASSERT_EQUALS( s, data.data() );
            }
            ASSERT_EQUALS( N, x );
        }

        {
            int x = N;
            scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
            scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(),
                                                           DiskLoc(),
                                                           false,
                                                           CollectionScanParams::BACKWARD ) );
            while ( !it->isEOF() ) {
                DiskLoc loc = it->getNext();
                RecordData data = it->dataFor( loc );
                string s = str::stream() << "eliot" << --x;
                ASSERT_EQUALS( s, data.data() );
            }
            ASSERT_EQUALS( 0, x );
        }

    }
// Insert multiple records and create a random cursor for the record store.
TEST(RecordStoreTestHarness, GetRandomIteratorNonEmpty) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    const unsigned nToInsert =
        5000;  // should be a non-trivial amount so that we get multiple btree levels
    RecordId locs[nToInsert];
    for (unsigned i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record " << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    set<RecordId> remain(locs, locs + nToInsert);
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        auto cursor = rs->getRandomCursor(opCtx.get());
        // returns NULL if getRandomCursor is not supported
        if (!cursor) {
            return;
        }

        // Iterate documents and mark those visited, but let at least one remain
        for (unsigned i = 0; i < nToInsert - 1; i++) {
            // Get a new cursor once in a while, shouldn't affect things
            if (i % (nToInsert / 8) == 0) {
                cursor = rs->getRandomCursor(opCtx.get());
            }
            remain.erase(cursor->next()->id);  // can happen more than once per doc
        }
        ASSERT(!remain.empty());
        ASSERT(cursor->next());

        // We should have visited at least a quarter of the items if the cursor is at all
        // random. With N uniform draws from N records, the expected fraction visited is
        // about 1 - 1/e, i.e. roughly 63.2%.
        ASSERT_LT(remain.size(), nToInsert * 3 / 4);
    }
}
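
// A quick simulation of the expectation stated in the comment inside the test above:
// drawing N times uniformly at random from N records visits about 1 - 1/e (roughly
// 63.2%) of them. This only sanity-checks the math; the record store's random cursor
// need not sample with exactly these statistics.
#include <cstdio>
#include <random>
#include <unordered_set>

int main() {
    const unsigned n = 5000;  // same order of magnitude as nToInsert in the test above
    std::mt19937 gen(12345);
    std::uniform_int_distribution<unsigned> pick(0, n - 1);

    std::unordered_set<unsigned> visited;
    for (unsigned i = 0; i < n; i++) {
        visited.insert(pick(gen));
    }

    // Typically prints a value close to 63.2%.
    std::printf("visited %.1f%% of %u records\n", 100.0 * visited.size() / n, n);
    return 0;
}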
TEST(KVCatalogTest, DirectoryPerAndSplit1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;
    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), true, true));
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(
            catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
        ASSERT_STRING_CONTAINS(catalog->getCollectionIdent("a.b"), "a/collection/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    {  // index
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";
        md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
                                                                            << "foo"),
                                                                       false,
                                                                       RecordId(),
                                                                       false,
                                                                       KVPrefix::kNotPrefixed,
                                                                       false));
        catalog->putMetaData(&opCtx, "a.b", md);
        ASSERT_STRING_CONTAINS(catalog->getIndexIdent(&opCtx, "a.b", "foo"), "a/index/");
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
        uow.commit();
    }
}
    TEST( KVCatalogTest, Coll1 ) {
        scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
        KVEngine* engine = helper->getEngine();

        scoped_ptr<RecordStore> rs;
        scoped_ptr<KVCatalog> catalog;
        {
            MyOperationContext opCtx( engine );
            WriteUnitOfWork uow( &opCtx );
            ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", CollectionOptions() ) );
            rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
            catalog.reset( new KVCatalog( rs.get() ) );
            uow.commit();
        }

        {
            MyOperationContext opCtx( engine );
            WriteUnitOfWork uow( &opCtx );
            ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
            ASSERT_NOT_EQUALS( "a.b", catalog->getCollectionIdent( "a.b" ) );
            uow.commit();
        }

        string ident = catalog->getCollectionIdent( "a.b" );
        {
            MyOperationContext opCtx( engine );
            WriteUnitOfWork uow( &opCtx );
            catalog.reset( new KVCatalog( rs.get() ) );
            catalog->init( &opCtx );
            uow.commit();
        }
        ASSERT_EQUALS( ident, catalog->getCollectionIdent( "a.b" ) );

        {
            MyOperationContext opCtx( engine );
            WriteUnitOfWork uow( &opCtx );
            catalog->dropCollection( &opCtx, "a.b" );
            catalog->newCollection( &opCtx, "a.b", CollectionOptions() );
            uow.commit();
        }
        ASSERT_NOT_EQUALS( ident, catalog->getCollectionIdent( "a.b" ) );
    }