// Add multiple compound keys using a bulk builder. TEST( SortedDataInterface, BuilderAddMultipleCompoundKeys ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataBuilderInterface> builder( sorted->getBulkBuilder( opCtx.get(), true ) ); ASSERT_OK( builder->addKey( compoundKey1a, loc1 ) ); ASSERT_OK( builder->addKey( compoundKey1b, loc2 ) ); ASSERT_OK( builder->addKey( compoundKey1c, loc4 ) ); ASSERT_OK( builder->addKey( compoundKey2b, loc3 ) ); ASSERT_OK( builder->addKey( compoundKey3a, loc5 ) ); builder->commit( false ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) ); } }
// Verify that a nonempty collection maybe takes up some space on disk. TEST(RecordStoreTestHarness, StorageSizeNonEmpty) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { stringstream ss; ss << "record " << i; string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); ASSERT_OK(res.getStatus()); uow.commit(); } } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get())); } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT(rs->storageSize(opCtx.get(), NULL) >= 0); } }
// Call savePosition() on a reverse cursor without ever calling restorePosition(). // May be useful to run this test under valgrind to verify there are no leaks. TEST( SortedDataInterface, SavePositionWithoutRestoreReversed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, true ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) ); cursor->savePosition(); } }
// Insert a record and try to perform an in-place update on it. TEST( RecordStoreTestHarness, UpdateWithDamages ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } string data = "00010111"; DiskLoc loc; const RecordData rec(data.c_str(), data.size() + 1); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), rec.data(), rec.size(), false ); ASSERT_OK( res.getStatus() ); loc = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { mutablebson::DamageVector dv( 3 ); dv[0].sourceOffset = 5; dv[0].targetOffset = 0; dv[0].size = 2; dv[1].sourceOffset = 3; dv[1].targetOffset = 2; dv[1].size = 3; dv[2].sourceOffset = 0; dv[2].targetOffset = 5; dv[2].size = 3; WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) ); uow.commit(); } } data = "11101000"; { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { RecordData record = rs->dataFor( opCtx.get(), loc ); ASSERT_EQUALS( data, record.data() ); } } }
// Insert multiple records and create a random iterator for the record store TEST(RecordStoreTestHarness, GetRandomIteratorNonEmpty) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } const unsigned nToInsert = 5000; // should be non-trivial amount, so we get multiple btree levels RecordId locs[nToInsert]; for (unsigned i = 0; i < nToInsert; i++) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { stringstream ss; ss << "record " << i; string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); ASSERT_OK(res.getStatus()); locs[i] = res.getValue(); uow.commit(); } } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get())); } set<RecordId> remain(locs, locs + nToInsert); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getRandomCursor(opCtx.get()); // returns NULL if getRandomCursor is not supported if (!cursor) { return; } // Iterate documents and mark those visited, but let at least one remain for (unsigned i = 0; i < nToInsert - 1; i++) { // Get a new cursor once in a while, shouldn't affect things if (i % (nToInsert / 8) == 0) { cursor = rs->getRandomCursor(opCtx.get()); } remain.erase(cursor->next()->id); // can happen more than once per doc } ASSERT(!remain.empty()); ASSERT(cursor->next()); // We should have at least visited a quarter of the items if we're any random at all // The expected fraction of visited records is 62.3%. ASSERT_LT(remain.size(), nToInsert * 3 / 4); } }
TEST( RecordStoreTestHarness, Cursor1 ) {
    // Insert N records, then walk them forward and backward with iterators.
    const int N = 10;

    scoped_ptr<HarnessHelper> helper( newHarnessHelper() );
    scoped_ptr<RecordStore> store( helper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( 0, store->numRecords( ctx.get() ) );
    }

    // Insert "eliot0" .. "eliot9" in a single unit of work.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        WriteUnitOfWork uow( ctx.get() );
        for ( int i = 0; i < N; i++ ) {
            string s = str::stream() << "eliot" << i;
            ASSERT_OK( store->insertRecord( ctx.get(), s.c_str(), s.size() + 1, false )
                           .getStatus() );
        }
        uow.commit();
    }

    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( N, store->numRecords( ctx.get() ) );
    }

    // A forward scan sees the records in insertion order.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        scoped_ptr<RecordIterator> iter( store->getIterator( ctx.get() ) );
        int x = 0;
        while ( !iter->isEOF() ) {
            DiskLoc loc = iter->getNext();
            RecordData data = iter->dataFor( loc );
            string s = str::stream() << "eliot" << x++;
            ASSERT_EQUALS( s, data.data() );
        }
        ASSERT_EQUALS( N, x );
    }

    // A backward scan sees them in reverse order.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        scoped_ptr<RecordIterator> iter( store->getIterator( ctx.get(), DiskLoc(), false,
                                                             CollectionScanParams::BACKWARD ) );
        int x = N;
        while ( !iter->isEOF() ) {
            DiskLoc loc = iter->getNext();
            RecordData data = iter->dataFor( loc );
            string s = str::stream() << "eliot" << --x;
            ASSERT_EQUALS( s, data.data() );
        }
        ASSERT_EQUALS( 0, x );
    }
}
// Call getDirection() on a reverse cursor and verify the result equals -1. TEST( SortedDataInterface, GetCursorDirectionReversed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) ); ASSERT_EQUALS( -1, cursor->getDirection() ); } }
TEST( RecordStoreTestHarness, Simple1 ) {
    // Basic insert / dataFor / findRecord round trip on a non-capped store.
    scoped_ptr<HarnessHelper> helper( newHarnessHelper() );
    scoped_ptr<RecordStore> store( helper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( 0, store->numRecords( ctx.get() ) );
    }

    string s = "eliot was here";
    DiskLoc loc1;

    // Insert the record and read it back within the same operation context.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        {
            WriteUnitOfWork uow( ctx.get() );
            StatusWith<DiskLoc> res = store->insertRecord( ctx.get(), s.c_str(),
                                                           s.size() + 1, false );
            ASSERT_OK( res.getStatus() );
            loc1 = res.getValue();
            uow.commit();
        }
        ASSERT_EQUALS( s, store->dataFor( ctx.get(), loc1 ).data() );
    }

    // Read it back in a fresh context; probe findRecord for a miss and a hit.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( s, store->dataFor( ctx.get(), loc1 ).data() );
        ASSERT_EQUALS( 1, store->numRecords( ctx.get() ) );

        RecordData rd;
        ASSERT( !store->findRecord( ctx.get(), DiskLoc(111,17), &rd ) );  // bogus location
        ASSERT( rd.data() == NULL );

        ASSERT( store->findRecord( ctx.get(), loc1, &rd ) );
        ASSERT_EQUALS( s, rd.data() );
    }

    // A second insert bumps the record count to two.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        WriteUnitOfWork uow( ctx.get() );
        StatusWith<DiskLoc> res = store->insertRecord( ctx.get(), s.c_str(),
                                                       s.size() + 1, false );
        ASSERT_OK( res.getStatus() );
        uow.commit();
    }

    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( 2, store->numRecords( ctx.get() ) );
    }
}
TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
    // Insert "aaa111bbb", then patch the middle three bytes in place with
    // updateWithDamages() so the record becomes "aaa222bbb".
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    string s1 = "aaa111bbb";
    string s2 = "aaa222bbb";

    DiskLoc loc;
    const RecordData s1Rec(s1.c_str(), s1.size() + 1);

    // Insert the original record. The final argument is a bool; the original
    // code passed -1 here, which implicitly converts to true -- inconsistent
    // with every other insertRecord() call in this file, so pass false.
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                        s1Rec.data(),
                                                        s1Rec.size(),
                                                        false );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }

    // Verify the record reads back as inserted.
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
    }

    // Apply one damage event copying "222" over bytes [3,6) of the record.
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            const char* damageSource = "222";
            mutablebson::DamageVector dv;
            dv.push_back( mutablebson::DamageEvent() );
            dv[0].sourceOffset = 0;
            dv[0].targetOffset = 3;
            dv[0].size = 3;
            Status res = rs->updateWithDamages( opCtx.get(), loc, s1Rec, damageSource, dv );
            ASSERT_OK( res );
            uow.commit();
        }
    }

    // The record now holds the patched content.
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
    }
}
// Insert a record and try to call updateWithDamages() with an empty DamageVector. TEST( RecordStoreTestHarness, UpdateWithNoDamages ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); if (!rs->updateWithDamagesSupported()) return; { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } string data = "my record"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), rec.data(), rec.size(), false ); ASSERT_OK( res.getStatus() ); loc = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { mutablebson::DamageVector dv; WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { RecordData record = rs->dataFor( opCtx.get(), loc ); ASSERT_EQUALS( data, record.data() ); } } }
// Insert a single record. Create a random iterator pointing to that single record. // Then check we'll retrieve the record. TEST(RecordStoreTestHarness, GetRandomIteratorSingleton) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQ(0, rs->numRecords(opCtx.get())); } // Insert one record. RecordId idToRetrieve; { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, false); ASSERT_OK(res.getStatus()); idToRetrieve = res.getValue(); uow.commit(); } // Double-check that the record store has one record in it now. { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQ(1, rs->numRecords(opCtx.get())); } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getRandomCursor(opCtx.get()); // returns NULL if getRandomCursor is not supported if (!cursor) { return; } // We should be pointing at the only record in the store. // Check deattaching / reattaching cursor->save(); cursor->detachFromOperationContext(); opCtx = harnessHelper->newOperationContext(); cursor->reattachToOperationContext(opCtx.get()); ASSERT_TRUE(cursor->restore()); auto record = cursor->next(); ASSERT_EQUALS(record->id, idToRetrieve); // Iterator should either be EOF now, or keep returning the single existing document for (int i = 0; i < 10; i++) { record = cursor->next(); ASSERT(!record || record->id == idToRetrieve); } } }
TEST( RecordStoreTestHarness, Update1 ) {
    // Insert a record, replace its contents with updateRecord(), and verify
    // the store still holds exactly one record with the new data.
    scoped_ptr<HarnessHelper> helper( newHarnessHelper() );
    scoped_ptr<RecordStore> store( helper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( 0, store->numRecords( ctx.get() ) );
    }

    string s1 = "eliot was here";
    string s2 = "eliot was here again";
    DiskLoc loc;

    // Insert the initial record.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        WriteUnitOfWork uow( ctx.get() );
        StatusWith<DiskLoc> res = store->insertRecord( ctx.get(), s1.c_str(),
                                                       s1.size() + 1, false );
        ASSERT_OK( res.getStatus() );
        loc = res.getValue();
        uow.commit();
    }

    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( s1, store->dataFor( ctx.get(), loc ).data() );
    }

    // Replace the record; the update may move it, so capture the new location.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        WriteUnitOfWork uow( ctx.get() );
        StatusWith<DiskLoc> res = store->updateRecord( ctx.get(), loc, s2.c_str(),
                                                       s2.size() + 1, false, NULL );
        ASSERT_OK( res.getStatus() );
        loc = res.getValue();
        uow.commit();
    }

    // Still a single record, now holding the new content.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        ASSERT_EQUALS( 1, store->numRecords( ctx.get() ) );
        ASSERT_EQUALS( s2, store->dataFor( ctx.get(), loc ).data() );
    }
}
TEST(RocksRecordStoreTest, Isolation2 ) {
    // Snapshot isolation: a transaction that read an old version of a record
    // must hit a WriteConflictException when it tries to update that record
    // after a concurrent transaction has committed a newer version.
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    RecordId loc1;
    RecordId loc2;

    // Seed two records.
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );

            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc1 = res.getValue();

            res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc2 = res.getValue();

            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
        scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );

        // ensure we start transactions
        rs->dataFor( t1.get(), loc2 );
        rs->dataFor( t2.get(), loc2 );

        // t1 updates loc1 and commits.
        {
            WriteUnitOfWork w( t1.get() );
            ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
            w.commit();
        }

        // t2's snapshot still shows the old value of loc1, so its own update
        // attempt must conflict.
        {
            WriteUnitOfWork w( t2.get() );
            ASSERT_EQUALS( string("a"), rs->dataFor( t2.get(), loc1 ).data() );
            try {
                // this should fail as our version of loc1 is too old
                rs->updateRecord( t2.get(), loc1, "c", 2, false, NULL );
                ASSERT( 0 );
            }
            catch ( WriteConflictException& dle ) {
            }
        }
    }
}
// Insert multiple records and create an iterator for repairing the record store, // even though the it has not been corrupted. TEST( RecordStoreTestHarness, GetIteratorForRepairNonEmpty ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } const int nToInsert = 10; DiskLoc locs[nToInsert]; for ( int i = 0; i < nToInsert; i++ ) { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { stringstream ss; ss << "record " << i; string data = ss.str(); WriteUnitOfWork uow( opCtx.get() ); StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), data.c_str(), data.size() + 1, false ); ASSERT_OK( res.getStatus() ); locs[i] = res.getValue(); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) ); } set<DiskLoc> remain( locs, locs + nToInsert ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); RecordIterator *it = rs->getIteratorForRepair( opCtx.get() ); while ( !it->isEOF() ) { DiskLoc loc = it->curr(); ASSERT( 1 == remain.erase( loc ) ); ASSERT_EQUALS( loc, it->getNext() ); } ASSERT( remain.empty() ); ASSERT_EQUALS( DiskLoc(), it->curr() ); ASSERT_EQUALS( DiskLoc(), it->getNext() ); ASSERT( it->isEOF() ); ASSERT_EQUALS( DiskLoc(), it->curr() ); } }
TEST(RocksRecordStoreTest, Isolation1 ) {
    // Write-write conflict: two open transactions touch the same record;
    // the second writer must get a WriteConflictException, and the first
    // writer's commit must still succeed after the loser is torn down.
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    RecordId loc1;
    RecordId loc2;

    // Seed two records.
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );

            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc1 = res.getValue();

            res = rs->insertRecord( opCtx.get(), "a", 2, false );
            ASSERT_OK( res.getStatus() );
            loc2 = res.getValue();

            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> t1( harnessHelper->newOperationContext() );
        scoped_ptr<OperationContext> t2( harnessHelper->newOperationContext() );

        // Both units of work are heap-held so the loser can be destroyed
        // (rolled back) from inside the catch block below.
        scoped_ptr<WriteUnitOfWork> w1( new WriteUnitOfWork( t1.get() ) );
        scoped_ptr<WriteUnitOfWork> w2( new WriteUnitOfWork( t2.get() ) );

        // Read through both contexts so both transactions are started.
        rs->dataFor( t1.get(), loc1 );
        rs->dataFor( t2.get(), loc1 );

        // t1 writes both records first.
        ASSERT_OK( rs->updateRecord( t1.get(), loc1, "b", 2, false, NULL ).getStatus() );
        ASSERT_OK( rs->updateRecord( t1.get(), loc2, "B", 2, false, NULL ).getStatus() );

        try {
            // this should fail
            rs->updateRecord( t2.get(), loc1, "c", 2, false, NULL );
            ASSERT( 0 );
        }
        catch ( WriteConflictException& dle ) {
            // Roll back and discard the losing transaction before committing
            // the winner; the unit of work must go before its context.
            w2.reset( NULL );
            t2.reset( NULL );
        }

        w1->commit(); // this should succeed
    }
}
// Verify that calling touch() on an empty collection returns an OK status, // even when NULL is passed in for the stats output. TEST( RecordStoreTestHarness, TouchEmptyWithNullStats ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_OK( rs->touch( opCtx.get(), NULL /* stats output */ ) ); } }
// Insert multiple records and create an iterator for repairing the record store, // even though the it has not been corrupted. TEST(RecordStoreTestHarness, GetIteratorForRepairNonEmpty) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } const int nToInsert = 10; RecordId locs[nToInsert]; for (int i = 0; i < nToInsert; i++) { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); { stringstream ss; ss << "record " << i; string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false); ASSERT_OK(res.getStatus()); locs[i] = res.getValue(); uow.commit(); } } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get())); } set<RecordId> remain(locs, locs + nToInsert); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursorForRepair(opCtx.get()); // returns NULL if getCursorForRepair is not supported if (!cursor) { return; } while (auto record = cursor->next()) { remain.erase(record->id); // can happen more than once per doc } ASSERT(remain.empty()); ASSERT(!cursor->next()); } }
// Insert a single record. Create a repair iterator pointing to that single record. // Then invalidate the record and ensure that the repair iterator responds correctly. // See SERVER-16300. TEST(RecordStoreTestHarness, GetIteratorForRepairInvalidateSingleton) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQ(0, rs->numRecords(opCtx.get())); } // Insert one record. RecordId idToInvalidate; { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); WriteUnitOfWork uow(opCtx.get()); StatusWith<RecordId> res = rs->insertRecord(opCtx.get(), "some data", 10, false); ASSERT_OK(res.getStatus()); idToInvalidate = res.getValue(); uow.commit(); } // Double-check that the record store has one record in it now. { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQ(1, rs->numRecords(opCtx.get())); } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursorForRepair(opCtx.get()); // returns NULL if getCursorForRepair is not supported if (!cursor) { return; } // We should be pointing at the only record in the store. // Invalidate the record we're pointing at. cursor->save(); cursor->invalidate(opCtx.get(), idToInvalidate); cursor->restore(); // Iterator should be EOF now because the only thing in the collection got deleted. ASSERT(!cursor->next()); } }
// Verify that calling touch() on an empty collection returns an OK status. TEST( RecordStoreTestHarness, TouchEmpty ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { BSONObjBuilder stats; Status status = rs->touch( opCtx.get(), &stats ); ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported ); } } }
// Insert multiple records, and verify that calling touch() on a nonempty collection // returns an OK status. TEST( RecordStoreTestHarness, TouchNonEmpty ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } int nToInsert = 10; for ( int i = 0; i < nToInsert; i++ ) { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { stringstream ss; ss << "record " << i; string data = ss.str(); WriteUnitOfWork uow( opCtx.get() ); StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(), data.c_str(), data.size() + 1, false ); ASSERT_OK( res.getStatus() ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( nToInsert, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { BSONObjBuilder stats; // XXX does not verify the collection was loaded into cache // (even if supported by storage engine) Status status = rs->touch( opCtx.get(), &stats ); ASSERT( status.isOK() || status.code() == ErrorCodes::CommandNotSupported ); } } }
// Create a random iterator for empty record store. TEST(RecordStoreTestHarness, GetRandomIteratorEmpty) { unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper()); unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore()); { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } { unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getRandomCursor(opCtx.get()); // returns NULL if getRandomCursor is not supported if (!cursor) { return; } ASSERT(!cursor->next()); } }
// Call advance() on a reverse cursor until it is exhausted. // When a cursor positioned at EOF is advanced, it stays at EOF. TEST( SortedDataInterface, ExhaustCursorReversed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } int nToInsert = 10; for ( int i = 0; i < nToInsert; i++ ) { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); BSONObj key = BSON( "" << i ); DiskLoc loc( 42, i * 2 ); ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) ); ASSERT( !cursor->locate( maxKey, maxDiskLoc ) ); for ( int i = nToInsert - 1; i >= 0; i-- ) { ASSERT( !cursor->isEOF() ); ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() ); ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() ); cursor->advance(); } ASSERT( cursor->isEOF() ); // Cursor at EOF should remain at EOF when advanced cursor->advance(); ASSERT( cursor->isEOF() ); } }
// Create an iterator for repairing an empty record store. TEST( RecordStoreTestHarness, GetIteratorForRepairEmpty ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); RecordIterator *it = rs->getIteratorForRepair( opCtx.get() ); ASSERT( it->isEOF() ); ASSERT_EQUALS( DiskLoc(), it->curr() ); ASSERT_EQUALS( DiskLoc(), it->getNext() ); ASSERT( it->isEOF() ); ASSERT_EQUALS( DiskLoc(), it->curr() ); } }
// Insert multiple keys and try to iterate through all of them // using a forward cursor while calling savePosition() and // restorePosition() in succession. TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursor ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } int nToInsert = 10; for ( int i = 0; i < nToInsert; i++ ) { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); BSONObj key = BSON( "" << i ); DiskLoc loc( 42, i * 2 ); ASSERT_OK( sorted->insert( opCtx.get(), key, loc, true ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), 1 ) ); ASSERT( !cursor->locate( minKey, minDiskLoc ) ); for ( int i = 0; i < nToInsert; i++ ) { ASSERT( !cursor->isEOF() ); ASSERT_EQUALS( BSON( "" << i ), cursor->getKey() ); ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() ); cursor->advance(); cursor->savePosition(); cursor->restorePosition( opCtx.get() ); } ASSERT( cursor->isEOF() ); } }
// Verify that a reverse cursor is positioned at EOF when the index is empty. TEST( SortedDataInterface, CursorIsEOFWhenEmptyReversed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) ); ASSERT( !cursor->locate( maxKey, maxDiskLoc ) ); ASSERT( cursor->isEOF() ); // Cursor at EOF should remain at EOF when advanced cursor->advance(); ASSERT( cursor->isEOF() ); } }
// NOTE(review): "DocWroter" in the test name looks like a typo for "DocWriter";
// kept as-is because renaming would change the registered test name.
TEST( RecordStoreTestHarness, Simple1InsertDocWroter ) {
    scoped_ptr<HarnessHelper> helper( newHarnessHelper() );
    scoped_ptr<RecordStore> store( helper->newNonCappedRecordStore() );

    DiskLoc loc1;

    // Insert through the DocWriter interface and read the bytes back.
    {
        scoped_ptr<OperationContext> ctx( helper->newOperationContext() );
        {
            WriteUnitOfWork uow( ctx.get() );
            DummyDocWriter dw;
            StatusWith<DiskLoc> res = store->insertRecord( ctx.get(), &dw, false );
            ASSERT_OK( res.getStatus() );
            loc1 = res.getValue();
            uow.commit();
        }
        ASSERT_EQUALS( string("eliot"), store->dataFor( ctx.get(), loc1 ).data() );
    }
}
// Insert multiple compound keys and verify that the number of entries // in the index equals the number that were inserted. TEST( SortedDataInterface, InsertMultipleCompoundKeys ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1a, loc1, false ) ); ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1b, loc2, false ) ); ASSERT_OK( sorted->insert( opCtx.get(), compoundKey2b, loc3, false ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), compoundKey1c, loc4, false ) ); ASSERT_OK( sorted->insert( opCtx.get(), compoundKey3a, loc5, false ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 5, sorted->numEntries( opCtx.get() ) ); } }
// Insert the same key multiple times and try to iterate through each // occurrence using a reverse cursor while calling savePosition() and // restorePosition() in succession. Verify that the DiskLoc is saved // as part of the current position of the cursor. TEST( SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysReversed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface( false ) ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } int nToInsert = 10; for ( int i = 0; i < nToInsert; i++ ) { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); DiskLoc loc( 42, i * 2 ); ASSERT_OK( sorted->insert( opCtx.get(), key1, loc, true /* allow duplicates */ ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( nToInsert, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); scoped_ptr<SortedDataInterface::Cursor> cursor( sorted->newCursor( opCtx.get(), -1 ) ); ASSERT( !cursor->locate( maxKey, maxDiskLoc ) ); for ( int i = nToInsert - 1; i >= 0; i-- ) { ASSERT( !cursor->isEOF() ); ASSERT_EQUALS( key1, cursor->getKey() ); ASSERT_EQUALS( DiskLoc( 42, i * 2 ), cursor->getDiskLoc() ); cursor->advance(); cursor->savePosition(); cursor->restorePosition( opCtx.get() ); } ASSERT( cursor->isEOF() ); } }
// Insert multiple, distinct keys at the same DiskLoc and verify that the // number of entries in the index equals the number that were inserted, even // when duplicates are not allowed. TEST( SortedDataInterface, InsertSameDiskLoc ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) ); ASSERT_OK( sorted->insert( opCtx.get(), key2, loc1, false ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), key3, loc1, false ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) ); } }
// Insert the same key multiple times and verify that all entries exists // in the index when duplicates are allowed. TEST( SortedDataInterface, InsertSameKeyWithDupsAllowed ) { scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() ); scoped_ptr<SortedDataInterface> sorted( harnessHelper->newSortedDataInterface() ); { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT( sorted->isEmpty( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), key1, loc1, false ) ); ASSERT_OK( sorted->insert( opCtx.get(), key1, loc2, true /* allow duplicates */ ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 2, sorted->numEntries( opCtx.get() ) ); } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); { WriteUnitOfWork uow( opCtx.get() ); ASSERT_OK( sorted->insert( opCtx.get(), key1, loc3, true /* allow duplicates */ ) ); uow.commit(); } } { scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() ); ASSERT_EQUALS( 3, sorted->numEntries( opCtx.get() ) ); } }