TEST( Shard, EqualitySingle ) {
    // Shards constructed with identical name/host/maxSize/draining/tags compare equal.
    ASSERT_EQUALS(Shard("foo", "b.foo.com:123", 0, false, BSONArray()),
                  Shard("foo", "b.foo.com:123", 0, false, BSONArray()));
    // Different host name.
    ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123", 0, false, BSONArray()),
                      Shard("foo", "a.foo.com:123", 0, false, BSONArray()));
    // Different port.
    ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123", 0, false, BSONArray()),
                      Shard("foo", "b.foo.com:124", 0, false, BSONArray()));
    // Different shard name.
    ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123", 0, false, BSONArray()),
                      Shard("foa", "b.foo.com:123", 0, false, BSONArray()));
}
TEST( NamespaceStringTest, DBHash ) {
    // nsDBHash() hashes only the database portion of a namespace string:
    // everything after the first '.' is ignored.
    ASSERT_EQUALS( nsDBHash( "foo" ), nsDBHash( "foo" ) );
    ASSERT_EQUALS( nsDBHash( "foo" ), nsDBHash( "foo.a" ) );
    ASSERT_EQUALS( nsDBHash( "foo" ), nsDBHash( "foo." ) );

    // An empty database name hashes consistently too.
    ASSERT_EQUALS( nsDBHash( "" ), nsDBHash( "" ) );
    ASSERT_EQUALS( nsDBHash( "" ), nsDBHash( ".a" ) );
    ASSERT_EQUALS( nsDBHash( "" ), nsDBHash( "." ) );

    // Distinct database names produce distinct hashes for these cases.
    ASSERT_NOT_EQUALS( nsDBHash( "foo" ), nsDBHash( "food" ) );
    ASSERT_NOT_EQUALS( nsDBHash( "foo." ), nsDBHash( "food" ) );
    ASSERT_NOT_EQUALS( nsDBHash( "foo.d" ), nsDBHash( "food" ) );
}
// Basic KVCatalog collection lifecycle: create a collection, verify its
// on-disk ident differs from its namespace, verify the ident survives a
// catalog reload, and verify a drop + recreate yields a fresh ident.
TEST_F(KVCatalogTest, Coll1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;
    {
        // Create the record store that backs the catalog itself.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        uow.commit();
    }

    {
        // Register a collection; its ident must not be the raw namespace.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(newCollection(&opCtx,
                                NamespaceString("a.b"),
                                CollectionOptions(),
                                KVPrefix::kNotPrefixed,
                                catalog.get()));
        ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
        uow.commit();
    }

    string ident = catalog->getCollectionIdent("a.b");
    {
        // Rebuild the catalog from the same record store; the ident should
        // be stable across a reload.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        catalog->init(&opCtx);
        uow.commit();
    }

    ASSERT_EQUALS(ident, catalog->getCollectionIdent("a.b"));

    {
        // Drop and recreate under the same namespace.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        dropCollection(&opCtx, "a.b", catalog.get()).transitional_ignore();
        newCollection(&opCtx,
                      NamespaceString("a.b"),
                      CollectionOptions(),
                      KVPrefix::kNotPrefixed,
                      catalog.get())
            .transitional_ignore();
        uow.commit();
    }

    // The recreated collection gets a brand new ident.
    ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
}
void run() {
    // Insert enough documents that counting them will exceed the iteration threshold
    // to trigger a yield.
    for( int i = 0; i < 1000; ++i ) {
        insert( BSON( "a" << 1 ) );
    }

    // Call runCount() under a read lock.
    dbtemprelease release;
    Client::ReadContext ctx( ns() );

    int numYieldsBeforeCount = numYields();

    string err;
    int errCode;
    // All 1000 matching documents are counted and no error is reported.
    ASSERT_EQUALS( 1000, runCount( ns(), countCommand( BSON( "a" << 1 ) ), err, errCode ) );
    ASSERT_EQUALS( "", err );

    int numYieldsAfterCount = numYields();
    int numYieldsDuringCount = numYieldsAfterCount - numYieldsBeforeCount;

    // The runCount() function yielded at least once while counting.
    ASSERT_NOT_EQUALS( 0, numYieldsDuringCount );
    ASSERT( 0 < numYieldsDuringCount );
}
void run() { createAccumulator(); accumulator()->process(Value(numeric_limits<double>::quiet_NaN()), false); // NaN is unequal to itself. ASSERT_NOT_EQUALS(accumulator()->getValue(false).getDouble(), accumulator()->getValue(false).getDouble()); }
// Merge-sorts the results of two index scans ({a:1,c:1} and {b:1,c:1}) by
// "c" and verifies the interleaved pairs come back in ascending "c" order,
// one document from each index per value of "c".
void run() {
    Client::WriteContext ctx(&_txn, ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(&_txn, ns());
    if (!coll) {
        coll = db->createCollection(&_txn, ns());
    }

    // For each "c" value insert one doc with "a" and one with "b".
    const int N = 50;
    for (int i = 0; i < N; ++i) {
        insert(BSON("a" << 1 << "c" << i));
        insert(BSON("b" << 1 << "c" << i));
    }

    BSONObj firstIndex = BSON("a" << 1 << "c" << 1);
    BSONObj secondIndex = BSON("b" << 1 << "c" << 1);

    addIndex(firstIndex);
    addIndex(secondIndex);

    WorkingSet* ws = new WorkingSet();
    // Sort by c:1
    MergeSortStageParams msparams;
    msparams.pattern = BSON("c" << 1);
    MergeSortStage* ms = new MergeSortStage(msparams, ws, coll);

    // a:1
    IndexScanParams params;
    params.descriptor = getIndex(firstIndex, coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = objWithMinKey(1);
    params.bounds.endKey = objWithMaxKey(1);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ms->addChild(new IndexScan(&_txn, params, ws, NULL));

    // b:1 (same full-range bounds, only the descriptor changes)
    params.descriptor = getIndex(secondIndex, coll);
    ms->addChild(new IndexScan(&_txn, params, ws, NULL));
    ctx.commit();

    // Must fetch if we want to easily pull out an obj.
    PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);

    for (int i = 0; i < N; ++i) {
        BSONObj first, second;
        ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&first, NULL));
        ASSERT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&second, NULL));
        // Both docs of a pair share the i-th "c" value, one from each index.
        ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
        ASSERT_EQUALS(i, first["c"].numberInt());
        ASSERT((first.hasField("a") && second.hasField("b")) ||
               (first.hasField("b") && second.hasField("a")));
    }

    // Should be done now.
    BSONObj foo;
    ASSERT_NOT_EQUALS(Runner::RUNNER_ADVANCED, runner.getNext(&foo, NULL));
}
void run() { _client.insert("unittests.system.indexes", BSON("name" << "x" << "ns" << _ns << "key" << BSON("y" << 1 << "x" << 1))); // Cannot create a different index with the same name as the existing one. ASSERT_NOT_EQUALS(_client.getLastError(), ""); }
TEST(RandomTest, Secure1) {
    // Two independently created secure RNGs should not produce matching
    // values at any point in their first 100 draws.
    auto firstRng = SecureRandom::create();
    auto secondRng = SecureRandom::create();
    for (int draw = 0; draw < 100; ++draw) {
        ASSERT_NOT_EQUALS(firstRng->nextInt64(), secondRng->nextInt64());
    }
}
void run() { _client.insert("unittests.system.indexes", BSON("name" << "x" << "ns" << _ns << "unique" << true << "key" << BSON("x" << 1 << "y" << 1))); // Cannot have same key spec with an option different from the existing one. ASSERT_NOT_EQUALS(_client.getLastError(), ""); }
TEST(SecureAllocator, SecureString) {
    using SecStr = SecureAllocatorDefaultDomain::SecureString;

    // Fill a secure string and check it compares equal to an identically
    // constructed one.
    SecStr original;
    original->resize(2000, 'x');
    ASSERT_EQUALS(0, original->compare(*SecStr(2000, 'x')));

    // Copy construction and copy assignment each produce a distinct buffer.
    SecStr copied(original);
    ASSERT_NOT_EQUALS(&*original, &*copied);
    copied = original;
    ASSERT_NOT_EQUALS(&*original, &*copied);

    // Move construction and move assignment transfer the underlying buffer.
    auto originalBuf = &*original;
    auto copiedBuf = &*copied;
    SecStr moved(std::move(original));
    ASSERT_EQUALS(originalBuf, &*moved);
    moved = std::move(copied);
    ASSERT_EQUALS(copiedBuf, &*moved);
}
// Basic KVCatalog collection lifecycle (legacy scoped_ptr variant): create
// a collection, verify its ident differs from the namespace and is stable
// across a catalog reload, then drop + recreate for a fresh ident.
TEST( KVCatalogTest, Coll1 ) {
    scoped_ptr<KVHarnessHelper> helper( KVHarnessHelper::create() );
    KVEngine* engine = helper->getEngine();

    scoped_ptr<RecordStore> rs;
    scoped_ptr<KVCatalog> catalog;
    {
        // Create the record store that backs the catalog itself.
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        ASSERT_OK( engine->createRecordStore( &opCtx, "catalog", CollectionOptions() ) );
        rs.reset( engine->getRecordStore( &opCtx, "catalog", "catalog", CollectionOptions() ) );
        catalog.reset( new KVCatalog( rs.get() ) );
        uow.commit();
    }

    {
        // Register a collection; its ident must not be the raw namespace.
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        ASSERT_OK( catalog->newCollection( &opCtx, "a.b", CollectionOptions() ) );
        ASSERT_NOT_EQUALS( "a.b", catalog->getCollectionIdent( "a.b" ) );
        uow.commit();
    }

    string ident = catalog->getCollectionIdent( "a.b" );
    {
        // Rebuild the catalog from the same record store; ident is stable.
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        catalog.reset( new KVCatalog( rs.get() ) );
        catalog->init( &opCtx );
        uow.commit();
    }
    ASSERT_EQUALS( ident, catalog->getCollectionIdent( "a.b" ) );

    {
        // Drop and recreate under the same namespace.
        MyOperationContext opCtx( engine );
        WriteUnitOfWork uow( &opCtx );
        catalog->dropCollection( &opCtx, "a.b" );
        catalog->newCollection( &opCtx, "a.b", CollectionOptions() );
        uow.commit();
    }
    // The recreated collection gets a brand new ident.
    ASSERT_NOT_EQUALS( ident, catalog->getCollectionIdent( "a.b" ) );
}
TEST(RandomTest, Secure1) {
    // Two independently created secure RNGs should not produce matching
    // values at any point in their first 100 draws.
    SecureRandom* rngA = SecureRandom::create();
    SecureRandom* rngB = SecureRandom::create();

    for (int draw = 0; draw < 100; ++draw) {
        ASSERT_NOT_EQUALS(rngA->nextInt64(), rngB->nextInt64());
    }

    // NOTE(review): raw owning pointers — these leak if an assertion above
    // throws; a smart pointer would be safer.
    delete rngA;
    delete rngB;
}
void run() { _client.insert("unittests.system.indexes", BSON("name" << "super2" << "ns" << _ns << "unique" << 1 << "dropDups" << true << "sparse" << true << "expireAfterSeconds" << 2400 << "key" << BSON("superIdx" << "2d"))); ASSERT_NOT_EQUALS(_client.getLastError(), ""); }
// Inserts tagged documents and verifies Helpers::getLocsInRange() returns a
// DiskLoc for every document in the _id range, with a sane size estimate.
TEST(DBHelperTests, FindDiskLocs) {
    DBDirectClient client;
    OperationContextImpl txn;

    // Some unique tag we can use to make sure we're pulling back the right data
    OID tag = OID::gen();
    client.remove( ns, BSONObj() );

    int numDocsInserted = 10;
    for ( int i = 0; i < numDocsInserted; ++i ) {
        client.insert( ns, BSON( "_id" << i << "tag" << tag ) );
    }

    long long maxSizeBytes = 1024 * 1024 * 1024;

    set<DiskLoc> locs;
    long long numDocsFound;
    long long estSizeBytes;
    {
        // search _id range (0, 10)
        Lock::DBRead lk(txn.lockState(), ns);
        KeyRange range( ns,
                        BSON( "_id" << 0 ),
                        BSON( "_id" << numDocsInserted ),
                        BSON( "_id" << 1 ) );

        Status result = Helpers::getLocsInRange( &txn,
                                                 range,
                                                 maxSizeBytes,
                                                 &locs,
                                                 &numDocsFound,
                                                 &estSizeBytes );
        ASSERT_EQUALS( result, Status::OK() );
        ASSERT_EQUALS( numDocsFound, numDocsInserted );
        // The size estimate must be non-zero yet under the allowed maximum.
        ASSERT_NOT_EQUALS( estSizeBytes, 0 );
        ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );

        Database* db = dbHolder().get( &txn, nsToDatabase(range.ns), storageGlobalParams.dbpath);
        const Collection* collection = db->getCollection(&txn, ns);

        // Make sure all the disklocs actually correspond to the right info
        for ( set<DiskLoc>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
            const BSONObj obj = collection->docFor(*it);
            ASSERT_EQUALS(obj["tag"].OID(), tag);
        }
    }
}
TEST( NamespaceTest, Basics ) {
    Namespace lhs( "foo.bar" );
    Namespace rhs( "bar.foo" );

    // A namespace is consistent with itself.
    ASSERT_EQUALS( lhs.toString(), lhs.toString() );
    ASSERT_EQUALS( lhs.hash(), lhs.hash() );
    ASSERT( lhs == lhs );
    ASSERT( !( lhs != lhs ) );

    // Distinct namespaces differ by hash and by the comparison operators.
    ASSERT_NOT_EQUALS( lhs.hash(), rhs.hash() );
    ASSERT( lhs != rhs );
    ASSERT( !( lhs == rhs ) );
}
// Performs a test using a count stage whereby each unit of work is interjected // in some way by the invocation of interject(). const CountStats* runCount(CountStage& count_stage) { int interjection = 0; WorkingSetID wsid; while (!count_stage.isEOF()) { // do some work -- assumes that one work unit counts a single doc PlanStage::StageState state = count_stage.work(&wsid); ASSERT_NOT_EQUALS(state, PlanStage::FAILURE); ASSERT_NOT_EQUALS(state, PlanStage::DEAD); // prepare for yield count_stage.saveState(); // interject in some way kInterjection times if (interjection < kInterjections) { interject(count_stage, interjection++); } // resume from yield count_stage.restoreState(&_txn); } return static_cast<const CountStats*>(count_stage.getSpecificStats()); }
// Inserts tagged documents and verifies Helpers::getLocsInRange() returns a
// DiskLoc for every document in the _id range, with a sane size estimate
// (legacy variant without an OperationContext).
TEST(DBHelperTests, FindDiskLocs) {
    DBDirectClient client;

    // Some unique tag we can use to make sure we're pulling back the right data
    OID tag = OID::gen();
    client.remove( ns, BSONObj() );

    int numDocsInserted = 10;
    for ( int i = 0; i < numDocsInserted; ++i ) {
        client.insert( ns, BSON( "_id" << i << "tag" << tag ) );
    }

    long long maxSizeBytes = 1024 * 1024 * 1024;

    set<DiskLoc> locs;
    long long numDocsFound;
    long long estSizeBytes;
    {
        // search _id range (0, 10)
        Lock::DBRead lk( ns );
        Client::Context ctx( ns );
        KeyRange range( ns,
                        BSON( "_id" << 0 ),
                        BSON( "_id" << numDocsInserted ),
                        BSON( "_id" << 1 ) );

        Status result = Helpers::getLocsInRange( range,
                                                 maxSizeBytes,
                                                 &locs,
                                                 &numDocsFound,
                                                 &estSizeBytes );
        ASSERT_EQUALS( result, Status::OK() );
        ASSERT_EQUALS( numDocsFound, numDocsInserted );
        // The size estimate must be non-zero yet under the allowed maximum.
        ASSERT_NOT_EQUALS( estSizeBytes, 0 );
        ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );

        // Make sure all the disklocs actually correspond to the right info
        for ( set<DiskLoc>::iterator it = locs.begin(); it != locs.end(); ++it ) {
            ASSERT_EQUALS( it->obj()["tag"].OID(), tag );
        }
    }
}
/** * Returns the projected value from the working set that would * be returned in the 'values' field of the distinct command result. * Limited to NumberInt BSON types because this is the only * BSON type used in this suite of tests. */ static int getIntFieldDotted(const WorkingSet& ws, WorkingSetID wsid, const std::string& field) { // For some reason (at least under OS X clang), we cannot refer to INVALID_ID // inside the test assertion macro. WorkingSetID invalid = WorkingSet::INVALID_ID; ASSERT_NOT_EQUALS(invalid, wsid); WorkingSetMember* member = ws.get(wsid); // Distinct hack execution is always covered. // Key value is retrieved from working set key data // instead of RecordId. ASSERT_FALSE(member->hasObj()); BSONElement keyElt; ASSERT_TRUE(member->getFieldDotted(field, &keyElt)); ASSERT_TRUE(keyElt.isNumber()); return keyElt.numberInt(); }
TEST_F(ConfigUpgradeTests, InitialUpgrade) {
    //
    // Tests initializing the config server to the initial version
    //

    // Start from an empty (zero) version.
    VersionType preUpgradeVersion;
    VersionType postUpgradeVersion;
    string errMsg;
    const bool upgraded = checkAndUpgradeConfigVersion(
        configSvr(), false, &preUpgradeVersion, &postUpgradeVersion, &errMsg);

    ASSERT(upgraded);
    ASSERT_EQUALS(preUpgradeVersion.getCurrentVersion(), 0);
    ASSERT_EQUALS(postUpgradeVersion.getMinCompatibleVersion(), MIN_COMPATIBLE_CONFIG_VERSION);
    ASSERT_EQUALS(postUpgradeVersion.getCurrentVersion(), CURRENT_CONFIG_VERSION);
    // A fresh, non-default cluster id must have been generated.
    ASSERT_NOT_EQUALS(postUpgradeVersion.getClusterId(), OID());
}
// Reads part of an AND-hash plan's first child into its hash table, yields,
// invalidates one of the already-read documents, and verifies the stage
// flags that document for review and excludes it from the final results.
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // 50 docs where foo == bar.
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << i << "bar" << i));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo <= 20
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar >= 10
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 10);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // ah reads the first child into its hash table.
    // ah should read foo=20, foo=19, ..., foo=0 in that order.
    // Read half of them...
    for (int i = 0; i < 10; ++i) {
        WorkingSetID out;
        PlanStage::StageState status = ah->work(&out);
        ASSERT_EQUALS(PlanStage::NEED_TIME, status);
    }

    // ...yield
    ah->prepareToYield();
    // ...invalidate one of the read objects
    set<DiskLoc> data;
    getLocs(&data, coll);
    for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) {
        if (it->obj()["foo"].numberInt() == 15) {
            ah->invalidate(*it);
            remove(it->obj());
            break;
        }
    }
    ah->recoverFromYield();

    // And expect to find foo==15 flagged for review.
    const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
    ASSERT_EQUALS(size_t(1), flagged.size());

    // Expect to find the right value of foo in the flagged item.
    WorkingSetMember* member = ws.get(*flagged.begin());
    ASSERT_TRUE(NULL != member);
    ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state);
    BSONElement elt;
    ASSERT_TRUE(member->getFieldDotted("foo", &elt));
    ASSERT_EQUALS(15, elt.numberInt());

    // Now, finish up the AND.  Since foo == bar, we would have 11 results, but we subtract
    // one because of a mid-plan invalidation, so 10.
    int count = 0;
    while (!ah->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) {
            continue;
        }

        ++count;
        member = ws.get(id);

        // Each surviving result satisfies both bounds and is not the
        // invalidated foo==15 document.
        ASSERT_TRUE(member->getFieldDotted("foo", &elt));
        ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20);
        ASSERT_NOT_EQUALS(15, elt.numberInt());
        ASSERT_TRUE(member->getFieldDotted("bar", &elt));
        ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10);
    }

    ASSERT_EQUALS(10, count);
}
// Feeds data into a SortStage, invalidates documents before and after the
// sort has consumed its input, and verifies the sort still produces every
// object (owned, without a DiskLoc).
void run() {
    Client::WriteContext ctx(ns());
    fillData();

    // The data we're going to later invalidate.
    set<DiskLoc> locs;
    getLocs(&locs);

    // Build the mock stage which feeds the data.
    WorkingSet ws;
    auto_ptr<MockStage> ms(new MockStage(&ws));
    insertVarietyOfObjects(ms.get());

    SortStageParams params;
    params.pattern = BSON("foo" << 1);
    auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get()));

    const int firstRead = 10;
    // Have sort read in data from the mock stage.  Sort returns no results
    // while it is still consuming input.
    for (int i = 0; i < firstRead; ++i) {
        WorkingSetID id;
        PlanStage::StageState status = ss->work(&id);
        ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
    }

    // We should have read in the first 'firstRead' locs. Invalidate the first.
    ss->prepareToYield();
    set<DiskLoc>::iterator it = locs.begin();
    ss->invalidate(*it++);
    ss->recoverFromYield();

    // Read the rest of the data from the mock stage.
    while (!ms->isEOF()) {
        WorkingSetID id;
        ss->work(&id);
    }

    // Release to prevent double-deletion.
    ms.release();

    // Let's just invalidate everything now.
    ss->prepareToYield();
    while (it != locs.end()) {
        ss->invalidate(*it++);
    }
    ss->recoverFromYield();

    // The sort should still work.
    int count = 0;
    while (!ss->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ss->work(&id);
        if (PlanStage::ADVANCED != status) {
            continue;
        }
        // Invalidated results come back as owned objects with no DiskLoc.
        WorkingSetMember* member = ws.get(id);
        ASSERT(member->hasObj());
        ASSERT(!member->hasLoc());
        ++count;
    }

    // We've invalidated everything, but only 2/3 of our data had a DiskLoc to be
    // invalidated. We get the rest as-is.
    ASSERT_EQUALS(count, numObj());
}
// Exercises index ident assignment in the KVCatalog: the ident differs from
// the raw name, is classified as user data, is stable across reads, and
// changes when the index is removed and re-added.
TEST(KVCatalogTest, Idx1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;
    {
        // Create the record store that backs the catalog itself.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), false, false));
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(
            catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed));
        ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    {
        // Attach an index named "foo" to the collection's metadata.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";
        md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
                                                                            << "foo"),
                                                                       false,
                                                                       RecordId(),
                                                                       false,
                                                                       KVPrefix::kNotPrefixed,
                                                                       false));
        catalog->putMetaData(&opCtx, "a.b", md);
        uow.commit();
    }

    string idxIndent;
    {
        MyOperationContext opCtx(engine);
        idxIndent = catalog->getIndexIdent(&opCtx, "a.b", "foo");
    }

    {
        // The index ident is stable across reads and is user data.
        MyOperationContext opCtx(engine);
        ASSERT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";
        catalog->putMetaData(&opCtx, "a.b", md);  // remove index

        md.indexes.push_back(BSONCollectionCatalogEntry::IndexMetaData(BSON("name"
                                                                            << "foo"),
                                                                       false,
                                                                       RecordId(),
                                                                       false,
                                                                       KVPrefix::kNotPrefixed,
                                                                       false));
        catalog->putMetaData(&opCtx, "a.b", md);
        uow.commit();
    }

    {
        // Removing and re-adding the index produced a fresh ident.
        MyOperationContext opCtx(engine);
        ASSERT_NOT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
    }
}
TEST( Shard, EqualitySingle ) {
    // Same shard name and same host: equal.
    ASSERT_EQUALS(Shard("foo", "b.foo.com:123"), Shard("foo", "b.foo.com:123"));
    // Host name differs.
    ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123"), Shard("foo", "a.foo.com:123"));
    // Port differs.
    ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123"), Shard("foo", "b.foo.com:124"));
    // Shard name differs.
    ASSERT_NOT_EQUALS(Shard("foo", "b.foo.com:123"), Shard("foa", "b.foo.com:123"));
}
/**
 * Asserts that the record store's on-disk state matches the expected
 * 'records' and 'drecs' arrays (each terminated by an entry whose loc is a
 * null DiskLoc).  Either array may be NULL to skip that half of the check,
 * but not both.  On any failure the actual record and deleted-record lists
 * are printed to make debugging easier.
 */
void assertStateV1RS(const LocAndSize* records,
                     const LocAndSize* drecs,
                     const ExtentManager* em,
                     const DummyRecordStoreV1MetaData* md) {
    invariant(records || drecs);  // if both are NULL nothing is being asserted...

    try {
        if (records) {
            long long dataSize = 0;
            long long numRecs = 0;

            int recIdx = 0;

            DiskLoc extLoc = md->firstExtent();
            while (!extLoc.isNull()) {  // for each Extent
                Extent* ext = em->getExtent(extLoc, true);
                int expectedPrevOfs = DiskLoc::NullOfs;
                DiskLoc actualLoc = ext->firstRecord;
                while (!actualLoc.isNull()) {  // for each Record in this Extent
                    const Record* actualRec = em->recordForV1(actualLoc);
                    const int actualSize = actualRec->lengthWithHeaders();

                    dataSize += actualSize - Record::HeaderSize;
                    numRecs += 1;

                    // Location and size must match the expected entry.
                    ASSERT_EQUALS(actualLoc, records[recIdx].loc);
                    ASSERT_EQUALS(actualSize, records[recIdx].size);

                    // The record's intra-extent links must be consistent.
                    ASSERT_EQUALS(actualRec->extentOfs(), extLoc.getOfs());
                    ASSERT_EQUALS(actualRec->prevOfs(), expectedPrevOfs);
                    expectedPrevOfs = actualLoc.getOfs();

                    recIdx++;
                    const int nextOfs = actualRec->nextOfs();
                    actualLoc = (nextOfs == DiskLoc::NullOfs ? DiskLoc()
                                                             : DiskLoc(actualLoc.a(), nextOfs));
                }

                if (ext->xnext.isNull()) {
                    ASSERT_EQUALS(md->lastExtent(), extLoc);
                }

                extLoc = ext->xnext;
            }

            // both the expected and actual record lists must be done at this point
            ASSERT_EQUALS(records[recIdx].loc, DiskLoc());

            // Aggregate statistics must agree with the metadata.
            ASSERT_EQUALS(dataSize, md->dataSize());
            ASSERT_EQUALS(numRecs, md->numRecords());
        }

        if (drecs) {
            int drecIdx = 0;
            for (int bucketIdx = 0; bucketIdx < RecordStoreV1Base::Buckets; bucketIdx++) {
                DiskLoc actualLoc = md->deletedListEntry(bucketIdx);

                if (md->isCapped() && bucketIdx == 1) {
                    // In capped collections, the 2nd bucket (index 1) points to the drec before
                    // the first drec in the capExtent. If the capExtent is the first Extent,
                    // it should be Null.
                    if (md->capExtent() == md->firstExtent()) {
                        ASSERT_EQUALS(actualLoc, DiskLoc());
                    } else {
                        ASSERT_NOT_EQUALS(actualLoc.a(), md->capExtent().a());
                        const DeletedRecord* actualDrec =
                            &em->recordForV1(actualLoc)->asDeleted();
                        ASSERT_EQUALS(actualDrec->nextDeleted().a(), md->capExtent().a());
                    }

                    // Don't do normal checking of bucket 1 in capped collections. Checking
                    // other buckets to verify that they are Null.
                    continue;
                }

                while (!actualLoc.isNull()) {
                    const DeletedRecord* actualDrec = &em->recordForV1(actualLoc)->asDeleted();
                    const int actualSize = actualDrec->lengthWithHeaders();

                    ASSERT_EQUALS(actualLoc, drecs[drecIdx].loc);
                    ASSERT_EQUALS(actualSize, drecs[drecIdx].size);

                    // Make sure the drec is correct
                    ASSERT_EQUALS(actualDrec->extentOfs(), 0);

                    // in capped collections all drecs are linked into a single list in bucket 0
                    ASSERT_EQUALS(bucketIdx,
                                  md->isCapped() ? 0 : RecordStoreV1Base::bucket(actualSize));

                    drecIdx++;
                    actualLoc = actualDrec->nextDeleted();
                }
            }
            // both the expected and actual deleted lists must be done at this point
            ASSERT_EQUALS(drecs[drecIdx].loc, DiskLoc());
        }
    } catch (...) {
        // If a test fails, provide extra info to make debugging easier
        printRecList(em, md);
        printDRecList(em, md);
        throw;
    }
}
// Exercises index ident assignment through putMetaData: the collection and
// index idents differ from their raw names, are classified as user data,
// are stable across reads, and the index ident changes when the index is
// removed and re-added.
TEST_F(KVCatalogTest, Idx1) {
    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();

    unique_ptr<RecordStore> rs;
    unique_ptr<KVCatalog> catalog;
    {
        // Create the record store that backs the catalog itself.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
        rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
        uow.commit();
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        ASSERT_OK(newCollection(&opCtx,
                                NamespaceString("a.b"),
                                CollectionOptions(),
                                KVPrefix::kNotPrefixed,
                                catalog.get()));
        ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
        uow.commit();
    }

    {
        // Attach an index named "foo" to the collection's metadata.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";

        BSONCollectionCatalogEntry::IndexMetaData imd;
        imd.spec = BSON("name"
                        << "foo");
        imd.ready = false;
        imd.head = RecordId();
        imd.multikey = false;
        imd.prefix = KVPrefix::kNotPrefixed;
        imd.isBackgroundSecondaryBuild = false;
        md.indexes.push_back(imd);
        catalog->putMetaData(&opCtx, "a.b", md);
        uow.commit();
    }

    string idxIndent;
    {
        MyOperationContext opCtx(engine);
        idxIndent = catalog->getIndexIdent(&opCtx, "a.b", "foo");
    }

    {
        // The index ident is stable across reads and is user data.
        MyOperationContext opCtx(engine);
        ASSERT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
        ASSERT_TRUE(catalog->isUserDataIdent(catalog->getIndexIdent(&opCtx, "a.b", "foo")));
    }

    {
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);

        BSONCollectionCatalogEntry::MetaData md;
        md.ns = "a.b";
        catalog->putMetaData(&opCtx, "a.b", md);  // remove index

        BSONCollectionCatalogEntry::IndexMetaData imd;
        imd.spec = BSON("name"
                        << "foo");
        imd.ready = false;
        imd.head = RecordId();
        imd.multikey = false;
        imd.prefix = KVPrefix::kNotPrefixed;
        imd.isBackgroundSecondaryBuild = false;
        md.indexes.push_back(imd);
        catalog->putMetaData(&opCtx, "a.b", md);
        uow.commit();
    }

    {
        // Removing and re-adding the index produced a fresh ident.
        MyOperationContext opCtx(engine);
        ASSERT_NOT_EQUALS(idxIndent, catalog->getIndexIdent(&opCtx, "a.b", "foo"));
    }
}
TEST(RandomTest, Seed3) {
    // Generators seeded with different values should disagree on their
    // first 32-bit draw.
    PseudoRandom first(11);
    PseudoRandom second(12);
    ASSERT_NOT_EQUALS(first.nextInt32(), second.nextInt32());
}
// Asserts that two 64-bit values are not equal.
static void assertNotEqual(uint64_t a, uint64_t b) {
    ASSERT_NOT_EQUALS(a, b);
}
// Feeds data into a SortStage (with an optional limit), invalidates
// documents before and after the sort has consumed its input, and verifies
// invalidated results are fetched and passed through as owned objects.
void run() {
    Client::WriteContext ctx(&_txn, ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(&_txn, ns());
    if (!coll) {
        coll = db->createCollection(&_txn, ns());
    }

    fillData();

    // The data we're going to later invalidate.
    set<DiskLoc> locs;
    getLocs(&locs, coll);

    // Build the mock scan stage which feeds the data.
    WorkingSet ws;
    auto_ptr<MockStage> ms(new MockStage(&ws));
    insertVarietyOfObjects(ms.get(), coll);

    SortStageParams params;
    params.collection = coll;
    params.pattern = BSON("foo" << 1);
    params.limit = limit();
    auto_ptr<SortStage> ss(new SortStage(&_txn, params, &ws, ms.get()));

    const int firstRead = 10;
    // Have sort read in data from the mock stage.  Sort returns no results
    // while it is still consuming input.
    for (int i = 0; i < firstRead; ++i) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ss->work(&id);
        ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status);
    }

    // We should have read in the first 'firstRead' locs. Invalidate the first.
    ss->saveState();
    set<DiskLoc>::iterator it = locs.begin();
    ss->invalidate(*it++, INVALIDATION_DELETION);
    ss->restoreState(&_txn);

    // Read the rest of the data from the mock stage.
    while (!ms->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        ss->work(&id);
    }

    // Release to prevent double-deletion.
    ms.release();

    // Let's just invalidate everything now.
    ss->saveState();
    while (it != locs.end()) {
        ss->invalidate(*it++, INVALIDATION_DELETION);
    }
    ss->restoreState(&_txn);

    // Invalidation of data in the sort stage fetches it but passes it through.
    int count = 0;
    while (!ss->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ss->work(&id);
        if (PlanStage::ADVANCED != status) {
            continue;
        }
        // Invalidated results come back as owned objects with no DiskLoc.
        WorkingSetMember* member = ws.get(id);
        ASSERT(member->hasObj());
        ASSERT(!member->hasLoc());
        ++count;
    }

    ctx.commit();

    // Returns all docs.
    ASSERT_EQUALS(limit() ? limit() : numObj(), count);
}
// With collection grouping enabled, stores a collection and an index with
// explicit KVPrefixes, restarts the engine, and verifies a freshly-loaded
// catalog reports the same prefixes from its metadata.
TEST_F(KVCatalogTest, RestartForPrefixes) {
    storageGlobalParams.groupCollections = true;
    // Restore the global flag regardless of how the test exits.
    ON_BLOCK_EXIT([&] { storageGlobalParams.groupCollections = false; });

    KVPrefix abCollPrefix = KVPrefix::getNextPrefix(NamespaceString("a.b"));
    KVPrefix fooIndexPrefix = KVPrefix::getNextPrefix(NamespaceString("a.b"));

    unique_ptr<KVHarnessHelper> helper(KVHarnessHelper::create());
    KVEngine* engine = helper->getEngine();
    {
        unique_ptr<RecordStore> rs;
        unique_ptr<KVCatalog> catalog;
        {
            // Create the record store that backs the catalog itself.
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(
                engine->createRecordStore(&opCtx, "catalog", "catalog", CollectionOptions()));
            rs = engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
            catalog.reset(new KVCatalog(rs.get(), false, false, nullptr));
            uow.commit();
        }

        {
            // Register the collection under its assigned prefix.
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);
            ASSERT_OK(newCollection(
                &opCtx, NamespaceString("a.b"), CollectionOptions(), abCollPrefix, catalog.get()));
            ASSERT_NOT_EQUALS("a.b", catalog->getCollectionIdent("a.b"));
            ASSERT_TRUE(catalog->isUserDataIdent(catalog->getCollectionIdent("a.b")));
            uow.commit();
        }

        {
            // Record metadata carrying both the collection and index prefixes.
            MyOperationContext opCtx(engine);
            WriteUnitOfWork uow(&opCtx);

            BSONCollectionCatalogEntry::MetaData md;
            md.ns = "a.b";

            BSONCollectionCatalogEntry::IndexMetaData imd;
            imd.spec = BSON("name"
                            << "foo");
            imd.ready = false;
            imd.head = RecordId();
            imd.multikey = false;
            imd.prefix = fooIndexPrefix;
            imd.isBackgroundSecondaryBuild = false;
            md.indexes.push_back(imd);
            md.prefix = abCollPrefix;
            catalog->putMetaData(&opCtx, "a.b", md);
            uow.commit();
        }
    }

    engine = helper->restartEngine();

    {
        // Reload the catalog after restart; the prefixes must round-trip.
        MyOperationContext opCtx(engine);
        WriteUnitOfWork uow(&opCtx);
        unique_ptr<RecordStore> rs =
            engine->getRecordStore(&opCtx, "catalog", "catalog", CollectionOptions());
        unique_ptr<KVCatalog> catalog =
            stdx::make_unique<KVCatalog>(rs.get(), false, false, nullptr);
        catalog->init(&opCtx);

        const BSONCollectionCatalogEntry::MetaData md = catalog->getMetaData(&opCtx, "a.b");
        ASSERT_EQ("a.b", md.ns);
        ASSERT_EQ(abCollPrefix, md.prefix);
        ASSERT_EQ(fooIndexPrefix, md.indexes[md.findIndexOffset("foo")].prefix);
    }
}
/**
 * Test that two PseudoRandom generators constructed with different seeds
 * return different values from nextCanonicalDouble().
 */
TEST(RandomTest, NextCanonicalDifferentSeeds) {
    PseudoRandom gen1(12);
    PseudoRandom gen2(11);
    ASSERT_NOT_EQUALS(gen1.nextCanonicalDouble(), gen2.nextCanonicalDouble());
}