// Killing one connection must not wipe out the rest of the pool: healthy
// connections returned before/after the kill stay reusable.
TEST_F(ShardConnFixture, KilledGoodConnShouldNotClearPool) {
    ShardConnection conn1(TARGET_HOST, "test.user");
    ShardConnection conn2(TARGET_HOST, "test.user");
    ShardConnection conn3(TARGET_HOST, "test.user");

    // Newest socket creation time handed out so far; anything pulled from
    // the pool later must not be newer than this.
    const uint64_t latestCreationTime = conn3.get()->getSockCreationMicroSec();
    conn3.done();

    // conn1's socket gets killed; nothing handed out afterwards may be it.
    const uint64_t killedCreationTime = conn1.get()->getSockCreationMicroSec();
    conn1.kill();

    conn2.done();

    ShardConnection conn4(TARGET_HOST, "test.user");
    ShardConnection conn5(TARGET_HOST, "test.user");

    // Both checkouts should come from the pool (<= latestCreationTime) and
    // must not be the killed socket (> killedCreationTime).
    ASSERT_GREATER_THAN(conn4.get()->getSockCreationMicroSec(), killedCreationTime);
    ASSERT_LESS_THAN_OR_EQUALS(conn4.get()->getSockCreationMicroSec(), latestCreationTime);

    ASSERT_GREATER_THAN(conn5.get()->getSockCreationMicroSec(), killedCreationTime);
    ASSERT_LESS_THAN_OR_EQUALS(conn5.get()->getSockCreationMicroSec(), latestCreationTime);

    // With the pool drained, brand-new sockets must be newer than everything
    // created before.
    checkNewConns(assertGreaterThan, latestCreationTime, 10);
}
// A configured server-side delay should be observed by both queries and
// commands, and each operation should be counted exactly once.
TEST(MockDBClientConnTest, Delay) {
    MockRemoteDBServer server("test");
    server.setCommandReply("serverStatus", BSON("ok" << 1));
    server.setDelay(150);

    MockDBClientConnection conn(&server);

    {
        // Query path: allow +/- 10ms of slack around the 150ms delay.
        mongo::Timer timer;
        conn.query("x.x");
        const int elapsedMillis = timer.millis();
        ASSERT_GREATER_THAN_OR_EQUALS(elapsedMillis, 140);
        ASSERT_LESS_THAN_OR_EQUALS(elapsedMillis, 160);
    }

    {
        // Command path: same delay window applies.
        mongo::Timer timer;
        BSONObj response;
        conn.runCommand("x.x", BSON("serverStatus" << 1), response);
        const int elapsedMillis = timer.millis();
        ASSERT_GREATER_THAN_OR_EQUALS(elapsedMillis, 140);
        ASSERT_LESS_THAN_OR_EQUALS(elapsedMillis, 160);
    }

    ASSERT_EQUALS(1U, server.getQueryCount());
    ASSERT_EQUALS(1U, server.getCmdCount());
}
void run() { // Insert a ton of documents with a: [1, 2, 3] for (size_t i = 0; i < 1000; ++i) { insert(BSON("a" << BSON_ARRAY(1 << 2 << 3))); } // Insert a ton of other documents with a: [4, 5, 6] for (size_t i = 0; i < 1000; ++i) { insert(BSON("a" << BSON_ARRAY(4 << 5 << 6))); } // Make an index on a:1 addIndex(BSON("a" << 1)); AutoGetCollectionForRead ctx(&_txn, ns()); Collection* coll = ctx.getCollection(); // Set up the distinct stage. std::vector<IndexDescriptor*> indexes; coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("a" << 1), false, &indexes); verify(indexes.size() == 1); DistinctParams params; params.descriptor = indexes[0]; ASSERT_TRUE(params.descriptor->isMultikey(&_txn)); verify(params.descriptor); params.direction = 1; // Distinct-ing over the 0-th field of the keypattern. params.fieldNo = 0; // We'll look at all values in the bounds. params.bounds.isSimpleRange = false; OrderedIntervalList oil("a"); oil.intervals.push_back(IndexBoundsBuilder::allValues()); params.bounds.fields.push_back(oil); WorkingSet ws; DistinctScan distinct(&_txn, params, &ws); // We should see each number in the range [1, 6] exactly once. std::set<int> seen; WorkingSetID wsid; PlanStage::StageState state; while (PlanStage::IS_EOF != (state = distinct.work(&wsid))) { if (PlanStage::ADVANCED == state) { // Check int value. int currentNumber = getIntFieldDotted(ws, wsid, "a"); ASSERT_GREATER_THAN_OR_EQUALS(currentNumber, 1); ASSERT_LESS_THAN_OR_EQUALS(currentNumber, 6); // Should see this number only once. ASSERT_TRUE(seen.find(currentNumber) == seen.end()); seen.insert(currentNumber); } } ASSERT_EQUALS(6U, seen.size()); }
void run() { Client::WriteContext ctx(ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(ns()); if (!coll) { coll = db->createCollection(ns()); } for (int i = 0; i < 50; ++i) { insert(BSON("foo" << i << "bar" << i)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL)); // Foo <= 20 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1), coll); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSONObj(); params.bounds.endKeyInclusive = true; params.direction = -1; ah->addChild(new IndexScan(params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); params.bounds.startKey = BSON("" << 10); params.bounds.endKey = BSONObj(); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // ah reads the first child into its hash table. // ah should read foo=20, foo=19, ..., foo=0 in that order. // Read half of them... for (int i = 0; i < 10; ++i) { WorkingSetID out; PlanStage::StageState status = ah->work(&out); ASSERT_EQUALS(PlanStage::NEED_TIME, status); } // ...yield ah->prepareToYield(); // ...invalidate one of the read objects set<DiskLoc> data; getLocs(&data, coll); for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) { if (it->obj()["foo"].numberInt() == 15) { ah->invalidate(*it); remove(it->obj()); break; } } ah->recoverFromYield(); // And expect to find foo==15 it flagged for review. const unordered_set<WorkingSetID>& flagged = ws.getFlagged(); ASSERT_EQUALS(size_t(1), flagged.size()); // Expect to find the right value of foo in the flagged item. 
WorkingSetMember* member = ws.get(*flagged.begin()); ASSERT_TRUE(NULL != member); ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state); BSONElement elt; ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(15, elt.numberInt()); // Now, finish up the AND. Since foo == bar, we would have 11 results, but we subtract // one because of a mid-plan invalidation, so 10. int count = 0; while (!ah->isEOF()) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20); ASSERT_NOT_EQUALS(15, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10); } ASSERT_EQUALS(10, count); }
// Test a full buffer TEST(FTDCFileManagerTest, TestFull) { Client* client = &cc(); FTDCConfig c; c.maxFileSizeBytes = 300; c.maxDirectorySizeBytes = 1000; c.maxSamplesPerInterimMetricChunk = 1; unittest::TempDir tempdir("metrics_testpath"); boost::filesystem::path dir(tempdir.path()); createDirectoryClean(dir); FTDCCollectorCollection rotate; auto swMgr = FTDCFileManager::create(&c, dir, &rotate, client); ASSERT_OK(swMgr.getStatus()); auto mgr = std::move(swMgr.getValue()); // Test a large numbers of zeros, and incremental numbers in a full buffer for (int j = 0; j < 10; j++) { ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 3230792343LL << "key2" << 235135), Date_t())); for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) { ASSERT_OK( mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << static_cast<long long int>(i * j * 37) << "key2" << static_cast<long long int>(i * (645 << j))), Date_t())); } ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t())); // Add Value ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client, BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t())); } mgr->close(); auto files = scanDirectory(dir); int sum = 0; for (auto& file : files) { int fs = boost::filesystem::file_size(file); ASSERT_TRUE(fs < c.maxFileSizeBytes * 1.10); unittest::log() << "File " << file.generic_string() << " has size " << fs; if (file.generic_string().find("interim") == std::string::npos) { sum += fs; } } ASSERT_LESS_THAN_OR_EQUALS(sum, c.maxDirectorySizeBytes * 1.10); ASSERT_GREATER_THAN_OR_EQUALS(sum, c.maxDirectorySizeBytes * 0.90); }