void run() { ScopedTransaction transaction(&_txn, MODE_IX); Lock::DBLock lk(_txn.lockState(), nsToDatabaseSubstring(ns()), MODE_X); OldClientContext ctx(&_txn, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { WriteUnitOfWork wuow(&_txn); coll = db->createCollection(&_txn, ns()); wuow.commit(); } WorkingSet ws; // Add an object to the DB. insert(BSON("foo" << 5)); set<RecordId> recordIds; getRecordIds(&recordIds, coll); ASSERT_EQUALS(size_t(1), recordIds.size()); // Create a mock stage that returns the WSM. auto mockStage = make_unique<QueuedDataStage>(&_txn, &ws); // Mock data. { WorkingSetID id = ws.allocate(); WorkingSetMember* mockMember = ws.get(id); mockMember->recordId = *recordIds.begin(); ws.transitionToRecordIdAndIdx(id); // State is RecordId and index, shouldn't be able to get the foo data inside. BSONElement elt; ASSERT_FALSE(mockMember->getFieldDotted("foo", &elt)); mockStage->pushBack(id); } // Make the filter. BSONObj filterObj = BSON("foo" << 6); const CollatorInterface* collator = nullptr; StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse( filterObj, ExtensionsCallbackDisallowExtensions(), collator); verify(statusWithMatcher.isOK()); unique_ptr<MatchExpression> filterExpr = std::move(statusWithMatcher.getValue()); // Matcher requires that foo==6 but we only have data with foo==5. unique_ptr<FetchStage> fetchStage( new FetchStage(&_txn, &ws, mockStage.release(), filterExpr.get(), coll)); // First call should return a fetch request as it's not in memory. WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state; // Normally we'd return the object but we have a filter that prevents it. state = fetchStage->work(&id); ASSERT_EQUALS(PlanStage::NEED_TIME, state); // No more data to fetch, so, EOF. state = fetchStage->work(&id); ASSERT_EQUALS(PlanStage::IS_EOF, state); }
void run() { Client::WriteContext ctx(&_txn, ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(&_txn, ns()); if (!coll) { WriteUnitOfWork wuow(&_txn); coll = db->createCollection(&_txn, ns()); wuow.commit(); } WorkingSet ws; std::set<WorkingSetID> expectedResultIds; std::set<WorkingSetID> resultIds; // Create a KeepMutationsStage with an EOF child, and flag 50 objects. We expect these // objects to be returned by the KeepMutationsStage. MatchExpression* nullFilter = NULL; std::auto_ptr<KeepMutationsStage> keep(new KeepMutationsStage(nullFilter, &ws, new EOFStage())); for (size_t i = 0; i < 50; ++i) { WorkingSetID id = ws.allocate(); WorkingSetMember* member = ws.get(id); member->state = WorkingSetMember::OWNED_OBJ; member->obj = BSON("x" << 1); ws.flagForReview(id); expectedResultIds.insert(id); } // Call work() on the KeepMutationsStage. The stage should start streaming the // already-flagged objects. WorkingSetID id = getNextResult(keep.get()); resultIds.insert(id); // Flag more objects, then call work() again on the KeepMutationsStage, and expect none // of the newly-flagged objects to be returned (the KeepMutationsStage does not // incorporate objects flagged since the streaming phase started). // // This condition triggers SERVER-15580 (the new flagging causes a rehash of the // unordered_set "WorkingSet::_flagged", which invalidates all iterators, which were // previously being dereferenced in KeepMutationsStage::work()). // Note that std::unordered_set<>::insert() triggers a rehash if the new number of // elements is greater than or equal to max_load_factor()*bucket_count(). 
size_t rehashSize = static_cast<size_t>(ws.getFlagged().max_load_factor() * ws.getFlagged().bucket_count()); while (ws.getFlagged().size() <= rehashSize) { WorkingSetID id = ws.allocate(); WorkingSetMember* member = ws.get(id); member->state = WorkingSetMember::OWNED_OBJ; member->obj = BSON("x" << 1); ws.flagForReview(id); } while ((id = getNextResult(keep.get())) != WorkingSet::INVALID_ID) { resultIds.insert(id); } // Assert that only the first 50 objects were returned. ASSERT(expectedResultIds == resultIds); }
void run() { OldClientWriteContext ctx(&_txn, ns()); Collection* coll = ctx.getCollection(); // Get the RecordIds that would be returned by an in-order scan. vector<RecordId> recordIds; getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); // Configure the scan. CollectionScanParams params; params.collection = coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; WorkingSet ws; unique_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); int count = 0; while (count < 10) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(), member->obj.value()["foo"].numberInt()); ++count; } } // Remove recordIds[count]. scan->saveState(); { WriteUnitOfWork wunit(&_txn); scan->invalidate(&_txn, recordIds[count], INVALIDATION_DELETION); wunit.commit(); // to avoid rollback of the invalidate } remove(coll->docFor(&_txn, recordIds[count]).value()); scan->restoreState(); // Skip over recordIds[count]. ++count; // Expect the rest. while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); ASSERT_EQUALS(coll->docFor(&_txn, recordIds[count]).value()["foo"].numberInt(), member->obj.value()["foo"].numberInt()); ++count; } } ASSERT_EQUALS(numObj(), count); }
void run() { Client::WriteContext ctx(&_txn, ns()); Collection* coll = ctx.ctx().db()->getCollection( &_txn, ns() ); // Get the DiskLocs that would be returned by an in-order scan. vector<DiskLoc> locs; getLocs(coll, CollectionScanParams::FORWARD, &locs); // Configure the scan. CollectionScanParams params; params.collection = coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; WorkingSet ws; scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); int count = 0; while (count < 10) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(), member->obj["foo"].numberInt()); ++count; } } // Remove locs[count]. scan->saveState(); scan->invalidate(locs[count], INVALIDATION_DELETION); remove(coll->docFor(&_txn, locs[count])); scan->restoreState(&_txn); // Skip over locs[count]. ++count; // Expect the rest. while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); ASSERT_EQUALS(coll->docFor(&_txn, locs[count])["foo"].numberInt(), member->obj["foo"].numberInt()); ++count; } } ctx.commit(); ASSERT_EQUALS(numObj(), count); }
void run() { Client::WriteContext ctx(ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(ns()); if (!coll) { coll = db->createCollection(ns()); } for (int i = 0; i < 50; ++i) { insert(BSON("foo" << 1 << "bar" << i)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL)); // Scan over foo == 1 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1), coll); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // Intersect with 7 <= bar < 10000 params.descriptor = getIndex(BSON("bar" << 1), coll); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 10000); ah->addChild(new IndexScan(params, &ws, NULL)); WorkingSetID lastId = WorkingSet::INVALID_ID; int count = 0; while (!ah->isEOF()) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } BSONObj thisObj = ws.get(id)->loc.obj(); ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt()); ++count; if (WorkingSet::INVALID_ID != lastId) { BSONObj lastObj = ws.get(lastId)->loc.obj(); ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0); } lastId = id; } ASSERT_EQUALS(count, 43); }
void run() { OldClientWriteContext ctx(&_txn, nss.ns()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1 << "c" << 1)); addIndex(BSON("d" << 1)); for (int i = 0; i < 10; i++) { insert(BSON("a" << 1 << "e" << 1 << "d" << 1)); } // Running this query should not create any cache entries. For the first branch, it's // because plans using the {a: 1, b: 1} and {a: 1, c: 1} indices should tie during plan // ranking. For the second branch it's because there is only one relevant index. BSONObj query = fromjson("{$or: [{a: 1, e: 1}, {d: 1}]}"); Collection* collection = ctx.getCollection(); auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from // the plan cache. ASSERT_FALSE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); // If we run the query again, it should again be the case that neither branch gets planned // from the cache (because the first call to pickBestPlan() refrained from creating any // cache entries). ws.clear(); subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); ASSERT_FALSE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); }
void run() { OldClientWriteContext ctx(&_txn, nss.ns()); addIndex(BSON("b" << 1 << "a" << 1)); addIndex(BSON("c" << 1 << "a" << 1)); BSONObj query = fromjson("{a: 1, $or: [{b: 2}, {c: 3}]}"); // Two of these documents match. insert(BSON("_id" << 1 << "a" << 1 << "b" << 2)); insert(BSON("_id" << 2 << "a" << 2 << "b" << 2)); insert(BSON("_id" << 3 << "a" << 1 << "c" << 3)); insert(BSON("_id" << 4 << "a" << 1 << "c" << 4)); auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto cq = unittest::assertGet(CanonicalQuery::canonicalize( txn(), std::move(qr), ExtensionsCallbackDisallowExtensions())); Collection* collection = ctx.getCollection(); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); // Plan selection should succeed due to falling back on regular planning. PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Work the stage until it produces all results. size_t numResults = 0; PlanStage::StageState stageState = PlanStage::NEED_TIME; while (stageState != PlanStage::IS_EOF) { WorkingSetID id = WorkingSet::INVALID_ID; stageState = subplan->work(&id); ASSERT_NE(stageState, PlanStage::DEAD); ASSERT_NE(stageState, PlanStage::FAILURE); if (stageState == PlanStage::ADVANCED) { ++numResults; WorkingSetMember* member = ws.get(id); ASSERT(member->hasObj()); ASSERT(member->obj.value() == BSON("_id" << 1 << "a" << 1 << "b" << 2) || member->obj.value() == BSON("_id" << 3 << "a" << 1 << "c" << 3)); } } ASSERT_EQ(numResults, 2U); }
void run() { OldClientWriteContext ctx(&_txn, nss.ns()); addIndex(BSON("a" << 1)); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("c" << 1)); for (int i = 0; i < 10; i++) { insert(BSON("a" << 1 << "b" << i << "c" << i)); } // This query should result in a plan cache entry for the first $or branch, because // there are two competing indices. The second branch has only one relevant index, so // its winning plan should not be cached. BSONObj query = fromjson("{$or: [{a: 1, b: 3}, {c: 1}]}"); Collection* collection = ctx.getCollection(); auto qr = stdx::make_unique<QueryRequest>(nss); qr->setFilter(query); auto statusWithCQ = CanonicalQuery::canonicalize( txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(PlanExecutor::YIELD_MANUAL, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from // the plan cache. ASSERT_FALSE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); // If we repeat the same query, the plan for the first branch should have come from // the cache. ws.clear(); subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); ASSERT_TRUE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); }
void run() { OldClientWriteContext ctx(&_txn, nss.ns()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1)); addIndex(BSON("c" << 1)); for (int i = 0; i < 10; i++) { insert(BSON("a" << 1 << "b" << i << "c" << i)); } // Running this query should not create any cache entries. For the first branch, it's // because there are no matching results. For the second branch it's because there is only // one relevant index. BSONObj query = fromjson("{$or: [{a: 1, b: 15}, {c: 1}]}"); Collection* collection = ctx.getCollection(); auto statusWithCQ = CanonicalQuery::canonicalize(nss, query); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); WorkingSet ws; std::unique_ptr<SubplanStage> subplan( new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(nullptr, PlanExecutor::YIELD_MANUAL); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from // the plan cache. ASSERT_FALSE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); // If we run the query again, it should again be the case that neither branch gets planned // from the cache (because the first call to pickBestPlan() refrained from creating any // cache entries). ws.clear(); subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); ASSERT_FALSE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); }
void run() { OldClientWriteContext ctx(&_txn, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(ns()); if (!coll) { WriteUnitOfWork wuow(&_txn); coll = db->createCollection(&_txn, ns()); wuow.commit(); } WorkingSet ws; // Add 10 objects to the collection. for (size_t i = 0; i < 10; ++i) { insert(BSON("x" << 1)); } // Create 10 objects that are flagged. for (size_t i = 0; i < 10; ++i) { WorkingSetID id = ws.allocate(); WorkingSetMember* member = ws.get(id); member->state = WorkingSetMember::OWNED_OBJ; member->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("x" << 2)); ws.flagForReview(id); } // Create a collscan to provide the 10 objects in the collection. CollectionScanParams params; params.collection = coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; params.start = RecordId(); CollectionScan* cs = new CollectionScan(&_txn, params, &ws, NULL); // Create a KeepMutations stage to merge in the 10 flagged objects. // Takes ownership of 'cs' MatchExpression* nullFilter = NULL; std::unique_ptr<KeepMutationsStage> keep(new KeepMutationsStage(nullFilter, &ws, cs)); for (size_t i = 0; i < 10; ++i) { WorkingSetID id = getNextResult(keep.get()); WorkingSetMember* member = ws.get(id); ASSERT_FALSE(ws.isFlagged(id)); ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 1); } { WorkingSetID out; ASSERT_EQ(cs->work(&out), PlanStage::IS_EOF); } // Flagged results *must* be at the end. for (size_t i = 0; i < 10; ++i) { WorkingSetID id = getNextResult(keep.get()); WorkingSetMember* member = ws.get(id); ASSERT(ws.isFlagged(id)); ASSERT_EQUALS(member->obj.value()["x"].numberInt(), 2); } }
void run() { OldClientWriteContext ctx(&_txn, ns()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1 << "c" << 1)); for (int i = 0; i < 10; i++) { insert(BSON("a" << 1 << "b" << i << "c" << i)); } // This query should result in a plan cache entry for the first branch. The second // branch should tie, meaning that nothing is inserted into the plan cache. BSONObj query = fromjson("{$or: [{a: 1, b: 3}, {a: 1}]}"); Collection* collection = ctx.getCollection(); CanonicalQuery* rawCq; ASSERT_OK(CanonicalQuery::canonicalize(ns(), query, &rawCq)); boost::scoped_ptr<CanonicalQuery> cq(rawCq); // Get planner params. QueryPlannerParams plannerParams; fillOutPlannerParams(&_txn, collection, cq.get(), &plannerParams); WorkingSet ws; boost::scoped_ptr<SubplanStage> subplan(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); PlanYieldPolicy yieldPolicy(NULL, PlanExecutor::YIELD_MANUAL); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from // the plan cache. ASSERT_FALSE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); // If we repeat the same query, then the first branch should come from the cache, // but the second is re-planned due to tying on the first run. ws.clear(); subplan.reset(new SubplanStage(&_txn, collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); ASSERT_TRUE(subplan->branchPlannedFromCache(0)); ASSERT_FALSE(subplan->branchPlannedFromCache(1)); }
/**
 * Returns a vector of all of the documents currently in 'collection'.
 *
 * Uses a forward collection scan stage to get the docs, and populates 'out' with
 * the results.
 */
void getCollContents(Collection* collection, vector<BSONObj>* out) {
    WorkingSet ws;

    // Forward, non-tailable scan over the whole collection.
    CollectionScanParams params;
    params.direction = CollectionScanParams::FORWARD;
    params.tailable = false;

    unique_ptr<CollectionScan> scan(
        new CollectionScan(&_opCtx, collection, params, &ws, NULL));
    while (!scan->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state = scan->work(&id);
        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = ws.get(id);
            verify(member->hasObj());
            // Own each document so it outlives the working set.
            out->push_back(member->obj.value().getOwned());
        }
    }
}
void run() { OperationContextImpl txn; Client::WriteContext ctx(&txn, ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(&txn, ns()); if (!coll) { coll = db->createCollection(&txn, ns()); } WorkingSet ws; // Add 10 objects to the collection. for (size_t i = 0; i < 10; ++i) { insert(BSON("x" << 1)); } // Create 10 objects that are flagged. for (size_t i = 0; i < 10; ++i) { WorkingSetID id = ws.allocate(); WorkingSetMember* member = ws.get(id); member->state = WorkingSetMember::OWNED_OBJ; member->obj = BSON("x" << 2); ws.flagForReview(id); } // Create a collscan to provide the 10 objects in the collection. CollectionScanParams params; params.collection = coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; params.start = DiskLoc(); CollectionScan* cs = new CollectionScan(params, &ws, NULL); // Create a KeepMutations stage to merge in the 10 flagged objects. // Takes ownership of 'cs' KeepMutationsStage* keep = new KeepMutationsStage(NULL, &ws, cs); for (size_t i = 0; i < 10; ++i) { WorkingSetID id = getNextResult(keep); WorkingSetMember* member = ws.get(id); ASSERT_FALSE(ws.isFlagged(id)); ASSERT_EQUALS(member->obj["x"].numberInt(), 1); } ASSERT(cs->isEOF()); // Flagged results *must* be at the end. for (size_t i = 0; i < 10; ++i) { WorkingSetID id = getNextResult(keep); WorkingSetMember* member = ws.get(id); ASSERT(ws.isFlagged(id)); ASSERT_EQUALS(member->obj["x"].numberInt(), 2); } }
void getLocs() { _locs.clear(); WorkingSet ws; CollectionScanParams params; params.collection = _coll; params.direction = CollectionScanParams::FORWARD; params.tailable = false; scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); verify(member->hasLoc()); _locs.push_back(member->loc); } } }
// Appends to 'out' the RecordId of every document in 'collection', visited in
// the given scan 'direction'.
void getRecordIds(Collection* collection,
                  CollectionScanParams::Direction direction,
                  vector<RecordId>* out) {
    WorkingSet ws;

    CollectionScanParams params;
    params.direction = direction;
    params.tailable = false;

    unique_ptr<CollectionScan> scan(
        new CollectionScan(&_opCtx, collection, params, &ws, NULL));
    while (!scan->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state = scan->work(&id);
        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = ws.get(id);
            verify(member->hasRecordId());
            out->push_back(member->recordId);
        }
    }
}
// Appends to 'out' the DiskLoc of every document in 'collection', visited in
// the given scan 'direction'.
void getLocs(Collection* collection,
             CollectionScanParams::Direction direction,
             vector<DiskLoc>* out) {
    WorkingSet ws;

    CollectionScanParams params;
    params.collection = collection;
    params.direction = direction;
    params.tailable = false;

    scoped_ptr<CollectionScan> scan(new CollectionScan(&_txn, params, &ws, NULL));
    while (!scan->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState state = scan->work(&id);
        if (PlanStage::ADVANCED == state) {
            WorkingSetMember* member = ws.get(id);
            verify(member->hasLoc());
            out->push_back(member->loc);
        }
    }
}
void run() { dbtests::WriteContextForTests ctx(&_opCtx, ns()); Database* db = ctx.db(); Collection* coll = db->getCollection(&_opCtx, ns()); if (!coll) { WriteUnitOfWork wuow(&_opCtx); coll = db->createCollection(&_opCtx, ns()); wuow.commit(); } WorkingSet ws; // Add an object to the DB. insert(BSON("foo" << 5)); set<RecordId> recordIds; getRecordIds(&recordIds, coll); ASSERT_EQUALS(size_t(1), recordIds.size()); // Create a mock stage that returns the WSM. auto mockStage = make_unique<QueuedDataStage>(&_opCtx, &ws); // Mock data. { WorkingSetID id = ws.allocate(); WorkingSetMember* mockMember = ws.get(id); mockMember->recordId = *recordIds.begin(); mockMember->obj = coll->docFor(&_opCtx, mockMember->recordId); ws.transitionToRecordIdAndObj(id); // Points into our DB. mockStage->pushBack(id); } { WorkingSetID id = ws.allocate(); WorkingSetMember* mockMember = ws.get(id); mockMember->recordId = RecordId(); mockMember->obj = Snapshotted<BSONObj>(SnapshotId(), BSON("foo" << 6)); mockMember->transitionToOwnedObj(); ASSERT_TRUE(mockMember->obj.value().isOwned()); mockStage->pushBack(id); } unique_ptr<FetchStage> fetchStage( new FetchStage(&_opCtx, &ws, mockStage.release(), NULL, coll)); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state; // Don't bother doing any fetching if an obj exists already. state = fetchStage->work(&id); ASSERT_EQUALS(PlanStage::ADVANCED, state); state = fetchStage->work(&id); ASSERT_EQUALS(PlanStage::ADVANCED, state); // No more data to fetch, so, EOF. state = fetchStage->work(&id); ASSERT_EQUALS(PlanStage::IS_EOF, state); }
// static void WorkingSetCommon::getStatusMemberObject(const WorkingSet& ws, WorkingSetID wsid, BSONObj* objOut) { invariant(objOut); // Validate ID and working set member. if (WorkingSet::INVALID_ID == wsid) { return; } WorkingSetMember* member = ws.get(wsid); if (!member->hasOwnedObj()) { return; } BSONObj obj = member->obj.value(); if (!isValidStatusMemberObject(obj)) { return; } *objOut = obj; }
/** * Returns the projected value from the working set that would * be returned in the 'values' field of the distinct command result. * Limited to NumberInt BSON types because this is the only * BSON type used in this suite of tests. */ static int getIntFieldDotted(const WorkingSet& ws, WorkingSetID wsid, const std::string& field) { // For some reason (at least under OS X clang), we cannot refer to INVALID_ID // inside the test assertion macro. WorkingSetID invalid = WorkingSet::INVALID_ID; ASSERT_NOT_EQUALS(invalid, wsid); WorkingSetMember* member = ws.get(wsid); // Distinct hack execution is always covered. // Key value is retrieved from working set key data // instead of RecordId. ASSERT_FALSE(member->hasObj()); BSONElement keyElt; ASSERT_TRUE(member->getFieldDotted(field, &keyElt)); ASSERT_TRUE(keyElt.isNumber()); return keyElt.numberInt(); }
static size_t getNumResultsForStage(const WorkingSet& ws, CachedPlanStage* cachedPlanStage, CanonicalQuery* cq) { size_t numResults = 0; PlanStage::StageState state = PlanStage::NEED_TIME; while (state != PlanStage::IS_EOF) { WorkingSetID id = WorkingSet::INVALID_ID; state = cachedPlanStage->work(&id); ASSERT_NE(state, PlanStage::FAILURE); ASSERT_NE(state, PlanStage::DEAD); if (state == PlanStage::ADVANCED) { WorkingSetMember* member = ws.get(id); ASSERT(cq->root()->matchesBSON(member->obj.value())); numResults++; } } return numResults; }
void run() { Client::WriteContext ctx(ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(ns()); if (!coll) { coll = db->createCollection(ns()); } for (int i = 0; i < 50; ++i) { insert(BSON("foo" << i << "bar" << i)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL)); // Foo <= 20 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1), coll); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSONObj(); params.bounds.endKeyInclusive = true; params.direction = -1; ah->addChild(new IndexScan(params, &ws, NULL)); // Bar >= 10 params.descriptor = getIndex(BSON("bar" << 1), coll); params.bounds.startKey = BSON("" << 10); params.bounds.endKey = BSONObj(); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // ah reads the first child into its hash table. // ah should read foo=20, foo=19, ..., foo=0 in that order. // Read half of them... for (int i = 0; i < 10; ++i) { WorkingSetID out; PlanStage::StageState status = ah->work(&out); ASSERT_EQUALS(PlanStage::NEED_TIME, status); } // ...yield ah->prepareToYield(); // ...invalidate one of the read objects set<DiskLoc> data; getLocs(&data, coll); for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) { if (it->obj()["foo"].numberInt() == 15) { ah->invalidate(*it); remove(it->obj()); break; } } ah->recoverFromYield(); // And expect to find foo==15 it flagged for review. const unordered_set<WorkingSetID>& flagged = ws.getFlagged(); ASSERT_EQUALS(size_t(1), flagged.size()); // Expect to find the right value of foo in the flagged item. 
WorkingSetMember* member = ws.get(*flagged.begin()); ASSERT_TRUE(NULL != member); ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state); BSONElement elt; ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(15, elt.numberInt()); // Now, finish up the AND. Since foo == bar, we would have 11 results, but we subtract // one because of a mid-plan invalidation, so 10. int count = 0; while (!ah->isEOF()) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_LESS_THAN_OR_EQUALS(elt.numberInt(), 20); ASSERT_NOT_EQUALS(15, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10); } ASSERT_EQUALS(10, count); }
void run() { Client::WriteContext ctx(ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(ns()); if (!coll) { coll = db->createCollection(ns()); } fillData(); // The data we're going to later invalidate. set<DiskLoc> locs; getLocs(&locs, coll); // Build the mock scan stage which feeds the data. WorkingSet ws; auto_ptr<MockStage> ms(new MockStage(&ws)); insertVarietyOfObjects(ms.get(), coll); SortStageParams params; params.pattern = BSON("foo" << 1); params.limit = limit(); auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get())); const int firstRead = 10; // Have sort read in data from the mock stage. for (int i = 0; i < firstRead; ++i) { WorkingSetID id; PlanStage::StageState status = ss->work(&id); ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status); } // We should have read in the first 'firstRead' locs. Invalidate the first. ss->prepareToYield(); set<DiskLoc>::iterator it = locs.begin(); ss->invalidate(*it++); ss->recoverFromYield(); // Read the rest of the data from the mock stage. while (!ms->isEOF()) { WorkingSetID id; ss->work(&id); } // Release to prevent double-deletion. ms.release(); // Let's just invalidate everything now. ss->prepareToYield(); while (it != locs.end()) { ss->invalidate(*it++); } ss->recoverFromYield(); // After invalidating all our data, we have nothing left to sort. int count = 0; while (!ss->isEOF()) { WorkingSetID id; PlanStage::StageState status = ss->work(&id); if (PlanStage::ADVANCED != status) { continue; } WorkingSetMember* member = ws.get(id); ASSERT(member->hasObj()); ASSERT(!member->hasLoc()); ++count; } // Therefore, we expect an empty result set from running the sort stage to completion. ASSERT_EQUALS(0, count); }
void run() { Client::WriteContext ctx(ns()); fillData(); // The data we're going to later invalidate. set<DiskLoc> locs; getLocs(&locs); // Build the mock stage which feeds the data. WorkingSet ws; auto_ptr<MockStage> ms(new MockStage(&ws)); insertVarietyOfObjects(ms.get()); SortStageParams params; params.pattern = BSON("foo" << 1); auto_ptr<SortStage> ss(new SortStage(params, &ws, ms.get())); const int firstRead = 10; // Have sort read in data from the mock stage. for (int i = 0; i < firstRead; ++i) { WorkingSetID id; PlanStage::StageState status = ss->work(&id); ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status); } // We should have read in the first 'firstRead' locs. Invalidate the first. ss->prepareToYield(); set<DiskLoc>::iterator it = locs.begin(); ss->invalidate(*it++); ss->recoverFromYield(); // Read the rest of the data from the mock stage. while (!ms->isEOF()) { WorkingSetID id; ss->work(&id); } // Release to prevent double-deletion. ms.release(); // Let's just invalidate everything now. ss->prepareToYield(); while (it != locs.end()) { ss->invalidate(*it++); } ss->recoverFromYield(); // The sort should still work. int count = 0; while (!ss->isEOF()) { WorkingSetID id; PlanStage::StageState status = ss->work(&id); if (PlanStage::ADVANCED != status) { continue; } WorkingSetMember* member = ws.get(id); ASSERT(member->hasObj()); ASSERT(!member->hasLoc()); ++count; } // We've invalidated everything, but only 2/3 of our data had a DiskLoc to be // invalidated. We get the rest as-is. ASSERT_EQUALS(count, numObj()); }
// Refreshes the per-file add/remove buttons and the tool-tip action buttons so
// they reflect which files of this working set are open in the active area.
// NOTE(review): this definition is truncated in the visible chunk (the trailing
// else-branch continues elsewhere).
void WorkingSetToolTipWidget::updateFileButtons() {
    MainWindow* mainWindow = dynamic_cast<MainWindow*>(Core::self()->uiController()->activeMainWindow());
    Q_ASSERT(mainWindow);
    WorkingSetController* controller = Core::self()->workingSetControllerInternal();
    // NOTE(review): 'tooltip' is not used in the visible portion of this
    // function — presumably referenced further down; verify.
    ActiveToolTip* tooltip = controller->tooltip();

    // Document shown in the currently active view, if any.
    QString activeFile;
    if(mainWindow->area()->activeView())
        activeFile = mainWindow->area()->activeView()->document()->documentSpecifier();

    WorkingSet* currentWorkingSet = 0;
    QSet<QString> openFiles;
    if(!mainWindow->area()->workingSet().isEmpty())
    {
        currentWorkingSet = controller->getWorkingSet(mainWindow->area()->workingSet());
        openFiles = currentWorkingSet->fileList().toSet();
    }

    bool allOpen = true;
    bool noneOpen = true;
    bool needResize = false;
    bool allHidden = true;

    for(QMap< QString, FileWidget* >::iterator it = m_fileWidgets.begin(); it != m_fileWidgets.end(); ++it)
    {
        if(openFiles.contains(it.key()))
        {
            // File belongs to the current working set: offer removal.
            noneOpen = false;
            (*it)->m_button->setToolTip(i18n("Remove this file from the current working set"));
            (*it)->m_button->setIcon(KIcon("list-remove"));
            (*it)->show();
        }else{
            // File is not in the current working set: offer addition.
            allOpen = false;
            (*it)->m_button->setToolTip(i18n("Add this file to the current working set"));
            (*it)->m_button->setIcon(KIcon("list-add"));
            if(currentWorkingSet == m_set)
            {
                (*it)->hide();
                needResize = true;
            }
        }

        if(!(*it)->isHidden())
            allHidden = false;

        // Highlight the label of the file shown in the active view.
        (*it)->m_label->setIsActiveFile(it.key() == activeFile);
    }

    // NOTE: always hide merge & subtract-all on the current working set.
    // If we want to enable mergeButton, we have to fix its behavior, since it
    // operates directly on the set contents and not on the m_fileWidgets.
    m_mergeButton->setHidden(allOpen || currentWorkingSet == m_set);
    m_subtractButton->setHidden(noneOpen || mainWindow->area()->workingSet() == m_set->id());
    m_deleteButton->setHidden(m_set->hasConnectedAreas());

    if(m_set->id() == mainWindow->area()->workingSet()) {
        // This set is the active one: repurpose the open button as "Close".
        disconnect(m_openButton, SIGNAL(clicked(bool)), m_setButton, SLOT(loadSet()));
        connect(m_openButton, SIGNAL(clicked(bool)), m_setButton, SLOT(closeSet()));
        connect(m_openButton, SIGNAL(clicked(bool)), this, SIGNAL(shouldClose()));
        m_openButton->setIcon(KIcon("project-development-close"));
        m_openButton->setText(i18n("Close"));
    }else{
// Exercises MergeSortStage invalidation: a record invalidated during a
// saveState/restoreState cycle is force-fetched and still returned as an
// owned object, and merge order is preserved for the remaining records.
void run() {
    OldClientWriteContext ctx(&_txn, ns());
    Database* db = ctx.db();
    Collection* coll = db->getCollection(ns());
    // Create the collection on first use; writes require a unit of work.
    if (!coll) {
        WriteUnitOfWork wuow(&_txn);
        coll = db->createCollection(&_txn, ns());
        wuow.commit();
    }

    WorkingSet ws;

    // Sort by foo:1
    MergeSortStageParams msparams;
    msparams.pattern = BSON("foo" << 1);
    auto ms = make_unique<MergeSortStage>(&_txn, msparams, &ws, coll);

    // Each child index scan covers the full key range of its index.
    IndexScanParams params;
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = objWithMinKey(1);
    params.bounds.endKey = objWithMaxKey(1);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;

    // Index 'a'+i has foo equal to 'i'.
    int numIndices = 20;
    for (int i = 0; i < numIndices; ++i) {
        // 'a', 'b', ...
        string index(1, 'a' + i);
        insert(BSON(index << 1 << "foo" << i));
        BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
        addIndex(indexSpec);
        params.descriptor = getIndex(indexSpec, coll);
        ms->addChild(new IndexScan(&_txn, params, &ws, NULL));
    }

    set<RecordId> recordIds;
    getRecordIds(&recordIds, coll);
    set<RecordId>::iterator it = recordIds.begin();

    // Get 10 results. Should be getting results in order of 'recordIds'.
    int count = 0;
    while (!ms->isEOF() && count < 10) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ms->work(&id);
        if (PlanStage::ADVANCED != status) {
            continue;
        }
        WorkingSetMember* member = ws.get(id);
        ASSERT_EQUALS(member->recordId, *it);
        BSONElement elt;
        // Document number 'count' was inserted with field 'a'+count == 1.
        string index(1, 'a' + count);
        ASSERT(member->getFieldDotted(index, &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(count, elt.numberInt());
        ++count;
        ++it;
    }

    // Invalidate recordIds[11]. Should force a fetch and return the deleted document.
    ms->saveState();
    ms->invalidate(&_txn, *it, INVALIDATION_DELETION);
    ms->restoreState();

    // Make sure recordIds[11] was fetched for us.
    {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status;
        do {
            status = ms->work(&id);
        } while (PlanStage::ADVANCED != status);

        WorkingSetMember* member = ws.get(id);
        // The invalidated document comes back as an owned object with no
        // backing record id.
        ASSERT(!member->hasRecordId());
        ASSERT(member->hasObj());
        string index(1, 'a' + count);
        BSONElement elt;
        ASSERT_TRUE(member->getFieldDotted(index, &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(count, elt.numberInt());
        ++it;
        ++count;
    }

    // And get the rest.
    while (!ms->isEOF()) {
        WorkingSetID id = WorkingSet::INVALID_ID;
        PlanStage::StageState status = ms->work(&id);
        if (PlanStage::ADVANCED != status) {
            continue;
        }
        WorkingSetMember* member = ws.get(id);
        ASSERT_EQUALS(member->recordId, *it);
        BSONElement elt;
        string index(1, 'a' + count);
        ASSERT_TRUE(member->getFieldDotted(index, &elt));
        ASSERT_EQUALS(1, elt.numberInt());
        ASSERT(member->getFieldDotted("foo", &elt));
        ASSERT_EQUALS(count, elt.numberInt());
        ++count;
        ++it;
    }
}
void run() { Client::WriteContext ctx(&_txn, ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(&_txn, ns()); if (!coll) { coll = db->createCollection(&_txn, ns()); } WorkingSet ws; // Sort by foo:1 MergeSortStageParams msparams; msparams.pattern = BSON("foo" << 1); auto_ptr<MergeSortStage> ms(new MergeSortStage(msparams, &ws, coll)); IndexScanParams params; params.bounds.isSimpleRange = true; params.bounds.startKey = objWithMinKey(1); params.bounds.endKey = objWithMaxKey(1); params.bounds.endKeyInclusive = true; params.direction = 1; // Index 'a'+i has foo equal to 'i'. int numIndices = 20; for (int i = 0; i < numIndices; ++i) { // 'a', 'b', ... string index(1, 'a' + i); insert(BSON(index << 1 << "foo" << i)); BSONObj indexSpec = BSON(index << 1 << "foo" << 1); addIndex(indexSpec); params.descriptor = getIndex(indexSpec, coll); ms->addChild(new IndexScan(&_txn, params, &ws, NULL)); } set<DiskLoc> locs; getLocs(&locs, coll); set<DiskLoc>::iterator it = locs.begin(); ctx.commit(); // Get 10 results. Should be getting results in order of 'locs'. int count = 0; while (!ms->isEOF() && count < 10) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState status = ms->work(&id); if (PlanStage::ADVANCED != status) { continue; } WorkingSetMember* member = ws.get(id); ASSERT_EQUALS(member->loc, *it); BSONElement elt; string index(1, 'a' + count); ASSERT(member->getFieldDotted(index, &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(count, elt.numberInt()); ++count; ++it; } // Invalidate locs[11]. Should force a fetch. We don't get it back. ms->prepareToYield(); ms->invalidate(*it, INVALIDATION_DELETION); ms->recoverFromYield(&_txn); // Make sure locs[11] was fetched for us. { // TODO: If we have "return upon invalidation" ever triggerable, do the following test. 
/* WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState status; do { status = ms->work(&id); } while (PlanStage::ADVANCED != status); WorkingSetMember* member = ws.get(id); ASSERT(!member->hasLoc()); ASSERT(member->hasObj()); string index(1, 'a' + count); BSONElement elt; ASSERT_TRUE(member->getFieldDotted(index, &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(count, elt.numberInt()); */ ++it; ++count; } // And get the rest. while (!ms->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState status = ms->work(&id); if (PlanStage::ADVANCED != status) { continue; } WorkingSetMember* member = ws.get(id); ASSERT_EQUALS(member->loc, *it); BSONElement elt; string index(1, 'a' + count); ASSERT_TRUE(member->getFieldDotted(index, &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(count, elt.numberInt()); ++count; ++it; } }
void run() { Client::WriteContext ctx(&_txn, ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(&_txn, ns()); if (!coll) { coll = db->createCollection(&_txn, ns()); } fillData(); // The data we're going to later invalidate. set<DiskLoc> locs; getLocs(&locs, coll); // Build the mock scan stage which feeds the data. WorkingSet ws; auto_ptr<MockStage> ms(new MockStage(&ws)); insertVarietyOfObjects(ms.get(), coll); SortStageParams params; params.collection = coll; params.pattern = BSON("foo" << 1); params.limit = limit(); auto_ptr<SortStage> ss(new SortStage(&_txn, params, &ws, ms.get())); const int firstRead = 10; // Have sort read in data from the mock stage. for (int i = 0; i < firstRead; ++i) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState status = ss->work(&id); ASSERT_NOT_EQUALS(PlanStage::ADVANCED, status); } // We should have read in the first 'firstRead' locs. Invalidate the first. ss->saveState(); set<DiskLoc>::iterator it = locs.begin(); ss->invalidate(*it++, INVALIDATION_DELETION); ss->restoreState(&_txn); // Read the rest of the data from the mock stage. while (!ms->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; ss->work(&id); } // Release to prevent double-deletion. ms.release(); // Let's just invalidate everything now. ss->saveState(); while (it != locs.end()) { ss->invalidate(*it++, INVALIDATION_DELETION); } ss->restoreState(&_txn); // Invalidation of data in the sort stage fetches it but passes it through. int count = 0; while (!ss->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState status = ss->work(&id); if (PlanStage::ADVANCED != status) { continue; } WorkingSetMember* member = ws.get(id); ASSERT(member->hasObj()); ASSERT(!member->hasLoc()); ++count; } ctx.commit(); // Returns all docs. ASSERT_EQUALS(limit() ? limit() : numObj(), count); }
void run() { Client::WriteContext ctx(ns()); Database* db = ctx.ctx().db(); Collection* coll = db->getCollection(ns()); if (!coll) { coll = db->createCollection(ns()); } // Insert a bunch of data for (int i = 0; i < 50; ++i) { insert(BSON("foo" << 1 << "bar" << 1)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL)); // Scan over foo == 1 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1), coll); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // Scan over bar == 1 params.descriptor = getIndex(BSON("bar" << 1), coll); ah->addChild(new IndexScan(params, &ws, NULL)); // Get the set of disklocs in our collection to use later. set<DiskLoc> data; getLocs(&data, coll); // We're making an assumption here that happens to be true because we clear out the // collection before running this: increasing inserts have increasing DiskLocs. // This isn't true in general if the collection is not dropped beforehand. WorkingSetID id; // Sorted AND looks at the first child, which is an index scan over foo==1. ah->work(&id); // The first thing that the index scan returns (due to increasing DiskLoc trick) is the // very first insert, which should be the very first thing in data. Let's invalidate it // and make sure it shows up in the flagged results. ah->prepareToYield(); ah->invalidate(*data.begin()); remove(data.begin()->obj()); ah->recoverFromYield(); // Make sure the nuked obj is actually in the flagged data. 
ASSERT_EQUALS(ws.getFlagged().size(), size_t(1)); WorkingSetMember* member = ws.get(*ws.getFlagged().begin()); ASSERT_EQUALS(WorkingSetMember::OWNED_OBJ, member->state); BSONElement elt; ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_EQUALS(1, elt.numberInt()); set<DiskLoc>::iterator it = data.begin(); // Proceed along, AND-ing results. int count = 0; while (!ah->isEOF() && count < 10) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; ++it; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_EQUALS(member->loc, *it); } // Move 'it' to a result that's yet to show up. for (int i = 0; i < count + 10; ++i) { ++it; } // Remove a result that's coming up. It's not the 'target' result of the AND so it's // not flagged. ah->prepareToYield(); ah->invalidate(*it); remove(it->obj()); ah->recoverFromYield(); // Get all results aside from the two we killed. while (!ah->isEOF()) { WorkingSetID id; PlanStage::StageState status = ah->work(&id); if (PlanStage::ADVANCED != status) { continue; } ++count; member = ws.get(id); ASSERT_TRUE(member->getFieldDotted("foo", &elt)); ASSERT_EQUALS(1, elt.numberInt()); ASSERT_TRUE(member->getFieldDotted("bar", &elt)); ASSERT_EQUALS(1, elt.numberInt()); } ASSERT_EQUALS(count, 48); ASSERT_EQUALS(size_t(1), ws.getFlagged().size()); }
// Exercises AndHashStage invalidation: a document deleted while yielded must
// be flagged for review and must not appear in the stage's results.
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    for (int i = 0; i < 50; ++i) {
        insert(BSON("_id" << i << "foo" << i << "bar" << i << "baz" << i));
    }
    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));
    addIndex(BSON("baz" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo <= 20 (descending)
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar <= 19 (descending)
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 19);
    ah->addChild(new IndexScan(params, &ws, NULL));

    // First call to work reads the first result from the children.
    // The first result is for the first scan over foo is {foo: 20, bar: 20, baz: 20}.
    // The first result is for the second scan over bar is {foo: 19, bar: 19, baz: 19}.
    WorkingSetID id;
    PlanStage::StageState status = ah->work(&id);
    ASSERT_EQUALS(PlanStage::NEED_TIME, status);

    // Nothing has been invalidated yet, so nothing should be flagged.
    const unordered_set<WorkingSetID>& flagged = ws.getFlagged();
    ASSERT_EQUALS(size_t(0), flagged.size());

    // "delete" deletedObj (by invalidating the DiskLoc of the obj that matches it).
    BSONObj deletedObj = BSON("_id" << 20 << "foo" << 20 << "bar" << 20 << "baz" << 20);
    ah->prepareToYield();
    set<DiskLoc> data;
    getLocs(&data, coll);
    // Find the loc whose stored document equals deletedObj and invalidate it.
    for (set<DiskLoc>::const_iterator it = data.begin(); it != data.end(); ++it) {
        if (0 == deletedObj.woCompare(it->obj())) {
            ah->invalidate(*it, INVALIDATION_DELETION);
            break;
        }
    }
    ah->recoverFromYield();

    // The deleted obj should show up in flagged.
    ASSERT_EQUALS(size_t(1), flagged.size());

    // And not in our results.
    int count = 0;
    while (!ah->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) {
            continue;
        }
        WorkingSetMember* wsm = ws.get(id);
        // Every returned document must differ from the deleted one.
        ASSERT_NOT_EQUALS(0, deletedObj.woCompare(wsm->loc.obj()));
        ++count;
    }
    // Expect 20 results; the invalidated document is not among them.
    ASSERT_EQUALS(count, 20);
}