Example #1
    void run() {
        // Insert a ton of documents with a: 1
        for (size_t i = 0; i < 1000; ++i) {
            insert(BSON("a" << 1));
        }

        // Insert a ton of other documents with a: 2
        for (size_t i = 0; i < 1000; ++i) {
            insert(BSON("a" << 2));
        }

        // Make an index on a:1
        addIndex(BSON("a" << 1));

        AutoGetCollectionForRead ctx(&_txn, ns());
        Collection* coll = ctx.getCollection();

        // Set up the distinct stage.
        std::vector<IndexDescriptor*> indexes;
        coll->getIndexCatalog()->findIndexesByKeyPattern(&_txn, BSON("a" << 1), false, &indexes);
        ASSERT_EQ(indexes.size(), 1U);

        DistinctParams params;
        params.descriptor = indexes[0];
        params.direction = 1;
        // Distinct-ing over the 0-th field of the keypattern.
        params.fieldNo = 0;
        // We'll look at all values in the bounds.
        params.bounds.isSimpleRange = false;
        OrderedIntervalList oil("a");
        oil.intervals.push_back(IndexBoundsBuilder::allValues());
        params.bounds.fields.push_back(oil);

        WorkingSet ws;
        DistinctScan distinct(&_txn, params, &ws);

        WorkingSetID wsid;
        // Get our first result.
        int firstResultWorks = 0;
        while (PlanStage::ADVANCED != distinct.work(&wsid)) {
            ++firstResultWorks;
        }
        // 5 is a bogus number.  There's some amount of setup done by the first few calls but
        // we should return the first result relatively promptly.
        ASSERT_LESS_THAN(firstResultWorks, 5);
        ASSERT_EQUALS(1, getIntFieldDotted(ws, wsid, "a"));

        // Getting our second result should be very quick as we just skip
        // over the first result.
        int secondResultWorks = 0;
        while (PlanStage::ADVANCED != distinct.work(&wsid)) {
            ++secondResultWorks;
        }
        ASSERT_EQUALS(2, getIntFieldDotted(ws, wsid, "a"));
        // This is 0 because we don't have to work through all of the 'a' == 1 keys;
        // the distinct scan skips straight to the next distinct value.
        ASSERT_EQUALS(0, secondResultWorks);

        ASSERT_EQUALS(PlanStage::IS_EOF, distinct.work(&wsid));
    }
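
The getIntFieldDotted() helper called above is not shown on this page. The sketch below is only an illustration of what such a helper might do, assuming it reads the value out of the WorkingSetMember's index key data; it is not the test file's actual implementation.

    // Sketch only: pull the integer value of 'field' out of the WorkingSetMember
    // produced by the distinct scan.  A distinct scan is covered, so the value
    // comes from index key data rather than a fetched document.
    int getIntFieldDotted(WorkingSet& ws, WorkingSetID wsid, const std::string& field) {
        WorkingSetMember* member = ws.get(wsid);
        BSONElement elt;
        ASSERT(member->getFieldDotted(field, &elt));
        ASSERT(elt.isNumber());
        return elt.numberInt();
    }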
Example #2
        /**
         * Use the MultiPlanRunner to pick the best plan for the query 'cq'.  Goes through
         * normal planning to generate solutions and feeds them to the MPR.
         *
         * Takes ownership of 'cq'.  Caller DOES NOT own the returned QuerySolution*.
         */
        QuerySolution* pickBestPlan(CanonicalQuery* cq) {
            Client::ReadContext ctx(ns);
            Collection* collection = ctx.ctx().db()->getCollection(ns);

            QueryPlannerParams plannerParams;
            fillOutPlannerParams(collection, cq, &plannerParams);
            // Turn this off; otherwise it pops up in some plans.
            plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;

            // Plan.
            vector<QuerySolution*> solutions;
            Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
            ASSERT(status.isOK());

            ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);

            // Fill out the MPR.
            _mpr.reset(new MultiPlanRunner(collection, cq));

            // Put each solution from the planner into the MPR.
            for (size_t i = 0; i < solutions.size(); ++i) {
                WorkingSet* ws;
                PlanStage* root;
                ASSERT(StageBuilder::build(*solutions[i], &root, &ws));
                // Takes ownership of all arguments.
                _mpr->addPlan(solutions[i], root, ws);
            }

            // And return a pointer to the best solution.  The MPR owns the pointer.
            size_t bestPlan = numeric_limits<size_t>::max();
            BSONObj unused;
            ASSERT(_mpr->pickBestPlan(&bestPlan, &unused));
            ASSERT_LESS_THAN(bestPlan, solutions.size());
            return solutions[bestPlan];
        }
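
For context, a caller-side sketch: a test case would canonicalize a raw query and hand the CanonicalQuery to pickBestPlan(). The canonicalize() overload shown is the old-style (ns, query, out) form implied by this example's era and should be treated as an assumption, not something shown on this page.

        // Illustrative caller, not part of the example above.
        void run() {
            CanonicalQuery* cq;
            ASSERT(CanonicalQuery::canonicalize(ns, BSON("foo" << 7), &cq).isOK());
            QuerySolution* soln = pickBestPlan(cq);  // takes ownership of cq
            ASSERT(NULL != soln);                    // owned by the MPR; do not delete
        }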
Example #3
            /**
             * Tries to grab a series of connections from the pool, perform checks on
             * them, then put them back into the pool.  After that, it checks that
             * these connections can be retrieved again from the pool.
             *
             * @param checkFunc method for comparing new connections and arg2.
             * @param arg2 the value to pass as the 2nd parameter of checkFunc.
             * @param newConnsToCreate the number of new connections to make.
             */
            void checkNewConns(void (*checkFunc)(uint64_t, uint64_t), uint64_t arg2,
                               size_t newConnsToCreate) {

                vector<ScopedDbConnection*> newConnList;

                for (size_t x = 0; x < newConnsToCreate; x++) {
                    ScopedDbConnection* newConn = new ScopedDbConnection(TARGET_HOST);
                    checkFunc(newConn->get()->getSockCreationMicroSec(), arg2);
                    newConnList.push_back(newConn);
                }

                const uint64_t oldCreationTime = mongo::curTimeMicros64();

                for (vector<ScopedDbConnection*>::iterator iter = newConnList.begin();
                        iter != newConnList.end(); ++iter) {
                    (*iter)->done();
                    delete *iter;
                }
                newConnList.clear();

                // Check that the connections created after the purge were put back to the pool.
                for (size_t x = 0; x < newConnsToCreate; x++) {
                    ScopedDbConnection* newConn = new ScopedDbConnection(TARGET_HOST);
                    ASSERT_LESS_THAN(newConn->get()->getSockCreationMicroSec(), oldCreationTime);
                    newConnList.push_back(newConn);
                }

                for (vector<ScopedDbConnection*>::iterator iter = newConnList.begin();
                        iter != newConnList.end(); ++iter) {
                    (*iter)->done();
                    delete *iter;
                }
            }
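
The checkFunc callback passed in above is just a thin wrapper around an assertion macro. The pairing below is hypothetical; the names and the call site are illustrative, not taken from this page.

            // Hypothetical comparison callback: require each freshly created
            // socket to be newer than a reference timestamp.
            static void assertGreaterThan(uint64_t a, uint64_t b) {
                ASSERT_GREATER_THAN(a, b);
            }

            // Possible call site, assuming 'referenceTimeMicros' was captured
            // with mongo::curTimeMicros64() earlier in the test:
            //     checkNewConns(assertGreaterThan, referenceTimeMicros, 10);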
Example #4
        void run() {
            Client::WriteContext ctx(ns());
            Database* db = ctx.ctx().db();
            Collection* coll = db->getCollection(ns());
            if (!coll) {
                coll = db->createCollection(ns());
            }

            for (int i = 0; i < 50; ++i) {
                insert(BSON("foo" << 1 << "bar" << i));
            }

            addIndex(BSON("foo" << 1));
            addIndex(BSON("bar" << 1));

            WorkingSet ws;
            scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

            // Scan over foo == 1
            IndexScanParams params;
            params.descriptor = getIndex(BSON("foo" << 1), coll);
            params.bounds.isSimpleRange = true;
            params.bounds.startKey = BSON("" << 1);
            params.bounds.endKey = BSON("" << 1);
            params.bounds.endKeyInclusive = true;
            params.direction = 1;
            ah->addChild(new IndexScan(params, &ws, NULL));

            // Intersect with 7 <= bar <= 10000 (endKeyInclusive is still true from the scan above).
            params.descriptor = getIndex(BSON("bar" << 1), coll);
            params.bounds.startKey = BSON("" << 7);
            params.bounds.endKey = BSON("" << 10000);
            ah->addChild(new IndexScan(params, &ws, NULL));

            WorkingSetID lastId = WorkingSet::INVALID_ID;

            int count = 0;
            while (!ah->isEOF()) {
                WorkingSetID id;
                PlanStage::StageState status = ah->work(&id);
                if (PlanStage::ADVANCED != status) { continue; }
                BSONObj thisObj = ws.get(id)->loc.obj();
                ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
                ++count;
                if (WorkingSet::INVALID_ID != lastId) {
                    BSONObj lastObj = ws.get(lastId)->loc.obj();
                    ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
                }
                lastId = id;
            }

            ASSERT_EQUALS(count, 43);
        }
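
getIndex() is another helper that is referenced but not shown here. In tests of this vintage it typically just resolves a key pattern through the collection's index catalog; the sketch below assumes that and is not the file's actual helper.

        // Assumed shape of a getIndex()-style helper: look the descriptor up
        // by key pattern in the collection's index catalog.
        IndexDescriptor* getIndex(const BSONObj& keyPattern, Collection* coll) {
            return coll->getIndexCatalog()->findIndexByKeyPattern(keyPattern);
        }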
Example #5
    TEST(DBHelperTests, FindDiskLocs) {

        DBDirectClient client;
        OperationContextImpl txn;

        // Some unique tag we can use to make sure we're pulling back the right data
        OID tag = OID::gen();
        client.remove( ns, BSONObj() );

        int numDocsInserted = 10;
        for ( int i = 0; i < numDocsInserted; ++i ) {
            client.insert( ns, BSON( "_id" << i << "tag" << tag ) );
        }

        long long maxSizeBytes = 1024 * 1024 * 1024;

        set<DiskLoc> locs;
        long long numDocsFound;
        long long estSizeBytes;
        {
            // search _id range (0, 10)
            Lock::DBRead lk(txn.lockState(), ns);

            KeyRange range( ns,
                            BSON( "_id" << 0 ),
                            BSON( "_id" << numDocsInserted ),
                            BSON( "_id" << 1 ) );

            Status result = Helpers::getLocsInRange( &txn,
                                                     range,
                                                     maxSizeBytes,
                                                     &locs,
                                                     &numDocsFound,
                                                     &estSizeBytes );

            ASSERT_EQUALS( result, Status::OK() );
            ASSERT_EQUALS( numDocsFound, numDocsInserted );
            ASSERT_NOT_EQUALS( estSizeBytes, 0 );
            ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );

            Database* db = dbHolder().get(
                                    &txn, nsToDatabase(range.ns), storageGlobalParams.dbpath);
            const Collection* collection = db->getCollection(&txn, ns);

            // Make sure all the disklocs actually correspond to the right info
            for ( set<DiskLoc>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
                const BSONObj obj = collection->docFor(*it);
                ASSERT_EQUALS(obj["tag"].OID(), tag);
            }
        }
    }
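
This example (like several others on this page) references a file-scope ns string that is not shown. Something along these lines would make the snippet self-contained; the actual value used by the test is an assumption here.

    namespace {
        // Assumed test namespace; the real constant is defined elsewhere in the file.
        const char* const ns = "unittests.DBHelperTests";
    }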
Example #6
    TEST(DBHelperTests, FindDiskLocs) {

        DBDirectClient client;
        // Some unique tag we can use to make sure we're pulling back the right data
        OID tag = OID::gen();
        client.remove( ns, BSONObj() );

        int numDocsInserted = 10;
        for ( int i = 0; i < numDocsInserted; ++i ) {
            client.insert( ns, BSON( "_id" << i << "tag" << tag ) );
        }

        long long maxSizeBytes = 1024 * 1024 * 1024;

        set<DiskLoc> locs;
        long long numDocsFound;
        long long estSizeBytes;
        {
            // search _id range (0, 10)
            Lock::DBRead lk( ns );
            Client::Context ctx( ns );
            KeyRange range( ns,
                            BSON( "_id" << 0 ),
                            BSON( "_id" << numDocsInserted ),
                            BSON( "_id" << 1 ) );

            Status result = Helpers::getLocsInRange( range,
                                                     maxSizeBytes,
                                                     &locs,
                                                     &numDocsFound,
                                                     &estSizeBytes );

            ASSERT_EQUALS( result, Status::OK() );
            ASSERT_EQUALS( numDocsFound, numDocsInserted );
            ASSERT_NOT_EQUALS( estSizeBytes, 0 );
            ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );

            // Make sure all the disklocs actually correspond to the right info
            for ( set<DiskLoc>::iterator it = locs.begin(); it != locs.end(); ++it ) {
                ASSERT_EQUALS( it->obj()["tag"].OID(), tag );
            }
        }
    }
Example #7
        /**
         * Use the MultiPlanStage to pick the best plan for the query 'cq'.  Goes through
         * normal planning to generate solutions and feeds them to the MPS.
         *
         * Takes ownership of 'cq'.  Caller DOES NOT own the returned QuerySolution*.
         */
        QuerySolution* pickBestPlan(CanonicalQuery* cq) {
            Client::ReadContext ctx(&_txn, ns);
            Collection* collection = ctx.ctx().db()->getCollection(&_txn, ns);

            QueryPlannerParams plannerParams;
            fillOutPlannerParams(&_txn, collection, cq, &plannerParams);
            // Turn this off; otherwise it pops up in some plans.
            plannerParams.options &= ~QueryPlannerParams::KEEP_MUTATIONS;

            // Plan.
            vector<QuerySolution*> solutions;
            Status status = QueryPlanner::plan(*cq, plannerParams, &solutions);
            ASSERT(status.isOK());

            ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U);

            // Fill out the MultiPlanStage.
            _mps.reset(new MultiPlanStage(&_txn, collection, cq));
            WorkingSet* ws = new WorkingSet();
            // Put each solution from the planner into the MultiPlanStage.
            for (size_t i = 0; i < solutions.size(); ++i) {
                PlanStage* root;
                ASSERT(StageBuilder::build(&_txn, collection, *solutions[i], ws, &root));
                // Takes ownership of all arguments.
                _mps->addPlan(solutions[i], root, ws);
            }

            _mps->pickBestPlan(); // This is what sets a backup plan, should we want to test for it.
            ASSERT(_mps->bestPlanChosen());

            size_t bestPlanIdx = _mps->bestPlanIdx();
            ASSERT_LESS_THAN(bestPlanIdx, solutions.size());

            // And return a pointer to the best solution.
            return _mps->bestSolution();
        }
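
Given the comment above about pickBestPlan() also setting a backup plan, a test built on this helper could assert on that as well. The hasBackupPlan() accessor is assumed to exist on this era's MultiPlanStage; treat it as an assumption rather than something shown on this page.

        // Hypothetical extra check for a query that is not expected to need a
        // backup (blocking-sort) plan.
        void assertNoBackupPlan() {
            ASSERT(!_mps->hasBackupPlan());
        }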