Example No. 1
MapNode* generateMapTree(int maxRooms)
{
	MapNode* rootNode = new MapNode();
	std::vector<MapNode*> leafList;	//local vector: the original heap-allocated list was never freed

	rootNode->refBlock.dimensions.x = MAX_X_SIZE;
	rootNode->refBlock.dimensions.y = MAX_Y_SIZE;
	rootNode->refBlock.position.x = 0;
	rootNode->refBlock.position.y = 0;
	generateMapHelper(rootNode, 1, maxRooms);

	//Now we need to create rooms in the leaves.
	getLeafNodes(rootNode, &leafList);

	//For each leaf node, generate a room somewhere in that node.
	for(size_t i = 0; i < leafList.size(); i++)
	{
		generateRoom(leafList[i]);
	}

	//Connect pairs of rooms with corridors. The i + 1 bound avoids
	//reading past the end of the list when the leaf count is odd.
	for(size_t i = 0; i + 1 < leafList.size(); i += 2)
	{
		connectRooms(leafList[i], leafList[i + 1]);
	}
	return rootNode;
}
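
This example relies on types and helpers defined elsewhere (MapNode, generateMapHelper, generateRoom, connectRooms, MAX_X_SIZE, MAX_Y_SIZE). A minimal sketch of what the node type might look like, inferred from the field accesses above; the struct layout beyond those field names is an assumption:

//Hypothetical supporting types, inferred from the accesses in Example No. 1.
struct Vec2
{
	int x;
	int y;
};

struct Block
{
	Vec2 dimensions;	//width/height of the region this node covers
	Vec2 position;		//top-left corner of the region
};

struct MapNode
{
	Block refBlock;
	MapNode* left;		//children produced by the BSP split
	MapNode* right;

	MapNode() : left(NULL), right(NULL) {}
};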
Example No. 2
void getLeafNodes(MapNode* rootNode, std::vector<MapNode*>* leaves)
{
	if(rootNode == NULL)
	{
		//we went too deep
		return;
	}

	if(rootNode->left == NULL && rootNode->right == NULL)
	{
		//it's a leaf, add it.
		leaves->push_back(rootNode);
		return;
	}

	getLeafNodes(rootNode->left, leaves);
	getLeafNodes(rootNode->right, leaves);
}
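
getLeafNodes recurses once per tree level, which is fine for the shallow trees the BSP split above produces but could overflow the call stack on a deeply skewed tree. A minimal iterative sketch with an explicit stack, assuming the same MapNode type as above (getLeafNodesIterative is a hypothetical name); pushing the right child first preserves the recursive version's left-to-right leaf order:

#include <vector>

void getLeafNodesIterative(MapNode* rootNode, std::vector<MapNode*>* leaves)
{
	std::vector<MapNode*> stack;
	if(rootNode != NULL)
	{
		stack.push_back(rootNode);
	}

	while(!stack.empty())
	{
		MapNode* node = stack.back();
		stack.pop_back();

		if(node->left == NULL && node->right == NULL)
		{
			//it's a leaf, add it.
			leaves->push_back(node);
			continue;
		}

		//push the right child first so the left subtree is visited first.
		if(node->right != NULL) stack.push_back(node->right);
		if(node->left != NULL) stack.push_back(node->left);
	}
}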
Example No. 3
    bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
                                              const QueryPlannerParams& params,
                                              QuerySolutionNode** solnRoot) {
        vector<QuerySolutionNode*> leafNodes;

        if (!structureOKForExplode(*solnRoot)) {
            return false;
        }

        getLeafNodes(*solnRoot, &leafNodes);

        const BSONObj& desiredSort = query.getParsed().getSort();

        // How many scan leaves will result from our expansion?
        size_t totalNumScans = 0;

        // The value of entry i is how many leading index fields we would explode for
        // leafNodes[i].  We calculate this in the loop below and reuse it if we
        // decide to blow up that scan.
        vector<size_t> fieldsToExplode;

        // Upon explosion, each of the index scans must be able to provide the sort
        // order we're looking for.
        for (size_t i = 0; i < leafNodes.size(); ++i) {
            // We can do this because structureOKForExplode is only true if the leaves are index
            // scans.
            IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
            const IndexBounds& bounds = isn->bounds;

            // Simple-range bounds have no per-field interval lists, so there is no
            // point-interval prefix we can try to rewrite.
            if (bounds.isSimpleRange) {
                return false;
            }

            // How many scans will we create if we blow up this ixscan?
            size_t numScans = 1;

            // Skip every field that is a union of point intervals and build the resulting sort
            // order from the remaining fields.
            BSONObjIterator kpIt(isn->indexKeyPattern);
            size_t boundsIdx = 0;
            while (kpIt.more()) {
                const OrderedIntervalList& oil = bounds.fields[boundsIdx];
                if (!isUnionOfPoints(oil)) {
                    break;
                }
                numScans *= oil.intervals.size();
                kpIt.next();
                ++boundsIdx;
            }

            // There's no sort order left to gain by exploding.  Just go home.  TODO: verify nothing
            // clever we can do here.
            if (!kpIt.more()) {
                return false;
            }

            // The rest of the fields define the sort order we could obtain by exploding
            // the bounds.
            BSONObjBuilder resultingSortBob;
            while (kpIt.more()) {
                resultingSortBob.append(kpIt.next());
            }

            // See if it's the order we're looking for.
            BSONObj possibleSort = resultingSortBob.obj();
            if (0 != possibleSort.woCompare(desiredSort)) {
                return false;
            }

            // Do some bookkeeping to see how many ixscans we'll create total.
            totalNumScans += numScans;

            // And for this scan how many fields we expand.
            fieldsToExplode.push_back(boundsIdx);
        }

        // Too many ixscans spoil the performance.
        if (totalNumScans > QueryPlannerAnalysis::kMaxScansToExplode) {
            QLOG() << "Could expand ixscans to pull out sort order but resulting scan count"
                   << "(" << totalNumScans << ") is too high.";
            return false;
        }

        // If we're here, we can (probably?  depends on how restrictive the structure check is)
        // get our sort order via ixscan blow-up.
        for (size_t i = 0; i < leafNodes.size(); ++i) {
            IndexScanNode* isn = static_cast<IndexScanNode*>(leafNodes[i]);
            QuerySolutionNode* newNode = explodeScan(isn, desiredSort, fieldsToExplode[i]);
            // Replace 'isn' with 'newNode'
            replaceNodeInTree(solnRoot, isn, newNode);
            // And get rid of the old data access node.
            delete isn;
        }

        return true;
    }
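
The scan-count bookkeeping in the loop above is a Cartesian-product count: exploding an index scan whose leading fields are each bounded by a union of points yields one scan per combination of point values. A standalone toy sketch of that arithmetic (made-up point counts; this is not MongoDB code):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Hypothetical bounds: the leading index fields are unions of points,
    // e.g. field "a" in {1, 2} (2 points) and field "b" in {10, 20, 30}
    // (3 points).  Only the point counts matter for the scan count.
    std::vector<std::size_t> pointsPerField;
    pointsPerField.push_back(2);
    pointsPerField.push_back(3);

    // Same arithmetic as "numScans *= oil.intervals.size()" above: the
    // exploded plan needs one scan per combination of point values.
    std::size_t numScans = 1;
    for (std::size_t i = 0; i < pointsPerField.size(); ++i) {
        numScans *= pointsPerField[i];
    }

    // Each of the 6 scans has a fixed point prefix (e.g. a == 1, b == 20),
    // so each returns documents ordered by the remaining index fields; a
    // merge over the scans then provides the sort without a blocking stage.
    std::cout << "exploding creates " << numScans << " scans" << std::endl;
    return 0;
}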
Example No. 4
    // static
    QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& query,
                                                           const QueryPlannerParams& params,
                                                           QuerySolutionNode* solnRoot) {
        auto_ptr<QuerySolution> soln(new QuerySolution());
        soln->filterData = query.getQueryObj();
        verify(soln->filterData.isOwned());
        soln->ns = query.ns();
        soln->indexFilterApplied = params.indexFiltersApplied;

        solnRoot->computeProperties();

        // solnRoot finds all our results.  Let's see what transformations we must perform to the
        // data.

        // If we're answering a query on a sharded system, we need to drop documents that aren't
        // logically part of our shard.
        if (params.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
            // TODO: We could use params.shardKey to do fetch analysis instead of always fetching.
            if (!solnRoot->fetched()) {
                FetchNode* fetch = new FetchNode();
                fetch->children.push_back(solnRoot);
                solnRoot = fetch;
            }
            ShardingFilterNode* sfn = new ShardingFilterNode();
            sfn->children.push_back(solnRoot);
            solnRoot = sfn;
        }

        bool hasSortStage = false;
        solnRoot = analyzeSort(query, params, solnRoot, &hasSortStage);

        // This can happen if we need to create a blocking sort stage and we're not allowed to.
        if (NULL == solnRoot) { return NULL; }

        // A solution can be blocking if it has a blocking sort stage or
        // a hashed AND stage.
        bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
        soln->hasBlockingStage = hasSortStage || hasAndHashStage;

        // If we can (and should), add the keep mutations stage.

        // We cannot keep mutated documents if:
        //
        // 1. The query requires an index to evaluate the predicate ($text).  We can't tell whether
        // or not the doc actually satisfies the $text predicate since we can't evaluate a
        // text MatchExpression.
        //
        // 2. The query implies a sort ($geoNear).  It would be rather expensive and hacky to merge
        // the document at the right place.
        //
        // 3. There is an index-provided sort.  Ditto above comment about merging.
        //
        // TODO: do we want some kind of pre-planning step where we look for certain nodes and cache
        // them?  We do lookups in the tree a few times.  This may not matter as most trees are
        // shallow in terms of query nodes.
        bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT)
                              || hasNode(solnRoot, STAGE_GEO_NEAR_2D)
                              || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE)
                              || (!query.getParsed().getSort().isEmpty() && !hasSortStage);

        // Only these stages can produce flagged results.  A stage has to hold state past one call
        // to work(...) in order to possibly flag a result.
        bool couldProduceFlagged = hasNode(solnRoot, STAGE_GEO_2D)
                                || hasAndHashStage
                                || hasNode(solnRoot, STAGE_AND_SORTED)
                                || hasNode(solnRoot, STAGE_FETCH);

        bool shouldAddMutation = !cannotKeepFlagged && couldProduceFlagged;

        if (shouldAddMutation && (params.options & QueryPlannerParams::KEEP_MUTATIONS)) {
            KeepMutationsNode* keep = new KeepMutationsNode();

            // We must run the entire expression tree to make sure the document is still valid.
            keep->filter.reset(query.root()->shallowClone());

            if (STAGE_SORT == solnRoot->getType()) {
                // We want to insert the invalidated results before the sort stage, if there is one.
                verify(1 == solnRoot->children.size());
                keep->children.push_back(solnRoot->children[0]);
                solnRoot->children[0] = keep;
            }
            else {
                keep->children.push_back(solnRoot);
                solnRoot = keep;
            }
        }

        // Project the results.
        if (NULL != query.getProj()) {
            QLOG() << "PROJECTION: fetched status: " << solnRoot->fetched() << endl;
            QLOG() << "PROJECTION: Current plan is:\n" << solnRoot->toString() << endl;

            ProjectionNode::ProjectionType projType = ProjectionNode::DEFAULT;
            BSONObj coveredKeyObj;

            if (query.getProj()->requiresDocument()) {
                QLOG() << "PROJECTION: claims to require doc adding fetch.\n";
                // If the projection requires the entire document, somebody must fetch.
                if (!solnRoot->fetched()) {
                    FetchNode* fetch = new FetchNode();
                    fetch->children.push_back(solnRoot);
                    solnRoot = fetch;
                }
            }
            else if (!query.getProj()->wantIndexKey()) {
                // The only way we're here is if it's a simple projection.  That is, we can pick out
                // the fields we want to include and they're not dotted.  So we want to execute the
                // projection in the fast-path simple fashion.  Just don't know which fast path yet.
                QLOG() << "PROJECTION: requires fields\n";
                const vector<string>& fields = query.getProj()->getRequiredFields();
                bool covered = true;
                for (size_t i = 0; i < fields.size(); ++i) {
                    if (!solnRoot->hasField(fields[i])) {
                        QLOG() << "PROJECTION: not covered due to field "
                             << fields[i] << endl;
                        covered = false;
                        break;
                    }
                }

                QLOG() << "PROJECTION: is covered?: = " << covered << endl;

                // If any field is missing from the list of fields the projection wants,
                // a fetch is required.
                if (!covered) {
                    FetchNode* fetch = new FetchNode();
                    fetch->children.push_back(solnRoot);
                    solnRoot = fetch;

                    // It's simple but we'll have the full document and we should just iterate
                    // over that.
                    projType = ProjectionNode::SIMPLE_DOC;
                    QLOG() << "PROJECTION: not covered, fetching.";
                }
                else {
                    if (solnRoot->fetched()) {
                        // Fetched implies hasObj() so let's run with that.
                        projType = ProjectionNode::SIMPLE_DOC;
                        QLOG() << "PROJECTION: covered via FETCH, using SIMPLE_DOC fast path";
                    }
                    else {
                        // If we're here we're not fetched so we're covered.  Let's see if we can
                        // get out of using the default projType.  If there's only one leaf
                        // underneath and it's giving us index data we can use the faster covered
                        // impl.
                        vector<QuerySolutionNode*> leafNodes;
                        getLeafNodes(solnRoot, &leafNodes);

                        if (1 == leafNodes.size()) {
                            // Both the IXSCAN and DISTINCT stages provide covered key data.
                            if (STAGE_IXSCAN == leafNodes[0]->getType()) {
                                projType = ProjectionNode::COVERED_ONE_INDEX;
                                IndexScanNode* ixn = static_cast<IndexScanNode*>(leafNodes[0]);
                                coveredKeyObj = ixn->indexKeyPattern;
                                QLOG() << "PROJECTION: covered via IXSCAN, using COVERED fast path";
                            }
                            else if (STAGE_DISTINCT == leafNodes[0]->getType()) {
                                projType = ProjectionNode::COVERED_ONE_INDEX;
                                DistinctNode* dn = static_cast<DistinctNode*>(leafNodes[0]);
                                coveredKeyObj = dn->indexKeyPattern;
                                QLOG() << "PROJECTION: covered via DISTINCT, using COVERED fast path";
                            }
                        }
                    }
                }
            }

            // We now know we have whatever data is required for the projection.
            ProjectionNode* projNode = new ProjectionNode();
            projNode->children.push_back(solnRoot);
            projNode->fullExpression = query.root();
            projNode->projection = query.getParsed().getProj();
            projNode->projType = projType;
            projNode->coveredKeyObj = coveredKeyObj;
            solnRoot = projNode;
        }
        else {
            // If there's no projection, we must fetch, as the user wants the entire doc.
            if (!solnRoot->fetched()) {
                FetchNode* fetch = new FetchNode();
                fetch->children.push_back(solnRoot);
                solnRoot = fetch;
            }
        }

        if (0 != query.getParsed().getSkip()) {
            SkipNode* skip = new SkipNode();
            skip->skip = query.getParsed().getSkip();
            skip->children.push_back(solnRoot);
            solnRoot = skip;
        }

        // When there is both a blocking sort and a limit, the limit will
        // be enforced by the blocking sort.
        // Otherwise, we need to limit the results in the case of a hard limit
        // (i.e. the limit in the raw query is negative).
        if (0 != query.getParsed().getNumToReturn() &&
            !hasSortStage &&
            !query.getParsed().wantMore()) {

            LimitNode* limit = new LimitNode();
            limit->limit = query.getParsed().getNumToReturn();
            limit->children.push_back(solnRoot);
            solnRoot = limit;
        }

        soln->root.reset(solnRoot);
        return soln.release();
    }
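
analyzeDataAccess grows the plan by repeatedly wrapping solnRoot: each new stage takes the previous root as its only child, so the pipeline is built from the bottom up (scan, then fetch, then skip, then limit, and so on). A standalone toy sketch of that wrapping pattern (the Stage type and stage names are illustrative stand-ins, not MongoDB's QuerySolutionNode):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-in for a query solution node; each stage owns its
// children and deletes them recursively.
struct Stage {
    std::string name;
    std::vector<Stage*> children;
    explicit Stage(const std::string& n) : name(n) {}
    ~Stage() {
        for (std::size_t i = 0; i < children.size(); ++i) {
            delete children[i];
        }
    }
};

// The wrapping move used throughout analyzeDataAccess: the old root
// becomes the single child of the new stage, which becomes the new root.
Stage* wrap(const std::string& name, Stage* root) {
    Stage* stage = new Stage(name);
    stage->children.push_back(root);
    return stage;
}

void printTree(const Stage* stage, int depth = 0) {
    std::cout << std::string(depth * 2, ' ') << stage->name << std::endl;
    for (std::size_t i = 0; i < stage->children.size(); ++i) {
        printTree(stage->children[i], depth + 1);
    }
}

int main() {
    Stage* solnRoot = new Stage("IXSCAN");
    solnRoot = wrap("FETCH", solnRoot);  // added when the plan is not covered
    solnRoot = wrap("SKIP", solnRoot);   // added when the query has a skip
    solnRoot = wrap("LIMIT", solnRoot);  // added for a hard limit with no blocking sort
    printTree(solnRoot);                 // prints LIMIT -> SKIP -> FETCH -> IXSCAN
    delete solnRoot;
    return 0;
}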