Example #1
    bool DocumentSourceCursor::coalesce(const intrusive_ptr<DocumentSource>& nextSource) {
        // Note: Currently we assume the $limit is logically after any $sort or
        // $match. If we ever pull in $match or $sort using this method, we
        // will need to keep track of the order of the sub-stages.

        if (!_limit) {
            _limit = dynamic_cast<DocumentSourceLimit*>(nextSource.get());
            return _limit.get(); // false if next is not a $limit
        }
        else {
            return _limit->coalesce(nextSource);
        }
    }
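The else branch defers to DocumentSourceLimit::coalesce, which is not shown here. Below is a minimal, self-contained model of that contract; Stage and LimitStage are illustrative stand-ins rather than MongoDB classes, and the min() rule is an assumption (a document must satisfy both limits, so the tighter one plausibly wins):

#include <algorithm>
#include <cassert>

struct Stage {
    virtual ~Stage() = default;
    // Returns true if this stage absorbed 'next'; the caller then drops it.
    virtual bool coalesce(Stage*) { return false; }
};

struct LimitStage : Stage {
    long long limit;
    explicit LimitStage(long long n) : limit(n) {}
    bool coalesce(Stage* next) override {
        LimitStage* other = dynamic_cast<LimitStage*>(next);
        if (!other)
            return false; // only another $limit can be absorbed
        limit = std::min(limit, other->limit);
        return true;
    }
};

int main() {
    LimitStage a(10), b(3);
    assert(a.coalesce(&b)); // a absorbs b ...
    assert(a.limit == 3);   // ... and keeps the tighter limit
}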
Example #2
bool DocumentSourceFilter::coalesce(
    const intrusive_ptr<DocumentSource> &pNextSource) {

    /* we only know how to coalesce other filters */
    DocumentSourceFilter *pDocFilter =
        dynamic_cast<DocumentSourceFilter *>(pNextSource.get());
    if (!pDocFilter)
        return false;

    /*
      Two adjacent filters can be combined by creating a conjunction of
      their predicates.
     */
    intrusive_ptr<ExpressionNary> pAnd(ExpressionAnd::create());
    pAnd->addOperand(pFilter);
    pAnd->addOperand(pDocFilter->pFilter);
    pFilter = pAnd;

    return true;
}
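The conjunction idea in the comment above is independent of the aggregation framework: any two adjacent filters collapse into one whose predicate is the logical AND of both. A minimal sketch with std::function; Doc and Predicate are illustrative stand-ins, not MongoDB types:

#include <cassert>
#include <functional>

struct Doc { int x; };
using Predicate = std::function<bool(const Doc&)>;

// Combine two filter predicates into a single conjunctive predicate.
Predicate conjoin(Predicate a, Predicate b) {
    return [a = std::move(a), b = std::move(b)](const Doc& d) {
        return a(d) && b(d);
    };
}

int main() {
    Predicate inRange = conjoin([](const Doc& d) { return d.x >= 0; },
                                [](const Doc& d) { return d.x < 10; });
    assert(inRange(Doc{5}));
    assert(!inRange(Doc{42}));
}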
Example #3
bool DocumentSourceMatch::accept(
    const intrusive_ptr<Document> &pDocument) const {

    /*
      The matcher only takes BSON documents, so we have to make one.

      LATER
      We could optimize this by making a document with only the
      fields referenced by the Matcher.  We could do this by looking inside
      the Matcher's BSON before it is created, and recording those fields.
      The easiest implementation might be to hold onto an ExpressionDocument
      here, give it pDocument to build the subset of fields, and then
      convert that instead.
    */
    BSONObjBuilder objBuilder;
    pDocument->toBson(&objBuilder);
    BSONObj obj(objBuilder.done());

    return matcher.matches(obj);
}
Example #4
 inline void swap(cow_ptr& other)
 {
     m_ptr.swap(other.m_ptr);
 }
Example #5
File: pipeline.cpp Project: darkiri/mongo
    intrusive_ptr<Pipeline> Pipeline::parseCommand(
        string &errmsg, BSONObj &cmdObj,
        const intrusive_ptr<ExpressionContext> &pCtx) {
        intrusive_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
        vector<BSONElement> pipeline;

        /* gather the specification for the aggregation */
        for(BSONObj::iterator cmdIterator = cmdObj.begin();
                cmdIterator.more(); ) {
            BSONElement cmdElement(cmdIterator.next());
            const char *pFieldName = cmdElement.fieldName();

            // ignore top-level fields prefixed with $. They are for the command processor, not us.
            if (pFieldName[0] == '$') {
                continue;
            }

            /* look for the aggregation command; its value is the collection name */
            if (!strcmp(pFieldName, commandName)) {
                pPipeline->collectionName = cmdElement.String();
                continue;
            }

            /* check for the pipeline specification */
            if (!strcmp(pFieldName, pipelineName)) {
                pipeline = cmdElement.Array();
                continue;
            }

            /* check for explain option */
            if (!strcmp(pFieldName, explainName)) {
                pPipeline->explain = cmdElement.Bool();
                continue;
            }

            /* if the request came from the router, we're in a shard */
            if (!strcmp(pFieldName, fromRouterName)) {
                pCtx->setInShard(cmdElement.Bool());
                continue;
            }

            /* check for debug options */
            if (!strcmp(pFieldName, splitMongodPipelineName)) {
                pPipeline->splitMongodPipeline = true;
                continue;
            }

            /* we didn't recognize a field in the command */
            ostringstream sb;
            sb <<
               "unrecognized field \"" <<
               cmdElement.fieldName() << "\"";
            errmsg = sb.str();
            return intrusive_ptr<Pipeline>();
        }

        /*
          If we get here, we've harvested the fields we expect for a pipeline.

          Set up the specified document source pipeline.
        */
        SourceContainer& sources = pPipeline->sources; // shorthand

        /* iterate over the steps in the pipeline */
        const size_t nSteps = pipeline.size();
        for(size_t iStep = 0; iStep < nSteps; ++iStep) {
            /* pull out the pipeline element as an object */
            BSONElement pipeElement(pipeline[iStep]);
            uassert(15942, str::stream() << "pipeline element " <<
                    iStep << " is not an object",
                    pipeElement.type() == Object);
            BSONObj bsonObj(pipeElement.Obj());

            // Parse a pipeline stage from 'bsonObj'.
            uassert(16435, "A pipeline stage specification object must contain exactly one field.",
                    bsonObj.nFields() == 1);
            BSONElement stageSpec = bsonObj.firstElement();
            const char* stageName = stageSpec.fieldName();

            // Create a DocumentSource pipeline stage from 'stageSpec'.
            StageDesc key;
            key.pName = stageName;
            const StageDesc* pDesc = (const StageDesc*)
                    bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),
                            stageDescCmp);

            uassert(16436,
                    str::stream() << "Unrecognized pipeline stage name: '" << stageName << "'",
                    pDesc);
            intrusive_ptr<DocumentSource> stage = (*pDesc->pFactory)(&stageSpec, pCtx);
            verify(stage);
            stage->setPipelineStep(iStep);
            sources.push_back(stage);
        }

        /* if there aren't any pipeline stages, there's nothing more to do */
        if (sources.empty())
            return pPipeline;

        /*
          Move filters up where possible.

          CW TODO -- move filters past projections where possible, noting
          the corresponding field renaming.
        */

        /*
          Wherever there is a match immediately following a sort, swap them.
          This means we sort fewer items.  Neither changes the documents in
          the stream, so this transformation shouldn't affect the result.

          We do this first, because then when we coalesce operators below,
          any adjacent matches will be combined.
         */
        for (size_t srcn = sources.size(), srci = 1; srci < srcn; ++srci) {
            intrusive_ptr<DocumentSource> &pSource = sources[srci];
            if (dynamic_cast<DocumentSourceMatch *>(pSource.get())) {
                intrusive_ptr<DocumentSource> &pPrevious = sources[srci - 1];
                if (dynamic_cast<DocumentSourceSort *>(pPrevious.get())) {
                    /* swap this item with the previous */
                    intrusive_ptr<DocumentSource> pTemp(pPrevious);
                    pPrevious = pSource;
                    pSource = pTemp;
                }
            }
        }

        /* Move limits in front of skips. This is better for sharding since,
         * currently, we can only split the pipeline at a single source,
         * and it is better to limit the results coming from each shard.
         */
        for(int i = sources.size() - 1; i >= 1 /* not looking at 0 */; i--) {
            DocumentSourceLimit* limit =
                dynamic_cast<DocumentSourceLimit*>(sources[i].get());
            DocumentSourceSkip* skip =
                dynamic_cast<DocumentSourceSkip*>(sources[i-1].get());
            if (limit && skip) {
                // Increase limit by skip since the skipped docs now pass through the $limit
                limit->setLimit(limit->getLimit() + skip->getSkip());
                swap(sources[i], sources[i-1]);

                // Start at back again. This is needed to handle cases with more than 1 $limit
                // (S means skip, L means limit)
                //
                // These two would work without second pass (assuming back to front ordering)
                // SL   -> LS
                // SSL  -> LSS
                //
                // The following cases need a second pass to handle the second limit
                // SLL  -> LLS
                // SSLL -> LLSS
                // SLSL -> LLSS
                i = sources.size(); // decremented before next pass
            }
        }

        /*
          Coalesce adjacent filters where possible.  Two adjacent filters
          are equivalent to one filter whose predicate is the conjunction of
          the two original filters' predicates.  For now, capture this by
          giving any DocumentSource the option to absorb its successor; this
          will also allow adjacent projections to coalesce when possible.

          Run through the DocumentSources, and give each one the opportunity
          to coalesce with its successor.  If successful, remove the
          successor.

          Move all document sources to a temporary list.
        */
        SourceContainer tempSources;
        sources.swap(tempSources);

        /* move the first one to the final list */
        sources.push_back(tempSources[0]);

        /* run through the sources, coalescing them or keeping them */
        for (size_t tempn = tempSources.size(), tempi = 1; tempi < tempn; ++tempi) {
            /*
              If we can't coalesce the source with the last, then move it
              to the final list, and make it the new last.  (If we succeeded,
              then we're still on the same last, and there's no need to move
              or do anything with the source -- the destruction of tempSources
              will take care of the rest.)
            */
            intrusive_ptr<DocumentSource> &pLastSource = sources.back();
            intrusive_ptr<DocumentSource> &pTemp = tempSources[tempi];
            verify(pTemp && pLastSource);
            if (!pLastSource->coalesce(pTemp))
                sources.push_back(pTemp);
        }

        /* optimize the elements in the pipeline */
        for(SourceContainer::iterator iter(sources.begin()),
                                      listEnd(sources.end());
                                    iter != listEnd;
                                    ++iter) {
            if (!*iter) {
                errmsg = "Pipeline received empty document as argument";
                return intrusive_ptr<Pipeline>();
            }

            (*iter)->optimize();
        }

        return pPipeline;
    }
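A note on the $limit/$skip swap above: moving the $limit in front of the $skip is only sound because the limit grows to l + s, so both orders select exactly the documents at positions [s, min(s + l, n)) of an n-document stream. A self-contained check of that identity (plain C++, nothing MongoDB-specific), comparing the counts the two orderings produce:

#include <cassert>

// Documents surviving skip(s) then limit(l) over an n-document stream.
long long skipThenLimit(long long n, long long s, long long l) {
    long long afterSkip = n > s ? n - s : 0;
    return afterSkip < l ? afterSkip : l;
}

// Documents surviving limit(l + s) then skip(s), i.e. the swapped order.
long long limitThenSkip(long long n, long long s, long long l) {
    long long afterLimit = n < l + s ? n : l + s; // the swapped limit is l + s
    long long remaining = afterLimit - s;
    return remaining > 0 ? remaining : 0;
}

int main() {
    for (long long n = 0; n < 50; ++n)
        for (long long s = 0; s < 10; ++s)
            for (long long l = 1; l < 10; ++l)
                assert(skipThenLimit(n, s, l) == limitThenSkip(n, s, l));
}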
Example #6
inline typename boost::interprocess::intrusive_ptr<T, VP>::pointer
   get_pointer(intrusive_ptr<T, VP> p)
{  return p.get();   }
Example #7
//!Returns a != b.get().
//!Does not throw
template<class T, class VP> inline
bool operator!=(const typename intrusive_ptr<T, VP>::pointer &a,
                       intrusive_ptr<T, VP> const & b)
{  return a != b.get(); }
Example #8
//!Returns a.get() != b.get().
//!Does not throw
template<class T, class U, class VP> inline
bool operator!=(intrusive_ptr<T, VP> const & a,
                intrusive_ptr<U, VP> const & b)
{  return a.get() != b.get(); }
Example #9
 template<class U> intrusive_ptr(intrusive_ptr<U> const & rhs): p_(rhs.get())
 {
     if(p_ != 0) intrusive_ptr_add_ref(p_);
 }
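The converting constructor above relies on the intrusive-counting protocol: the pointee supplies intrusive_ptr_add_ref and intrusive_ptr_release, which the smart pointer finds by argument-dependent lookup. A minimal conforming class, as a sketch using boost::intrusive_ptr; Node is illustrative, not taken from the sources quoted here:

#include <boost/intrusive_ptr.hpp>
#include <atomic>

class Node {
public:
    Node() : _refs(0) {}
    virtual ~Node() = default;

private:
    mutable std::atomic<int> _refs;
    // Found via ADL; add_ref is what the converting constructor above
    // calls whenever the stored pointer is non-null.
    friend void intrusive_ptr_add_ref(const Node* p) { ++p->_refs; }
    friend void intrusive_ptr_release(const Node* p) {
        if (--p->_refs == 0)
            delete p;
    }
};

int main() {
    boost::intrusive_ptr<Node> a(new Node); // count: 1
    boost::intrusive_ptr<Node> b(a);        // copy bumps the count to 2
    a.reset();                              // back to 1
}                                           // b releases; count 0 deletes Node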
Example #10
File: command.hpp Project: ajac/libcppa
 void handle_results () {
     m_handle.deliver(m_actor_facade->m_map_result(m_result));
 }
Example #11
std::shared_ptr<PlanExecutor> PipelineD::prepareExecutor(
    OperationContext* txn,
    Collection* collection,
    const NamespaceString& nss,
    const intrusive_ptr<Pipeline>& pipeline,
    const intrusive_ptr<ExpressionContext>& expCtx,
    const intrusive_ptr<DocumentSourceSort>& sortStage,
    const DepsTracker& deps,
    const BSONObj& queryObj,
    BSONObj* sortObj,
    BSONObj* projectionObj) {
    // The query system has the potential to use an index to provide a non-blocking sort and/or to
    // use the projection to generate a covered plan. If this is possible, it is more efficient to
    // let the query system handle those parts of the pipeline. If not, it is more efficient to use
    // a $sort and/or a ParsedDeps object. Thus, we will determine whether the query system can
    // provide a non-blocking sort or a covered projection before we commit to a PlanExecutor.
    //
    // To determine if the query system can provide a non-blocking sort, we pass the
    // NO_BLOCKING_SORT planning option, meaning 'getExecutor' will not produce a PlanExecutor if
    // the query system would use a blocking sort stage.
    //
    // To determine if the query system can provide a covered projection, we pass the
    // NO_UNCOVERED_PROJECTS planning option, meaning 'getExecutor' will not produce a PlanExecutor
    // if the query system would need to fetch the document to do the projection. The following
    // logic uses the above strategies, with multiple calls to 'attemptToGetExecutor' to determine
    // the most efficient way to handle the $sort and $project stages.
    //
    // LATER - We should attempt to determine if the results from the query are returned in some
    // order so we can then apply other optimizations there are tickets for, such as SERVER-4507.
    size_t plannerOpts = QueryPlannerParams::DEFAULT | QueryPlannerParams::NO_BLOCKING_SORT;

    // Include the shard filter only if this node has sharding metadata for the collection.
    // When connecting directly to a shard rather than through a mongos, orphaned documents
    // are deliberately not filtered out.
    if (ShardingState::get(txn)->needCollectionMetadata(txn, nss.ns())) {
        plannerOpts |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
    }

    if (deps.hasNoRequirements()) {
        // If we don't need any fields from the input document, performing a count is faster, and
        // will output empty documents, which is okay.
        plannerOpts |= QueryPlannerParams::IS_COUNT;
    }

    // The only way to get a text score is to let the query system handle the projection. In all
    // other cases, unless the query system can do an index-covered projection and avoid going to
    // the raw record at all, it is faster to have ParsedDeps filter the fields we need.
    if (!deps.needTextScore) {
        plannerOpts |= QueryPlannerParams::NO_UNCOVERED_PROJECTIONS;
    }

    std::shared_ptr<PlanExecutor> exec;

    BSONObj emptyProjection;
    if (sortStage) {
        // See if the query system can provide a non-blocking sort.
        auto swExecutorSort = attemptToGetExecutor(
            txn, collection, expCtx, queryObj, emptyProjection, *sortObj, plannerOpts);

        if (swExecutorSort.isOK()) {
            // Success! Now see if the query system can also cover the projection.
            auto swExecutorSortAndProj = attemptToGetExecutor(
                txn, collection, expCtx, queryObj, *projectionObj, *sortObj, plannerOpts);

            if (swExecutorSortAndProj.isOK()) {
                // Success! We have a non-blocking sort and a covered projection.
                exec = std::move(swExecutorSortAndProj.getValue());
            } else {
                // The query system couldn't cover the projection.
                *projectionObj = BSONObj();
                exec = std::move(swExecutorSort.getValue());
            }

            // We know the sort is being handled by the query system, so remove the $sort stage.
            pipeline->sources.pop_front();

            if (sortStage->getLimitSrc()) {
                // We need to reinsert the coalesced $limit after removing the $sort.
                pipeline->sources.push_front(sortStage->getLimitSrc());
            }
            return exec;
        }
        // The query system can't provide a non-blocking sort.
        *sortObj = BSONObj();
    }

    // Either there was no $sort stage, or the query system could not provide a non-blocking
    // sort.
    dassert(sortObj->isEmpty());

    // See if the query system can cover the projection.
    auto swExecutorProj = attemptToGetExecutor(
        txn, collection, expCtx, queryObj, *projectionObj, *sortObj, plannerOpts);
    if (swExecutorProj.isOK()) {
        // Success! We have a covered projection.
        return std::move(swExecutorProj.getValue());
    }

    // The query system couldn't provide a covered projection.
    *projectionObj = BSONObj();
    // If this doesn't work, nothing will.
    return uassertStatusOK(attemptToGetExecutor(
        txn, collection, expCtx, queryObj, *projectionObj, *sortObj, plannerOpts));
}
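Stripped of the MongoDB specifics, prepareExecutor is a fallback chain: try the most constrained plan first (non-blocking sort plus covered projection), then progressively relax (sort only, projection only, neither) and keep the first attempt that succeeds. A generic sketch of that pattern; Plan and the attempt lambdas are illustrative stand-ins, not query-system types:

#include <functional>
#include <optional>
#include <string>
#include <vector>

struct Plan { std::string description; };

// Attempt candidate plans from most to least constrained; return the
// first that succeeds, or nullopt if none do.
std::optional<Plan> firstViable(
        const std::vector<std::function<std::optional<Plan>()>>& attempts) {
    for (const auto& attempt : attempts)
        if (auto plan = attempt())
            return plan;
    return std::nullopt;
}

int main() {
    auto plan = firstViable({
        []() -> std::optional<Plan> { return std::nullopt; },      // sort + covered projection: fails
        []() -> std::optional<Plan> { return Plan{"sort only"}; }, // non-blocking sort: succeeds
        []() -> std::optional<Plan> { return Plan{"collection scan"}; },
    });
    // plan->description == "sort only"
}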
Example #12
shared_ptr<PlanExecutor> PipelineD::prepareCursorSource(
    OperationContext* txn,
    Collection* collection,
    const NamespaceString& nss,
    const intrusive_ptr<Pipeline>& pPipeline,
    const intrusive_ptr<ExpressionContext>& pExpCtx) {
    // We will be modifying the source vector as we go.
    Pipeline::SourceContainer& sources = pPipeline->sources;

    // Inject a MongodImplementation to sources that need them.
    for (auto&& source : sources) {
        DocumentSourceNeedsMongod* needsMongod =
            dynamic_cast<DocumentSourceNeedsMongod*>(source.get());
        if (needsMongod) {
            needsMongod->injectMongodInterface(std::make_shared<MongodImplementation>(pExpCtx));
        }
    }

    if (!sources.empty()) {
        if (sources.front()->isValidInitialSource()) {
            if (dynamic_cast<DocumentSourceMergeCursors*>(sources.front().get())) {
                // Enable the hooks for setting up authentication on the subsequent internal
                // connections we are going to create. This would normally have been done
                // when SetShardVersion was called, but since SetShardVersion is never called
                // on secondaries, this is needed.
                ShardedConnectionInfo::addHook();
            }
            return std::shared_ptr<PlanExecutor>();  // don't need a cursor
        }

        auto sampleStage = dynamic_cast<DocumentSourceSample*>(sources.front().get());
        // Optimize an initial $sample stage if possible.
        if (collection && sampleStage) {
            const long long sampleSize = sampleStage->getSampleSize();
            const long long numRecords = collection->getRecordStore()->numRecords(txn);
            auto exec = createRandomCursorExecutor(collection, txn, sampleSize, numRecords);
            if (exec) {
                // Replace $sample stage with $sampleFromRandomCursor stage.
                sources.pop_front();
                std::string idString = collection->ns().isOplog() ? "ts" : "_id";
                sources.emplace_front(DocumentSourceSampleFromRandomCursor::create(
                    pExpCtx, sampleSize, idString, numRecords));

                const BSONObj initialQuery;
                return addCursorSource(
                    pPipeline, pExpCtx, exec, pPipeline->getDependencies(initialQuery));
            }
        }
    }

    // Look for an initial match. This works whether we got an initial query or not. If not, it
    // results in a "{}" query, which will be what we want in that case.
    const BSONObj queryObj = pPipeline->getInitialQuery();
    if (!queryObj.isEmpty()) {
        if (dynamic_cast<DocumentSourceMatch*>(sources.front().get())) {
            // If a $match query is pulled into the cursor, the $match is redundant, and can be
            // removed from the pipeline.
            sources.pop_front();
        } else {
            // A $geoNear stage, the only other stage that can produce an initial query, is also
            // a valid initial stage and will be handled above.
            MONGO_UNREACHABLE;
        }
    }

    // Find the set of fields in the source documents depended on by this pipeline.
    DepsTracker deps = pPipeline->getDependencies(queryObj);

    BSONObj projForQuery = deps.toProjection();

    /*
      Look for an initial sort; we'll try to add this to the
      Cursor we create.  If we're successful in doing that (further down),
      we'll remove the $sort from the pipeline, because the documents
      will already come sorted in the specified order as a result of the
      index scan.
    */
    intrusive_ptr<DocumentSourceSort> sortStage;
    BSONObj sortObj;
    if (!sources.empty()) {
        sortStage = dynamic_cast<DocumentSourceSort*>(sources.front().get());
        if (sortStage) {
            // build the sort key
            sortObj = sortStage->serializeSortKey(/*explain*/ false).toBson();
        }
    }

    // Create the PlanExecutor.
    auto exec = prepareExecutor(txn,
                                collection,
                                nss,
                                pPipeline,
                                pExpCtx,
                                sortStage,
                                deps,
                                queryObj,
                                &sortObj,
                                &projForQuery);

    return addCursorSource(pPipeline, pExpCtx, exec, deps, queryObj, sortObj, projForQuery);
}
Example #13
 inline T const& operator*() const { return *m_ptr.get(); }
Example #14
 inline T const* operator->() const { return m_ptr.get(); }
Example #15
 inline T const* get() const { return m_ptr.get(); }
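Examples #4 and #13-#15 are fragments of a copy-on-write pointer: every const accessor hands out the shared object, and only a mutating access would clone. A minimal sketch of such a cow_ptr, assuming std::shared_ptr-based sharing; the original m_ptr may well be a different smart pointer:

#include <memory>

template <class T>
class cow_ptr {
public:
    explicit cow_ptr(T* p) : m_ptr(p) {}

    void swap(cow_ptr& other) { m_ptr.swap(other.m_ptr); }

    // Const access never copies; readers share one object.
    T const& operator*() const { return *m_ptr.get(); }
    T const* operator->() const { return m_ptr.get(); }
    T const* get() const { return m_ptr.get(); }

    // The write path: detach from other owners before handing out a
    // mutable pointer. (Name is illustrative.)
    T* mutable_get() {
        if (m_ptr.use_count() > 1)
            m_ptr = std::make_shared<T>(*m_ptr); // clone on first write
        return m_ptr.get();
    }

private:
    std::shared_ptr<T> m_ptr;
};

int main() {
    cow_ptr<int> a(new int(1));
    cow_ptr<int> b(a);    // shares the int with a
    *b.mutable_get() = 2; // b clones; a still sees 1
}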
Example #16
template<class T> void swap(intrusive_ptr<T> & lhs, intrusive_ptr<T> & rhs)
{
    lhs.swap(rhs);
}
Example #17
template<class T, class U> intrusive_ptr<T> dynamic_pointer_cast(intrusive_ptr<U> const & p)
{
    return dynamic_cast<T *>(p.get());
}
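Usage of the cast above: the raw dynamic_cast result converts back into an owning intrusive_ptr, and that implicit conversion is what re-applies the reference count, so the cast yields an owning pointer on success and a null pointer on failure. A self-contained sketch, assuming Boost's dynamic_pointer_cast overload for intrusive_ptr; Base, Derived, and Other are written for this example:

#include <boost/intrusive_ptr.hpp>

struct Base {
    int refs = 0;
    virtual ~Base() = default;
    friend void intrusive_ptr_add_ref(Base* p) { ++p->refs; }
    friend void intrusive_ptr_release(Base* p) {
        if (--p->refs == 0)
            delete p;
    }
};
struct Derived : Base {};
struct Other : Base {};

int main() {
    boost::intrusive_ptr<Base> b(new Derived);
    boost::intrusive_ptr<Derived> d = boost::dynamic_pointer_cast<Derived>(b);
    boost::intrusive_ptr<Other> o = boost::dynamic_pointer_cast<Other>(b);
    // d is non-null and shares ownership with b; o is null.
}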
Example #18
    bool PipelineCommand::executePipeline(
        BSONObjBuilder &result, string &errmsg, const string &ns,
        intrusive_ptr<Pipeline> &pPipeline,
        intrusive_ptr<DocumentSourceCursor> &pSource,
        intrusive_ptr<ExpressionContext> &pCtx) {

        /* this is the normal non-debug path */
        if (!pPipeline->getSplitMongodPipeline())
            return pPipeline->run(result, errmsg, pSource);

        /* setup as if we're in the router */
        pCtx->setInRouter(true);

        /*
          Here, we'll split the pipeline in the same way we would for sharding,
          for testing purposes.

          Run the shard pipeline first, then feed the results into the remains
          of the existing pipeline.

          Start by splitting the pipeline.
         */
        intrusive_ptr<Pipeline> pShardSplit(
            pPipeline->splitForSharded());

        /*
          Write the split pipeline as we would in order to transmit it to
          the shard servers.
        */
        BSONObjBuilder shardBuilder;
        pShardSplit->toBson(&shardBuilder);
        BSONObj shardBson(shardBuilder.done());

        DEV (log() << "\n---- shardBson\n" <<
             shardBson.jsonString(Strict, 1) << "\n----\n").flush();

        /* for debugging purposes, show what the pipeline now looks like */
        DEV {
            BSONObjBuilder pipelineBuilder;
            pPipeline->toBson(&pipelineBuilder);
            BSONObj pipelineBson(pipelineBuilder.done());
            (log() << "\n---- pipelineBson\n" <<
             pipelineBson.jsonString(Strict, 1) << "\n----\n").flush();
        }

        /* on the shard servers, create the local pipeline */
        intrusive_ptr<ExpressionContext> pShardCtx(
            ExpressionContext::create(&InterruptStatusMongod::status));
        intrusive_ptr<Pipeline> pShardPipeline(
            Pipeline::parseCommand(errmsg, shardBson, pShardCtx));
        if (!pShardPipeline.get()) {
            return false;
        }

        /* run the shard pipeline */
        BSONObjBuilder shardResultBuilder;
        string shardErrmsg;
        pShardPipeline->run(shardResultBuilder, shardErrmsg, pSource);
        BSONObj shardResult(shardResultBuilder.done());

        /* pick out the shard result, and prepare to read it */
        intrusive_ptr<DocumentSourceBsonArray> pShardSource;
        BSONObjIterator shardIter(shardResult);
        while(shardIter.more()) {
            BSONElement shardElement(shardIter.next());
            const char *pFieldName = shardElement.fieldName();

            if ((strcmp(pFieldName, "result") == 0) ||
                (strcmp(pFieldName, "serverPipeline") == 0)) {
                pShardSource = DocumentSourceBsonArray::create(
                    &shardElement, pCtx);

                /*
                  Connect the output of the shard pipeline with the mongos
                  pipeline that will merge the results.
                */
                return pPipeline->run(result, errmsg, pShardSource);
            }
        }

        /* NOTREACHED */
        verify(false);
        return false;
    }
Example #19
BSONObj toBson(const intrusive_ptr<DocumentSource>& source) {
    vector<Value> arr;
    source->serializeToArray(arr);
    ASSERT_EQUALS(arr.size(), 1UL);
    return arr[0].getDocument().toBson();
}
Example #20
    boost::shared_ptr<Runner> PipelineD::prepareCursorSource(
            Collection* collection,
            const intrusive_ptr<Pipeline>& pPipeline,
            const intrusive_ptr<ExpressionContext>& pExpCtx) {
        // get the full "namespace" name
        const string& fullName = pExpCtx->ns.ns();
        pExpCtx->opCtx->lockState()->assertAtLeastReadLocked(fullName);

        // We will be modifying the source vector as we go
        Pipeline::SourceContainer& sources = pPipeline->sources;

        // Inject a MongodImplementation to sources that need them.
        for (size_t i = 0; i < sources.size(); i++) {
            DocumentSourceNeedsMongod* needsMongod =
                dynamic_cast<DocumentSourceNeedsMongod*>(sources[i].get());
            if (needsMongod) {
                needsMongod->injectMongodInterface(
                    boost::make_shared<MongodImplementation>(pExpCtx));
            }
        }

        if (!sources.empty() && sources.front()->isValidInitialSource()) {
            if (dynamic_cast<DocumentSourceMergeCursors*>(sources.front().get())) {
                // Enable the hooks for setting up authentication on the subsequent internal
                // connections we are going to create. This would normally have been done
                // when SetShardVersion was called, but since SetShardVersion is never called
                // on secondaries, this is needed.
                ShardedConnectionInfo::addHook();
            }
            return boost::shared_ptr<Runner>(); // don't need a cursor
        }


        // Look for an initial match. This works whether we got an initial query or not.
        // If not, it results in a "{}" query, which will be what we want in that case.
        const BSONObj queryObj = pPipeline->getInitialQuery();
        if (!queryObj.isEmpty()) {
            // This will get built in to the Cursor we'll create, so
            // remove the match from the pipeline
            sources.pop_front();
        }

        // Find the set of fields in the source documents depended on by this pipeline.
        const DepsTracker deps = pPipeline->getDependencies(queryObj);

        // Passing query an empty projection since it is faster to use ParsedDeps::extractFields().
        // This will need to change to support covering indexes (SERVER-12015). There is an
        // exception for textScore since that can only be retrieved by a query projection.
        const BSONObj projectionForQuery = deps.needTextScore ? deps.toProjection() : BSONObj();

        /*
          Look for an initial sort; we'll try to add this to the
          Cursor we create.  If we're successful in doing that (further down),
          we'll remove the $sort from the pipeline, because the documents
          will already come sorted in the specified order as a result of the
          index scan.
        */
        intrusive_ptr<DocumentSourceSort> sortStage;
        BSONObj sortObj;
        if (!sources.empty()) {
            sortStage = dynamic_cast<DocumentSourceSort*>(sources.front().get());
            if (sortStage) {
                // build the sort key
                sortObj = sortStage->serializeSortKey(/*explain*/false).toBson();
            }
        }

        // Create the Runner.
        //
        // If we try to create a Runner that includes both the match and the
        // sort, and the two are incompatible wrt the available indexes, then
        // we don't get a Runner back.
        //
        // So we try to use both first.  If that fails, try again, without the
        // sort.
        //
        // If we don't have a sort, jump straight to just creating a Runner
        // without the sort.
        //
        // If we are able to incorporate the sort into the Runner, remove it
        // from the head of the pipeline.
        //
        // LATER - we should be able to find this out before we create the
        // cursor.  Either way, we can then apply other optimizations there
        // are tickets for, such as SERVER-4507.
        const size_t runnerOptions = QueryPlannerParams::DEFAULT
                                   | QueryPlannerParams::INCLUDE_SHARD_FILTER
                                   | QueryPlannerParams::NO_BLOCKING_SORT
                                   ;
        boost::shared_ptr<Runner> runner;
        bool sortInRunner = false;

        const WhereCallbackReal whereCallback(pExpCtx->ns.db());

        if (sortStage) {
            CanonicalQuery* cq;
            Status status =
                CanonicalQuery::canonicalize(pExpCtx->ns,
                                             queryObj,
                                             sortObj,
                                             projectionForQuery,
                                             &cq,
                                             whereCallback);
            Runner* rawRunner;
            if (status.isOK() && getRunner(collection, cq, &rawRunner, runnerOptions).isOK()) {
                // success: The Runner will handle sorting for us using an index.
                runner.reset(rawRunner);
                sortInRunner = true;

                sources.pop_front();
                if (sortStage->getLimitSrc()) {
                    // need to reinsert coalesced $limit after removing $sort
                    sources.push_front(sortStage->getLimitSrc());
                }
            }
        }

        if (!runner.get()) {
            const BSONObj noSort;
            CanonicalQuery* cq;
            uassertStatusOK(
                CanonicalQuery::canonicalize(pExpCtx->ns,
                                             queryObj,
                                             noSort,
                                             projectionForQuery,
                                             &cq,
                                             whereCallback));

            Runner* rawRunner;
            uassertStatusOK(getRunner(collection, cq, &rawRunner, runnerOptions));
            runner.reset(rawRunner);
        }


        // DocumentSourceCursor expects a yielding Runner that has had its state saved.
        runner->saveState();

        // Put the Runner into a DocumentSourceCursor and add it to the front of the pipeline.
        intrusive_ptr<DocumentSourceCursor> pSource =
            DocumentSourceCursor::create(fullName, runner, pExpCtx);

        // Note the query, sort, and projection for explain.
        pSource->setQuery(queryObj);
        if (sortInRunner)
            pSource->setSort(sortObj);

        pSource->setProjection(deps.toProjection(), deps.toParsedDeps());

        while (!sources.empty() && pSource->coalesce(sources.front())) {
            sources.pop_front();
        }

        pPipeline->addInitialSource(pSource);

        return runner;
    }
Example #21
//!Returns a.get() == b.
//!Does not throw
template<class T, class VP> inline
bool operator==(intrusive_ptr<T, VP> const & a,
                       const typename intrusive_ptr<T, VP>::pointer &b)
{  return a.get() == b; }
Example #22
template<class T, class U> inline bool operator==(intrusive_ptr<T> const & a, intrusive_ptr<U> const & b)
{
    return a.get() == b.get();
}
Example #23
//!Exchanges the contents of the two intrusive_ptrs.
//!Does not throw
template<class T, class VP> inline
void swap(intrusive_ptr<T, VP> & lhs,
          intrusive_ptr<T, VP> & rhs)
{  lhs.swap(rhs); }
Example #24
template<class T> inline bool operator==(intrusive_ptr<T> const & a, T * b)
{
    return a.get() == b;
}
Example #25
 //!Constructor from related. Copies the internal pointer and if "p" is not
 //!zero calls intrusive_ptr_add_ref(get_pointer(p)). Does not throw
 template<class U> intrusive_ptr
    (intrusive_ptr<U, VP> const & rhs)
    :  m_ptr(rhs.get())
 {
    if(m_ptr != 0) intrusive_ptr_add_ref(ipcdetail::get_pointer(m_ptr));
 }
Example #26
template<class T> inline bool operator==(T * a, intrusive_ptr<T> const & b)
{
    return a == b.get();
}
Example #27
File: value.cpp Project: milkie/mongo
    int Value::compare(const intrusive_ptr<const Value> &rL,
                       const intrusive_ptr<const Value> &rR) {
        BSONType lType = rL->getType();
        BSONType rType = rR->getType();

        /*
          Special handling for Undefined and NULL values; these are types,
          so it's easier to handle them here before we go below to handle
          values of the same types.  This allows us to compare Undefined and
          NULL values with everything else.  As coded now:
          (*) Undefined is less than everything except itself (which is equal)
          (*) NULL is less than everything except Undefined and itself
         */
        if (lType == Undefined) {
            if (rType == Undefined)
                return 0;

            /* if rType is anything else, the left value is less */
            return -1;
        }
        
        if (lType == jstNULL) {
            if (rType == Undefined)
                return 1;
            if (rType == jstNULL)
                return 0;

            return -1;
        }

        if ((rType == Undefined) || (rType == jstNULL)) {
            /*
              We know the left value isn't Undefined, because of the above.
              Count a NULL value as greater than an undefined one.
            */
            return 1;
        }

        /* if the comparisons are numeric, prepare to promote the values */
        if (((lType == NumberDouble) || (lType == NumberLong) ||
             (lType == NumberInt)) &&
            ((rType == NumberDouble) || (rType == NumberLong) ||
             (rType == NumberInt))) {

            /* if the biggest type of either is a double, compare as doubles */
            if ((lType == NumberDouble) || (rType == NumberDouble)) {
                const double left = rL->getDouble();
                const double right = rR->getDouble();
                if (left < right)
                    return -1;
                if (left > right)
                    return 1;
                return 0;
            }

            /* if the biggest type of either is a long, compare as longs */
            if ((lType == NumberLong) || (rType == NumberLong)) {
                const long long left = rL->getLong();
                const long long right = rR->getLong();
                if (left < right)
                    return -1;
                if (left > right)
                    return 1;
                return 0;
            }

            /* if we got here, they must both be ints; compare as ints */
            {
                const int left = rL->getInt();
                const int right = rR->getInt();
                if (left < right)
                    return -1;
                if (left > right)
                    return 1;
                return 0;
            }
        }

        // CW TODO for now, only compare like values
        uassert(16016, str::stream() <<
                "can't compare values of BSON types " << typeName(lType) <<
                " and " << typeName(rType),
                lType == rType);

        switch(lType) {
        case NumberDouble:
        case NumberInt:
        case NumberLong:
            /* these types were handled above */
            verify(false);

        case String:
            return rL->stringValue.compare(rR->stringValue);

        case Object:
            return Document::compare(rL->getDocument(), rR->getDocument());

        case Array: {
            intrusive_ptr<ValueIterator> pli(rL->getArray());
            intrusive_ptr<ValueIterator> pri(rR->getArray());

            while(true) {
                /* have we run out of left array? */
                if (!pli->more()) {
                    if (!pri->more())
                        return 0; // the arrays are the same length

                    return -1; // the left array is shorter
                }

                /* have we run out of right array? */
                if (!pri->more())
                    return 1; // the right array is shorter

                /* compare the two corresponding elements */
                intrusive_ptr<const Value> plv(pli->next());
                intrusive_ptr<const Value> prv(pri->next());
                const int cmp = Value::compare(plv, prv);
                if (cmp)
                    return cmp; // values are unequal
            }

            /* NOTREACHED */
            verify(false);
            break;
        }

        case BinData:
        case Symbol:
        case CodeWScope:
            uassert(16017, str::stream() <<
                    "comparisons of values of BSON type " << typeName(lType) <<
                    " are not supported", false);
            // pBuilder->appendBinData(fieldName, ...);
            break;

        case jstOID:
            if (rL->oidValue < rR->oidValue)
                return -1;
            if (rL->oidValue == rR->oidValue)
                return 0;
            return 1;

        case Bool:
            if (rL->simple.boolValue == rR->simple.boolValue)
                return 0;
            if (rL->simple.boolValue)
                return 1;
            return -1;

        case Date: {
            // need to convert to long long to handle dates before 1970
            // see BSONElement::compareElementValues
            long long l = static_cast<long long>(rL->dateValue.millis);
            long long r = static_cast<long long>(rR->dateValue.millis);
            if (l < r)
                return -1;
            if (l > r)
                return 1;
            return 0;
        }

        case RegEx:
            return rL->stringValue.compare(rR->stringValue);

        case Timestamp:
            if (rL->timestampValue < rR->timestampValue)
                return -1;
            if (rL->timestampValue > rR->timestampValue)
                return 1;
            return 0;

        case Undefined:
        case jstNULL:
            return 0; // treat two Undefined or NULL values as equal

            /* these shouldn't happen in this context */
        case MinKey:
        case EOO:
        case DBRef:
        case Code:
        case MaxKey:
            verify(false);
            break;
        } // switch(lType)

        /* NOTREACHED */
        return 0;
    }
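The numeric branch above implements a promotion ladder: if either side is a double, compare as doubles; else if either side is a 64-bit long, compare as longs; else compare as ints. The same ladder in isolation; Num is an illustrative tagged union, not a MongoDB type:

#include <cassert>

struct Num {
    enum Kind { Int, Long, Double } kind;
    double d;    // valid when kind == Double
    long long l; // valid when kind == Long
    int i;       // valid when kind == Int

    double asDouble() const {
        return kind == Double ? d : kind == Long ? double(l) : double(i);
    }
    long long asLong() const { return kind == Long ? l : i; }
};

// The widest type of either operand decides the comparison domain.
int compareNums(const Num& a, const Num& b) {
    if (a.kind == Num::Double || b.kind == Num::Double) {
        double x = a.asDouble(), y = b.asDouble();
        return x < y ? -1 : x > y ? 1 : 0;
    }
    if (a.kind == Num::Long || b.kind == Num::Long) {
        long long x = a.asLong(), y = b.asLong();
        return x < y ? -1 : x > y ? 1 : 0;
    }
    return a.i < b.i ? -1 : a.i > b.i ? 1 : 0;
}

int main() {
    Num two{Num::Int, 0, 0, 2};
    Num twoPointFive{Num::Double, 2.5, 0, 0};
    assert(compareNums(two, twoPointFive) == -1); // compared as doubles
}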
Example #28
template<class T> inline bool operator!=(intrusive_ptr<T> const & a, intrusive_ptr<T> const & b)
{
    return a.get() != b.get();
}
Example #29
vector<intrusive_ptr<DocumentSource>> DocumentSourceBucket::createFromBson(
    BSONElement elem, const intrusive_ptr<ExpressionContext>& pExpCtx) {
    uassert(40201,
            str::stream() << "Argument to $bucket stage must be an object, but found type: "
                          << typeName(elem.type())
                          << ".",
            elem.type() == BSONType::Object);

    const BSONObj bucketObj = elem.embeddedObject();
    BSONObjBuilder groupObjBuilder;
    BSONObjBuilder switchObjBuilder;

    VariablesIdGenerator idGenerator;
    VariablesParseState vps(&idGenerator);

    vector<Value> boundaryValues;
    BSONElement groupByField;
    Value defaultValue;

    bool outputFieldSpecified = false;
    for (auto&& argument : bucketObj) {
        const auto argName = argument.fieldNameStringData();
        if ("groupBy" == argName) {
            groupByField = argument;

            const bool groupByIsExpressionInObject = groupByField.type() == BSONType::Object &&
                groupByField.embeddedObject().firstElementFieldName()[0] == '$';

            const bool groupByIsPrefixedPath =
                groupByField.type() == BSONType::String && groupByField.valueStringData()[0] == '$';
            uassert(40202,
                    str::stream() << "The $bucket 'groupBy' field must be defined as a $-prefixed "
                                     "path or an expression, but found: "
                                  << groupByField.toString(false, false)
                                  << ".",
                    groupByIsExpressionInObject || groupByIsPrefixedPath);
        } else if ("boundaries" == argName) {
            uassert(
                40200,
                str::stream() << "The $bucket 'boundaries' field must be an array, but found type: "
                              << typeName(argument.type())
                              << ".",
                argument.type() == BSONType::Array);

            for (auto&& boundaryElem : argument.embeddedObject()) {
                auto exprConst = getExpressionConstant(boundaryElem, vps);
                uassert(40191,
                        str::stream() << "The $bucket 'boundaries' field must be an array of "
                                         "constant values, but found value: "
                                      << boundaryElem.toString(false, false)
                                      << ".",
                        exprConst);
                boundaryValues.push_back(exprConst->getValue());
            }

            uassert(40192,
                    str::stream()
                        << "The $bucket 'boundaries' field must have at least 2 values, but found "
                        << boundaryValues.size()
                        << " value(s).",
                    boundaryValues.size() >= 2);

            // Make sure that the boundaries are unique, sorted in ascending order, and have the
            // same canonical type.
            for (size_t i = 1; i < boundaryValues.size(); ++i) {
                Value lower = boundaryValues[i - 1];
                Value upper = boundaryValues[i];
                int lowerCanonicalType = canonicalizeBSONType(lower.getType());
                int upperCanonicalType = canonicalizeBSONType(upper.getType());

                uassert(40193,
                        str::stream() << "All values in the the 'boundaries' option to $bucket "
                                         "must have the same type. Found conflicting types "
                                      << typeName(lower.getType())
                                      << " and "
                                      << typeName(upper.getType())
                                      << ".",
                        lowerCanonicalType == upperCanonicalType);
                uassert(40194,
                        str::stream()
                            << "The 'boundaries' option to $bucket must be sorted, but elements "
                            << i - 1
                            << " and "
                            << i
                            << " are not in ascending order ("
                            << lower.toString()
                            << " is not less than "
                            << upper.toString()
                            << ").",
                        pExpCtx->getValueComparator().evaluate(lower < upper));
            }
        } else if ("default" == argName) {
            // If there is a default, make sure that it parses to a constant expression, then
            // add it to the $switch object.
            auto exprConst = getExpressionConstant(argument, vps);
            uassert(40195,
                    str::stream()
                        << "The $bucket 'default' field must be a constant expression, but found: "
                        << argument.toString(false, false)
                        << ".",
                    exprConst);

            defaultValue = exprConst->getValue();
            defaultValue.addToBsonObj(&switchObjBuilder, "default");
        } else if ("output" == argName) {
            outputFieldSpecified = true;
            uassert(
                40196,
                str::stream() << "The $bucket 'output' field must be an object, but found type: "
                              << typeName(argument.type())
                              << ".",
                argument.type() == BSONType::Object);

            for (auto&& outputElem : argument.embeddedObject()) {
                groupObjBuilder.append(outputElem);
            }
        } else {
            uasserted(40197, str::stream() << "Unrecognized option to $bucket: " << argName << ".");
        }
    }

    const bool isMissingRequiredField = groupByField.eoo() || boundaryValues.empty();
    uassert(40198,
            "$bucket requires 'groupBy' and 'boundaries' to be specified.",
            !isMissingRequiredField);

    Value lowerValue = boundaryValues.front();
    Value upperValue = boundaryValues.back();
    if (canonicalizeBSONType(defaultValue.getType()) ==
        canonicalizeBSONType(lowerValue.getType())) {
        // If the default has the same canonical type as the bucket's boundaries, then make sure the
        // default is less than the lowest boundary or greater than or equal to the highest
        // boundary.
        const auto& valueCmp = pExpCtx->getValueComparator();
        const bool hasValidDefault = valueCmp.evaluate(defaultValue < lowerValue) ||
            valueCmp.evaluate(defaultValue >= upperValue);
        uassert(40199,
                "The $bucket 'default' field must be less than the lowest boundary or greater than "
                "or equal to the highest boundary.",
                hasValidDefault);
    }

    // Make the branches for the $switch expression.
    BSONArrayBuilder branchesBuilder;
    for (size_t i = 1; i < boundaryValues.size(); ++i) {
        Value lower = boundaryValues[i - 1];
        Value upper = boundaryValues[i];
        BSONObj caseExpr =
            BSON("$and" << BSON_ARRAY(BSON("$gte" << BSON_ARRAY(groupByField << lower))
                                      << BSON("$lt" << BSON_ARRAY(groupByField << upper))));
        branchesBuilder.append(BSON("case" << caseExpr << "then" << lower));
    }

    // Add the $switch expression to the group BSON object.
    switchObjBuilder.append("branches", branchesBuilder.arr());
    groupObjBuilder.append("_id", BSON("$switch" << switchObjBuilder.obj()));

    // If no output is specified, add a count field by default.
    if (!outputFieldSpecified) {
        groupObjBuilder.append("count", BSON("$sum" << 1));
    }

    BSONObj groupObj = BSON("$group" << groupObjBuilder.obj());
    BSONObj sortObj = BSON("$sort" << BSON("_id" << 1));

    auto groupSource = DocumentSourceGroup::createFromBson(groupObj.firstElement(), pExpCtx);
    auto sortSource = DocumentSourceSort::createFromBson(sortObj.firstElement(), pExpCtx);

    return {groupSource, sortSource};
}
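To make the construction concrete: with no 'output' and no 'default', a stage such as {$bucket: {groupBy: "$price", boundaries: [0, 10, 20]}} desugars into roughly the following two stages (shape inferred from the builder code above, not captured from a server):

{ $group: { _id: { $switch: { branches: [
    { case: { $and: [ { $gte: [ "$price", 0 ] }, { $lt: [ "$price", 10 ] } ] }, then: 0 },
    { case: { $and: [ { $gte: [ "$price", 10 ] }, { $lt: [ "$price", 20 ] } ] }, then: 10 }
] } }, count: { $sum: 1 } } }
{ $sort: { _id: 1 } }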
Example #30
template<class T> inline bool operator<(intrusive_ptr<T> const & a, intrusive_ptr<T> const & b)
{
    return std::less<T *>()(a.get(), b.get());
}
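A note on the std::less detour in Example #30: applying the built-in < to two pointers that do not point into the same array or object gives an unspecified result, whereas std::less<T*> is guaranteed to impose a strict total order on all pointers. Routing the comparison through std::less is therefore what makes intrusive_ptr safely usable as a key in ordered containers such as std::set and std::map.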