Example #1
BSONObjBuilder& BSONObjBuilderValueStream::operator<<(const BSONElement& e) {
    _builder->appendAs(e, _fieldName);
    _fieldName = StringData();
    return *_builder;
}
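
A minimal usage sketch of the stream operator above, assuming MongoDB's BSONObjBuilder headers are available; the helper name renameField is purely illustrative. Streaming a BSONElement appends its value under whatever field name is currently pending in the stream:

#include "mongo/bson/bsonobjbuilder.h"

// Illustrative helper (not from the MongoDB sources).
mongo::BSONObj renameField() {
    mongo::BSONObjBuilder srcBuilder;
    srcBuilder.append("a", 42);
    mongo::BSONObj src = srcBuilder.obj();

    mongo::BSONObjBuilder b;
    b << "b" << src["a"];  // appendAs: the value stored under "a" is appended as "b"
    return b.obj();        // yields { b: 42 }
}
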
Example #2
File: uuid.cpp  Project: vnvizitiu/mongo
template <>
BSONObjBuilder& BSONObjBuilderValueStream::operator<<<UUID>(UUID value) {
    value.appendToBuilder(_builder, _fieldName);
    _fieldName = StringData();
    return *_builder;
}
Example #3
void BSONObjBuilderValueStream::reset() {
    _fieldName = StringData();
    _subobj.reset();
}
Example #4
    Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery* query,
                                                         mutablebson::Document& doc) const {

        MatchExpression* root = query->root();

        MatchExpression::MatchType rootType = root->matchType();

        // These copies are needed until we apply the modifiers at the end.
        std::vector<BSONObj> copies;

        // We only care about equality and "and"ed equality fields, everything else is ignored
        if (rootType != MatchExpression::EQ && rootType != MatchExpression::AND)
            return Status::OK();

        if (isDocReplacement()) {
            BSONElement idElem = query->getQueryObj().getField("_id");

            // Replacement mods need the _id field copied explicitly.
            if (idElem.ok()) {
                mb::Element elem = doc.makeElement(idElem);
                return doc.root().pushFront(elem);
            }

            return Status::OK();
        }

        // Create a new UpdateDriver to create the base doc from the query
        Options opts;
        opts.logOp = false;
        opts.modOptions = modOptions();

        UpdateDriver insertDriver(opts);
        insertDriver.setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);

        // If we are a single equality match query
        if (root->matchType() == MatchExpression::EQ) {
            EqualityMatchExpression* eqMatch =
                    static_cast<EqualityMatchExpression*>(root);

            const BSONElement matchData = eqMatch->getData();
            BSONElement childElem = matchData;

            // Make copy to new path if not the same field name (for cases like $all)
            if (!root->path().empty() && matchData.fieldNameStringData() != root->path()) {
                BSONObjBuilder copyBuilder;
                copyBuilder.appendAs(eqMatch->getData(), root->path());
                const BSONObj copy = copyBuilder.obj();
                copies.push_back(copy);
                childElem = copy[root->path()];
            }

            // Add this element as a $set modifier
            Status s = insertDriver.addAndParse(modifiertable::MOD_SET,
                                                childElem);
            if (!s.isOK())
                return s;

        }
        else {

            // parse query $set mods, including only equality stuff
            for (size_t i = 0; i < root->numChildren(); ++i) {
                MatchExpression* child = root->getChild(i);
                if (child->matchType() == MatchExpression::EQ) {
                    EqualityMatchExpression* eqMatch =
                            static_cast<EqualityMatchExpression*>(child);

                    const BSONElement matchData = eqMatch->getData();
                    BSONElement childElem = matchData;

                    // Make copy to new path if not the same field name (for cases like $all)
                    if (!child->path().empty() &&
                            matchData.fieldNameStringData() != child->path()) {
                        BSONObjBuilder copyBuilder;
                        copyBuilder.appendAs(eqMatch->getData(), child->path());
                        const BSONObj copy = copyBuilder.obj();
                        copies.push_back(copy);
                        childElem = copy[child->path()];
                    }

                    // Add this element as a $set modifier
                    Status s = insertDriver.addAndParse(modifiertable::MOD_SET,
                                                        childElem);
                    if (!s.isOK())
                        return s;
                }
            }
        }

        // update the document with base field
        Status s = insertDriver.update(StringData(), &doc);
        copies.clear();
        if (!s.isOK()) {
            return Status(ErrorCodes::UnsupportedFormat,
                          str::stream() << "Cannot create base during"
                                           " insert of update. Caused by :"
                                        << s.toString());
        }

        return Status::OK();
    }
Example #5
    TEST(ResourceId, Constructors) {
        ResourceId resIdString(RESOURCE_COLLECTION, std::string("TestDB.collection"));
        ResourceId resIdStringData(RESOURCE_COLLECTION, StringData("TestDB.collection"));

        ASSERT_EQUALS(resIdString, resIdStringData);
    }
Example #6
inline void StringEnumColumn::insert(size_t row_ndx)
{
    insert(row_ndx, m_nullable ? realm::null() : StringData(""));
}
Example #7
// to_str() is used by the integer index. The existing StringIndex is re-used for this
// by making IntegerColumn convert its integers to strings by calling to_str().
template <class T> inline StringData to_str(const T& value)
{
    REALM_STATIC_ASSERT((std::is_same<T, int64_t>::value), "");
    const char* c = reinterpret_cast<const char*>(&value);
    return StringData(c, sizeof(T));
}
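
The returned StringData is only a view over the integer's eight bytes, so the argument must outlive the view. Below is a self-contained stand-in for the same technique, using std::string_view in place of realm::StringData; the name int_key is illustrative, not from the Realm sources:

#include <cstdint>
#include <iostream>
#include <string_view>

// Reinterpret the integer's storage as a fixed-width binary "string" key.
inline std::string_view int_key(const std::int64_t& value)
{
    return std::string_view(reinterpret_cast<const char*>(&value), sizeof(value));
}

int main()
{
    std::int64_t v = 42;
    std::string_view key = int_key(v);  // 8 raw bytes; may contain embedded NULs
    std::cout << key.size() << '\n';    // prints 8
    return 0;
}
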
Example #8
    ProjectionStage::ProjectionStage(const ProjectionStageParams& params,
                                     WorkingSet* ws,
                                     PlanStage* child)
        : _ws(ws),
          _child(child),
          _projImpl(params.projImpl) {

        if (ProjectionStageParams::NO_FAST_PATH == _projImpl) {
            _exec.reset(new ProjectionExec(params.projObj, params.fullExpression));
        }
        else {
            // We shouldn't need the full expression if we're fast-pathing.
            invariant(NULL == params.fullExpression);

            _projObj = params.projObj;

            // Sanity-check the input.
            invariant(_projObj.isOwned());
            invariant(!_projObj.isEmpty());

            // The _id is included by default.
            bool includeId = true;

            // Figure out what fields are in the projection.  TODO: we can get this from the
            // ParsedProjection...modify that to have this type instead of a vector.
            BSONObjIterator projObjIt(_projObj);
            while (projObjIt.more()) {
                BSONElement elt = projObjIt.next();
                // Must deal with the _id case separately as there is an implicit _id: 1 in the
                // projection.
                if (mongoutils::str::equals(elt.fieldName(), kIdField)
                    && !elt.trueValue()) {
                    includeId = false;
                    continue;
                }
                _includedFields.insert(elt.fieldNameStringData());
            }

            if (includeId) {
                _includedFields.insert(kIdField);
            }

            // If we're pulling data out of one index we can pre-compute the indices of the fields
            // in the key that we pull data from and avoid looking up the field name each time.
            if (ProjectionStageParams::COVERED_ONE_INDEX == params.projImpl) {
                // Sanity-check.
                _coveredKeyObj = params.coveredKeyObj;
                invariant(_coveredKeyObj.isOwned());

                BSONObjIterator kpIt(_coveredKeyObj);
                while (kpIt.more()) {
                    BSONElement elt = kpIt.next();
                    unordered_set<StringData, StringData::Hasher>::iterator fieldIt;
                    fieldIt = _includedFields.find(elt.fieldNameStringData());

                    if (_includedFields.end() == fieldIt) {
                        // Push an unused value on the back to keep _includeKey and _keyFieldNames
                        // in sync.
                        _keyFieldNames.push_back(StringData());
                        _includeKey.push_back(false);
                    }
                    else {
                        // If we are including this key field store its field name.
                        _keyFieldNames.push_back(*fieldIt);
                        _includeKey.push_back(true);
                    }
                }
            }
            else {
                invariant(ProjectionStageParams::SIMPLE_DOC == params.projImpl);
            }
        }
    }
Example #9
 static inline StringData makeStringDataFromBSONElement(const BSONElement& element) {
     return StringData(element.valuestr(), element.valuestrsize() - 1);
 }
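
The '- 1' accounts for valuestrsize() counting the string's terminating NUL. A self-contained stand-in for the same off-by-one, using a raw buffer and std::string_view rather than the BSON types:

#include <cassert>
#include <string_view>

int main()
{
    const char buf[] = "TestDB.collection";    // stored with a trailing '\0'
    const int storedSize = sizeof(buf);        // 18: 17 characters plus the NUL
    std::string_view sd(buf, storedSize - 1);  // view over the 17 characters only
    assert(sd == "TestDB.collection");
    return 0;
}
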
Example #10
Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
    // Perform standard field name and updateable checks.
    _fieldRef.parse(modExpr.fieldName());
    Status status = fieldchecker::isUpdatable(_fieldRef);
    if (!status.isOK()) {
        return status;
    }

    // If a $-positional operator was used, get the index in which it occurred
    // and ensure only one occurrence.
    size_t foundCount;
    bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);

    if (positional)
        *positional = foundDollar;

    if (foundDollar && foundCount > 1) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Too many positional (i.e. '$') elements found in path '"
                                    << _fieldRef.dottedField()
                                    << "'");
    }

    // TODO: The driver could potentially do this re-writing.

    // If the type of the value is 'Object', we might be dealing with a $each. See if that
    // is the case.
    if (modExpr.type() == mongo::Object) {
        BSONElement modExprObjPayload = modExpr.embeddedObject().firstElement();
        if (!modExprObjPayload.eoo() && StringData(modExprObjPayload.fieldName()) == "$each") {
            // It is a $each. Verify that the payload is an array as is required for $each,
            // set our flag, and store the array as our value.
            if (modExprObjPayload.type() != mongo::Array) {
                return Status(ErrorCodes::BadValue,
                              str::stream() << "The argument to $each in $addToSet must "
                                               "be an array but it was of type "
                                            << typeName(modExprObjPayload.type()));
            }

            status = _valDoc.root().appendElement(modExprObjPayload);
            if (!status.isOK())
                return status;

            _val = _valDoc.root().leftChild();
        }
    }

    // If this wasn't an 'each', turn it into one. No need to sort or de-dup since we only
    // have one element.
    if (_val == _valDoc.end()) {
        mb::Element each = _valDoc.makeElementArray("$each");

        status = each.appendElement(modExpr);
        if (!status.isOK())
            return status;

        status = _valDoc.root().pushBack(each);
        if (!status.isOK())
            return status;

        _val = each;
    }

    // Check if no invalid data (such as fields with '$'s) are being used in the $each
    // clause.
    mb::ConstElement valCursor = _val.leftChild();
    while (valCursor.ok()) {
        const BSONType type = valCursor.getType();
        dassert(valCursor.hasValue());
        switch (type) {
            case mongo::Object: {
                Status s = valCursor.getValueObject().storageValidEmbedded();
                if (!s.isOK())
                    return s;

                break;
            }
            case mongo::Array: {
                Status s = valCursor.getValueArray().storageValidEmbedded();
                if (!s.isOK())
                    return s;

                break;
            }
            default:
                break;
        }

        valCursor = valCursor.rightSibling();
    }

    setCollator(opts.collator);
    return Status::OK();
}
Example #11
        /*
         * Rewrites the text-search command as a normal query, runs it, and puts the
         * results in 'result'.
         * @param txn, the operation context
         * @param dbname, name of the db
         * @param cmdObj, object that contains the entire command
         * @param errmsg, reference to the error message
         * @param result, reference to the builder for the result
         * @return true if successful, false otherwise
         */
        bool FTSCommand::_run(OperationContext* txn,
                              const string& dbname,
                              BSONObj& cmdObj,
                              int cmdOptions,
                              const string& ns,
                              const string& searchString,
                              string language, // "" for not-set
                              int limit,
                              BSONObj& filter,
                              BSONObj& projection,
                              string& errmsg,
                              BSONObjBuilder& result ) {

            Timer comm;

            // Rewrite the cmd as a normal query.
            BSONObjBuilder queryBob;
            queryBob.appendElements(filter);

            BSONObjBuilder textBob;
            textBob.append("$search", searchString);
            if (!language.empty()) {
                textBob.append("$language", language);
            }
            queryBob.append("$text", textBob.obj());

            // This is the query we exec.
            BSONObj queryObj = queryBob.obj();

            // We sort by the score.
            BSONObj sortSpec = BSON("$s" << BSON("$meta" << LiteParsedQuery::metaTextScore));

            // We also project the score into the document and strip it out later during the reformatting
            // of the results.
            BSONObjBuilder projBob;
            projBob.appendElements(projection);
            projBob.appendElements(sortSpec);
            BSONObj projObj = projBob.obj();

            Client::ReadContext ctx(txn, ns);

            CanonicalQuery* cq;
            Status canonicalizeStatus = 
                    CanonicalQuery::canonicalize(ns, 
                                                 queryObj,
                                                 sortSpec,
                                                 projObj, 
                                                 0,
                                                 limit,
                                                 BSONObj(),
                                                 &cq,
                                                 WhereCallbackReal(txn, StringData(dbname)));
            if (!canonicalizeStatus.isOK()) {
                errmsg = canonicalizeStatus.reason();
                return false;
            }

            PlanExecutor* rawExec;
            Status getExecStatus = getExecutor(
                txn, ctx.ctx().db()->getCollection(txn, ns), cq, &rawExec);
            if (!getExecStatus.isOK()) {
                errmsg = getExecStatus.reason();
                return false;
            }

            auto_ptr<PlanExecutor> exec(rawExec);

            BSONArrayBuilder resultBuilder(result.subarrayStart("results"));

            // Quoth: "leave a mb for other things"
            int resultSize = 1024 * 1024;

            int numReturned = 0;

            BSONObj obj;
            while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
                if ((resultSize + obj.objsize()) >= BSONObjMaxUserSize) {
                    break;
                }
                // We return an array of results.  Add another element.
                BSONObjBuilder oneResultBuilder(resultBuilder.subobjStart());
                oneResultBuilder.append("score", obj["$s"].number());

                // Strip out the score from the returned obj.
                BSONObjIterator resIt(obj);
                BSONObjBuilder resBob;
                while (resIt.more()) {
                    BSONElement elt = resIt.next();
                    if (!mongoutils::str::equals("$s", elt.fieldName())) {
                        resBob.append(elt);
                    }
                }
                oneResultBuilder.append("obj", resBob.obj());
                BSONObj addedArrayObj = oneResultBuilder.done();
                resultSize += addedArrayObj.objsize();
                numReturned++;
            }

            resultBuilder.done();

            // returns some stats to the user
            BSONObjBuilder stats(result.subobjStart("stats"));

            // Fill in nscanned from the explain.
            PlanSummaryStats summary;
            Explain::getSummaryStats(exec.get(), &summary);
            stats.appendNumber("nscanned", summary.totalKeysExamined);
            stats.appendNumber("nscannedObjects", summary.totalDocsExamined);

            stats.appendNumber( "n" , numReturned );
            stats.append( "timeMicros", (int)comm.micros() );
            stats.done();

            return true;
        }
Example #12
TEST(ParseNumber, NotNullTerminated) {
    ASSERT_PARSES(int, StringData("1234", 3), 123);
}
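
The test relies on the view covering only the first three characters, even though the underlying buffer continues past them. A stand-in sketch of the same behaviour using std::from_chars from standard C++17 (not MongoDB's number parser):

#include <cassert>
#include <charconv>
#include <string_view>

int main()
{
    std::string_view sv("1234", 3);  // the view excludes the trailing '4'
    int out = 0;
    auto res = std::from_chars(sv.data(), sv.data() + sv.size(), out);
    assert(res.ec == std::errc() && out == 123);
    return 0;
}
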
Example #13
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
    const HostAndPort& target, Date_t now, Milliseconds timeout) {
    stdx::unique_lock<stdx::mutex> lk(_mutex);

    // Clean up connections on stale/unused hosts
    _cleanUpStaleHosts_inlock(now);

    for (HostConnectionMap::iterator hostConns;
         (hostConns = _connections.find(target)) != _connections.end();) {
        // Clean up the requested host to remove stale/unused connections
        _cleanUpOlderThan_inlock(now, &hostConns->second);

        if (hostConns->second.empty()) {
            // prevent host from causing unnecessary cleanups
            _lastUsedHosts[hostConns->first] = kNeverTooStale;
            break;
        }

        _inUseConnections.splice(
            _inUseConnections.begin(), hostConns->second, hostConns->second.begin());

        const ConnectionList::iterator candidate = _inUseConnections.begin();
        lk.unlock();

        try {
            if (candidate->conn->isStillConnected()) {
                // setSoTimeout takes a double representing the number of seconds for send and
                // receive timeouts.  Thus, we must express 'timeout' in milliseconds and divide by
                // 1000.0 to get the number of seconds with a fractional part.
                candidate->conn->setSoTimeout(durationCount<Milliseconds>(timeout) / 1000.0);
                return candidate;
            }
        } catch (...) {
            lk.lock();
            _destroyConnection_inlock(&_inUseConnections, candidate);
            throw;
        }

        lk.lock();
        _destroyConnection_inlock(&_inUseConnections, candidate);
    }

    // No idle connection in the pool; make a new one.
    lk.unlock();

    std::unique_ptr<DBClientConnection> conn;
    if (_hook) {
        conn.reset(new DBClientConnection(
            false,  // auto reconnect
            0,      // socket timeout
            {},     // MongoURI
            [this, target](const executor::RemoteCommandResponse& isMasterReply) {
                return _hook->validateHost(target, BSONObj(), isMasterReply);
            }));
    } else {
        conn.reset(new DBClientConnection());
    }

    // setSoTimeout takes a double representing the number of seconds for send and receive
    // timeouts.  Thus, we must express 'timeout' in milliseconds and divide by 1000.0 to get
    // the number of seconds with a fractional part.
    conn->setSoTimeout(durationCount<Milliseconds>(timeout) / 1000.0);

    uassertStatusOK(conn->connect(target, StringData()));
    conn->setTags(_messagingPortTags);

    if (isInternalAuthSet()) {
        conn->auth(getInternalUserAuthParams());
    }

    if (_hook) {
        auto postConnectRequest = uassertStatusOK(_hook->makeRequest(target));

        // We might not have a postConnectRequest
        if (postConnectRequest != boost::none) {
            auto start = Date_t::now();
            auto reply =
                conn->runCommand(OpMsgRequest::fromDBAndBody(postConnectRequest->dbname,
                                                             postConnectRequest->cmdObj,
                                                             postConnectRequest->metadata));

            auto rcr = executor::RemoteCommandResponse(reply->getCommandReply().getOwned(),
                                                       Date_t::now() - start);

            uassertStatusOK(_hook->handleReply(target, std::move(rcr)));
        }
    }

    lk.lock();
    return _inUseConnections.insert(_inUseConnections.begin(), ConnectionInfo(conn.release(), now));
}
Example #14
// todo, should be removed
inline StringData to_str(const char* value)
{
    return StringData(value);
}
Example #15
BufBuilder& BSONObjBuilderValueStream::subarrayStart() {
    StringData tmp = _fieldName;
    _fieldName = StringData();
    return _builder->subarrayStart(tmp);
}
Example #16
File: v8_utils.cpp  Project: ANTco/mongo
 std::string toSTLString(const v8::Handle<v8::Value>& o) {
     return StringData(V8String(o)).toString();
 }
Example #17
inline void StringEnumColumn::add()
{
    add(m_nullable ? realm::null() : StringData(""));
}
Example #18
File: update.cpp  Project: bukamanush/mongo
    UpdateResult update(UpdateRequest& request, UpdateDriver* driver) {

        const NamespaceString& nsString = request.getNamespaceString();

        validateUpdate( nsString.ns().c_str(), request.getUpdates(), request.getQuery() );

        NamespaceDetails* nsDetails = nsdetails( nsString.ns() );
        NamespaceDetailsTransient* nsDetailsTransient =
            &NamespaceDetailsTransient::get( nsString.ns().c_str() );

        OpDebug& debug = request.getDebug();

        // TODO: This seems a bit circuitous.
        debug.updateobj = request.getUpdates();

        driver->refreshIndexKeys( nsDetailsTransient->indexKeys() );

        shared_ptr<Cursor> cursor = getOptimizedCursor(
            nsString.ns(), request.getQuery(), BSONObj(), request.getQueryPlanSelectionPolicy() );

        // If the update was marked with '$isolated' (a.k.a '$atomic'), we are not allowed to
        // yield while evaluating the update loop below.
        //
        // TODO: Old code checks this repeatedly within the update loop. Is that necessary?
        // It seems that once atomic, it should always be atomic.
        const bool isolated =
            cursor->ok() &&
            cursor->matcher() &&
            cursor->matcher()->docMatcher().atomic();

        // The 'cursor' the optimizer gave us may contain query plans that generate duplicate
        // DiskLocs. Here we set up the mechanism that prevents us from processing them twice
        // if we see them. We also set up a 'ClientCursor' so that we can support yielding.
        //
        // TODO: Is it valid to call this on a non-ok cursor?
        const bool dedupHere = cursor->autoDedup();

        //
        // We'll start assuming we have one or more documents for this update. (Otherwise,
        // we'll fall back to upserting.)
        //

        // We record that this will not be an upsert, in case a mod doesn't want to be applied
        // when in strict update mode.
        driver->setContext( ModifierInterface::ExecInfo::UPDATE_CONTEXT );

        // Let's fetch each of them and pipe them through the update expression, making sure to
        // keep track of the necessary stats. Recall that we'll be pulling documents out of
        // cursors and some of them do not deduplicate the entries they generate. We have
        // deduping logic in here, too -- for now.
        unordered_set<DiskLoc, DiskLoc::Hasher> seenLocs;
        int numMatched = 0;
        debug.nscanned = 0;

        Client& client = cc();

        mutablebson::Document doc;

        // If we are going to be yielding, we will need a ClientCursor scoped to this loop. We
        // only loop as long as the underlying cursor is OK.
        for ( auto_ptr<ClientCursor> clientCursor; cursor->ok(); ) {

            // If we haven't constructed a ClientCursor, and if the client allows us to throw
            // page faults, and if we are referring to a location that is likely not in
            // physical memory, then throw a PageFaultException. The entire operation will be
            // restarted.
            if ( clientCursor.get() == NULL &&
                 client.allowedToThrowPageFaultException() &&
                 !cursor->currLoc().isNull() &&
                 !cursor->currLoc().rec()->likelyInPhysicalMemory() ) {
                // We should never throw a PFE if we have already updated items.
                dassert((numMatched == 0) || (numMatched == debug.nupdateNoops));
                throw PageFaultException( cursor->currLoc().rec() );
            }

            if ( !isolated && debug.nscanned != 0 ) {

                // We are permitted to yield. To do so we need a ClientCursor, so create one
                // now if we have not yet done so.
                if ( !clientCursor.get() )
                    clientCursor.reset(
                        new ClientCursor( QueryOption_NoCursorTimeout, cursor, nsString.ns() ) );

                // Ask the client cursor to yield. We get two bits of state back: whether or not
                // we yielded, and whether or not we correctly recovered from yielding.
                bool yielded = false;
                const bool recovered = clientCursor->yieldSometimes(
                    ClientCursor::WillNeed, &yielded );

                if ( !recovered ) {
                    // If we failed to recover from the yield, then the ClientCursor is already
                    // gone. Release it so we don't destroy it a second time.
                    clientCursor.release();
                    break;
                }

                if ( !cursor->ok() ) {
                    // If the cursor died while we were yielded, just get out of the update loop.
                    break;
                }

                if ( yielded ) {
                    // We yielded and recovered OK, and our cursor is still good. Details about
                    // our namespace may have changed while we were yielded, so we re-acquire
                    // them here. If we can't do so, escape the update loop. Otherwise, refresh
                    // the driver so that it knows about what is currently indexed.
                    nsDetails = nsdetails( nsString.ns() );
                    if ( !nsDetails )
                        break;
                    nsDetailsTransient = &NamespaceDetailsTransient::get( nsString.ns().c_str() );

                    // TODO: This copies the index keys, but it may not need to do so.
                    driver->refreshIndexKeys( nsDetailsTransient->indexKeys() );
                }

            }

            // Let's fetch the next candidate object for this update.
            Record* record = cursor->_current();
            DiskLoc loc = cursor->currLoc();
            const BSONObj oldObj = loc.obj();

            // We count how many documents we scanned even though we may skip those that are
            // deemed duplicated. The final 'numUpdated' and 'nscanned' numbers may differ for
            // that reason.
            debug.nscanned++;

            // Skips this document if it:
            // a) doesn't match the query portion of the update
            // b) was deemed duplicate by the underlying cursor machinery
            //
            // Now, if we are going to update the document,
            // c) we don't want to do so while the cursor is at it, as that may invalidate
            // the cursor. So, we advance to next document, before issuing the update.
            MatchDetails matchDetails;
            matchDetails.requestElemMatchKey();
            if ( !cursor->currentMatches( &matchDetails ) ) {
                // a)
                cursor->advance();
                continue;
            }
            else if ( cursor->getsetdup( loc ) && dedupHere ) {
                // b)
                cursor->advance();
                continue;
            }
            else if (!driver->isDocReplacement() && request.isMulti()) {
                // c)
                cursor->advance();
                if ( dedupHere ) {
                    if ( seenLocs.count( loc ) ) {
                        continue;
                    }
                }

                // There are certain kind of cursors that hold multiple pointers to data
                // underneath. $or cursors is one example. In a $or cursor, it may be the case
                // that when we did the last advance(), we finished consuming documents from
                // one of $or child and started consuming the next one. In that case, it is
                // possible that the last document of the previous child is the same as the
                // first document of the next (see SERVER-5198 and jstests/orp.js).
                //
                // So we advance the cursor here until we see a new diskloc.
                //
                // Note that we won't be yielding, and we may not do so for a while if we find
                // a particularly duplicated sequence of loc's. That is highly unlikely,
                // though.  (See SERVER-5725, if curious, but "stage" based $or will make that
                // ticket moot).
                while( cursor->ok() && loc == cursor->currLoc() ) {
                    cursor->advance();
                }
            }

            // For some (unfortunate) historical reasons, not all cursors would be valid after
            // a write simply because we advanced them to a document not affected by the write.
            // To protect in those cases, not only we engaged in the advance() logic above, but
            // we also tell the cursor we're about to write a document that we've just seen.
            // prepareToTouchEarlierIterate() requires calling later
            // recoverFromTouchingEarlierIterate(), so we make a note here to do so.
            bool touchPreviousDoc = request.isMulti() && cursor->ok();
            if ( touchPreviousDoc ) {
                if ( clientCursor.get() )
                    clientCursor->setDoingDeletes( true );
                cursor->prepareToTouchEarlierIterate();
            }

            // Found a matching document
            numMatched++;

            // Ask the driver to apply the mods. It may be that the driver can apply those "in
            // place", that is, some values of the old document just get adjusted without any
            // change to the binary layout on the bson layer. It may be that a whole new
            // document is needed to accommodate the new bson layout of the resulting document.
            doc.reset( oldObj, mutablebson::Document::kInPlaceEnabled );
            BSONObj logObj;

            // If there was a matched field, obtain it.
            string matchedField;
            if (matchDetails.hasElemMatchKey())
                matchedField = matchDetails.elemMatchKey();

            Status status = driver->update( matchedField, &doc, &logObj );
            if ( !status.isOK() ) {
                uasserted( 16837, status.reason() );
            }

            // If the driver applied the mods in place, we can ask the mutable for what
            // changed. We call those changes "damages". :) We use the damages to inform the
            // journal what was changed, and then apply them to the original document
            // ourselves. If, however, the driver applied the mods out of place, we ask it to
            // generate a new, modified document for us. In that case, the file manager will
            // take care of the journaling details for us.
            //
            // This code flow is admittedly odd. But, right now, journaling is baked into the
            // file manager. And if we aren't using the file manager, we have to do journaling
            // ourselves.
            bool objectWasChanged = false;
            BSONObj newObj;
            const char* source = NULL;
            mutablebson::DamageVector damages;
            bool inPlace = doc.getInPlaceUpdates(&damages, &source);
            if ( inPlace && !damages.empty() && !driver->modsAffectIndices() ) {
                nsDetails->paddingFits();

                // All updates were in place. Apply them via durability and writing pointer.
                mutablebson::DamageVector::const_iterator where = damages.begin();
                const mutablebson::DamageVector::const_iterator end = damages.end();
                for( ; where != end; ++where ) {
                    const char* sourcePtr = source + where->sourceOffset;
                    void* targetPtr = getDur().writingPtr(
                        const_cast<char*>(oldObj.objdata()) + where->targetOffset,
                        where->size);
                    std::memcpy(targetPtr, sourcePtr, where->size);
                }
                newObj = oldObj;
                debug.fastmod = true;

                objectWasChanged = true;
            }
            else {

                // The updates were not in place. Apply them through the file manager.
                newObj = doc.getObject();
                DiskLoc newLoc = theDataFileMgr.updateRecord(nsString.ns().c_str(),
                                                             nsDetails,
                                                             nsDetailsTransient,
                                                             record,
                                                             loc,
                                                             newObj.objdata(),
                                                             newObj.objsize(),
                                                             debug);

                // If we've moved this object to a new location, make sure we don't apply
                // that update again if our traversal picks the object again.
                //
                // We also take note of the diskloc if the updates affect indices. Chances
                // are that we're traversing one of them, and they may be multikey and
                // therefore produce duplicate disklocs.
                if ( newLoc != loc || driver->modsAffectIndices()  ) {
                    seenLocs.insert( newLoc );
                }

                objectWasChanged = true;
            }

            // Log Obj
            if ( request.shouldUpdateOpLog() ) {
                if ( driver->isDocReplacement() || !logObj.isEmpty() ) {
                    BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request.isMulti());
                    logOp("u", nsString.ns().c_str(), logObj , &idQuery,
                          NULL, request.isFromMigration(), &newObj);
                }
            }

            // If it was noop since the document didn't change, record that.
            if (!objectWasChanged)
                debug.nupdateNoops++;

            if (!request.isMulti()) {
                break;
            }

            // If we used the cursor mechanism that prepares an earlier seen document for a
            // write we need to tell such mechanisms that the write is over.
            if ( touchPreviousDoc ) {
                cursor->recoverFromTouchingEarlierIterate();
            }

            getDur().commitIfNeeded();

        }

        // TODO: Can this be simplified?
        if ((numMatched > 0) || (numMatched == 0 && !request.isUpsert()) ) {
            debug.nupdated = numMatched;
            return UpdateResult( numMatched > 0 /* updated existing object(s) */,
                                 !driver->isDocReplacement() /* $mod or obj replacement */,
                                 numMatched /* # of documents updated, including no-ops */,
                                 BSONObj() );
        }

        //
        // We haven't found any existing document so an insert is done
        // (upsert is true).
        //
        debug.upsert = true;

        // Since this is an insert (no docs found and upsert:true), we will be logging it
        // as an insert in the oplog. We don't need the driver's help to build the
        // oplog record, then. We also set the context of the update driver to the INSERT_CONTEXT.
        // Some mods may only work in that context (e.g. $setOnInsert).
        driver->setLogOp( false );
        driver->setContext( ModifierInterface::ExecInfo::INSERT_CONTEXT );

        BSONObj baseObj;

        // Reset the document we will be writing to
        doc.reset( baseObj, mutablebson::Document::kInPlaceDisabled );
        if ( request.getQuery().hasElement("_id") ) {
            uassertStatusOK(doc.root().appendElement(request.getQuery().getField("_id")));
        }


        // If this is a $mod base update, we need to generate a document by examining the
        // query and the mods. Otherwise, we can use the object replacement sent by the user
        // update command that was parsed by the driver before.
        // In the following block we handle the query part, and then do the regular mods after.
        if ( *request.getUpdates().firstElementFieldName() == '$' ) {
            uassertStatusOK(UpdateDriver::createFromQuery(request.getQuery(), doc));
            debug.fastmodinsert = true;
        }

        // Apply the update modifications and then log the update as an insert manually.
        Status status = driver->update( StringData(), &doc, NULL /* no oplog record */);
        if ( !status.isOK() ) {
            uasserted( 16836, status.reason() );
        }

        BSONObj newObj = doc.getObject();
        theDataFileMgr.insertWithObjMod( nsString.ns().c_str(), newObj, false, request.isGod() );
        if ( request.shouldUpdateOpLog() ) {
            logOp( "i", nsString.ns().c_str(), newObj,
                   NULL, NULL, request.isFromMigration(), &newObj );
        }

        debug.nupdated = 1;
        return UpdateResult( false /* updated a non existing document */,
                             !driver->isDocReplacement() /* $mod or obj replacement? */,
                             1 /* count of updated documents */,
                             newObj /* object that was upserted */ );
    }
Example #19
 void CmdInsert::redactForLogging( mutablebson::Document* cmdObj ) {
     redactTooLongLog( cmdObj, StringData( "documents", StringData::LiteralTag() ) );
 }
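
As I read it, StringData::LiteralTag lets the constructor take the length from the string literal itself instead of scanning for the NUL at runtime. An illustrative analogue (the name literal_view is hypothetical, not MongoDB's API) that deduces the size from the literal's array type:

#include <cstddef>
#include <string_view>

// Deduce the length from the literal's array type; no runtime strlen needed.
template <std::size_t N>
constexpr std::string_view literal_view(const char (&lit)[N])
{
    return std::string_view(lit, N - 1);  // drop the trailing '\0'
}

static_assert(literal_view("documents").size() == 9, "length known at compile time");
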
Example #20
    void clear();

    void distinct(IntegerColumn& result) const;
    bool has_duplicate_values() const REALM_NOEXCEPT;

    /// By default, duplicate values are allowed.
    void set_allow_duplicate_values(bool) REALM_NOEXCEPT;

#ifdef REALM_DEBUG
    void Verify() const;
    void verify_entries(const StringColumn& column) const;
    void do_dump_node_structure(std::ostream&, int) const;
    void to_dot() const { to_dot(std::cerr); }
    void to_dot(std::ostream&, StringData title = StringData()) const;
#endif

    typedef int32_t key_type;

    static key_type create_key(StringData) REALM_NOEXCEPT;
    static key_type create_key(StringData, size_t) REALM_NOEXCEPT;

private:
    std::unique_ptr<Array> m_array;
    ColumnBase* m_target_column;
    bool m_deny_duplicate_values;

    struct inner_node_tag {};
    StringIndex(inner_node_tag, Allocator&);