Example #1
    CollectionBase::IndexerBase::~IndexerBase() {
        Lock::assertWriteLocked(_cl->_ns);

        if (_idx && _cl->_indexBuildInProgress) {
            verify(_idx.get() == _cl->_indexes.back().get());
            // Pop back the index from the index vector. We still
            // have a shared pointer (_idx), so it won't close here.
            _cl->_indexes.pop_back();
            _cl->_indexBuildInProgress = false;
            verify(_cl->_nIndexes == (int) _cl->_indexes.size());
            // If we catch any exceptions, eat them. We can only enter this block
            // if we're already propagating an exception (i.e., not under normal
            // operation), so it's okay to just print to the log and continue.
            try {
                _idx->close();
            } catch (const DBException &e) {
                TOKULOG(0) << "Caught DBException while destroying IndexerBase: "
                           << e.getCode() << ", " << e.what() << endl;
            } catch (...) {
                TOKULOG(0) << "Caught generic exception while destroying IndexerBase." << endl;
            }
        } else {
            // The indexer is being destroyed before it got a chance to actually
            // build anything, which is the case if prepare() throws before
            // creating the indexer and setting _cl->_indexBuildInProgress, etc.
        }
    }
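
The destructor above follows the usual C++ rule that destructors must not let exceptions escape, because they often run while another exception is already unwinding the stack, and a second in-flight exception would call std::terminate. A minimal, self-contained sketch of the same idiom, using hypothetical Resource/Guard types rather than the TokuMX classes:

    #include <iostream>
    #include <stdexcept>

    class Resource {
    public:
        // May throw, e.g. if flushing buffered state to disk fails.
        void close() { throw std::runtime_error("flush failed"); }
    };

    class Guard {
        Resource _r;
    public:
        ~Guard() {
            // Swallow everything: if we are already unwinding because of
            // another exception, letting a second one escape would terminate.
            try {
                _r.close();
            } catch (const std::exception &e) {
                std::cerr << "caught exception in ~Guard: " << e.what() << std::endl;
            } catch (...) {
                std::cerr << "caught generic exception in ~Guard" << std::endl;
            }
        }
    };

    int main() {
        Guard g;  // destructor logs the error instead of throwing
        return 0;
    }
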
Example #2
    // Open the dictionary. Creates it if necessary.
    bool IndexDetails::open(const bool may_create) {
        const string dname = indexNamespace();
        if (may_create) {
            addNewNamespaceToCatalog(dname);
        }

        TOKULOG(1) << "Opening IndexDetails " << dname << endl;
        try {
            _db.reset(new storage::Dictionary(dname, _info, *_descriptor, may_create,
                                              _info["background"].trueValue()));
            return true;
        } catch (storage::Dictionary::NeedsCreate) {
            if (cc().upgradingSystemUsers() &&
                isSystemUsersCollection(parentNS()) &&
                keyPattern() == oldSystemUsersKeyPattern) {
                // We're upgrading the system.users collection, and we are missing the old index.
                // That's ok, we'll signal the caller about this by returning a NULL pointer from
                // IndexDetails::make.  See #673
                return false;
            }
            // Unlike for NamespaceIndex, this dictionary must exist on disk if we think it should
            // exist.  This error only gets thrown if may_create is false, which happens when we're
            // trying to open a collection for which we have serialized info.  Therefore, this is a
            // fatal non-user error.
            msgasserted(16988, mongoutils::str::stream() << "dictionary " << dname
                               << " should exist, but we got ENOENT");
        }
    }
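
As the comments note, open() has three outcomes: true on success, false for the tolerated missing-dictionary case during a system.users upgrade, and a fatal msgasserted otherwise. A hedged sketch of how a factory in the spirit of IndexDetails::make might consume that contract (the Index type and make() here are illustrative stand-ins, not the actual TokuMX code):

    #include <cassert>
    #include <memory>

    // Illustrative stand-in: open() returns false only in the tolerated
    // missing-dictionary case, true otherwise (fatal errors would throw).
    struct Index {
        bool open(bool may_create) { return may_create; }  // stub behavior
    };

    // Sketch: make() returns a NULL pointer when open() reports the
    // tolerated case, mirroring the "#673" comment above.
    static std::shared_ptr<Index> make(bool may_create) {
        std::shared_ptr<Index> idx(new Index());
        if (!idx->open(may_create)) {
            return std::shared_ptr<Index>();  // signal "missing old index"
        }
        return idx;
    }

    int main() {
        assert(make(true));    // opened (or created) successfully
        assert(!make(false));  // tolerated missing-dictionary case
        return 0;
    }
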
Example #3
    // Skip the key made up of the first k fields of currentKey, with the
    // rest set to max/min key for direction > 0 or < 0, respectively.
    void IndexCursor::skipPrefix(const BSONObj &key, const int k) {
        TOKULOG(3) << "skipPrefix skipping first " << k << " elements in key " << key << endl;
        BSONObjBuilder b(key.objsize());
        BSONObjIterator it = key.begin();
        const int nFields = key.nFields();
        for ( int i = 0; i < nFields; i++ ) {
            if ( i < k ) {
                b.append( it.next() );
            } else {
                if ( _ordering.descending( 1 << i ) ) {
                    // Descending sort order, so min key skips forward.
                    forward() ? b.appendMinKey( "" ) : b.appendMaxKey( "" );
                } else {
                    // Regular ascending order. Max key skips forward.
                    forward() ? b.appendMaxKey( "" ) : b.appendMinKey( "" );
                }
            }
        }

        // This differs from findKey in that we set PK to max to move forward and min
        // to move backward, resulting in a "skip" of the key prefix, not a "find".
        const bool isSecondary = !_cl->isPKIndex(_idx);
        const BSONObj &pk = forward() ? maxKey : minKey;
        setPosition( b.done(), isSecondary ? pk : BSONObj() );
    }
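
Concretely, for an ascending index on {a: 1, b: 1}, a forward scan positioned at {a: 5, b: 7} with k = 1 builds the probe key {a: 5, b: MaxKey}, so seeking to it skips every remaining b value under a = 5. The same padding idea in a standalone sketch, with ints and INT_MIN/INT_MAX standing in for BSON MinKey/MaxKey:

    #include <climits>
    #include <cstdio>
    #include <vector>

    // Pad all fields past the first k with +/- infinity sentinels, the way
    // skipPrefix uses MaxKey/MinKey. forward/descending mirror the branches above.
    static std::vector<int> makeProbe(const std::vector<int> &key, int k,
                                      bool forward, const std::vector<bool> &descending) {
        std::vector<int> probe;
        for (size_t i = 0; i < key.size(); i++) {
            if ((int) i < k) {
                probe.push_back(key[i]);
            } else if (descending[i]) {
                probe.push_back(forward ? INT_MIN : INT_MAX);  // min key skips forward
            } else {
                probe.push_back(forward ? INT_MAX : INT_MIN);  // max key skips forward
            }
        }
        return probe;
    }

    int main() {
        std::vector<int> key;
        key.push_back(5);
        key.push_back(7);
        std::vector<bool> desc(2, false);                       // ascending index
        std::vector<int> probe = makeProbe(key, 1, true, desc); // {5, INT_MAX}
        printf("probe: {%d, %d}\n", probe[0], probe[1]);
        return 0;
    }
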
Example #4
    // Open the dictionary. Creates it if necessary.
    bool IndexDetailsBase::open(const bool may_create, const bool use_memcmp_magic) {
        const string dname = indexNamespace();

        TOKULOG(1) << "Opening IndexDetails " << dname << endl;
        try {
            // We use the memcmp magic API only for single-key, ascending _id indexes,
            // because the _id field is always unique (and therefore we can simply
            // compare the OID fields if they exist and that will be sufficient)
            if (use_memcmp_magic) {
                verify(_unique);
            }
            _db.reset(new storage::Dictionary(dname, _info, *_descriptor, may_create,
                                              _info["background"].trueValue(), use_memcmp_magic));
            return true;
        } catch (storage::Dictionary::NeedsCreate) {
            if (cc().upgradingSystemUsers() &&
                isSystemUsersCollection(parentNS()) &&
                keyPattern() == oldSystemUsersKeyPattern) {
                // We're upgrading the system.users collection, and we are missing the old index.
                // That's ok, we'll signal the caller about this by returning a NULL pointer from
                // IndexDetailsBase::make.  See #673
                return false;
            }
            // This dictionary must exist on disk if we think it should exist.
            // This error only gets thrown if may_create is false, which happens when we're
            // trying to open a collection for which we have serialized info.
            // Therefore, this is a fatal non-user error.
            msgasserted(16988, mongoutils::str::stream() << "dictionary " << dname
                               << " should exist, but we got ENOENT");
        }
    }
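
The "memcmp magic" relies on key encodings whose raw byte order under memcmp matches the logical value order, which holds for fixed-width big-endian fields such as the timestamp prefix of an OID. A self-contained demonstration of that property (my example, not the storage::Dictionary comparator):

    #include <cassert>
    #include <cstring>
    #include <stdint.h>

    // Encode a 32-bit value big-endian: most significant byte first, so
    // lexicographic byte order matches numeric order.
    static void encodeBE(uint32_t v, unsigned char out[4]) {
        out[0] = (unsigned char) (v >> 24);
        out[1] = (unsigned char) (v >> 16);
        out[2] = (unsigned char) (v >> 8);
        out[3] = (unsigned char) (v);
    }

    int main() {
        unsigned char a[4], b[4];
        encodeBE(0x00000100u, a);
        encodeBE(0x000000FFu, b);
        // 0x100 > 0xFF numerically, and memcmp agrees on the encodings.
        assert(memcmp(a, b, 4) > 0);
        return 0;
    }
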
Example #5
 BSONObj IndexCursor::current() {
     // If the index is clustering, the full document is always stored in _currObj.
     // If the index is not clustering, _currObj starts as empty and gets filled
     // with the full document on the first call to current().
     if ( _currObj.isEmpty() ) {
         _nscannedObjects++;
         bool found = _cl->findByPK( _currPK, _currObj );
         if ( !found ) {
             // If we didn't find the associated object, we must be either:
             // - a snapshot transaction whose context deleted the current pk
             // - a read uncommitted cursor with stale data
             // In either case, we may advance and try again exactly once.
             TOKULOG(4) << "current() did not find associated object for pk " << _currPK << endl;
             advance();
             if ( ok() ) {
                 found = _cl->findByPK( _currPK, _currObj );
                 uassert( 16741, str::stream()
                             << toString() << ": could not find associated document with pk "
                             << _currPK << ", index key " << _currKey, found );
             }
         }
     }
     bool shouldAppendPK = _cl->isCapped() && cc().opSettings().shouldCappedAppendPK();
     if (shouldAppendPK) {
         BSONObjBuilder b;
         b.appendElements(_currObj);
         b.append("$_", _currPK);
         return b.obj();
     }
     return _currObj;
 }
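
The retry-once rule is the interesting part: a miss is tolerated exactly once (the row may have been deleted out from under a snapshot or read-uncommitted cursor), and a second miss is a hard error upstream. A reduced model of that shape, with a hypothetical Store whose first lookup races with a delete:

    #include <cassert>
    #include <string>

    // Illustrative stand-in for a store whose reads may race with deletes.
    struct Store {
        int calls;
        Store() : calls(0) {}
        bool findByPK(int pk, std::string &out) {
            (void) pk;  // unused in this stub
            calls++;
            if (calls == 1) return false;  // first lookup races with a delete
            out = "doc";
            return true;
        }
    };

    // Sketch of current()'s rule: if the lookup misses, advance and try
    // again exactly once; a second miss would be an assertion upstream.
    static bool currentDoc(Store &s, int &pk, std::string &out) {
        if (s.findByPK(pk, out)) return true;
        pk++;  // advance()
        return s.findByPK(pk, out);
    }

    int main() {
        Store s;
        int pk = 1;
        std::string doc;
        assert(currentDoc(s, pk, doc) && doc == "doc");
        return 0;
    }
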
Example #6
 IndexCursor::IndexCursor( Collection *cl, const IndexDetails &idx,
                           const BSONObj &startKey, const BSONObj &endKey,
                           bool endKeyInclusive, int direction, int numWanted ) :
     _cl(cl),
     _idx(idx),
     _ordering(Ordering::make(_idx.keyPattern())),
     _startKey(startKey),
     _endKey(endKey),
     _endKeyInclusive(endKeyInclusive),
     _multiKey(_cl->isMultikey(_cl->idxNo(_idx))),
     _direction(direction),
     _bounds(),
     _boundsMustMatch(true),
     _nscanned(0),
     _nscannedObjects(0),
     _prelock(!cc().opSettings().getJustOne() && numWanted == 0),
     _cursor(_idx, cursor_flags()),
     _tailable(false),
     _ok(false),
     _getf_iteration(0)
 {
     verify( _cl != NULL );
     TOKULOG(3) << toString() << ": constructor: bounds " << prettyIndexBounds() << endl;
     DBC* cursor = _cursor.dbc();
     cursor->c_set_check_interrupt_callback(cursor, cursor_check_interrupt, &_interrupt_extra);
     initializeDBC();
 }
Example #7
    void NamespaceIndexRollback::transfer(NamespaceIndexRollback &parent) {
        TOKULOG(1) << "NamespaceIndexRollback::transfer processing "
                   << _namespaces.size() + _dbs.size() << " rollback items." << endl;

        // Promote rollback entries to parent.
        parent._namespaces.insert(_namespaces.begin(), _namespaces.end());
        parent._dbs.insert(_dbs.begin(), _dbs.end());
    }
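
Promoting the child's entries into the parent is just a set union; std::set's range insert merges while silently ignoring duplicates, which is why overlap between child and parent rollback entries is harmless. For reference:

    #include <cassert>
    #include <set>
    #include <string>

    int main() {
        // Child and parent rollback sets; transfer() promotes child entries
        // into the parent with a range insert (a set union, duplicates ignored).
        std::set<std::string> parent, child;
        parent.insert("db.foo");
        child.insert("db.bar");
        child.insert("db.foo");  // overlap is harmless

        parent.insert(child.begin(), child.end());
        assert(parent.size() == 2);
        return 0;
    }
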
Example #8
    IndexCursor::IndexCursor( Collection *cl, const IndexDetails &idx,
                              const shared_ptr< FieldRangeVector > &bounds,
                              int singleIntervalLimit, int direction, int numWanted ) :
        _cl(cl),
        _idx(idx),
        _ordering(Ordering::make(_idx.keyPattern())),
        _startKey(),
        _endKey(),
        _endKeyInclusive(true),
        _multiKey(_cl->isMultikey(_cl->idxNo(_idx))),
        _direction(direction),
        _bounds(bounds),
        _boundsMustMatch(true),
        _nscanned(0),
        _nscannedObjects(0),
        _prelock(!cc().opSettings().getJustOne() && numWanted == 0),
        _cursor(_idx, cursor_flags()),
        _tailable(false),
        _ok(false),
        _getf_iteration(0)
    {
        verify( _cl != NULL );
        _boundsIterator.reset( new FieldRangeVectorIterator( *_bounds , singleIntervalLimit ) );
        _boundsIterator->prepDive();
        _startKey = _bounds->startKey();
        _endKey = _bounds->endKey();
        _endKeyInclusive = _bounds->endKeyInclusive();
        TOKULOG(3) << toString() << ": constructor: bounds " << prettyIndexBounds() << endl;
        DBC* cursor = _cursor.dbc();
        cursor->c_set_check_interrupt_callback(cursor, cursor_check_interrupt, &_interrupt_extra);
        initializeDBC();

        // Fairly bad hack:
        //
        // Primary keys are not skipped properly when a non-inclusive start bound is specified.
        // See IndexCursor::skipToNextKey()
        //
        // Do a single advance here - the PK is unique so the next key is guaranteed to be
        // strictly greater than the start key. We have to play games with _nscanned because
        // advance()'s checkCurrentAgainstBounds() is going to increment it by 1 (we don't want that).
        if (ok() && _cl->isPKIndex(_idx) && !_bounds->startKeyInclusive() && _currKey == _startKey) {
            const long long oldNScanned = _nscanned;
            advance();
            verify(oldNScanned <= _nscanned);
            _nscanned = oldNScanned;
        }
        DEV {
            // At this point, the current key should be consistent with
            // _startKey and _bounds->startKeyInclusive()
            if (ok() && !_bounds->startKeyInclusive()) {
                if (forward()) {
                    verify(_currKey.woCompare(_startKey, _ordering) > 0);
                } else {
                    verify(_currKey.woCompare(_startKey, _ordering) < 0);
                }
            }
        }
    }
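
The single-advance trick works only because primary keys are unique: positioning at the exclusive start key and stepping once is equivalent to an upper-bound seek. The same relationship in ordinary C++ containers, as an analogy rather than the cursor code:

    #include <cassert>
    #include <set>

    int main() {
        std::set<int> pks;
        pks.insert(1);
        pks.insert(2);
        pks.insert(3);

        // Inclusive start: lower_bound lands on the key itself.
        std::set<int>::iterator it = pks.lower_bound(2);
        assert(*it == 2);

        // Exclusive start: upper_bound, or equivalently lower_bound plus one
        // advance -- valid only because set keys (like PKs) are unique.
        std::set<int>::iterator ex = pks.upper_bound(2);
        assert(*ex == 3);
        ++it;
        assert(it == ex);
        return 0;
    }
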
Example #9
    static void updateUsingMods(NamespaceDetails *d, const BSONObj &pk, const BSONObj &obj,
                                ModSetState &mss, struct LogOpUpdateDetails* loud) {

        BSONObj newObj = mss.createNewFromMods();
        checkTooLarge( newObj );
        TOKULOG(3) << "updateUsingMods used mod set, transformed " << obj << " to " << newObj << endl;

        updateOneObject( d, pk, obj, newObj, loud );
    }
Example #10
    static void updateNoMods(NamespaceDetails *d, const BSONObj &pk, const BSONObj &obj,
                             const BSONObj &updateobj, struct LogOpUpdateDetails* loud) {

        BSONElementManipulator::lookForTimestamps( updateobj );
        checkNoMods( updateobj );
        TOKULOG(3) << "updateNoMods replacing pk " << pk << ", obj " << obj << " with updateobj " << updateobj << endl;

        updateOneObject( d, pk, obj, updateobj, loud );
    }
Example #11
    /* note: this is only (as-is) called when:

             - the update is not multi
             - the mods are not indexed
             - the update is not an upsert
    */
    static UpdateResult _updateById(const BSONObj &pk,
                                    bool isOperatorUpdate,
                                    ModSet* mods,
                                    NamespaceDetails* d,
                                    const char* ns,
                                    const BSONObj& updateobj,
                                    BSONObj patternOrig,
                                    bool logop,
                                    OpDebug& debug,
                                    bool fromMigrate = false) {

        BSONObj obj;
        {
            TOKULOG(3) << "_updateById looking for pk " << pk << endl;
            dassert(pk == patternOrig["_id"].wrap(""));
            bool found = d->findById( patternOrig, obj );
            TOKULOG(3) << "_updateById findById() got " << obj << endl;
            if ( !found ) {
                // no upsert support in _updateById yet, so we are done.
                return UpdateResult( 0 , 0 , 0 , BSONObj() );
            }
        }

        d->notifyOfWriteOp();

        /* look for $inc etc.  Note that, as written, all modified fields must use
           $ operators; you can't mix in plain field assignments at the moment. */
        struct LogOpUpdateDetails loud;
        loud.logop = logop;
        loud.ns = ns;
        loud.fromMigrate = fromMigrate;
        if ( isOperatorUpdate ) {
            auto_ptr<ModSetState> mss = mods->prepare( obj );

            // mod set update, ie: $inc: 10 increments by 10.
            updateUsingMods( d, pk, obj, *mss, &loud );
            return UpdateResult( 1 , 1 , 1 , BSONObj() );

        } // end $operator update

        // replace-style update
        updateNoMods( d, pk, obj, updateobj, &loud );
        return UpdateResult( 1 , 0 , 1 , BSONObj() );
    }
Example #12
    void IndexCursor::_advance() {
        // Reset this flag at the start of a new iteration.
        // See IndexCursor::checkCurrentAgainstBounds()
        _boundsMustMatch = true;

        // First, try to get data from the bulk fetch buffer.
        _ok = _buffer.next();
        // If no data remains in the bulk fetch buffer, make a
        // fractal tree call to fetch more rows.
        if ( !ok() ) {
            _ok = fetchMoreRows();
        }
        // At this point, any remaining rows reside in the bulk fetch
        // buffer. Get one row from it.
        if ( ok() ) {
            getCurrentFromBuffer();
            TOKULOG(3) << "_advance moved to K, PK, Obj " << _currKey << _currPK << _currObj << endl;
        } else {
            TOKULOG(3) << "_advance exhausted" << endl;
        }
    }
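
The shape of _advance() is a classic buffered cursor: drain a local buffer first and only call into storage when it runs dry. A reduced, self-contained sketch of that structure (a vector stands in for the fractal tree, and the batch size is arbitrary):

    #include <cassert>
    #include <deque>
    #include <vector>

    // Sketch of the buffer-then-refill pattern: advance() consumes the local
    // buffer and falls back to fetchMoreRows() only when it is empty.
    struct BufferedCursor {
        std::deque<int> buffer;
        std::vector<int> rows;   // stand-in for the fractal tree
        size_t pos;
        BufferedCursor(const std::vector<int> &r) : rows(r), pos(0) {}

        bool fetchMoreRows() {   // bulk-fetch a small batch into the buffer
            for (int i = 0; i < 2 && pos < rows.size(); i++, pos++)
                buffer.push_back(rows[pos]);
            return !buffer.empty();
        }
        bool advance(int &out) {
            if (buffer.empty() && !fetchMoreRows())
                return false;    // exhausted
            out = buffer.front();
            buffer.pop_front();
            return true;
        }
    };

    int main() {
        std::vector<int> rows;
        rows.push_back(10);
        rows.push_back(20);
        rows.push_back(30);
        BufferedCursor c(rows);
        int v, n = 0;
        while (c.advance(v)) n++;
        assert(n == 3);
        return 0;
    }
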
Example #13
 // Check if the current key is beyond endKey.
 void IndexCursor::checkEnd() {
     if ( !ok() ) {
         return;
     }
     if ( !_endKey.isEmpty() ) {
         const int cmp = _endKey.woCompare( _currKey, _ordering );
         const int sign = cmp == 0 ? 0 : (cmp > 0 ? 1 : -1);
         if ( (sign != 0 && sign != _direction) || (sign == 0 && !_endKeyInclusive) ) {
             _ok = false;
             TOKULOG(3) << toString() << ": checkEnd() stopping @ curr, end: " << _currKey << _endKey << endl;
         }
     }
 }
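
The sign convention deserves a second look: cmp compares endKey against currKey, so the scan is still in bounds only while the sign matches the scan direction, or is zero with an inclusive end. A small standalone check of the same rule with plain ints:

    #include <cassert>

    // Returns true if a cursor at curr may continue toward end, given
    // direction (+1 forward, -1 reverse) and end-inclusiveness.
    static bool inBounds(int curr, int end, int direction, bool endInclusive) {
        const int cmp = end - curr;
        const int sign = cmp == 0 ? 0 : (cmp > 0 ? 1 : -1);
        if ((sign != 0 && sign != direction) || (sign == 0 && !endInclusive))
            return false;  // beyond (or exactly at an exclusive) end key
        return true;
    }

    int main() {
        assert(inBounds(3, 5, 1, true));    // forward, end still ahead
        assert(!inBounds(6, 5, 1, true));   // forward, past the end
        assert(inBounds(5, 5, 1, true));    // at an inclusive end
        assert(!inBounds(5, 5, 1, false));  // at an exclusive end
        assert(inBounds(5, 3, -1, true));   // reverse, end still behind
        return 0;
    }
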
Example #14
    void IndexCursor::setPosition(const BSONObj &key, const BSONObj &pk) {
        TOKULOG(3) << toString() << ": setPosition(): getf " << key << ", pk " << pk << ", direction " << _direction << endl;

        // Empty row buffer, reset fetch iteration, go get more rows.
        _buffer.empty();
        _getf_iteration = 0;

        storage::Key sKey( key, !pk.isEmpty() ? &pk : NULL );
        DBT key_dbt = sKey.dbt();

        int r;
        const int rows_to_fetch = getf_fetch_count();
        struct cursor_getf_extra extra(&_buffer, rows_to_fetch);
        DBC *cursor = _cursor.dbc();
        if ( forward() ) {
            r = cursor->c_getf_set_range(cursor, getf_flags(), &key_dbt, cursor_getf, &extra);
        } else {
            r = cursor->c_getf_set_range_reverse(cursor, getf_flags(), &key_dbt, cursor_getf, &extra);
        }
        if ( extra.ex != NULL ) {
            throw *extra.ex;
        }
        if (r == TOKUDB_INTERRUPTED) {
            _interrupt_extra.throwException();
        }
        if ( r != 0 && r != DB_NOTFOUND ) {
            extra.throwException();
            storage::handle_ydb_error(r);
        }

        _getf_iteration++;
        _ok = extra.rows_fetched > 0;
        if ( ok() ) {
            getCurrentFromBuffer();
        }

        TOKULOG(3) << "setPosition hit K, PK, Obj " << _currKey << _currPK << _currObj << endl;
    }
Example #15
    NamespaceDetails::Indexer::~Indexer() {
        Lock::assertWriteLocked(_d->_ns);

        if (_d->_indexBuildInProgress) {
            verify(_idx.get() == _d->_indexes.back().get());
            // Pop back the index from the index vector. We still
            // have a shared pointer (_idx), so it won't close here.
            _d->_indexes.pop_back();
            _d->_indexBuildInProgress = false;
            verify(_d->_nIndexes == (int) _d->_indexes.size());
            // If we catch any exceptions, eat them. We can only enter this block
            // if we're already propagating an exception (i.e., not under normal
            // operation), so it's okay to just print to the log and continue.
            try {
                _idx->close();
            } catch (const DBException &e) {
                TOKULOG(0) << "Caught DBException while destroying Indexer: "
                           << e.getCode() << ", " << e.what() << endl;
            } catch (...) {
                TOKULOG(0) << "Caught generic exception while destroying Indexer." << endl;
            }
        }
    }
Example #16
    static void insertAndLog(const char *ns, NamespaceDetails *d, BSONObj &newObj,
                             bool logop, bool fromMigrate) {

        checkNoMods( newObj );
        TOKULOG(3) << "insertAndLog for upsert: " << newObj << endl;

        // We cannot pass NamespaceDetails::NO_UNIQUE_CHECKS because we still need
        // to check secondary indexes. We know if we are in this function that we
        // did a query for the object and it didn't exist yet, so the unique check
        // on the PK won't fail. To prove this to yourself, look at the callers of
        // insertAndLog and see that they return an UpdateResult that says the
        // object didn't exist yet.
        checkBulkLoad(ns);
        insertOneObject(d, newObj);
        if (logop) {
            OpLogHelpers::logInsert(ns, newObj, &cc().txn());
        }
    }
Example #17
 CollectionMap::~CollectionMap() {
     for (CollectionStringMap::const_iterator it = _collections.begin(); it != _collections.end(); ++it) {
         shared_ptr<Collection> cl = it->second;
         try {
             cl->close();
         }
         catch (DBException &e) {
             // shouldn't throw in destructor
             msgasserted(16779, mongoutils::str::stream()
                                << "caught exception while closing " << (string) it->first
                                << " to close CollectionMap " << _database << ": " << e.what());
         }
     }
     if (_metadb != NULL) {
         TOKULOG(1) << "Closing CollectionMap " << _database << endl;
         const int r = _metadb->close();
         if (r != 0) {
             msgasserted(16920, mongoutils::str::stream() << "failed to close metadb for CollectionMap " << _database);
         }
     }
 }
Example #18
 NamespaceIndex::~NamespaceIndex() {
     for (NamespaceDetailsMap::const_iterator it = _namespaces.begin(); it != _namespaces.end(); ++it) {
         shared_ptr<NamespaceDetails> d = it->second;
         try {
             d->close();
         }
         catch (DBException &e) {
             // shouldn't throw in destructor
             msgasserted(16779, mongoutils::str::stream()
                                << "caught exception while closing " << (string) it->first
                                << " to close NamespaceIndex " << _database << ": " << e.what());
         }
     }
     if (_nsdb != NULL) {
         TOKULOG(1) << "Closing NamespaceIndex " << _database << endl;
         const int r = _nsdb->close();
         if (r != 0) {
             msgasserted(16920, mongoutils::str::stream() << "failed to close nsdb for NamespaceIndex " << _database);
         }
     }
 }
Example #19
        void Dictionary::open(const BSONObj &info,
                              const mongo::Descriptor &descriptor, const bool may_create,
                              const bool hot_index) {
            int readPageSize = 65536;
            int pageSize = 4 * 1024 * 1024;
            TOKU_COMPRESSION_METHOD compression = TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
            BSONObj key_pattern = info["key"].Obj();
            
            BSONElement e;
            e = info["readPageSize"];
            if (e.ok() && !e.isNull()) {
                readPageSize = BytesQuantity<int>(e);
                uassert(16743, "readPageSize must be a number > 0.", readPageSize > 0);
                TOKULOG(1) << "db " << _dname << ", using read page size " << readPageSize << endl;
            }
            e = info["pageSize"];
            if (e.ok() && !e.isNull()) {
                pageSize = BytesQuantity<int>(e);
                uassert(16445, "pageSize must be a number > 0.", pageSize > 0);
                TOKULOG(1) << "db " << _dname << ", using page size " << pageSize << endl;
            }
            e = info["compression"];
            if (e.ok() && !e.isNull()) {
                std::string str = e.String();
                if (str == "lzma") {
                    compression = TOKU_LZMA_METHOD;
                } else if (str == "quicklz") {
                    compression = TOKU_QUICKLZ_METHOD;
                } else if (str == "zlib") {
                    compression = TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
                } else if (str == "none") {
                    compression = TOKU_NO_COMPRESSION;
                } else {
                    uassert(16442, "compression must be one of: lzma, quicklz, zlib, none.", false);
                }
                TOKULOG(1) << "db " << _dname << ", using compression method \"" << str << "\"" << endl;
            }

            int r = _db->set_readpagesize(_db, readPageSize);
            if (r != 0) {
                handle_ydb_error(r);
            }

            r = _db->set_pagesize(_db, pageSize);
            if (r != 0) {
                handle_ydb_error(r);
            }

            r = _db->set_compression_method(_db, compression);
            if (r != 0) {
                handle_ydb_error(r);
            }

            // If this is a non-creating open for a read-only (or non-existent)
            // transaction, we can use an alternate stack since there's nothing
            // to roll back and no locktree locks to hold.
            const bool needAltTxn = !may_create && (!cc().hasTxn() || cc().txn().readOnly());
            scoped_ptr<Client::AlternateTransactionStack> altStack(!needAltTxn ? NULL :
                                                                   new Client::AlternateTransactionStack());
            scoped_ptr<Client::Transaction> altTxn(!needAltTxn ? NULL :
                                                   new Client::Transaction(0));

            const int db_flags = may_create ? DB_CREATE : 0;
            r = _db->open(_db, cc().txn().db_txn(), _dname.c_str(), NULL,
                          DB_BTREE, db_flags, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
            if (r == ENOENT && !may_create) {
                throw NeedsCreate();
            }
            if (r != 0) {
                handle_ydb_error(r);
            }
            if (may_create) {
                set_db_descriptor(_db, descriptor, hot_index);
            }
            verify_or_upgrade_db_descriptor(_db, descriptor, hot_index);

            if (altTxn.get() != NULL) {
                altTxn->commit();
            }
        }
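
Each tunable above follows the same shape: a hard-coded default, then a validated override if the creation info supplies one. A reduced sketch of that pattern for pageSize, with a plain std::map standing in for the BSON info object:

    #include <cassert>
    #include <map>
    #include <stdexcept>
    #include <string>

    typedef std::map<std::string, long> Options;

    // Default first, then a validated override if the option is present,
    // mirroring the readPageSize/pageSize handling above.
    static long pageSizeFrom(const Options &info) {
        long pageSize = 4L * 1024 * 1024;            // default: 4MB
        Options::const_iterator it = info.find("pageSize");
        if (it != info.end()) {
            if (it->second <= 0)
                throw std::runtime_error("pageSize must be a number > 0.");
            pageSize = it->second;
        }
        return pageSize;
    }

    int main() {
        Options info;
        assert(pageSizeFrom(info) == 4L * 1024 * 1024);  // default applies
        info["pageSize"] = 65536;
        assert(pageSizeFrom(info) == 65536);             // override applies
        return 0;
    }
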
Example #20
    UpdateResult _updateObjects( const char* ns,
                                 const BSONObj& updateobj,
                                 const BSONObj& patternOrig,
                                 bool upsert,
                                 bool multi,
                                 bool logop ,
                                 OpDebug& debug,
                                 bool fromMigrate,
                                 const QueryPlanSelectionPolicy& planPolicy ) {

        TOKULOG(2) << "update: " << ns
                   << " update: " << updateobj
                   << " query: " << patternOrig
                   << " upsert: " << upsert << " multi: " << multi << endl;

        debug.updateobj = updateobj;

        NamespaceDetails *d = getAndMaybeCreateNS(ns, logop);

        auto_ptr<ModSet> mods;
        const bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
        bool modsAreIndexed = false;

        if ( isOperatorUpdate ) {
            if ( d->indexBuildInProgress() ) {
                set<string> bgKeys;
                d->inProgIdx().keyPattern().getFieldNames(bgKeys);
                mods.reset( new ModSet(updateobj, d->indexKeys(), &bgKeys) );
            }
            else {
                mods.reset( new ModSet(updateobj, d->indexKeys()) );
            }
            modsAreIndexed = mods->isIndexed();
        }


        int idIdxNo = -1;
        if ( planPolicy.permitOptimalIdPlan() && !multi && !modsAreIndexed &&
             (idIdxNo = d->findIdIndex()) >= 0 && mayUpdateById(d, patternOrig) ) {
            debug.idhack = true;
            IndexDetails &idx = d->idx(idIdxNo);
            BSONObj pk = idx.getKeyFromQuery(patternOrig);
            TOKULOG(3) << "_updateObjects using simple _id query, pattern " << patternOrig << ", pk " << pk << endl;
            UpdateResult result = _updateById( pk,
                                               isOperatorUpdate,
                                               mods.get(),
                                               d,
                                               ns,
                                               updateobj,
                                               patternOrig,
                                               logop,
                                               debug,
                                               fromMigrate);
            if ( result.existing || ! upsert ) {
                return result;
            }
            else if ( upsert && ! isOperatorUpdate && ! logop) {
                debug.upsert = true;
                BSONObj objModified = updateobj;
                insertAndLog( ns, d, objModified, logop, fromMigrate );
                return UpdateResult( 0 , 0 , 1 , updateobj );
            }
        }

        int numModded = 0;
        debug.nscanned = 0;
        shared_ptr<Cursor> c = getOptimizedCursor( ns, patternOrig, BSONObj(), planPolicy );

        if( c->ok() ) {
            set<BSONObj> seenObjects;
            MatchDetails details;
            auto_ptr<ClientCursor> cc;
            do {

                debug.nscanned++;

                if ( mods.get() && mods->hasDynamicArray() ) {
                    // The Cursor must have a Matcher to record an elemMatchKey.  But currently
                    // a modifier on a dynamic array field may be applied even if there is no
                    // elemMatchKey, so a matcher cannot be required.
                    //verify( c->matcher() );
                    details.requestElemMatchKey();
                }

                if ( !c->currentMatches( &details ) ) {
                    c->advance();
                    continue;
                }

                BSONObj currPK = c->currPK();
                if ( c->getsetdup( currPK ) ) {
                    c->advance();
                    continue;
                }

                BSONObj currentObj = c->current();
                BSONObj pattern = patternOrig;

                if ( logop ) {
                    BSONObjBuilder idPattern;
                    BSONElement id;
                    // NOTE: If the matching object lacks an id, we'll log
                    // with the original pattern.  This isn't replay-safe.
                    // It might make sense to suppress the log instead
                    // if there's no id.
                    if ( currentObj.getObjectID( id ) ) {
                        idPattern.append( id );
                        pattern = idPattern.obj();
                    }
                    else {
                        uassert( 10157 ,  "multi-update requires all modified objects to have an _id" , ! multi );
                    }
                }

                /* look for $inc etc.  Note that, as written, all modified fields must use
                   $ operators; you can't mix in plain field assignments at the moment. */
                struct LogOpUpdateDetails loud;
                loud.logop = logop;
                loud.ns = ns;
                loud.fromMigrate = fromMigrate;
                if ( isOperatorUpdate ) {

                    if ( multi ) {
                        // Make our own copies of the currPK and currentObj before we invalidate
                        // them by advancing the cursor.
                        currPK = currPK.copy();
                        currentObj = currentObj.copy();

                        // Advance past the document to be modified. This used to be because of SERVER-5198,
                        // but TokuMX does it because we want to avoid needing to do manual deduplication
                        // of this PK on the next iteration if the current update modifies the next
                        // entry in the index. For example, an index scan over a:1 with mod {$inc: {a:1}}
                        // would cause every other key read to be a duplicate if we didn't advance here.
                        while ( c->ok() && currPK == c->currPK() ) {
                            c->advance();
                        }

                        // Multi updates need to do their own deduplication because updates may modify the
                        // keys the cursor is in the process of scanning over.
                        if ( seenObjects.count( currPK ) ) {
                            continue;
                        } else {
                            seenObjects.insert( currPK );
                        }
                    }

                    ModSet* useMods = mods.get();

                    auto_ptr<ModSet> mymodset;
                    if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
                        useMods = mods->fixDynamicArray( details.elemMatchKey() );
                        mymodset.reset( useMods );
                    }

                    auto_ptr<ModSetState> mss = useMods->prepare( currentObj );
                    updateUsingMods( d, currPK, currentObj, *mss, &loud );

                    numModded++;
                    if ( ! multi )
                        return UpdateResult( 1 , 1 , numModded , BSONObj() );

                    continue;
                } // end if operator is update

                uassert( 10158 ,  "multi update only works with $ operators" , ! multi );

                updateNoMods( d, currPK, currentObj, updateobj, &loud );

                return UpdateResult( 1 , 0 , 1 , BSONObj() );
            } while ( c->ok() );
        } // endif

        if ( numModded )
            return UpdateResult( 1 , 1 , numModded , BSONObj() );

        if ( upsert ) {
            BSONObj newObj = updateobj;
            if ( updateobj.firstElementFieldName()[0] == '$' ) {
                // upsert of an $operation. build a default object
                BSONObj newObj = mods->createNewFromQuery( patternOrig );
                debug.fastmodinsert = true;
                insertAndLog( ns, d, newObj, logop, fromMigrate );
                return UpdateResult( 0 , 1 , 1 , newObj );
            }
            uassert( 10159 ,  "multi update only works with $ operators" , ! multi );
            debug.upsert = true;
            insertAndLog( ns, d, newObj, logop, fromMigrate );
            return UpdateResult( 0 , 0 , 1 , newObj );
        }

        return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
    }
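
The subtlest step in the multi-update loop is the deduplication: an operator update can move a document's index key ahead of the cursor, so the scan can encounter the same PK again. A reduced model showing why the seenObjects set is necessary (a std::map plays the index, and the "update" moves each key forward past the scan position):

    #include <cassert>
    #include <climits>
    #include <map>
    #include <set>

    int main() {
        // "Index" mapping key -> pk. An operator update like {$inc: {a: 10}}
        // can move a document's key ahead of the scan, so the same pk reappears.
        std::map<int, int> index;
        index[1] = 100;
        index[2] = 200;

        std::set<int> seen;     // the seenObjects dedup set from _updateObjects
        int updates = 0;
        int cursorKey = INT_MIN;  // current scan position

        for (;;) {
            // Advance: smallest key strictly greater than the scan position.
            std::map<int, int>::iterator it = index.upper_bound(cursorKey);
            if (it == index.end())
                break;
            cursorKey = it->first;
            const int pk = it->second;
            if (!seen.insert(pk).second)
                continue;         // without this, pk 100 would be updated twice
            // Apply the update: it moves the entry ahead of the scan position.
            index.erase(it);
            index[cursorKey + 10] = pk;
            updates++;
        }
        assert(updates == 2);     // each document updated exactly once
        return 0;
    }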