Example #1
    // throws DBException
    void buildAnIndex(const std::string& ns,
                      NamespaceDetails* d,
                      IndexDetails& idx,
                      bool mayInterrupt) {

        BSONObj idxInfo = idx.info.obj();

        MONGO_TLOG(0) << "build index on: " << ns << " properties: " << idxInfo.jsonString() << endl;

        Timer t;
        unsigned long long n;

        verify( Lock::isWriteLocked(ns) );

        if( inDBRepair || !idxInfo["background"].trueValue() ) {
            int idxNo = IndexBuildsInProgress::get(ns.c_str(), idx.info.obj()["name"].valuestr());
            n = BtreeBasedBuilder::fastBuildIndex(ns.c_str(), d, idx, mayInterrupt, idxNo);
            verify( !idx.head.isNull() );
        }
        else {
            BackgroundIndexBuildJob j(ns.c_str());
            n = j.go(ns, d, idx);
        }
        MONGO_TLOG(0) << "build index done.  scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl;
    }
Example #2
static void finishCurrentOp( Client* client, CurOp* currentOp, WriteErrorDetail* opError ) {

    currentOp->done();
    int executionTime = currentOp->debug().executionTime = currentOp->totalTimeMillis();
    currentOp->debug().recordStats();

    if ( opError ) {
        currentOp->debug().exceptionInfo = ExceptionInfo( opError->getErrMessage(),
                                                           opError->getErrCode() );

        MONGO_TLOG(3) << " Caught Assertion in " << opToString( currentOp->getOp() )
                      << ", continuing " << causedBy( opError->getErrMessage() ) << endl;
    }

    bool logAll = logger::globalLogDomain()->shouldLog( logger::LogSeverity::Debug( 1 ) );
    bool logSlow = executionTime
                   > ( serverGlobalParams.slowMS + currentOp->getExpectedLatencyMs() );

    if ( logAll || logSlow ) {
        MONGO_TLOG(0) << currentOp->debug().report( *currentOp ) << endl;
    }

    if ( currentOp->shouldDBProfile( executionTime ) ) {
        profile( *client, currentOp->getOp(), *currentOp );
    }
}
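
The logging decision at the end of finishCurrentOp() reduces to two independent predicates: verbose logging is enabled, or the operation's execution time exceeded the slow threshold plus its expected latency. A minimal, self-contained sketch of that decision follows; shouldLogOperation and debugLevel are illustrative stand-ins, not MongoDB API.

    #include <iostream>

    // Stand-in for the logAll/logSlow decision in finishCurrentOp() above.
    // debugLevel models logger::LogSeverity::Debug(1) being enabled ("-v").
    bool shouldLogOperation(int executionTimeMs, int slowMS, int expectedLatencyMs,
                            int debugLevel) {
        const bool logAll  = debugLevel >= 1;                               // verbose logging on
        const bool logSlow = executionTimeMs > slowMS + expectedLatencyMs;  // slow threshold exceeded
        return logAll || logSlow;
    }

    int main() {
        // With a 100 ms slow threshold and no expected latency, a 150 ms op is logged.
        std::cout << std::boolalpha
                  << shouldLogOperation(150, 100, 0, 0) << '\n'   // true
                  << shouldLogOperation(50, 100, 0, 0) << '\n';   // false
    }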
Example #3
    // throws DBException
    void buildAnIndex( Collection* collection,
                       IndexDetails& idx,
                       bool mayInterrupt ) {

        string ns = collection->ns().ns(); // our copy

        BSONObj idxInfo = idx.info.obj();

        MONGO_TLOG(0) << "build index on: " << ns
                      << " properties: " << idxInfo.jsonString() << endl;

        audit::logCreateIndex( currentClient.get(), &idxInfo, idx.indexName(), ns );

        Timer t;
        unsigned long long n;

        verify( Lock::isWriteLocked( ns ) );

        if( inDBRepair || !idxInfo["background"].trueValue() ) {
            int idxNo = collection->details()->findIndexByName( idx.info.obj()["name"].valuestr(),
                                                                true );
            verify( idxNo >= 0 );
            n = BtreeBasedBuilder::fastBuildIndex( ns.c_str(), collection->details(),
                                                   idx, mayInterrupt, idxNo );
            verify( !idx.head.isNull() );
        }
        else {
            BackgroundIndexBuildJob j( ns );
            n = j.go( collection, idx );
        }
        MONGO_TLOG(0) << "build index done.  scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl;
    }
Example #4
        bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
            string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();

            NamespaceString ns_string(ns);
            const bool full = cmdObj["full"].trueValue();
            const bool scanData = full || cmdObj["scandata"].trueValue();

            if ( !ns_string.isNormal() && full ) {
                errmsg = "Can only run full validate on a regular collection";
                return false;
            }

            if (!serverGlobalParams.quiet) {
                MONGO_TLOG(0) << "CMD: validate " << ns << endl;
            }

            Client::ReadContext ctx(ns_string.ns());

            Database* db = cc().database();
            if ( !db ) {
                errmsg = "database not found";
                return false;
            }

            Collection* collection = db->getCollection( ns );
            if ( !collection ) {
                errmsg = "collection not found";
                return false;
            }

            result.append( "ns", ns );

            ValidateResults results;
            Status status = collection->validate( full, scanData, &results, &result );
            if ( !status.isOK() )
                return appendCommandStatus( result, status );

            result.appendBool("valid", results.valid);
            result.append("errors", results.errors);

            if ( !full ){
                result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
            }

            if ( !results.valid ) {
                result.append("advice", "ns corrupt. See http://dochub.mongodb.org/core/data-recovery");
            }

            return true;
        }
Example #5
        bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool fromRepl) {

            string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
            const NamespaceDetails* nsd = nsdetails(ns);
            if (!cmdLine.quiet) {
                MONGO_TLOG(0) << "CMD: indexStats " << ns << endl;
            }
            if (!nsd) {
                errmsg = "ns not found";
                return false;
            }

            IndexStatsParams params;

            // { index: _index_name }
            BSONElement indexName = cmdObj["index"];
            if (!indexName.ok() || indexName.type() != String) {
                errmsg = "an index name is required, use {index: \"indexname\"}";
                return false;
            }
            params.indexName = indexName.String();

            BSONElement expandNodes = cmdObj["expandNodes"];
            if (expandNodes.ok()) {
                if (expandNodes.type() != mongo::Array) {
                    errmsg = "expandNodes must be an array of numbers";
                    return false;
                }
                vector<BSONElement> arr = expandNodes.Array();
                for (vector<BSONElement>::const_iterator it = arr.begin(); it != arr.end(); ++it) {
                    if (!it->isNumber()) {
                        errmsg = "expandNodes must be an array of numbers";
                        return false;
                    }
                    params.expandNodes.push_back(int(it->Number()));
                }
            }

            BSONObjBuilder resultBuilder;
            if (!runInternal(nsd, params, errmsg, resultBuilder))
                return false;
            result.appendElements(resultBuilder.obj());
            return true;
        }
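
Example #5 validates the expandNodes argument element by element: the field must be an array, and every entry a number, or the whole command is rejected. Below is a sketch of the same validation written against plain standard-library types; Element and parseExpandNodes are stand-ins for BSONElement and the loop above, not real MongoDB code.

    #include <iostream>
    #include <string>
    #include <variant>
    #include <vector>

    using Element = std::variant<double, std::string>;   // stand-in for BSONElement

    // Mirrors the expandNodes loop in Example #5: fail with errmsg on the
    // first non-numeric entry, otherwise collect the values as ints.
    bool parseExpandNodes(const std::vector<Element>& arr,
                          std::vector<int>* out, std::string* errmsg) {
        for (const Element& e : arr) {
            const double* num = std::get_if<double>(&e);
            if (!num) {
                *errmsg = "expandNodes must be an array of numbers";
                return false;
            }
            out->push_back(static_cast<int>(*num));
        }
        return true;
    }

    int main() {
        std::vector<int> nodes;
        std::string errmsg;
        if (!parseExpandNodes({1.0, 2.0, std::string("x")}, &nodes, &errmsg))
            std::cout << errmsg << std::endl;   // prints the validation error
    }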
Example #6
        bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
            string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();

            if (!serverGlobalParams.quiet) {
                MONGO_TLOG(0) << "CMD: validate " << ns << endl;
            }

            Database* db = cc().database();
            if ( !db ) {
                errmsg = "ns nout found";
                return false;
            }

            Collection* collection = db->getCollection( ns );
            if ( !collection ) {
                errmsg = "ns not found";
                return false;
            }

            result.append( "ns", ns );
            validateNS( ns , collection, cmdObj, result);
            return true;
        }
Example #7
    // throws DBException
    void buildAnIndex( OperationContext* txn,
                       Collection* collection,
                       IndexCatalogEntry* btreeState,
                       bool mayInterrupt ) {

        string ns = collection->ns().ns(); // our copy
        const IndexDescriptor* idx = btreeState->descriptor();
        const BSONObj& idxInfo = idx->infoObj();

        MONGO_TLOG(0) << "build index on: " << ns
                      << " properties: " << idx->toString() << endl;
        audit::logCreateIndex( currentClient.get(), &idxInfo, idx->indexName(), ns );

        Timer t;

        verify( Lock::isWriteLocked( ns ) );
        // this is so that people know there are more keys to look at when doing
        // things like in place updates, etc...
        collection->infoCache()->addedIndex();

        if ( collection->numRecords() == 0 ) {
            Status status = btreeState->accessMethod()->initializeAsEmpty(txn);
            massert( 17343,
                     str::stream() << "IndexAccessMethod::initializeAsEmpty failed" << status.toString(),
                     status.isOK() );
            MONGO_TLOG(0) << "\t added index to empty collection";
            return;
        }

        scoped_ptr<BackgroundOperation> backgroundOperation;
        bool doInBackground = false;

        if ( idxInfo["background"].trueValue() && !inDBRepair ) {
            doInBackground = true;
            backgroundOperation.reset( new BackgroundOperation(ns) );
            uassert( 13130,
                     "can't start bg index b/c in recursive lock (db.eval?)",
                     !Lock::nested() );
            log() << "\t building index in background";
        }

        Status status = btreeState->accessMethod()->initializeAsEmpty(txn);
        massert( 17342,
                 str::stream()
                 << "IndexAccessMethod::initializeAsEmpty failed"
                 << status.toString(),
                 status.isOK() );

        IndexAccessMethod* bulk = doInBackground ?
            NULL : btreeState->accessMethod()->initiateBulk(txn,
                                                            collection->numRecords());
        scoped_ptr<IndexAccessMethod> bulkHolder(bulk);
        IndexAccessMethod* iam = bulk ? bulk : btreeState->accessMethod();

        if ( bulk )
            log() << "\t building index using bulk method";

        unsigned long long n = addExistingToIndex( txn,
                                                   collection,
                                                   btreeState->descriptor(),
                                                   iam,
                                                   doInBackground );

        if ( bulk ) {
            LOG(1) << "\t bulk commit starting";
            std::set<DiskLoc> dupsToDrop;

            Status status = btreeState->accessMethod()->commitBulk( bulk,
                                                                    mayInterrupt,
                                                                    &dupsToDrop );

            // Code above us expects a uassert in case of dupkey errors.
            if (ErrorCodes::DuplicateKey == status.code()) {
                uassertStatusOK(status);
            }

            // Any other errors are probably bad and deserve a massert.
            massert( 17398,
                     str::stream() << "commitBulk failed: " << status.toString(),
                     status.isOK() );

            if ( dupsToDrop.size() )
                log() << "\t bulk dropping " << dupsToDrop.size() << " dups";

            for( set<DiskLoc>::const_iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); ++i ) {
                BSONObj toDelete;
                collection->deleteDocument( txn,
                                            *i,
                                            false /* cappedOk */,
                                            true /* noWarn */,
                                            &toDelete );
                if (isMasterNs(ns.c_str())) {
                    logOp( txn, "d", ns.c_str(), toDelete );
                }
                
                txn->recoveryUnit()->commitIfNeeded();

                RARELY if ( mayInterrupt ) {
                    txn->checkForInterrupt();
                }
            }
        }

        verify( !btreeState->head().isNull() );
        MONGO_TLOG(0) << "build index done.  scanned " << n << " total records. "
                      << t.millis() / 1000.0 << " secs" << endl;

        // this one is so people know that the index is finished
        collection->infoCache()->addedIndex();
    }
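
Example #7 splits commitBulk failures into two classes: a duplicate-key error is expected and is surfaced to the caller via uassertStatusOK, while any other failure trips a massert. A compact sketch of that dispatch with stand-in Status, ErrorCode, and UserException types (none of these stand-ins are MongoDB's actual classes):

    #include <stdexcept>
    #include <string>

    enum class ErrorCode { OK, DuplicateKey, InternalError };   // stand-in for ErrorCodes

    struct Status {                                             // stand-in for mongo::Status
        ErrorCode code;
        std::string reason;
        bool isOK() const { return code == ErrorCode::OK; }
    };

    struct UserException : std::runtime_error {    // plays the role of a uassert
        using std::runtime_error::runtime_error;
    };

    void handleCommitStatus(const Status& status) {
        if (status.code == ErrorCode::DuplicateKey)
            throw UserException(status.reason);     // expected error: report to the client
        if (!status.isOK())
            throw std::logic_error(status.reason);  // anything else is an invariant failure
    }

    int main() {
        try { handleCommitStatus({ErrorCode::DuplicateKey, "E11000 duplicate key"}); }
        catch (const UserException&) { /* surfaced to the caller as a write error */ }
    }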
Example #8
    bool WriteBatchExecutor::applyWriteItem(const string& ns,
                                            const WriteBatch::WriteItem& writeItem,
                                            BSONObjBuilder* results) {
        // Clear operation's LastError before starting.
        _le->reset(true);

        uint64_t itemTimeMicros = 0;
        bool opSuccess = true;

        // Each write operation executes in its own PageFaultRetryableSection.  This means that
        // a single batch can throw multiple PageFaultException's, which is not the case for
        // other operations.
        PageFaultRetryableSection s;
        while (true) {
            try {
                // Execute the write item as a child operation of the current operation.
                CurOp childOp(_client, _client->curop());

                // TODO Modify CurOp "wrapped" constructor to take an opcode, so calling .reset()
                // is unneeded
                childOp.reset(_client->getRemote(), getOpCode(writeItem.getWriteType()));

                childOp.ensureStarted();
                OpDebug& opDebug = childOp.debug();
                opDebug.ns = ns;
                {
                    Client::WriteContext ctx(ns);

                    switch(writeItem.getWriteType()) {
                    case WriteBatch::WRITE_INSERT:
                        opSuccess = applyInsert(ns, writeItem, &childOp);
                        break;
                    case WriteBatch::WRITE_UPDATE:
                        opSuccess = applyUpdate(ns, writeItem, &childOp);
                        break;
                    case WriteBatch::WRITE_DELETE:
                        opSuccess = applyDelete(ns, writeItem, &childOp);
                        break;
                    }
                }
                childOp.done();
                itemTimeMicros = childOp.totalTimeMicros();

                opDebug.executionTime = childOp.totalTimeMillis();
                opDebug.recordStats();

                // Log operation if running with at least "-v", or if exceeds slow threshold.
                if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))
                    || opDebug.executionTime > cmdLine.slowMS + childOp.getExpectedLatencyMs()) {

                    MONGO_TLOG(1) << opDebug.report(childOp) << endl;
                }

                // TODO Log operation if logLevel >= 3 and assertion thrown (as assembleResponse()
                // does).

                // Save operation to system.profile if shouldDBProfile().
                if (childOp.shouldDBProfile(opDebug.executionTime)) {
                    profile(*_client, getOpCode(writeItem.getWriteType()), childOp);
                }
                break;
            }
            catch (PageFaultException& e) {
                e.touch();
            }
        }

        // Fill caller's builder with results of operation, using LastError.
        results->append("ok", opSuccess);
        _le->appendSelf(*results, false);
        results->append("micros", static_cast<long long>(itemTimeMicros));

        return opSuccess;
    }
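
The while(true)/try/catch shape in applyWriteItem() is a fault-then-retry loop: a PageFaultException aborts the attempt, touch() pages the faulting memory in, and the whole write is retried from the top. A self-contained sketch of the pattern with a stand-in exception type (the real PageFaultException and its touch() live in MongoDB's page-fault machinery):

    #include <cstdio>
    #include <stdexcept>

    struct PageFaultException : std::runtime_error {   // stand-in for MongoDB's type
        using std::runtime_error::runtime_error;
        void touch() const { /* page the faulting address into memory */ }
    };

    int main() {
        int attempts = 0;
        while (true) {
            try {
                // Simulate a write that page-faults once before succeeding.
                if (++attempts == 1)
                    throw PageFaultException("page not resident");
                std::printf("write applied after %d attempt(s)\n", attempts);
                break;                 // success: leave the retry loop
            }
            catch (PageFaultException& e) {
                e.touch();             // fault the page in; the loop retries the write
            }
        }
    }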
Example #9
    /**
     * Run a query with a cursor provided by the query optimizer, or FindingStartCursor.
     * @yields the db lock.
     */
    string queryWithQueryOptimizer( int queryOptions, const string& ns,
                                    const BSONObj &jsobj, CurOp& curop,
                                    const BSONObj &query, const BSONObj &order,
                                    const shared_ptr<ParsedQuery> &pq_shared,
                                    const BSONObj &oldPlan,
                                    const ChunkVersion &shardingVersionAtStart,
                                    scoped_ptr<PageFaultRetryableSection>& parentPageFaultSection,
                                    scoped_ptr<NoPageFaultsAllowed>& noPageFault,
                                    Message &result ) {

        const ParsedQuery &pq( *pq_shared );
        shared_ptr<Cursor> cursor;
        QueryPlanSummary queryPlan;
        
        if ( pq.hasOption( QueryOption_OplogReplay ) ) {
            cursor = FindingStartCursor::getCursor( ns.c_str(), query, order );
        }
        else {
            cursor = getOptimizedCursor( ns.c_str(),
                                         query,
                                         order,
                                         QueryPlanSelectionPolicy::any(),
                                         pq_shared,
                                         false,
                                         &queryPlan );
        }
        verify( cursor );
        
        scoped_ptr<QueryResponseBuilder> queryResponseBuilder
                ( QueryResponseBuilder::make( pq, cursor, queryPlan, oldPlan ) );
        bool saveClientCursor = false;
        OpTime slaveReadTill;
        ClientCursorHolder ccPointer( new ClientCursor( QueryOption_NoCursorTimeout, cursor,
                                                         ns ) );
        
        for( ; cursor->ok(); cursor->advance() ) {

            bool yielded = false;
            if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered, &yielded ) ||
                !cursor->ok() ) {
                cursor.reset();
                queryResponseBuilder->noteYield();
                // !!! TODO The queryResponseBuilder still holds cursor.  Currently it will not do
                // anything unsafe with the cursor in handoff(), but this is very fragile.
                //
                // We don't fail the query since we're fine with returning partial data if the
                // collection was dropped.
                // NOTE see SERVER-2454.
                // TODO This is wrong.  The cursor could be gone if the closeAllDatabases command
                // just ran.
                break;
            }

            if ( yielded ) {
                queryResponseBuilder->noteYield();
            }
            
            if ( pq.getMaxScan() && cursor->nscanned() > pq.getMaxScan() ) {
                break;
            }
            
            if ( !queryResponseBuilder->addMatch() ) {
                continue;
            }
            
            // Note slave's position in the oplog.
            if ( pq.hasOption( QueryOption_OplogReplay ) ) {
                BSONObj current = cursor->current();
                BSONElement e = current["ts"];
                if ( e.type() == Date || e.type() == Timestamp ) {
                    slaveReadTill = e._opTime();
                }
            }
            
            if ( !cursor->supportGetMore() || pq.isExplain() ) {
                if ( queryResponseBuilder->enoughTotalResults() ) {
                    break;
                }
            }
            else if ( queryResponseBuilder->enoughForFirstBatch() ) {
                // if only 1 requested, no cursor saved for efficiency...we assume it is findOne()
                if ( pq.wantMore() && pq.getNumToReturn() != 1 ) {
                    queryResponseBuilder->finishedFirstBatch();
                    if ( cursor->advance() ) {
                        saveClientCursor = true;
                    }
                }
                break;
            }
        }
        
        if ( cursor ) {
            if ( pq.hasOption( QueryOption_CursorTailable ) && pq.getNumToReturn() != 1 ) {
                cursor->setTailable();
            }
            
            // If the tailing request succeeded.
            if ( cursor->tailable() ) {
                saveClientCursor = true;
            }
        }
        
        if ( ! shardingState.getVersion( ns ).isWriteCompatibleWith( shardingVersionAtStart ) ) {
            // if the version changed during the query
            // we might be missing some data
            // and it's safe to send this as mongos can resend
            // at this point
            throw SendStaleConfigException(ns, "version changed during initial query",
                                           shardingVersionAtStart,
                                           shardingState.getVersion(ns));
        }
        
        parentPageFaultSection.reset(0);
        noPageFault.reset( new NoPageFaultsAllowed() );

        int nReturned = queryResponseBuilder->handoff( result );

        ccPointer.reset();
        long long cursorid = 0;
        if ( saveClientCursor ) {
            // Create a new ClientCursor, with a default timeout.
            ccPointer.reset( new ClientCursor( queryOptions, cursor, ns,
                                              jsobj.getOwned() ) );
            cursorid = ccPointer->cursorid();
            DEV { MONGO_TLOG(2) << "query has more, cursorid: " << cursorid << endl; }
            if ( cursor->supportYields() ) {
                ClientCursor::YieldData data;
                ccPointer->prepareToYield( data );
            }
            else {
                ccPointer->c()->noteLocation();
            }
            
            // Save slave's position in the oplog.
            if ( pq.hasOption( QueryOption_OplogReplay ) && !slaveReadTill.isNull() ) {
                ccPointer->slaveReadTill( slaveReadTill );
            }
            
            if ( !ccPointer->ok() && ccPointer->c()->tailable() ) {
                DEV {
                    MONGO_TLOG(0) << "query has no more but tailable, cursorid: " << cursorid <<
                        endl;
                }
            }
            
            if( queryOptions & QueryOption_Exhaust ) {
                curop.debug().exhaust = true;
            }
            
            // Set attributes for getMore.
            ccPointer->setCollMetadata( queryResponseBuilder->collMetadata() );
            ccPointer->setPos( nReturned );
            ccPointer->pq = pq_shared;
            ccPointer->fields = pq.getFieldPtr();

            // If the query had a time limit, remaining time is "rolled over" to the cursor (for
            // use by future getmore ops).
            ccPointer->setLeftoverMaxTimeMicros( curop.getRemainingMaxTimeMicros() );

            ccPointer.release();
        }

        // (reply-header bookkeeping from the original function is omitted in this listing)
        return curop.debug().exhaust ? ns : "";
    }
Example #10
        bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
            BSONElement e = jsobj.firstElement();
            const string toDeleteNs = dbname + '.' + e.valuestr();
            if (!serverGlobalParams.quiet) {
                MONGO_TLOG(0) << "CMD: dropIndexes " << toDeleteNs << endl;
            }

            Lock::DBWrite dbXLock(dbname);
            Client::Context ctx(toDeleteNs);

            Collection* collection = cc().database()->getCollection( toDeleteNs );
            if ( ! collection ) {
                errmsg = "ns not found";
                return false;
            }

            stopIndexBuilds(cc().database(), jsobj);

            IndexCatalog* indexCatalog = collection->getIndexCatalog();
            anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal() );


            BSONElement f = jsobj.getField("index");
            if ( f.type() == String ) {

                string indexToDelete = f.valuestr();

                if ( indexToDelete == "*" ) {
                    Status s = indexCatalog->dropAllIndexes( false );
                    if ( !s.isOK() ) {
                        appendCommandStatus( anObjBuilder, s );
                        return false;
                    }
                    anObjBuilder.append("msg", "non-_id indexes dropped for collection");
                    return true;
                }

                IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( indexToDelete );
                if ( desc == NULL ) {
                    errmsg = str::stream() << "index not found with name [" << indexToDelete << "]";
                    return false;
                }

                if ( desc->isIdIndex() ) {
                    errmsg = "cannot drop _id index";
                    return false;
                }

                Status s = indexCatalog->dropIndex( desc );
                if ( !s.isOK() ) {
                    appendCommandStatus( anObjBuilder, s );
                    return false;
                }

                return true;
            }

            if ( f.type() == Object ) {
                IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByKeyPattern( f.embeddedObject() );
                if ( desc == NULL ) {
                    errmsg = "can't find index with key:";
                    errmsg += f.embeddedObject().toString();
                    return false;
                }

                if ( desc->isIdIndex() ) {
                    errmsg = "cannot drop _id index";
                    return false;
                }

                Status s = indexCatalog->dropIndex( desc );
                if ( !s.isOK() ) {
                    appendCommandStatus( anObjBuilder, s );
                    return false;
                }

                return true;
            }

            errmsg = "invalid index name spec";
            return false;
        }
Example #11
        bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
            static DBDirectClient db;

            BSONElement e = jsobj.firstElement();
            string toDeleteNs = dbname + '.' + e.valuestr();

            MONGO_TLOG(0) << "CMD: reIndex " << toDeleteNs << endl;

            Lock::DBWrite dbXLock(dbname);
            Client::Context ctx(toDeleteNs);

            Collection* collection = cc().database()->getCollection( toDeleteNs );

            if ( !collection ) {
                errmsg = "ns not found";
                return false;
            }

            BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );

            std::vector<BSONObj> indexesInProg = stopIndexBuilds(cc().database(), jsobj);

            list<BSONObj> all;
            auto_ptr<DBClientCursor> i = db.query( dbname + ".system.indexes" , BSON( "ns" << toDeleteNs ) , 0 , 0 , 0 , QueryOption_SlaveOk );
            BSONObjBuilder b;
            while ( i->more() ) {
                const BSONObj spec = i->next().removeField("v").getOwned();
                const BSONObj key = spec.getObjectField("key");
                const Status keyStatus = validateKeyPattern(key);
                if (!keyStatus.isOK()) {
                    errmsg = str::stream()
                        << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
                        << " For more info see http://dochub.mongodb.org/core/index-validation";
                    return false;
                }

                b.append( BSONObjBuilder::numStr( all.size() ) , spec );
                all.push_back( spec );
            }
            result.appendNumber( "nIndexesWas", collection->getIndexCatalog()->numIndexesTotal() );

            Status s = collection->getIndexCatalog()->dropAllIndexes( true );
            if ( !s.isOK() ) {
                errmsg = "dropIndexes failed";
                return appendCommandStatus( result, s );
            }

            for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
                BSONObj o = *i;
                LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
                Status s = collection->getIndexCatalog()->createIndex( o, false );
                if ( !s.isOK() )
                    return appendCommandStatus( result, s );
            }

            result.append( "nIndexes" , (int)all.size() );
            result.appendArray( "indexes" , b.obj() );

            IndexBuilder::restoreIndexes(indexesInProg);
            return true;
        }
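
The reIndex command above runs a snapshot-drop-rebuild sequence: it captures every index spec first, drops all indexes (including _id), then recreates each index from the saved specs, aborting on the first failure. A minimal sketch of that sequence over plain containers; the types here are illustrative, not MongoDB's.

    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in: a "spec" is just its name; rebuild() pretends to create the index.
    bool rebuild(const std::string& spec) {
        std::cout << "rebuilding " << spec << std::endl;
        return true;   // the real build returns a Status and the loop stops on failure
    }

    int main() {
        std::vector<std::string> indexes = { "_id_", "a_1", "b_-1" };

        // 1. Snapshot the specs before touching anything.
        const std::vector<std::string> saved(indexes);

        // 2. Drop everything (reIndex passes includingIdIndex = true).
        indexes.clear();

        // 3. Recreate each index from its saved spec, stopping on the first failure.
        for (const std::string& spec : saved) {
            if (!rebuild(spec))
                return 1;
            indexes.push_back(spec);
        }
        std::cout << "nIndexes: " << indexes.size() << std::endl;
    }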
Example #12
    uint64_t BtreeBasedBuilder::fastBuildIndex( Collection* collection,
                                                IndexDescriptor* idx,
                                                bool mayInterrupt ) {
        CurOp * op = cc().curop();

        Timer t;

        MONGO_TLOG(1) << "fastBuildIndex " << collection->ns() << ' ' << idx->toString() << endl;

        bool dupsAllowed = !idx->unique() || ignoreUniqueIndex(idx->getOnDisk());
        bool dropDups = idx->dropDups() || inDBRepair;
        BSONObj order = idx->keyPattern();

        getDur().writingDiskLoc(idx->getOnDisk().head).Null();

        if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
            printMemInfo( "before index start" );

        /* get and sort all the keys ----- */
        ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
                                              "Index: (1/3) External Sort Progress",
                                              collection->numRecords(),
                                              10));
        SortPhaseOne phase1;
        addKeysToPhaseOne(collection, idx, order, &phase1, pm.get(), mayInterrupt );
        pm.finished();

        BSONObjExternalSorter& sorter = *(phase1.sorter);

        if( phase1.multi ) {
            collection->getIndexCatalog()->markMultikey( idx );
        }

        if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
            printMemInfo( "before final sort" );
        phase1.sorter->sort( mayInterrupt );
        if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
            printMemInfo( "after final sort" );

        LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles()
                                     << " files " << " in " << t.seconds() << " secs" << endl;

        set<DiskLoc> dupsToDrop;

        /* build index --- */
        if( idx->version() == 0 )
            buildBottomUpPhases2And3<V0>(dupsAllowed,
                                         idx,
                                         sorter,
                                         dropDups,
                                         dupsToDrop,
                                         op,
                                         &phase1,
                                         pm,
                                         t,
                                         mayInterrupt);
        else if( idx->version() == 1 )
            buildBottomUpPhases2And3<V1>(dupsAllowed,
                                         idx,
                                         sorter,
                                         dropDups,
                                         dupsToDrop,
                                         op,
                                         &phase1,
                                         pm,
                                         t,
                                         mayInterrupt);
        else
            verify(false);

        if( dropDups )
            log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;

        doDropDups(collection, dupsToDrop, mayInterrupt);

        return phase1.n;
    }
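
fastBuildIndex() picks a templated implementation from the on-disk index version at runtime, instantiating buildBottomUpPhases2And3 with V0 or V1 and failing a verify() on anything else. A compact sketch of that runtime-to-template dispatch (the names below are illustrative):

    #include <cassert>
    #include <iostream>

    // Stand-in for the versioned build phases; the real code instantiates
    // buildBottomUpPhases2And3<V0> / <V1> with the btree format as the parameter.
    template <int Version>
    void buildPhases2And3() {
        std::cout << "building with on-disk format V" << Version << std::endl;
    }

    void dispatchBuild(int version) {
        if (version == 0)      buildPhases2And3<0>();
        else if (version == 1) buildPhases2And3<1>();
        else                   assert(false && "unsupported index version");  // mirrors verify(false)
    }

    int main() {
        dispatchBuild(1);
    }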
Example #13
    bool WriteBatchExecutor::applyWriteItem( const BatchItemRef& itemRef,
                                             WriteStats* stats,
                                             BSONObj* upsertedID,
                                             BatchedErrorDetail* error ) {
        const BatchedCommandRequest& request = *itemRef.getRequest();
        const string& ns = request.getNS();

        // Clear operation's LastError before starting.
        _le->reset( true );

        //uint64_t itemTimeMicros = 0;
        bool opSuccess = true;

        // Each write operation executes in its own PageFaultRetryableSection.  This means that
        // a single batch can throw multiple PageFaultException's, which is not the case for
        // other operations.
        PageFaultRetryableSection s;
        while ( true ) {
            try {
                // Execute the write item as a child operation of the current operation.
                CurOp childOp( _client, _client->curop() );

                HostAndPort remote =
                    _client->hasRemote() ? _client->getRemote() : HostAndPort( "0.0.0.0", 0 );

                // TODO Modify CurOp "wrapped" constructor to take an opcode, so calling .reset()
                // is unneeded
                childOp.reset( remote, getOpCode( request.getBatchType() ) );

                childOp.ensureStarted();
                OpDebug& opDebug = childOp.debug();
                opDebug.ns = ns;
                {
                    Lock::DBWrite dbLock( ns );
                    Client::Context ctx( ns,
                                         storageGlobalParams.dbpath, // TODO: better constructor?
                                         false /* don't check version here */);

                    opSuccess = doWrite( ns, itemRef, &childOp, stats, upsertedID, error );
                }
                childOp.done();
                //itemTimeMicros = childOp.totalTimeMicros();

                opDebug.executionTime = childOp.totalTimeMillis();
                opDebug.recordStats();

                // Log operation if running with at least "-v", or if exceeds slow threshold.
                if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))
                     || opDebug.executionTime >
                        serverGlobalParams.slowMS + childOp.getExpectedLatencyMs()) {

                    MONGO_TLOG(1) << opDebug.report( childOp ) << endl;
                }

                // TODO Log operation if logLevel >= 3 and assertion thrown (as assembleResponse()
                // does).

                // Save operation to system.profile if shouldDBProfile().
                if ( childOp.shouldDBProfile( opDebug.executionTime ) ) {
                    profile( *_client, getOpCode( request.getBatchType() ), childOp );
                }
                break;
            }
            catch ( PageFaultException& e ) {
                e.touch();
            }
        }

        return opSuccess;
    }