Example #1
    void run() {
        // Create a new collection.
        Database* db = _ctx.db();
        Collection* coll;
        {
            WriteUnitOfWork wunit(&_opCtx);
            ASSERT_OK(db->dropCollection(&_opCtx, _nss));
            coll = db->createCollection(&_opCtx, _nss);

            OpDebug* const nullOpDebug = nullptr;
            ASSERT_OK(coll->insertDocument(&_opCtx,
                                           InsertStatement(BSON("_id" << 1 << "a"
                                                                      << "dup")),
                                           nullOpDebug,
                                           true));
            ASSERT_OK(coll->insertDocument(&_opCtx,
                                           InsertStatement(BSON("_id" << 2 << "a"
                                                                      << "dup")),
                                           nullOpDebug,
                                           true));
            wunit.commit();
        }

        MultiIndexBlock indexer;

        const BSONObj spec = BSON("name"
                                  << "a"
                                  << "ns"
                                  << coll->ns().ns()
                                  << "key"
                                  << BSON("a" << 1)
                                  << "v"
                                  << static_cast<int>(kIndexVersion)
                                  << "unique"
                                  << true
                                  << "background"
                                  << background);

        ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });

        ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());

        auto desc =
            coll->getIndexCatalog()->findIndexByName(&_opCtx, "a", true /* includeUnfinished */);
        ASSERT(desc);

        // Hybrid index builds check duplicates explicitly.
        ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));

        auto status = indexer.checkConstraints(&_opCtx);
        ASSERT_EQUALS(status.code(), ErrorCodes::DuplicateKey);
    }
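
Example #1 stops at checkConstraints() deliberately: with hybrid index builds, inserting both documents with the duplicate "a" value succeeds, and the unique-constraint violation only surfaces when constraints are checked. For comparison, a minimal sketch of the success path, assuming a collection without duplicates (fixture names reused from above):

    // Hedged sketch: the happy path continues past checkConstraints() and
    // commits inside a WriteUnitOfWork, mirroring Examples #3 and #5 below.
    ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));
    ASSERT_OK(indexer.checkConstraints(&_opCtx));  // no duplicates -> OK

    WriteUnitOfWork wunit(&_opCtx);
    ASSERT_OK(indexer.commit(
        &_opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn));
    wunit.commit();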
Example #2
    bool buildIndexInterrupted(const BSONObj& key) {
        try {
            MultiIndexBlock indexer;

            ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, collection()); });

            uassertStatusOK(
                indexer.init(&_opCtx, collection(), key, MultiIndexBlock::kNoopOnInitFn));
            uassertStatusOK(indexer.insertAllDocumentsInCollection(&_opCtx, collection()));
            WriteUnitOfWork wunit(&_opCtx);
            ASSERT_OK(indexer.commit(&_opCtx,
                                     collection(),
                                     MultiIndexBlock::kNoopOnCreateEachFn,
                                     MultiIndexBlock::kNoopOnCommitFn));
            wunit.commit();
        } catch (const DBException& e) {
            if (ErrorCodes::isInterruption(e.code()))
                return true;

            throw;
        }
        return false;
    }
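
This helper converts an interruption thrown anywhere in the build into a boolean while letting every other error propagate; note that despite its name, the `key` parameter is a full index spec handed to init(). A hypothetical caller (the kill mechanism itself is outside this snippet and is assumed to fire from another thread or a failpoint) might look like:

    // Hedged sketch: spec fields follow the ones used in Examples #1 and #5;
    // the name "a_1" is an assumption for illustration.
    const BSONObj spec = BSON("name" << "a_1" << "key" << BSON("a" << 1) << "v"
                                     << static_cast<int>(kIndexVersion));
    ASSERT(buildIndexInterrupted(spec));  // true: the build saw an interruption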
Example #3
Status IndexBuildBase::createIndex(const std::string& dbname, const BSONObj& indexSpec) {
    MultiIndexBlock indexer;
    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, collection()); });
    Status status =
        indexer.init(&_opCtx, collection(), indexSpec, MultiIndexBlock::kNoopOnInitFn).getStatus();
    if (status == ErrorCodes::IndexAlreadyExists) {
        return Status::OK();
    }
    if (!status.isOK()) {
        return status;
    }
    status = indexer.insertAllDocumentsInCollection(&_opCtx, collection());
    if (!status.isOK()) {
        return status;
    }
    WriteUnitOfWork wunit(&_opCtx);
    ASSERT_OK(indexer.commit(&_opCtx,
                             collection(),
                             MultiIndexBlock::kNoopOnCreateEachFn,
                             MultiIndexBlock::kNoopOnCommitFn));
    wunit.commit();
    return Status::OK();
}
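
Note how init() returning IndexAlreadyExists is mapped to Status::OK(), which makes this helper idempotent. A small usage sketch under that assumption (the spec name and dbname are illustrative):

    // Hedged sketch: the second call hits IndexAlreadyExists inside init()
    // and is reported as success, so repeated calls with the same spec are safe.
    const BSONObj spec = BSON("name" << "x_1" << "key" << BSON("x" << 1));
    ASSERT_OK(createIndex("unittests", spec));
    ASSERT_OK(createIndex("unittests", spec));  // no-op, still OK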
Example #4
    StatusWith<DiskLoc> Collection::insertDocument( const BSONObj& doc,
                                                    MultiIndexBlock& indexBlock ) {
        StatusWith<DiskLoc> loc = _recordStore->insertRecord( doc.objdata(),
                                                              doc.objsize(),
                                                              0 );

        if ( !loc.isOK() )
            return loc;

        InsertDeleteOptions indexOptions;
        indexOptions.logIfError = false;
        indexOptions.dupsAllowed = true; // in repair we should be doing no checking

        Status status = indexBlock.insert( doc, loc.getValue(), indexOptions );
        if ( !status.isOK() )
            return StatusWith<DiskLoc>( status );

        return loc;
    }
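
Here index maintenance is folded into the document insert: the record goes into the record store first, then its location is handed to the MultiIndexBlock with dupsAllowed = true, on the assumption that repair reinserts data that was already valid. A hypothetical repair loop built on this overload might read:

    // Hedged sketch: 'sourceDocs' and its iteration are assumptions; the
    // insertDocument/MultiIndexBlock interplay comes from the snippet above.
    for (const BSONObj& doc : sourceDocs) {
        StatusWith<DiskLoc> loc = collection->insertDocument(doc, indexBlock);
        if (!loc.isOK())
            return loc.getStatus();  // record-store or index insert failed
    }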
Example #5
    void run() {
        // Create a new collection.
        Database* db = _ctx.db();
        Collection* coll;
        {
            WriteUnitOfWork wunit(&_opCtx);
            ASSERT_OK(db->dropCollection(&_opCtx, _nss));
            coll = db->createCollection(&_opCtx, _nss);

            OpDebug* const nullOpDebug = nullptr;
            ASSERT_OK(coll->insertDocument(&_opCtx,
                                           InsertStatement(BSON("_id" << 1 << "a"
                                                                      << "dup")),
                                           nullOpDebug,
                                           true));
            ASSERT_OK(coll->insertDocument(&_opCtx,
                                           InsertStatement(BSON("_id" << 2 << "a"
                                                                      << "dup")),
                                           nullOpDebug,
                                           true));
            wunit.commit();
        }

        MultiIndexBlock indexer;
        indexer.ignoreUniqueConstraint();

        const BSONObj spec = BSON("name"
                                  << "a"
                                  << "ns"
                                  << coll->ns().ns()
                                  << "key"
                                  << BSON("a" << 1)
                                  << "v"
                                  << static_cast<int>(kIndexVersion)
                                  << "unique"
                                  << true
                                  << "background"
                                  << background);

        ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });

        ASSERT_OK(indexer.init(&_opCtx, coll, spec, MultiIndexBlock::kNoopOnInitFn).getStatus());
        ASSERT_OK(indexer.insertAllDocumentsInCollection(&_opCtx, coll));

        WriteUnitOfWork wunit(&_opCtx);
        ASSERT_OK(indexer.commit(
            &_opCtx, coll, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn));
        wunit.commit();
    }
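This variant is Example #1 with one extra call: ignoreUniqueConstraint(). Duplicate keys are then inserted rather than tracked as violations, so no checkConstraints() pass is needed and the commit succeeds even though both documents share a: "dup". The single difference, in isolation:

    // Hedged sketch of the switch between Examples #1 and #5:
    MultiIndexBlock indexer;
    indexer.ignoreUniqueConstraint();  // unique spec kept, enforcement skipped
    // ...init/insertAllDocumentsInCollection as before; commit() now
    // succeeds despite the duplicate values of "a".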
Example #6
    void Collection::_compactExtent(const DiskLoc diskloc, int extentNumber,
                                    MultiIndexBlock& indexesToInsertTo,
                                    const CompactOptions* compactOptions, CompactStats* stats ) {

        log() << "compact begin extent #" << extentNumber
              << " for namespace " << _ns << " " << diskloc;

        unsigned oldObjSize = 0; // we'll report what the old padding was
        unsigned oldObjSizeWithPadding = 0;

        Extent *e = diskloc.ext();
        e->assertOk();
        verify( e->validates(diskloc) );

        {
            // the next/prev pointers within the extent might not be in order so we first
            // page the whole thing in sequentially
            log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
            Timer t;
            size_t length = e->length;

            touch_pages( reinterpret_cast<const char*>(e), length );
            int ms = t.millis();
            if( ms > 1000 )
                log() << "compact end paging in " << ms << "ms "
                      << e->length/1000000.0/t.seconds() << "MB/sec" << endl;
        }

        {
            log() << "compact copying records" << endl;
            long long datasize = 0;
            long long nrecords = 0;
            DiskLoc L = e->firstRecord;
            if( !L.isNull() ) {
                while( 1 ) {
                    Record *recOld = L.rec();
                    L = getExtentManager()->getNextRecordInExtent(L);
                    BSONObj objOld = BSONObj::make(recOld);

                    if ( compactOptions->validateDocuments && !objOld.valid() ) {
                        // object is corrupt!
                        log() << "compact skipping corrupt document!";
                        stats->corruptDocuments++;
                    }
                    else {
                        unsigned docSize = objOld.objsize();

                        nrecords++;
                        oldObjSize += docSize;
                        oldObjSizeWithPadding += recOld->netLength();

                        unsigned lenWHdr = docSize + Record::HeaderSize;
                        unsigned lenWPadding = lenWHdr;

                        switch( compactOptions->paddingMode ) {
                        case CompactOptions::NONE:
                            if ( details()->isUserFlagSet(NamespaceDetails::Flag_UsePowerOf2Sizes) )
                                lenWPadding = details()->quantizePowerOf2AllocationSpace(lenWPadding);
                            break;
                        case CompactOptions::PRESERVE:
                            // if we are preserving the padding, the record should not change size
                            lenWPadding = recOld->lengthWithHeaders();
                            break;
                        case CompactOptions::MANUAL:
                            lenWPadding = compactOptions->computeRecordSize(lenWPadding);
                            if (lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
                                lenWPadding = lenWHdr;
                            }
                            break;
                        }

                        CompactDocWriter writer( objOld, lenWPadding );
                        StatusWith<DiskLoc> status = _recordStore->insertRecord( &writer, 0 );
                        uassertStatusOK( status.getStatus() );
                        datasize += _recordStore->recordFor( status.getValue() )->netLength();

                        InsertDeleteOptions options;
                        options.logIfError = false;
                        options.dupsAllowed = true; // in compact we should be doing no checking

                        indexesToInsertTo.insert( objOld, status.getValue(), options );
                    }

                    if( L.isNull() ) {
                        // we just did the very last record from the old extent.  it's still pointed to
                        // by the old extent ext, but that will be fixed below after this loop
                        break;
                    }

                    // remove the old records (orphan them) periodically so our commit block doesn't get too large
                    bool stopping = false;
                    RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
                    if( stopping || getDur().aCommitIsNeeded() ) {
                        e->firstRecord.writing() = L;
                        Record *r = L.rec();
                        getDur().writingInt(r->prevOfs()) = DiskLoc::NullOfs;
                        getDur().commitIfNeeded();
                        killCurrentOp.checkForInterrupt();
                    }
                }
            } // if !L.isNull()

            verify( details()->firstExtent() == diskloc );
            verify( details()->lastExtent() != diskloc );
            DiskLoc newFirst = e->xnext;
            details()->firstExtent().writing() = newFirst;
            newFirst.ext()->xprev.writing().Null();
            getDur().writing(e)->markEmpty();
            getExtentManager()->freeExtents( diskloc, diskloc );

            getDur().commitIfNeeded();

            {
                double op = 1.0;
                if( oldObjSize )
                    op = static_cast<double>(oldObjSizeWithPadding)/oldObjSize;
                log() << "compact finished extent #" << extentNumber << " containing " << nrecords
                      << " documents (" << datasize/1000000.0 << "MB)"
                      << " oldPadding: " << op << ' ' << static_cast<unsigned>(op*100.0)/100;
            }
        }

    }
Example #7
StatusWith<CompactStats> compactCollection(OperationContext* opCtx,
                                           Collection* collection,
                                           const CompactOptions* compactOptions) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X));

    DisableDocumentValidation validationDisabler(opCtx);

    auto recordStore = collection->getRecordStore();
    auto indexCatalog = collection->getIndexCatalog();

    if (!recordStore->compactSupported())
        return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
                                        str::stream()
                                            << "cannot compact collection with record store: "
                                            << recordStore->name());

    if (recordStore->compactsInPlace()) {
        CompactStats stats;
        Status status = recordStore->compact(opCtx);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        // Compact all indexes (not including unfinished indexes)
        status = indexCatalog->compactIndexes(opCtx);
        if (!status.isOK())
            return StatusWith<CompactStats>(status);

        return StatusWith<CompactStats>(stats);
    }

    if (indexCatalog->numIndexesInProgress(opCtx))
        return StatusWith<CompactStats>(ErrorCodes::BadValue,
                                        "cannot compact when indexes in progress");

    std::vector<BSONObj> indexSpecs;
    {
        std::unique_ptr<IndexCatalog::IndexIterator> ii(
            indexCatalog->getIndexIterator(opCtx, false));
        while (ii->more()) {
            const IndexDescriptor* descriptor = ii->next()->descriptor();

            // Compact always creates the new index in the foreground.
            const BSONObj spec =
                descriptor->infoObj().removeField(IndexDescriptor::kBackgroundFieldName);
            const BSONObj key = spec.getObjectField("key");
            const Status keyStatus =
                index_key_validate::validateKeyPattern(key, descriptor->version());
            if (!keyStatus.isOK()) {
                return StatusWith<CompactStats>(
                    ErrorCodes::CannotCreateIndex,
                    str::stream() << "Cannot compact collection due to invalid index " << spec
                                  << ": "
                                  << keyStatus.reason()
                                  << " For more info see"
                                  << " http://dochub.mongodb.org/core/index-validation");
            }
            indexSpecs.push_back(spec);
        }
    }

    // Give a chance to be interrupted *before* we drop all indexes.
    opCtx->checkForInterrupt();

    {
        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        WriteUnitOfWork wunit(opCtx);
        log() << "compact dropping indexes";
        indexCatalog->dropAllIndexes(opCtx, true);
        wunit.commit();
    }

    CompactStats stats;

    MultiIndexBlock indexer;
    indexer.ignoreUniqueConstraint();  // in compact we should be doing no checking

    // The 'indexer' could throw, so ensure build cleanup occurs.
    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(opCtx, collection); });

    Status status =
        indexer.init(opCtx, collection, indexSpecs, MultiIndexBlock::kNoopOnInitFn).getStatus();
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    status = recordStore->compact(opCtx);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    log() << "starting index commits";
    status = indexer.dumpInsertsFromBulk(opCtx);
    if (!status.isOK())
        return StatusWith<CompactStats>(status);

    {
        WriteUnitOfWork wunit(opCtx);
        status = indexer.commit(opCtx,
                                collection,
                                MultiIndexBlock::kNoopOnCreateEachFn,
                                MultiIndexBlock::kNoopOnCommitFn);
        if (!status.isOK()) {
            return StatusWith<CompactStats>(status);
        }
        wunit.commit();
    }

    return StatusWith<CompactStats>(stats);
}
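
Taken together, the snippets trace the same MultiIndexBlock lifecycle: construct, guard with cleanUpAfterBuild, init, feed documents, optionally check constraints, then commit inside a WriteUnitOfWork. A condensed sketch of that common sequence, using only calls that appear in the examples above (error handling reduced to uassertStatusOK):

    MultiIndexBlock indexer;
    // Build state must be cleaned up whether or not the build commits.
    ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(opCtx, collection); });

    uassertStatusOK(indexer.init(opCtx, collection, specs, MultiIndexBlock::kNoopOnInitFn));
    uassertStatusOK(indexer.insertAllDocumentsInCollection(opCtx, collection));
    uassertStatusOK(indexer.checkConstraints(opCtx));  // unique violations surface here

    WriteUnitOfWork wunit(opCtx);
    uassertStatusOK(indexer.commit(
        opCtx, collection, MultiIndexBlock::kNoopOnCreateEachFn, MultiIndexBlock::kNoopOnCommitFn));
    wunit.commit();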