void RecordStoreV1Base::increaseStorageSize( TransactionExperiment* txn,
                                             int size,
                                             int quotaMax ) {
        DiskLoc eloc = _extentManager->allocateExtent( _ns,
                                                       isCapped(),
                                                       size,
                                                       quotaMax );

        Extent *e = _extentManager->getExtent( eloc, false );

        invariant( e );

        DiskLoc emptyLoc = txn->writing(e)->reuse( _ns, isCapped() );

        if ( _details->lastExtent().isNull() ) {
            verify( _details->firstExtent().isNull() );
            _details->setFirstExtent( txn, eloc );
            _details->setLastExtent( txn, eloc );
            _details->setCapExtent( txn, eloc );
            verify( e->xprev.isNull() );
            verify( e->xnext.isNull() );
        }
        else {
            verify( !_details->firstExtent().isNull() );
            *txn->writing(&e->xprev) = _details->lastExtent();
            *txn->writing(&_extentManager->getExtent(_details->lastExtent())->xnext) = eloc;
            _details->setLastExtent( txn, eloc );
        }

        _details->setLastExtentSize( txn, e->length );

        addDeletedRec(txn, emptyLoc);
    }
void RecordStoreV1Base::appendCustomStats(OperationContext* txn,
                                          BSONObjBuilder* result,
                                          double scale) const {
    result->append("lastExtentSize", _details->lastExtentSize(txn) / scale);
    result->append("paddingFactor", 1.0);  // hard coded
    result->append("paddingFactorNote",
                   "paddingFactor is unused and unmaintained in 3.0. It "
                   "remains hard coded to 1.0 for compatibility only.");
    result->append("userFlags", _details->userFlags());
    result->appendBool("capped", isCapped());
    if (isCapped()) {
        result->appendNumber("max", _details->maxCappedDocs());
        result->appendNumber("maxSize", static_cast<long long>(storageSize(txn, NULL, 0) / scale));
    }
}
Example #3
 void Collection::temp_cappedTruncateAfter(OperationContext* txn,
                                           DiskLoc end,
                                           bool inclusive) {
     invariant( isCapped() );
     reinterpret_cast<CappedRecordStoreV1*>(
         _recordStore.get())->temp_cappedTruncateAfter( txn, end, inclusive );
 }
void RecordStoreV1Base::increaseStorageSize(OperationContext* txn, int size, bool enforceQuota) {
    DiskLoc eloc = _extentManager->allocateExtent(txn, isCapped(), size, enforceQuota);
    Extent* e = _extentManager->getExtent(eloc);
    invariant(e);

    *txn->recoveryUnit()->writing(&e->nsDiagnostic) = _ns;

    txn->recoveryUnit()->writing(&e->xnext)->Null();
    txn->recoveryUnit()->writing(&e->xprev)->Null();
    txn->recoveryUnit()->writing(&e->firstRecord)->Null();
    txn->recoveryUnit()->writing(&e->lastRecord)->Null();

    DiskLoc emptyLoc = _findFirstSpot(txn, eloc, e);

    if (_details->lastExtent(txn).isNull()) {
        invariant(_details->firstExtent(txn).isNull());
        _details->setFirstExtent(txn, eloc);
        _details->setLastExtent(txn, eloc);
        _details->setCapExtent(txn, eloc);
        invariant(e->xprev.isNull());
        invariant(e->xnext.isNull());
    } else {
        invariant(!_details->firstExtent(txn).isNull());
        *txn->recoveryUnit()->writing(&e->xprev) = _details->lastExtent(txn);
        *txn->recoveryUnit()->writing(
            &_extentManager->getExtent(_details->lastExtent(txn))->xnext) = eloc;
        _details->setLastExtent(txn, eloc);
    }

    _details->setLastExtentSize(txn, e->length);

    addDeletedRec(txn, emptyLoc);
}
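
The else-branch above is ordinary doubly-linked-list appending: the new extent's xprev points back at the old tail, the old tail's xnext points forward at the new extent, and lastExtent advances. A minimal in-memory analogue (the Node type and appendNode are illustrative only, not MongoDB API):

struct Node {
    Node* xprev = nullptr;
    Node* xnext = nullptr;
};

// Mirrors the two branches of increaseStorageSize: seed an empty list,
// or splice the new node in behind the current tail.
void appendNode(Node*& first, Node*& last, Node* e) {
    if (last == nullptr) {
        first = last = e;  // empty list: e becomes both head and tail
    } else {
        e->xprev = last;   // new tail points back at old tail
        last->xnext = e;   // old tail points forward at new tail
        last = e;          // the tail pointer advances
    }
}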
Example #5
    void Collection::deleteDocument( OperationContext* txn,
                                     const DiskLoc& loc,
                                     bool cappedOK,
                                     bool noWarn,
                                     BSONObj* deletedId ) {
        if ( isCapped() && !cappedOK ) {
            log() << "failing remove on a capped ns " << _ns << endl;
            uasserted( 10089, "cannot remove from a capped collection" );
        }

        BSONObj doc = docFor( loc );

        if ( deletedId ) {
            BSONElement e = doc["_id"];
            if ( e.type() ) {
                *deletedId = e.wrap();
            }
        }

        /* check if any cursors point to us.  if so, advance them. */
        _cursorCache.invalidateDocument(loc, INVALIDATION_DELETION);

        _indexCatalog.unindexRecord(txn, doc, loc, noWarn);

        _recordStore->deleteRecord( txn, loc );

        _infoCache.notifyOfWriteOp();
    }
    void RecordStoreV1Base::appendCustomStats( BSONObjBuilder* result, double scale ) const {
        result->append( "lastExtentSize", _details->lastExtentSize() / scale );
        result->append( "paddingFactor", _details->paddingFactor() );
        result->append( "userFlags", _details->userFlags() );

        if ( isCapped() ) {
            result->appendBool( "capped", true );
            result->appendNumber( "max", _details->maxCappedDocs() );
        }
    }
    void RecordStoreV1Base::appendCustomStats( OperationContext* txn,
                                               BSONObjBuilder* result,
                                               double scale ) const {
        result->append( "lastExtentSize", _details->lastExtentSize(txn) / scale );
        result->append( "paddingFactor", _details->paddingFactor() );
        result->append( "userFlags", _details->userFlags() );

        if ( isCapped() ) {
            result->appendBool( "capped", true );
            result->appendNumber( "max", _details->maxCappedDocs() );
            result->appendNumber( "maxSize", static_cast<long long>( storageSize( txn, NULL, 0 ) ) );
        }
    }
    int RecordStoreV1Base::getRecordAllocationSize( int minRecordSize ) const {

        if ( isCapped() )
            return minRecordSize;

        invariant( _details->paddingFactor() >= 1 );

        if ( _details->isUserFlagSet( Flag_UsePowerOf2Sizes ) ) {
            // quantize to the nearest bucketSize (or nearest 1mb boundary for large sizes).
            return quantizePowerOf2AllocationSpace(minRecordSize);
        }

        // adjust for padding factor
        return static_cast<int>(minRecordSize * _details->paddingFactor());
    }
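
For intuition, here is a self-contained sketch of the quantization the comment above describes; the 1 MB cutoff and the 32-byte floor are assumptions of this sketch, not values read from quantizePowerOf2AllocationSpace:

    // Round small requests up to the next power of two, and large requests
    // up to the next 1 MB boundary (a sketch, not the real implementation).
    int quantizePowerOf2Sketch( int minSize ) {
        const int oneMB = 1024 * 1024;
        if ( minSize >= oneMB )
            return ( minSize + oneMB - 1 ) & ~( oneMB - 1 );
        int size = 32;  // assumed smallest bucket
        while ( size < minSize )
            size *= 2;
        return size;
    }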
Example #9
 Collection::Collection( OperationContext* txn,
                         const StringData& fullNS,
                         CollectionCatalogEntry* details,
                         RecordStore* recordStore,
                         Database* database )
     : _ns( fullNS ),
       _details( details ),
       _recordStore( recordStore ),
       _database( database ),
       _infoCache( this ),
       _indexCatalog( this ),
       _cursorCache( fullNS ) {
     _magic = 1357924;
     _indexCatalog.init(txn);
     if ( isCapped() )
         _recordStore->setCappedDeleteCallback( this );
 }
Example #10
    /* combine adjacent deleted records *for the current extent* of the capped collection

       this is O(n^2) but we call it for capped tables where typically n==1 or 2!
       (or 3...there will be a little unused sliver at the end of the extent.)
    */
    void NamespaceDetails::compact() {
        DDD( "NamespaceDetails::compact enter" );
        verify( isCapped() );
        
        vector<DiskLoc> drecs;
        
        // Pull out capExtent's DRs from deletedList
        DiskLoc i = cappedFirstDeletedInCurExtent();
        for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted() ) {
            DDD( "\t" << i );
            drecs.push_back( i );
        }

        getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = i;
        
        std::sort( drecs.begin(), drecs.end() );
        DDD( "\t drecs.size(): " << drecs.size() );

        vector<DiskLoc>::const_iterator j = drecs.begin();
        verify( j != drecs.end() );
        DiskLoc a = *j;
        while ( 1 ) {
            j++;
            if ( j == drecs.end() ) {
                DDD( "\t compact adddelrec" );
                addDeletedRec(a.drec(), a);
                break;
            }
            DiskLoc b = *j;
            while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders() == b.getOfs() ) {
                // a & b are adjacent.  merge.
                getDur().writingInt( a.drec()->lengthWithHeaders() ) += b.drec()->lengthWithHeaders();
                j++;
                if ( j == drecs.end() ) {
                    DDD( "\t compact adddelrec2" );
                    addDeletedRec(a.drec(), a);
                    return;
                }
                b = *j;
            }
            DDD( "\t compact adddelrec3" );
            addDeletedRec(a.drec(), a);
            a = b;
        }

    }
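
Stripped of DiskLoc bookkeeping, the merging loop above is a single coalescing pass over deleted records sorted by offset: two records merge when the first ends exactly where the second begins (the a.a() == b.a() check additionally requires both to live in the same file). A simplified model with invented types, assuming the usual <vector> and <utility> includes:

    // (offset, lengthWithHeaders) pairs within one file, sorted by offset.
    std::vector< std::pair<int, int> > coalesce( const std::vector< std::pair<int, int> >& recs ) {
        std::vector< std::pair<int, int> > out;
        for ( size_t i = 0; i < recs.size(); i++ ) {
            if ( !out.empty() &&
                 out.back().first + out.back().second == recs[i].first ) {
                out.back().second += recs[i].second;  // adjacent: grow the current run
            }
            else {
                out.push_back( recs[i] );             // gap: start a new run
            }
        }
        return out;
    }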
    bool CreateCollectionDialog::validateOptionDependencies() const
    {
        bool result(false);
        // Validate capped options
        if (isCapped()) {
            if (getSizeInputValue() <= 0) {
                QMessageBox::critical(NULL, tr("Error"), tr("Maximum size is required for capped collections"));
                return false;
            }
            result = true;
        }
        else {
            result = true;
        }

        // Completed checking all dependencies.
        return result;
    }
StatusWith<RecordId> RecordStoreV1Base::updateRecord(OperationContext* txn,
                                                     const RecordId& oldLocation,
                                                     const char* data,
                                                     int dataSize,
                                                     bool enforceQuota,
                                                     UpdateNotifier* notifier) {
    MmapV1RecordHeader* oldRecord = recordFor(DiskLoc::fromRecordId(oldLocation));
    if (oldRecord->netLength() >= dataSize) {
        // Make sure to notify other queries before we do an in-place update.
        if (notifier) {
            Status callbackStatus = notifier->recordStoreGoingToUpdateInPlace(txn, oldLocation);
            if (!callbackStatus.isOK())
                return StatusWith<RecordId>(callbackStatus);
        }

        // we fit
        memcpy(txn->recoveryUnit()->writingPtr(oldRecord->data(), dataSize), data, dataSize);
        return StatusWith<RecordId>(oldLocation);
    }

    // We enforce the restriction of unchanging capped doc sizes above the storage layer.
    invariant(!isCapped());

    // we have to move
    if (dataSize + MmapV1RecordHeader::HeaderSize > MaxAllowedAllocation) {
        return StatusWith<RecordId>(ErrorCodes::InvalidLength, "record has to be <= 16.5MB");
    }

    StatusWith<RecordId> newLocation = _insertRecord(txn, data, dataSize, enforceQuota);
    if (!newLocation.isOK())
        return newLocation;

    // insert worked, so we delete old record
    if (notifier) {
        Status moveStatus = notifier->recordStoreGoingToMove(
            txn, oldLocation, oldRecord->data(), oldRecord->netLength());
        if (!moveStatus.isOK())
            return StatusWith<RecordId>(moveStatus);
    }

    deleteRecord(txn, oldLocation);

    return newLocation;
}
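
The shape of the update path above is worth calling out: when the new data fits the old allocation, the record is overwritten in place and keeps its RecordId; otherwise the new data is inserted first and the old record deleted afterwards, so the RecordId changes. A toy model of that decision, with std::string capacity standing in for the record's allocated length:

#include <string>
#include <vector>

// Returns the record's index after the update; it changes only when the
// new data no longer fits the old slot's allocation.
size_t updateSketch(std::vector<std::string>& store, size_t idx, const std::string& data) {
    if (data.size() <= store[idx].capacity()) {
        store[idx] = data;       // "we fit": in-place overwrite
        return idx;
    }
    store.push_back(data);       // move: insert the new copy first...
    store[idx].clear();          // ...then "delete" the old record
    return store.size() - 1;
}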
Example #13
    StatusWith<DiskLoc> Collection::insertDocument( OperationContext* txn,
                                                    const BSONObj& docToInsert,
                                                    bool enforceQuota ) {
        if ( _indexCatalog.findIdIndex() ) {
            if ( docToInsert["_id"].eoo() ) {
                return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                            str::stream() << "Collection::insertDocument got "
                                            "document without _id for ns:" << _ns.ns() );
            }
        }

        if ( isCapped() ) {
            // TODO: old god not done
            Status ret = _indexCatalog.checkNoIndexConflicts( docToInsert );
            if ( !ret.isOK() )
                return StatusWith<DiskLoc>( ret );
        }

        return _insertDocument( txn, docToInsert, enforceQuota );
    }
    StatusWith<DiskLoc> RecordStoreV1Base::updateRecord( OperationContext* txn,
                                                         const DiskLoc& oldLocation,
                                                         const char* data,
                                                         int dataSize,
                                                         bool enforceQuota,
                                                         UpdateMoveNotifier* notifier ) {
        Record* oldRecord = recordFor( oldLocation );
        if ( oldRecord->netLength() >= dataSize ) {
            // we fit
            _paddingFits( txn );
            memcpy( txn->recoveryUnit()->writingPtr( oldRecord->data(), dataSize ), data, dataSize );
            return StatusWith<DiskLoc>( oldLocation );
        }

        if ( isCapped() )
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "failing update: objects in a capped ns cannot grow",
                                        10003 );

        // we have to move

        _paddingTooSmall( txn );

        StatusWith<DiskLoc> newLocation = _insertRecord( txn, data, dataSize, enforceQuota );
        if ( !newLocation.isOK() )
            return newLocation;

        // insert worked, so we delete old record
        if ( notifier ) {
            Status moveStatus = notifier->recordStoreGoingToMove( txn,
                                                                  oldLocation,
                                                                  oldRecord->data(),
                                                                  oldRecord->netLength() );
            if ( !moveStatus.isOK() )
                return StatusWith<DiskLoc>( moveStatus );
        }

        deleteRecord( txn, oldLocation );

        return newLocation;
    }
    DiskLoc RecordStoreV1Base::_findFirstSpot( OperationContext* txn,
                                               const DiskLoc& extDiskLoc, Extent* e ) {
        DiskLoc emptyLoc = extDiskLoc;
        emptyLoc.inc( Extent::HeaderSize() );
        int delRecLength = e->length - Extent::HeaderSize();
        if ( delRecLength >= 32*1024 && _ns.find('$') != string::npos && !isCapped() ) {
            // probably an index. so skip forward to keep its records page aligned
            int& ofs = emptyLoc.GETOFS();
            int newOfs = (ofs + 0xfff) & ~0xfff;
            delRecLength -= (newOfs-ofs);
            dassert( delRecLength > 0 );
            ofs = newOfs;
        }

        DeletedRecord* empty = txn->recoveryUnit()->writing(drec(emptyLoc));
        empty->lengthWithHeaders() = delRecLength;
        empty->extentOfs() = e->myLoc.getOfs();
        empty->nextDeleted().Null();
        return emptyLoc;

    }
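
The bit trick in _findFirstSpot rounds an offset up to the next 4 KB boundary (0xfff is 4095), keeping index records page aligned as the comment notes. A standalone illustration with a hypothetical helper name:

    int roundUpTo4K( int ofs ) {
        return ( ofs + 0xfff ) & ~0xfff;
    }

    // roundUpTo4K(0)      == 0x0000  (already aligned)
    // roundUpTo4K(1)      == 0x1000  (4096)
    // roundUpTo4K(0x1000) == 0x1000  (already aligned)
    // roundUpTo4K(0x1001) == 0x2000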
    void NamespaceDetails::cappedCheckMigrate() {
        // migrate old NamespaceDetails format
        verify( isCapped() );
        if ( _capExtent.a() == 0 && _capExtent.getOfs() == 0 ) {
            //capFirstNewRecord = DiskLoc();
            _capFirstNewRecord.writing().setInvalid();
            // put all the DeletedRecords in cappedListOfAllDeletedRecords()
            for ( int i = 1; i < Buckets; ++i ) {
                DiskLoc first = _deletedList[ i ];
                if ( first.isNull() )
                    continue;
                DiskLoc last = first;
                for (; !last.drec()->nextDeleted().isNull(); last = last.drec()->nextDeleted() );
                last.drec()->nextDeleted().writing() = cappedListOfAllDeletedRecords();
                cappedListOfAllDeletedRecords().writing() = first;
                _deletedList[i].writing() = DiskLoc();
            }
            // NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above

            // Last, in case we're killed before getting here
            _capExtent.writing() = _firstExtent;
        }
    }
Example #17
    StatusWith<DiskLoc> Collection::_insertDocument( OperationContext* txn,
                                                     const BSONObj& docToInsert,
                                                     bool enforceQuota ) {

        // TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
        //       under the RecordStore. This feels broken, since it should probably
        //       be a collection access method.

        StatusWith<DiskLoc> loc = _recordStore->insertRecord( txn,
                                                              docToInsert.objdata(),
                                                              docToInsert.objsize(),
                                                              enforceQuota ? largestFileNumberInQuota() : 0 );
        if ( !loc.isOK() )
            return loc;

        _infoCache.notifyOfWriteOp();

        try {
            _indexCatalog.indexRecord(txn, docToInsert, loc.getValue());
        }
        catch ( AssertionException& e ) {
            if ( isCapped() ) {
                return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                            str::stream() << "unexpected index insertion failure on"
                                            << " capped collection" << e.toString()
                                            << " - collection and its index will not match" );
            }

            // indexRecord takes care of rolling back indexes
            // so we just have to delete the main storage
            _recordStore->deleteRecord( txn, loc.getValue() );
            return StatusWith<DiskLoc>( e.toStatus( "insertDocument" ) );
        }

        return loc;
    }
    StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {

        if ( isCapped() )
            return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                             "cannot compact capped collection" );

        if ( _indexCatalog.numIndexesInProgress() )
            return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                             "cannot compact when indexes in progress" );

        NamespaceDetails* d = details();

        // this is a big job, so might as well make things tidy before we start just to be nice.
        getDur().commitIfNeeded();

        list<DiskLoc> extents;
        for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
            extents.push_back(L);
        log() << "compact " << extents.size() << " extents" << endl;

        // same data, but might perform a little differently after compact?
        _infoCache.reset();

        vector<BSONObj> indexSpecs;
        {
            IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
            while ( ii.more() ) {
                IndexDescriptor* descriptor = ii.next();

                const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
                const BSONObj key = spec.getObjectField("key");
                const Status keyStatus = validateKeyPattern(key);
                if (!keyStatus.isOK()) {
                    return StatusWith<CompactStats>(
                        ErrorCodes::CannotCreateIndex,
                        str::stream() << "Cannot rebuild index " << spec << ": "
                                      << keyStatus.reason()
                                      << " For more info see"
                                      << " http://dochub.mongodb.org/core/index-validation");
                }
                indexSpecs.push_back(spec);
            }
        }

        log() << "compact orphan deleted lists" << endl;
        d->orphanDeletedList();

        // Start over from scratch with our extent sizing and growth
        d->setLastExtentSize( 0 );

        // before dropping indexes, at least make sure we can allocate one extent!
        // this will allocate an extent and add to free list
        // if it cannot, it will throw an exception
        increaseStorageSize( _details->lastExtentSize(), true );

        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        log() << "compact dropping indexes" << endl;
        Status status = _indexCatalog.dropAllIndexes( true );
        if ( !status.isOK() ) {
            return StatusWith<CompactStats>( status );
        }

        getDur().commitIfNeeded();
        killCurrentOp.checkForInterrupt();

        CompactStats stats;

        MultiIndexBlock multiIndexBlock( this );
        status = multiIndexBlock.init( indexSpecs );
        if ( !status.isOK() )
            return StatusWith<CompactStats>( status );

        // reset data size and record counts to 0 for this namespace
        // as we're about to tally them up again for each new extent
        d->setStats( 0, 0 );

        ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
                                                        "Extent Compacting Progress",
                                                        extents.size()));

        int extentNumber = 0;
        for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
            _compactExtent(*i, extentNumber++, multiIndexBlock, compactOptions, &stats );
            pm.hit();
        }

        verify( d->firstExtent().ext()->xprev.isNull() );

        // indexes will do their own progress meter?
        pm.finished();

        log() << "starting index commits";

        status = multiIndexBlock.commit();
        if ( !status.isOK() )
            return StatusWith<CompactStats>( status );

        return StatusWith<CompactStats>( stats );
    }
    void NamespaceDetails::emptyCappedCollection( const char *ns ) {
        DEV verify( this == nsdetails(ns) );
        massert( 13424, "collection must be capped", isCapped() );
        massert( 13425, "background index build in progress", !_indexBuildsInProgress );

        vector<BSONObj> indexes = Helpers::findAll( Namespace( ns ).getSisterNS( "system.indexes" ) , BSON( "ns" << ns ) );
        for ( unsigned i=0; i<indexes.size(); i++ ) {
            indexes[i] = indexes[i].copy();
        }

        if ( _nIndexes ) {
            string errmsg;
            BSONObjBuilder note;
            bool res = dropIndexes( this , ns , "*" , errmsg , note , true );
            massert( 13426 , str::stream() << "failed during index drop: " << errmsg , res );
        }

        // Clear all references to this namespace.
        ClientCursor::invalidate( ns );
        NamespaceDetailsTransient::resetCollection( ns );

        // Get a writeable reference to 'this' and reset all pertinent
        // attributes.
        NamespaceDetails *t = writingWithoutExtra();

        t->cappedLastDelRecLastExtent() = DiskLoc();
        t->cappedListOfAllDeletedRecords() = DiskLoc();

        // preserve firstExtent/lastExtent
        t->_capExtent = _firstExtent;
        t->_stats.datasize = t->_stats.nrecords = 0;
        // lastExtentSize preserve
        // nIndexes preserve 0
        // capped preserve true
        // max preserve
        t->_paddingFactor = 1.0;
        t->_systemFlags = 0;
        t->_capFirstNewRecord = DiskLoc();
        t->_capFirstNewRecord.setInvalid();
        t->cappedLastDelRecLastExtent().setInvalid();
        // dataFileVersion preserve
        // indexFileVersion preserve
        t->_multiKeyIndexBits = 0;
        t->_reservedA = 0;
        t->_extraOffset = 0;
        // indexBuildInProgress preserve 0
        memset(t->_reserved, 0, sizeof(t->_reserved));

        // Reset all existing extents and recreate the deleted list.
        for( DiskLoc ext = _firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
            DiskLoc prev = ext.ext()->xprev;
            DiskLoc next = ext.ext()->xnext;
            DiskLoc empty = ext.ext()->reuse( ns, true );
            ext.ext()->xprev.writing() = prev;
            ext.ext()->xnext.writing() = next;
            addDeletedRec( empty.drec(), empty );
        }

        for ( unsigned i=0; i<indexes.size(); i++ ) {
            theDataFileMgr.insertWithObjMod(Namespace( ns ).getSisterNS( "system.indexes" ).c_str(),
                                            indexes[i],
                                            false,
                                            true);
        }
        
    }
    StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {

        if ( isCapped() )
            return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                             "cannot compact capped collection" );

        if ( _indexCatalog.numIndexesInProgress() )
            return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                             "cannot compact when indexes in progress" );

        NamespaceDetails* d = details();

        // this is a big job, so might as well make things tidy before we start just to be nice.
        getDur().commitIfNeeded();

        list<DiskLoc> extents;
        for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
            extents.push_back(L);
        log() << "compact " << extents.size() << " extents" << endl;

        // same data, but might perform a little differently after compact?
        _infoCache.reset();

        vector<BSONObj> indexSpecs;
        {
            IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
            while ( ii.more() ) {
                IndexDescriptor* descriptor = ii.next();
                indexSpecs.push_back( _compactAdjustIndexSpec( descriptor->infoObj() ) );
            }
        }

        log() << "compact orphan deleted lists" << endl;
        d->orphanDeletedList();

        // Start over from scratch with our extent sizing and growth
        d->setLastExtentSize( 0 );

        // before dropping indexes, at least make sure we can allocate one extent!
        if ( allocateSpaceForANewRecord( _ns.ns().c_str(),
                                         d,
                                         Record::HeaderSize+1,
                                         false).isNull() ) {
            return StatusWith<CompactStats>( ErrorCodes::InternalError,
                                             "compact error no space available to allocate" );
        }

        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        log() << "compact dropping indexes" << endl;
        Status status = _indexCatalog.dropAllIndexes( true );
        if ( !status.isOK() ) {
            return StatusWith<CompactStats>( status );
        }

        getDur().commitIfNeeded();

        CompactStats stats;

        OwnedPointerVector<IndexCatalog::IndexBuildBlock> indexBuildBlocks;
        vector<IndexAccessMethod*> indexesToInsertTo;
        vector< std::pair<IndexAccessMethod*,IndexAccessMethod*> > bulkToCommit;
        for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
            killCurrentOp.checkForInterrupt(false);
            BSONObj info = indexSpecs[i];
            info = _compactAdjustIndexSpec( info );
            info = _indexCatalog.fixIndexSpec( info );
            auto_ptr<IndexCatalog::IndexBuildBlock> block( new IndexCatalog::IndexBuildBlock( this,info ) );
            Status status = block->init();
            if ( !status.isOK() )
                return StatusWith<CompactStats>(status);

            IndexAccessMethod* accessMethod = block->getEntry()->accessMethod();
            status = accessMethod->initializeAsEmpty();
            if ( !status.isOK() )
                return StatusWith<CompactStats>(status);

            IndexAccessMethod* bulk = accessMethod->initiateBulk();
            if ( bulk ) {
                indexesToInsertTo.push_back( bulk );
                bulkToCommit.push_back( std::pair<IndexAccessMethod*,IndexAccessMethod*>( accessMethod, bulk ) );
            }
            else {
                indexesToInsertTo.push_back( accessMethod );
            }

            indexBuildBlocks.mutableVector().push_back( block.release() );
        }

        // reset data size and record counts to 0 for this namespace
        // as we're about to tally them up again for each new extent
        d->setStats( 0, 0 );

        ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
                                                        "Extent Compacting Progress",
                                                        extents.size()));

        int extentNumber = 0;
        for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
            _compactExtent(*i, extentNumber++, indexesToInsertTo, compactOptions, &stats );
            pm.hit();
        }

        verify( d->firstExtent().ext()->xprev.isNull() );

        // indexes will do their own progress meter?
        pm.finished();

        log() << "starting index commits";

        for ( size_t i = 0; i < bulkToCommit.size(); i++ ) {
            bulkToCommit[i].first->commitBulk( bulkToCommit[i].second, false, NULL );
        }

        for ( size_t i = 0; i < indexBuildBlocks.size(); i++ ) {
            IndexCatalog::IndexBuildBlock* block = indexBuildBlocks.mutableVector()[i];
            block->success();
        }

        return StatusWith<CompactStats>( stats );
    }
    Status RecordStoreV1Base::validate( OperationContext* txn,
                                        bool full, bool scanData,
                                        ValidateAdaptor* adaptor,
                                        ValidateResults* results, BSONObjBuilder* output ) const {

        // 1) basic status that require no iteration
        // 2) extent level info
        // 3) check extent start and end
        // 4) check each non-deleted record
        // 5) check deleted list

        // -------------

        // 1111111111111111111
        if ( isCapped() ){
            output->appendBool("capped", true);
            output->appendNumber("max", _details->maxCappedDocs());
        }

        output->appendNumber("datasize", _details->dataSize());
        output->appendNumber("nrecords", _details->numRecords());
        output->appendNumber("lastExtentSize", _details->lastExtentSize(txn));
        output->appendNumber("padding", _details->paddingFactor());

        if ( _details->firstExtent(txn).isNull() )
            output->append( "firstExtent", "null" );
        else
            output->append( "firstExtent",
                            str::stream() << _details->firstExtent(txn).toString()
                            << " ns:"
                            << _getExtent( txn, _details->firstExtent(txn) )->nsDiagnostic.toString());
        if ( _details->lastExtent(txn).isNull() )
            output->append( "lastExtent", "null" );
        else
            output->append( "lastExtent", str::stream() << _details->lastExtent(txn).toString()
                            << " ns:"
                            << _getExtent( txn, _details->lastExtent(txn) )->nsDiagnostic.toString());

        // 22222222222222222222222222
        { // validate extent basics
            BSONArrayBuilder extentData;
            int extentCount = 0;
            DiskLoc extentDiskLoc;
            try {
                if ( !_details->firstExtent(txn).isNull() ) {
                    _getExtent( txn, _details->firstExtent(txn) )->assertOk();
                    _getExtent( txn, _details->lastExtent(txn) )->assertOk();
                }

                extentDiskLoc = _details->firstExtent(txn);
                while (!extentDiskLoc.isNull()) {
                    Extent* thisExtent = _getExtent( txn, extentDiskLoc );
                    if (full) {
                        extentData << thisExtent->dump();
                    }
                    if (!thisExtent->validates(extentDiskLoc, &results->errors)) {
                        results->valid = false;
                    }
                    DiskLoc nextDiskLoc = thisExtent->xnext;
                    
                    if (extentCount > 0 && !nextDiskLoc.isNull()
                        &&  _getExtent( txn, nextDiskLoc )->xprev != extentDiskLoc) {
                        StringBuilder sb;
                        sb << "'xprev' pointer " << _getExtent( txn, nextDiskLoc )->xprev.toString()
                           << " in extent " << nextDiskLoc.toString()
                           << " does not point to extent " << extentDiskLoc.toString();
                        results->errors.push_back( sb.str() );
                        results->valid = false;
                    }
                    if (nextDiskLoc.isNull() && extentDiskLoc != _details->lastExtent(txn)) {
                        StringBuilder sb;
                        sb << "'lastExtent' pointer " << _details->lastExtent(txn).toString()
                           << " does not point to last extent in list " << extentDiskLoc.toString();
                        results->errors.push_back( sb.str() );
                        results->valid = false;
                    }
                    extentDiskLoc = nextDiskLoc;
                    extentCount++;
                    txn->checkForInterrupt();
                }
            }
            catch (const DBException& e) {
                StringBuilder sb;
                sb << "exception validating extent " << extentCount
                   << ": " << e.what();
                results->errors.push_back( sb.str() );
                results->valid = false;
                return Status::OK();
            }
            output->append("extentCount", extentCount);

            if ( full )
                output->appendArray( "extents" , extentData.arr() );

        }

        try {
            // 333333333333333333333333333
            bool testingLastExtent = false;
            try {
                DiskLoc firstExtentLoc = _details->firstExtent(txn);
                if (firstExtentLoc.isNull()) {
                    // this is ok
                }
                else {
                    output->append("firstExtentDetails", _getExtent(txn, firstExtentLoc)->dump());
                    if (!_getExtent(txn, firstExtentLoc)->xprev.isNull()) {
                        StringBuilder sb;
                        sb << "'xprev' pointer in 'firstExtent' " << _details->firstExtent(txn).toString()
                           << " is " << _getExtent(txn, firstExtentLoc)->xprev.toString()
                           << ", should be null";
                        results->errors.push_back( sb.str() );
                        results->valid = false;
                    }
                }
                testingLastExtent = true;
                DiskLoc lastExtentLoc = _details->lastExtent(txn);
                if (lastExtentLoc.isNull()) {
                    // this is ok
                }
                else {
                    if (firstExtentLoc != lastExtentLoc) {
                        output->append("lastExtentDetails", _getExtent(txn, lastExtentLoc)->dump());
                        if (!_getExtent(txn, lastExtentLoc)->xnext.isNull()) {
                            StringBuilder sb;
                            sb << "'xnext' pointer in 'lastExtent' " << lastExtentLoc.toString()
                               << " is " << _getExtent(txn, lastExtentLoc)->xnext.toString()
                               << ", should be null";
                            results->errors.push_back( sb.str() );
                            results->valid = false;
                        }
                    }
                }
            }
            catch (const DBException& e) {
                StringBuilder sb;
                sb << "exception processing '"
                   << (testingLastExtent ? "lastExtent" : "firstExtent")
                   << "': " << e.what();
                results->errors.push_back( sb.str() );
                results->valid = false;
            }

            // 4444444444444444444444444

            set<DiskLoc> recs;
            if( scanData ) {
                int n = 0;
                int nInvalid = 0;
                long long nQuantizedSize = 0;
                long long nPowerOf2QuantizedSize = 0;
                long long len = 0;
                long long nlen = 0;
                long long bsonLen = 0;
                int outOfOrder = 0;
                DiskLoc cl_last;

                scoped_ptr<RecordIterator> iterator( getIterator( txn,
                                                                  DiskLoc(),
                                                                  false,
                                                                  CollectionScanParams::FORWARD ) );
                DiskLoc cl;
                while ( !( cl = iterator->getNext() ).isNull() ) {
                    n++;

                    if ( n < 1000000 )
                        recs.insert(cl);
                    if ( isCapped() ) {
                        if ( cl < cl_last )
                            outOfOrder++;
                        cl_last = cl;
                    }

                    Record *r = recordFor(cl);
                    len += r->lengthWithHeaders();
                    nlen += r->netLength();

                    if ( r->lengthWithHeaders() ==
                         quantizeAllocationSpace( r->lengthWithHeaders() ) ) {
                        // Count the number of records having a size consistent with
                        // the quantizeAllocationSpace quantization implementation.
                        ++nQuantizedSize;
                    }

                    if ( r->lengthWithHeaders() ==
                         quantizePowerOf2AllocationSpace( r->lengthWithHeaders() ) ) {
                        // Count the number of records having a size consistent with the
                        // quantizePowerOf2AllocationSpace quantization implementation.
                        ++nPowerOf2QuantizedSize;
                    }

                    if (full){
                        size_t dataSize = 0;
                        const Status status = adaptor->validate( r->toRecordData(), &dataSize );
                        if (!status.isOK()) {
                            results->valid = false;
                            if (nInvalid == 0) // only log once;
                                results->errors.push_back( "invalid object detected (see logs)" );

                            nInvalid++;
                            log() << "Invalid object detected in " << _ns
                                  << ": " << status.reason();
                        }
                        else {
                            bsonLen += dataSize;
                        }
                    }
                }

                if ( isCapped() && !_details->capLooped() ) {
                    output->append("cappedOutOfOrder", outOfOrder);
                    if ( outOfOrder > 1 ) {
                        results->valid = false;
                        results->errors.push_back( "too many out of order records" );
                    }
                }
                output->append("objectsFound", n);

                if (full) {
                    output->append("invalidObjects", nInvalid);
                }

                output->appendNumber("nQuantizedSize", nQuantizedSize);
                output->appendNumber("nPowerOf2QuantizedSize", nPowerOf2QuantizedSize);
                output->appendNumber("bytesWithHeaders", len);
                output->appendNumber("bytesWithoutHeaders", nlen);

                if (full) {
                    output->appendNumber("bytesBson", bsonLen);
                }
            } // end scanData

            // 55555555555555555555555555
            BSONArrayBuilder deletedListArray;
            for ( int i = 0; i < Buckets; i++ ) {
                deletedListArray << _details->deletedListEntry(i).isNull();
            }

            int ndel = 0;
            long long delSize = 0;
            BSONArrayBuilder delBucketSizes;
            int incorrect = 0;
            for ( int i = 0; i < Buckets; i++ ) {
                DiskLoc loc = _details->deletedListEntry(i);
                try {
                    int k = 0;
                    while ( !loc.isNull() ) {
                        if ( recs.count(loc) )
                            incorrect++;
                        ndel++;

                        if ( loc.questionable() ) {
                            if( isCapped() && !loc.isValid() && i == 1 ) {
                                /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
                                   see comments in namespace.h
                                */
                                break;
                            }

                            string err( str::stream() << "bad pointer in deleted record list: "
                                        << loc.toString()
                                        << " bucket: " << i
                                        << " k: " << k );
                            results->errors.push_back( err );
                            results->valid = false;
                            break;
                        }

                        const DeletedRecord* d = deletedRecordFor(loc);
                        delSize += d->lengthWithHeaders();
                        loc = d->nextDeleted();
                        k++;
                        txn->checkForInterrupt();
                    }
                    delBucketSizes << k;
                }
                catch (...) {
                    results->errors.push_back( (string)"exception in deleted chain for bucket " +
                                               BSONObjBuilder::numStr(i) );
                    results->valid = false;
                }
            }
            output->appendNumber("deletedCount", ndel);
            output->appendNumber("deletedSize", delSize);
            if ( full ) {
                output->append( "delBucketSizes", delBucketSizes.arr() );
            }

            if ( incorrect ) {
                results->errors.push_back( BSONObjBuilder::numStr(incorrect) +
                                           " records from datafile are in deleted list" );
                results->valid = false;
            }

        }
        catch (const AssertionException&) {
            results->errors.push_back( "exception during validate" );
            results->valid = false;
        }

        return Status::OK();
    }