Example 1
 void NamespaceDetails::cappedTruncateLastDelUpdate() {
     if ( _capExtent == _firstExtent ) {
         // Only one extent of the collection is in use, so there
         // is no deleted record in a previous extent, so nullify
         // cappedLastDelRecLastExtent().
         cappedLastDelRecLastExtent().writing() = DiskLoc();
     }
     else {
         // Scan through all deleted records in the collection
         // until the last deleted record for the extent prior
         // to the new capExtent is found.  Then set
         // cappedLastDelRecLastExtent() to that deleted record.
         DiskLoc i = cappedListOfAllDeletedRecords();
         for( ;
              !i.drec()->nextDeleted().isNull() &&
                  !inCapExtent( i.drec()->nextDeleted() );
              i = i.drec()->nextDeleted() );
         // In our capped storage model, every extent must have at least one
         // deleted record.  Here we check that 'i' is not the last deleted
         // record.  (We expect that there will be deleted records in the new
         // capExtent as well.)
         verify( !i.drec()->nextDeleted().isNull() );
         cappedLastDelRecLastExtent().writing() = i;
     }
 }
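The scan above walks the singly linked deleted-record chain until the next link falls inside the new capExtent, leaving `i` on the last deleted record of the previous extent. A minimal, self-contained sketch of that traversal; `Extent`, `DeletedRec`, and `lastDelBeforeCapExtent` here are hypothetical stand-ins, not the real DiskLoc/drec() machinery:

    #include <cassert>

    // Hypothetical stand-ins for DiskLoc / DeletedRecord; not the real types.
    struct Extent {};
    struct DeletedRec {
        DeletedRec* nextDeleted;   // singly linked, no prev pointer
        Extent*     extent;        // extent this record belongs to
    };

    // Return the last deleted record that precedes the first one belonging to
    // 'capExtent'; mirrors the scan loop in cappedTruncateLastDelUpdate().
    DeletedRec* lastDelBeforeCapExtent(DeletedRec* head, const Extent* capExtent) {
        DeletedRec* i = head;
        while (i->nextDeleted != nullptr && i->nextDeleted->extent != capExtent)
            i = i->nextDeleted;
        // Like the verify() above: the cap extent is expected to contribute at
        // least one deleted record, so the scan must not run off the end.
        assert(i->nextDeleted != nullptr);
        return i;
    }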
Example 2
    DiskLoc NamespaceDetails::__capAlloc( int len ) {
        DiskLoc prev = cappedLastDelRecLastExtent();
        DiskLoc i = cappedFirstDeletedInCurExtent();
        DiskLoc ret;
        for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted() ) {
            // We need to keep at least one DR per extent in cappedListOfAllDeletedRecords(),
            // so make sure there's space to create a DR at the end.
            if ( i.drec()->lengthWithHeaders() >= len + 24 ) {
                ret = i;
                break;
            }
        }

        /* unlink ourself from the deleted list */
        if ( !ret.isNull() ) {
            if ( prev.isNull() )
                cappedListOfAllDeletedRecords().writing() = ret.drec()->nextDeleted();
            else
                prev.drec()->nextDeleted().writing() = ret.drec()->nextDeleted();
            ret.drec()->nextDeleted().writing().setInvalid(); // defensive.
            verify( ret.drec()->extentOfs() < ret.getOfs() );
        }

        return ret;
    }
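The allocator above is a first-fit scan over the current extent's deleted records: a record is usable only if it can hold the new document and still leave room for a trailing deleted-record header (the `len + 24` check), and the winner is unlinked with the help of the trailing `prev` cursor. A rough, self-contained sketch of the same idea; the type names, the function name, and the 24-byte headroom constant are illustrative assumptions rather than the real definitions:

    struct FreeRec {
        FreeRec* next;
        int      length;   // lengthWithHeaders() equivalent
    };

    // Hypothetical headroom so a trailing free record can always be carved out.
    const int kMinLeftover = 24;

    // First-fit: find a free record that fits 'len' plus headroom, unlink it
    // from the singly linked list, and return it (or nullptr if none fits).
    FreeRec* capAllocSketch(FreeRec*& head, int len) {
        FreeRec* prev = nullptr;
        for (FreeRec* i = head; i != nullptr; prev = i, i = i->next) {
            if (i->length >= len + kMinLeftover) {
                // unlink ourselves from the free list
                if (prev == nullptr)
                    head = i->next;
                else
                    prev->next = i->next;
                i->next = nullptr;   // defensive, like setInvalid() above
                return i;
            }
        }
        return nullptr;
    }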
Example 3
    void NamespaceDetails::advanceCapExtent( const char *ns ) {
        // We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
        // (or DiskLoc() if new capExtent == firstExtent)
        if ( capExtent == lastExtent )
            getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
        else {
            DiskLoc i = cappedFirstDeletedInCurExtent();
            for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted );
            getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = i;
        }

        getDur().writingDiskLoc( capExtent ) = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;

        /* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
        //dassert( theCapExtent()->ns == ns );

        theCapExtent()->assertOk();
        getDur().writingDiskLoc( capFirstNewRecord ) = DiskLoc();
    }
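Advancing the cap extent is a circular step through the extent chain: follow xnext, and wrap back to the first extent when the chain ends. A minimal sketch of just that wrap-around step, with `Extent` as a hypothetical stand-in:

    struct Extent {
        Extent* xnext;   // next extent in the collection, or nullptr at the tail
    };

    // Step the cap extent forward, wrapping to the first extent at the tail.
    Extent* advanceCapExtentSketch(Extent* capExtent, Extent* firstExtent) {
        return capExtent->xnext ? capExtent->xnext : firstExtent;
    }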
Example 4
File: cap.cpp Project: ALFIO/mongo
    void NamespaceDetails::emptyCappedCollection( const char *ns ) {
        DEV assert( this == nsdetails(ns) );
        massert( 13424, "collection must be capped", capped );
        massert( 13425, "background index build in progress", !backgroundIndexBuildInProgress );
        massert( 13426, "indexes present", nIndexes == 0 );

        ClientCursor::invalidate( ns );
        NamespaceDetailsTransient::clearForPrefix( ns );

        cappedLastDelRecLastExtent() = DiskLoc();
        cappedListOfAllDeletedRecords() = DiskLoc();
        
        // preserve firstExtent/lastExtent
        capExtent = firstExtent;
        stats.datasize = stats.nrecords = 0;
        // lastExtentSize preserve
        // nIndexes preserve 0
        // capped preserve true
        // max preserve
        paddingFactor = 1.0;
        flags = 0;
        capFirstNewRecord = DiskLoc();
        capFirstNewRecord.setInvalid();
        cappedLastDelRecLastExtent().setInvalid();
        // dataFileVersion preserve
        // indexFileVersion preserve
        multiKeyIndexBits = 0;
        reservedA = 0;
        extraOffset = 0;
        // backgroundIndexBuildInProgress preserve 0
        memset(reserved, 0, sizeof(reserved));

        for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
            DiskLoc prev = ext.ext()->xprev;
            DiskLoc next = ext.ext()->xnext;
            DiskLoc empty = ext.ext()->reuse( ns );
            ext.ext()->xprev = prev;
            ext.ext()->xnext = next;
            addDeletedRec( empty.drec(), empty );
        }
    }
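The final loop rebuilds every extent as one big deleted record while keeping the extent chain intact: xprev and xnext are saved before reuse() wipes the extent header, then restored afterwards. A simplified sketch of that save/clear/restore pattern, assuming hypothetical `ExtentSketch`/`reuse` stand-ins and using a plain vector in place of addDeletedRec():

    #include <vector>

    struct ExtentSketch {
        ExtentSketch* xprev = nullptr;
        ExtentSketch* xnext = nullptr;
        int freeSpace = 0;          // stands in for the emptied record body

        // Stand-in for Extent::reuse(): wipe the header, report reclaimed space.
        int reuse(int capacity) {
            xprev = xnext = nullptr;
            freeSpace = capacity;
            return capacity;
        }
    };

    // Empty every extent but keep the chain linkage that reuse() would clobber.
    void emptyAllExtents(ExtentSketch* first, int capacity, std::vector<int>& freeList) {
        for (ExtentSketch* ext = first; ext != nullptr; ext = ext->xnext) {
            ExtentSketch* prev = ext->xprev;
            ExtentSketch* next = ext->xnext;
            int reclaimed = ext->reuse(capacity);   // clears xprev/xnext
            ext->xprev = prev;                      // restore the chain
            ext->xnext = next;
            freeList.push_back(reclaimed);          // addDeletedRec() equivalent
        }
    }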
Example 5
    void NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive) {
        DEV verify( this == nsdetails(ns) );
        verify( cappedLastDelRecLastExtent().isValid() );

        // We iteratively remove the newest document until the newest document
        // is 'end', then we remove 'end' if requested.
        bool foundLast = false;
        while( 1 ) {
            if ( foundLast ) {
                // 'end' has been found and removed, so break.
                break;
            }
            getDur().commitIfNeeded();
            // 'curr' will point to the newest document in the collection.
            DiskLoc curr = theCapExtent()->lastRecord;
            verify( !curr.isNull() );
            if ( curr == end ) {
                if ( inclusive ) {
                    // 'end' has been found, so break next iteration.
                    foundLast = true;
                }
                else {
                    // 'end' has been found, so break.
                    break;
                }
            }

            // TODO The algorithm used in this function cannot generate an
            // empty collection, but we could call emptyCappedCollection() in
            // this case instead of asserting.
            uassert( 13415, "emptying the collection is not allowed", _stats.nrecords > 1 );

            // Delete the newest record, and coalesce the new deleted
            // record with existing deleted records.
            theDataFileMgr.deleteRecord(this, ns, curr.rec(), curr, true);
            compact();

            // This is the case where we have not yet had to remove any
            // documents to make room for other documents, and we are allocating
            // documents from free space in fresh extents instead of reusing
            // space from familiar extents.
            if ( !capLooped() ) {

                // We just removed the last record from the 'capExtent', and
                // the 'capExtent' can't be empty, so we set 'capExtent' to
                // capExtent's prev extent.
                if ( theCapExtent()->lastRecord.isNull() ) {
                    verify( !theCapExtent()->xprev.isNull() );
                    // NOTE Because we didn't delete the last document, and
                    // capLooped() is false, capExtent is not the first extent
                    // so xprev will be nonnull.
                    _capExtent.writing() = theCapExtent()->xprev;
                    theCapExtent()->assertOk();

                    // update cappedLastDelRecLastExtent()
                    cappedTruncateLastDelUpdate();
                }
                continue;
            }

            // This is the case where capLooped() is true, and we just deleted
            // from capExtent, and we just deleted capFirstNewRecord, which was
            // the last record on the fresh side of capExtent.
            // NOTE In this comparison, curr and potentially capFirstNewRecord
            // may point to invalid data, but we can still compare the
            // references themselves.
            if ( curr == _capFirstNewRecord ) {

                // Set 'capExtent' to the first nonempty extent prior to the
                // initial capExtent.  There must be such an extent because we
                // have not deleted the last document in the collection.  It is
                // possible that all extents other than the capExtent are empty.
                // In this case we will keep the initial capExtent and specify
                // that all records contained within are on the fresh rather than
                // stale side of the extent.
                DiskLoc newCapExtent = _capExtent;
                do {
                    // Find the previous extent, looping if necessary.
                    newCapExtent = ( newCapExtent == _firstExtent ) ? _lastExtent : newCapExtent.ext()->xprev;
                    newCapExtent.ext()->assertOk();
                }
                while ( newCapExtent.ext()->firstRecord.isNull() );
                _capExtent.writing() = newCapExtent;

                // Place all documents in the new capExtent on the fresh side
                // of the capExtent by setting capFirstNewRecord to the first
                // document in the new capExtent.
                _capFirstNewRecord.writing() = theCapExtent()->firstRecord;

                // update cappedLastDelRecLastExtent()
                cappedTruncateLastDelUpdate();
            }
        }
    }
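The control flow above (repeatedly delete the newest record until 'end' is reached, with the 'inclusive' flag deciding whether 'end' itself goes too) can be shown in isolation on a plain container. A sketch with std::deque standing in for the capped collection; all extent and durability bookkeeping is omitted, and the function name is made up:

    #include <deque>
    #include <cassert>

    // Remove everything newer than 'end' (back of the deque is newest); if
    // 'inclusive', remove 'end' as well. Mirrors the foundLast loop above.
    void cappedTruncateAfterSketch(std::deque<int>& coll, int end, bool inclusive) {
        bool foundLast = false;
        while (true) {
            if (foundLast)
                break;                        // 'end' was removed last pass
            assert(!coll.empty());
            int curr = coll.back();           // newest document
            if (curr == end) {
                if (inclusive)
                    foundLast = true;         // remove 'end', then stop next pass
                else
                    break;                    // keep 'end' and stop now
            }
            assert(coll.size() > 1);          // emptying the collection is not allowed
            coll.pop_back();                  // delete the newest record
        }
    }

For coll = {1, 2, 3, 4, 5} and end = 3, the non-inclusive call leaves {1, 2, 3} and the inclusive call leaves {1, 2}.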
Example 6
    DiskLoc NamespaceDetails::cappedAlloc(const char *ns, int len) {
        
        if ( len > theCapExtent()->length ) {
            // the extent check is a way to try and improve performance
            uassert( 16328 , str::stream() << "document is larger than capped size " 
                     << len << " > " << storageSize() , len <= storageSize() );
        }
        
        // signal done allocating new extents.
        if ( !cappedLastDelRecLastExtent().isValid() )
            getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();

        verify( len < 400000000 );
        int passes = 0;
        int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
        if ( maxPasses < 5000 ) {
            // this is for backwards safety since 5000 was the old value
            maxPasses = 5000;
        }
        DiskLoc loc;

        // delete records until we have room and the max # objects limit is satisfied.

        /* this fails on a rename -- that is ok but must keep commented out */
        //verify( theCapExtent()->ns == ns );

        theCapExtent()->assertOk();
        DiskLoc firstEmptyExtent;
        while ( 1 ) {
            if ( _stats.nrecords < maxCappedDocs() ) {
                loc = __capAlloc( len );
                if ( !loc.isNull() )
                    break;
            }

            // If on first iteration through extents, don't delete anything.
            if ( !_capFirstNewRecord.isValid() ) {
                advanceCapExtent( ns );

                if ( _capExtent != _firstExtent )
                    _capFirstNewRecord.writing().setInvalid();
                // else signal done with first iteration through extents.
                continue;
            }

            if ( !_capFirstNewRecord.isNull() &&
                    theCapExtent()->firstRecord == _capFirstNewRecord ) {
                // We've deleted all records that were allocated on the previous
                // iteration through this extent.
                advanceCapExtent( ns );
                continue;
            }

            if ( theCapExtent()->firstRecord.isNull() ) {
                if ( firstEmptyExtent.isNull() )
                    firstEmptyExtent = _capExtent;
                advanceCapExtent( ns );
                if ( firstEmptyExtent == _capExtent ) {
                    maybeComplain( ns, len );
                    return DiskLoc();
                }
                continue;
            }

            DiskLoc fr = theCapExtent()->firstRecord;
            theDataFileMgr.deleteRecord(this, ns, fr.rec(), fr, true); // ZZZZZZZZZZZZ
            compact();
            if( ++passes > maxPasses ) {
                StringBuilder sb;
                sb << "passes >= maxPasses in NamespaceDetails::cappedAlloc: ns: " << ns
                   << ", len: " << len
                   << ", maxPasses: " << maxPasses
                   << ", _maxDocsInCapped: " << _maxDocsInCapped
                   << ", nrecords: " << _stats.nrecords
                   << ", datasize: " << _stats.datasize;
                msgasserted(10345, sb.str());
            }
        }

        // Remember first record allocated on this iteration through capExtent.
        if ( _capFirstNewRecord.isValid() && _capFirstNewRecord.isNull() )
            getDur().writingDiskLoc(_capFirstNewRecord) = loc;

        return loc;
    }
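The maxPasses guard above is a simple heuristic: allow roughly one deletion pass per 30 bytes requested (about the smallest entry that could go in the oplog), plus a little slack, and never less than the historical floor of 5000. As a standalone calculation (the function name is illustrative):

    // Heuristic bound on deletion passes for an allocation of 'len' bytes.
    int maxPassesFor(int len) {
        int maxPasses = (len / 30) + 2;   // ~ one pass per smallest oplog entry
        if (maxPasses < 5000)
            maxPasses = 5000;             // historical floor, kept for safety
        return maxPasses;
    }

    // e.g. maxPassesFor(1024) == 5000, maxPassesFor(1000000) == 33335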
Example 7
 DiskLoc &NamespaceDetails::cappedFirstDeletedInCurExtent() {
     if ( cappedLastDelRecLastExtent().isNull() )
         return cappedListOfAllDeletedRecords();
     else
         return cappedLastDelRecLastExtent().drec()->nextDeleted();
 }
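This helper just picks a starting point in the global deleted-record chain: the head of the chain when there is no marker for the previous extent, otherwise the link right after that marker. In sketch form, with a hypothetical `DelRec` stand-in:

    struct DelRec { DelRec* nextDeleted; };   // hypothetical stand-in

    // 'listHead' is the head of the deleted-record chain; 'lastDelPrevExtent'
    // is the last deleted record of the extent before the cap extent, or null.
    DelRec* firstDeletedInCurExtentSketch(DelRec* listHead, DelRec* lastDelPrevExtent) {
        if (lastDelPrevExtent == nullptr)
            return listHead;                       // cap extent starts the chain
        return lastDelPrevExtent->nextDeleted;     // first link past the marker
    }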
Example 8
    DiskLoc NamespaceDetails::cappedAlloc(const char *ns, int len) {
        // signal done allocating new extents.
        if ( !cappedLastDelRecLastExtent().isValid() )
            getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();

        verify( len < 400000000 );
        int passes = 0;
        int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
        if ( maxPasses < 5000 ) {
            // this is for backwards safety since 5000 was the old value
            maxPasses = 5000;
        }
        DiskLoc loc;

        // delete records until we have room and the max # objects limit is satisfied.

        /* this fails on a rename -- that is ok but must keep commented out */
        //verify( theCapExtent()->ns == ns );

        theCapExtent()->assertOk();
        DiskLoc firstEmptyExtent;
        while ( 1 ) {
            if ( stats.nrecords < max ) {
                loc = __capAlloc( len );
                if ( !loc.isNull() )
                    break;
            }

            // If on first iteration through extents, don't delete anything.
            if ( !capFirstNewRecord.isValid() ) {
                advanceCapExtent( ns );

                if ( capExtent != firstExtent )
                    capFirstNewRecord.writing().setInvalid();
                // else signal done with first iteration through extents.
                continue;
            }

            if ( !capFirstNewRecord.isNull() &&
                    theCapExtent()->firstRecord == capFirstNewRecord ) {
                // We've deleted all records that were allocated on the previous
                // iteration through this extent.
                advanceCapExtent( ns );
                continue;
            }

            if ( theCapExtent()->firstRecord.isNull() ) {
                if ( firstEmptyExtent.isNull() )
                    firstEmptyExtent = capExtent;
                advanceCapExtent( ns );
                if ( firstEmptyExtent == capExtent ) {
                    maybeComplain( ns, len );
                    return DiskLoc();
                }
                continue;
            }

            DiskLoc fr = theCapExtent()->firstRecord;
            theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true); // ZZZZZZZZZZZZ
            compact();
            if( ++passes > maxPasses ) {
                log() << "passes ns:" << ns << " len:" << len << " maxPasses: " << maxPasses << '\n';
                log() << "passes max:" << max << " nrecords:" << stats.nrecords << " datasize: " << stats.datasize << endl;
                massert( 10345 ,  "passes >= maxPasses in capped collection alloc", false );
            }
        }

        // Remember first record allocated on this iteration through capExtent.
        if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
            getDur().writingDiskLoc(capFirstNewRecord) = loc;

        return loc;
    }
Example 9
File: cap.cpp Project: ALFIO/mongo
    /* everything from end on, eliminate from the capped collection.
       @param inclusive if true, deletes end (i.e. closed or open range)
    */
    void NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive) {
        DEV assert( this == nsdetails(ns) );
        assert( cappedLastDelRecLastExtent().isValid() );
        
        bool foundLast = false;
        while( 1 ) {
            if ( foundLast ) {
                break;
            }
            DiskLoc curr = theCapExtent()->lastRecord;
            assert( !curr.isNull() );
            if ( curr == end ) {
                if ( inclusive ) {
                    foundLast = true;
                } else {
                    break;
                }
            }
            
            uassert( 13415, "emptying the collection is not allowed", stats.nrecords > 1 );
            
            if ( !capLooped() ) {
                theDataFileMgr.deleteRecord(ns, curr.rec(), curr, true);
                compact();
                if ( theCapExtent()->lastRecord.isNull() ) {
                    assert( !theCapExtent()->xprev.isNull() );
                    capExtent = theCapExtent()->xprev;
                    theCapExtent()->assertOk();
                    if ( capExtent == firstExtent ) {
                        cappedLastDelRecLastExtent() = DiskLoc();
                    } else {
                        // slow - there's no prev ptr for deleted rec
                        DiskLoc i = cappedListOfAllDeletedRecords();
                        for( ;
                            !i.drec()->nextDeleted.isNull() &&
                            !inCapExtent( i.drec()->nextDeleted );
                            i = i.drec()->nextDeleted );
                        assert( !i.drec()->nextDeleted.isNull() ); // I believe there is always at least one drec per extent
                        cappedLastDelRecLastExtent() = i;
                    }
                }
                continue;
            }

            theDataFileMgr.deleteRecord(ns, curr.rec(), curr, true);
            compact();
            if ( curr == capFirstNewRecord ) { // invalid, but can compare locations
                capExtent = ( capExtent == firstExtent ) ? lastExtent : theCapExtent()->xprev;
                theCapExtent()->assertOk();
                assert( !theCapExtent()->firstRecord.isNull() );
                capFirstNewRecord = theCapExtent()->firstRecord;
                if ( capExtent == firstExtent ) {
                    cappedLastDelRecLastExtent() = DiskLoc();
                } else {
                    // slow - there's no prev ptr for deleted rec
                    DiskLoc i = cappedListOfAllDeletedRecords();
                    for( ;
                        !i.drec()->nextDeleted.isNull() &&
                        !inCapExtent( i.drec()->nextDeleted );
                        i = i.drec()->nextDeleted );
                    assert( !i.drec()->nextDeleted.isNull() ); // I believe there is always at least one drec per extent
                    cappedLastDelRecLastExtent() = i;
                }
            }
        }
    }