Example #1
int64_t HeapRecordStore::storageSize(OperationContext* txn,
                                     BSONObjBuilder* extraInfo,
                                     int infoLevel) const {
    // Note: not making use of extraInfo or infoLevel since we don't have extents
    const int64_t recordOverhead = numRecords() * HeapRecord::HeaderSize;
    return _dataSize + recordOverhead;
}
Example #2
bool MobileRecordStore::_resetNumRecsIfNeeded(OperationContext* opCtx, int64_t newNumRecs) {
    bool wasReset = false;
    int64_t currNumRecs = numRecords(opCtx);
    if (currNumRecs != newNumRecs) {
        wasReset = true;
        stdx::lock_guard<stdx::mutex> lock(_numRecsMutex);
        _numRecs = newNumRecs;
    }
    return wasReset;
}
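Example #2's helper is easiest to read next to a caller. The sketch below is purely illustrative, not code from the tree: the _reconcileNumRecs name and the countRecordsInTable helper are invented. It reconciles the cached counter against an authoritative count, which is the kind of drift _resetNumRecsIfNeeded exists to repair.

// Hypothetical caller, for illustration only: reconcile the cached record
// count against an authoritative count (e.g. the result of SELECT COUNT(*)).
// countRecordsInTable() is an invented helper, not part of the real tree.
void MobileRecordStore::_reconcileNumRecs(OperationContext* opCtx) {
    int64_t actualNumRecs = countRecordsInTable(opCtx, _ident);
    if (_resetNumRecsIfNeeded(opCtx, actualNumRecs)) {
        // The cached value had drifted (e.g. after a crash) and was corrected.
    }
}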
Example #3
    bool HeapRecordStore::cappedAndNeedDelete() const {
        if (!_isCapped)
            return false;

        if (_dataSize > _cappedMaxSize)
            return true;

        if ((_cappedMaxDocs != -1) && (numRecords() > _cappedMaxDocs))
            return true;

        return false;
    }
Example #4
    bool HeapRecordStore::cappedAndNeedDelete(OperationContext* txn) const {
        if (!_isCapped)
            return false;

        if (_data->dataSize > _cappedMaxSize)
            return true;

        if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs))
            return true;

        return false;
    }
Example #5
/**
 * SQLite does not directly support truncate. The SQLite documentation recommends a DELETE
 * statement without a WHERE clause; SQLite's truncate optimizer then deletes all of the
 * table content without having to visit each row individually.
 */
Status MobileRecordStore::truncate(OperationContext* opCtx) {
    MobileSession* session = MobileRecoveryUnit::get(opCtx)->getSession(opCtx, false);

    int64_t numRecsBefore = numRecords(opCtx);
    _changeNumRecs(opCtx, -numRecsBefore);
    int64_t dataSizeBefore = dataSize(opCtx);
    _changeDataSize(opCtx, -dataSizeBefore);

    SqliteStatement::execQuery(session, "DELETE FROM \"", _ident, "\";");

    return Status::OK();
}
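The same pattern works against the raw sqlite3 C API. Below is a minimal, self-contained sketch; the "test.db" file and "records" table are invented placeholders, not part of the MongoDB code above. Because the DELETE carries no WHERE clause, SQLite can apply its truncate optimization and clear the table without scanning each row.

#include <sqlite3.h>
#include <cstdio>

// Minimal sketch, not MongoDB code: truncate a table via DELETE without WHERE.
// "test.db" and "records" are hypothetical placeholders.
int truncateTable() {
    sqlite3* db = nullptr;
    if (sqlite3_open("test.db", &db) != SQLITE_OK) {
        std::fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
        sqlite3_close(db);
        return 1;
    }

    // No WHERE clause, so SQLite's truncate optimization can clear the
    // table without visiting individual rows.
    char* errMsg = nullptr;
    if (sqlite3_exec(db, "DELETE FROM \"records\";", nullptr, nullptr, &errMsg) != SQLITE_OK) {
        std::fprintf(stderr, "delete failed: %s\n", errMsg);
        sqlite3_free(errMsg);
    }

    sqlite3_close(db);
    return 0;
}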
Example #6
    bool KVRecordStoreCapped::needsDelete(OperationContext* txn) const {
        if (dataSize(txn) >= _cappedMaxSize) {
            // .. too many bytes
            return true;
        }

        if ((_cappedMaxDocs != -1) && (numRecords(txn) > _cappedMaxDocs)) {
            // .. too many documents
            return true;
        }

        // we're ok
        return false;
    }
Example #7
Status WiredTigerRecordStore::truncate(OperationContext* txn) {
    WiredTigerCursor startWrap(_uri, _tableId, true, txn);
    WT_CURSOR* start = startWrap.get();
    int ret = WT_OP_CHECK(start->next(start));
    // Empty collections don't have anything to truncate.
    if (ret == WT_NOTFOUND) {
        return Status::OK();
    }
    invariantWTOK(ret);

    WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
    invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL)));
    _changeNumRecords(txn, -numRecords(txn));
    _increaseDataSize(txn, -dataSize(txn));

    return Status::OK();
}
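The snippet above truncates the cursor range [start, end of table]. For comparison, WT_SESSION::truncate also accepts an object URI with NULL start/stop cursors, which truncates the whole table in one call. A minimal sketch against the raw WiredTiger C API follows; the "WT_HOME" directory and "table:records" URI are invented placeholders.

#include <wiredtiger.h>

// Minimal sketch, not MongoDB code: whole-table truncate by URI.
// "WT_HOME" and "table:records" are hypothetical placeholders.
int truncateWholeTable() {
    WT_CONNECTION* conn = nullptr;
    WT_SESSION* session = nullptr;

    if (wiredtiger_open("WT_HOME", nullptr, "create", &conn) != 0)
        return 1;
    if (conn->open_session(conn, nullptr, nullptr, &session) != 0)
        return 1;

    // A URI with NULL start/stop cursors truncates the entire object,
    // whereas the MongoDB code above truncates a cursor range.
    int ret = session->truncate(session, "table:records", nullptr, nullptr, nullptr);

    conn->close(conn, nullptr);
    return ret;
}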
Example #8
    void KVRecordStoreCapped::deleteAsNeeded(OperationContext *txn) {
        if (!needsDelete(txn)) {
            // nothing to do
            return;
        }

        // Only one thread should do deletes at a time, otherwise they'll conflict.
        boost::mutex::scoped_lock lock(_cappedDeleteMutex, boost::defer_lock);
        if (_cappedMaxDocs != -1) {
            lock.lock();
        } else {
            if (!lock.try_lock()) {
                // Someone else is deleting old records. Apply back-pressure if too far behind,
                // otherwise continue.
                if ((dataSize(txn) - _cappedMaxSize) < _cappedMaxSizeSlack)
                    return;

                lock.lock();

                // If we already waited, let someone else do cleanup unless we are significantly
                // over the limit.
                if ((dataSize(txn) - _cappedMaxSize) < (2 * _cappedMaxSizeSlack))
                    return;
            }
        }

        // We do this in a side transaction in case it aborts.
        TempRecoveryUnitSwap swap(txn);

        int64_t ds = dataSize(txn);
        int64_t nr = numRecords(txn);
        int64_t sizeOverCap = (ds > _cappedMaxSize) ? ds - _cappedMaxSize : 0;
        int64_t sizeSaved = 0;
        int64_t docsOverCap = (_cappedMaxDocs != -1 && nr > _cappedMaxDocs) ? nr - _cappedMaxDocs : 0;
        int64_t docsRemoved = 0;

        try {
            WriteUnitOfWork wuow(txn);

            // We're going to notify the underlying store that we've
            // deleted this range of ids.  In TokuFT, this will trigger an
            // optimize.
            RecordId firstDeleted, lastDeleted;

            Timer t;

            // Delete documents while we are over-full and the iterator has more.
            //
            // Note that the iterator we get has the _idTracker's logic
            // already built in, so we don't need to worry about deleting
            // records that are not yet committed, including the one we
            // just inserted
            for (boost::scoped_ptr<RecordIterator> iter(getIterator(txn));
                 ((sizeSaved < sizeOverCap || docsRemoved < docsOverCap) &&
                  !iter->isEOF());
                 ) {
                const RecordId oldest = iter->getNext();

                ++docsRemoved;
                sizeSaved += iter->dataFor(oldest).size();

                if (_cappedDeleteCallback) {
                    // need to notify higher layers that a RecordId is about to be deleted
                    uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, oldest, iter->dataFor(oldest)));
                }
                deleteRecord(txn, oldest);

                if (firstDeleted.isNull()) {
                    firstDeleted = oldest;
                }
                dassert(oldest > lastDeleted);
                lastDeleted = oldest;

                // Now, decide whether to keep working, we want to balance
                // staying on top of the deletion workload with the
                // latency of the client that's doing the deletes for
                // everyone.
                if (sizeOverCap >= _cappedMaxSizeSlack) {
                    // If we're over the slack amount, everyone's going to
                    // block on us anyway, so we may as well keep working.
                    continue;
                }
                if (sizeOverCap < (_cappedMaxSizeSlack / 4) && docsRemoved >= 1000) {
                    // If we aren't too much over and we've done a fair
                    // amount of work, take a break.
                    break;
                } else if (docsRemoved % 1000 == 0 && t.seconds() >= 4) {
                    // If we're under the slack amount and we've already
                    // spent a few seconds working on this, return and give
                    // someone else a chance to shoulder that latency.
                    break;
                }
            }

            if (docsRemoved > 0) {
                _db->justDeletedCappedRange(txn, Slice::of(KeyString(firstDeleted)), Slice::of(KeyString(lastDeleted)),
                                            sizeSaved, docsRemoved);
                wuow.commit();
                dassert(lastDeleted > _lastDeletedId);
                _lastDeletedId = lastDeleted;
            }
        } catch (const WriteConflictException&) {
            log() << "Got conflict truncating capped, ignoring.";
            return;
        }
    }
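The pacing logic in the delete loop above condenses into a small decision function. The sketch below is a hypothetical distillation, not code from the tree: the shouldKeepDeleting name and its parameters are invented, but it restates the same three exit conditions under the same slack thresholds.

#include <cstdint>

// Hypothetical distillation of the pacing rules in deleteAsNeeded() above.
// Returns true if the deleting thread should keep removing documents.
bool shouldKeepDeleting(int64_t sizeOverCap,
                        int64_t cappedMaxSizeSlack,
                        int64_t docsRemoved,
                        int64_t secondsSpent) {
    // Far over the slack amount: everyone will block on us anyway, keep going.
    if (sizeOverCap >= cappedMaxSizeSlack)
        return true;

    // Barely over and we've already done a fair amount of work: take a break.
    if (sizeOverCap < cappedMaxSizeSlack / 4 && docsRemoved >= 1000)
        return false;

    // Periodically yield if we've been at this for several seconds.
    if (docsRemoved % 1000 == 0 && secondsSpent >= 4)
        return false;

    return true;
}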