void RocksRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
    // Nothing to do unless this is a capped collection that is over its limits.
    if (!cappedAndNeedDelete())
        return;

    // A persistent iterator is necessary since you can't read your own writes.
    boost::scoped_ptr<rocksdb::Iterator> oldestFirst(
        _db->NewIterator(_readOptions(txn), _columnFamily));

    // XXX TODO there is a bug here where if the size of the write batch exceeds the cap size
    // then the iterator will not be valid and it will crash. To fix this we need the ability
    // to query the write batch, and delete the oldest record in the write batch until the
    // size of the write batch is less than the cap.

    // XXX PROBLEMS
    // 2 threads could delete the same document
    // multiple inserts using the same snapshot will delete the same document

    // Walk records oldest-first, removing one at a time until the cap is
    // satisfied again or the iterator is exhausted.
    for (oldestFirst->SeekToFirst(); cappedAndNeedDelete() && oldestFirst->Valid();
         oldestFirst->Next()) {
        invariant(_numRecords > 0);

        const rocksdb::Slice key = oldestFirst->key();
        const DiskLoc loc = _makeDiskLoc(key);

        // Give the collection a chance to veto / observe the deletion first.
        if (_cappedDeleteCallback)
            uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, loc));
        deleteRecord(txn, loc);
    }
}
void HeapRecordStore::cappedDeleteAsNeeded(OperationContext* txn) {
    // Repeatedly drop the oldest record (the first key of the ordered map)
    // until the capped-collection constraints are satisfied again.
    for (;;) {
        if (!cappedAndNeedDelete())
            return;

        invariant(!_records.empty());
        const DiskLoc loc = _records.begin()->first;

        // Give the collection a chance to veto / observe the deletion first.
        if (_cappedDeleteCallback)
            uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(txn, loc));
        deleteRecord(txn, loc);
    }
}
// Decides whether this thread should perform capped-collection cleanup after an
// insert, applying back-pressure under contention, and delegates the actual
// deletes to cappedDeleteAsNeeded_inlock(). Returns 0 when no cleanup is needed
// or this thread opts out; otherwise returns whatever the _inlock helper
// returns (presumably an amount reclaimed — confirm against that helper, which
// is not visible here).
int64_t WiredTigerRecordStore::cappedDeleteAsNeeded(OperationContext* txn,
                                                    const RecordId& justInserted) {
    // We only want to do the checks occasionally as they are expensive.
    // This variable isn't thread safe, but has loose semantics anyway.
    dassert(!_isOplog || _cappedMaxDocs == -1);

    if (!cappedAndNeedDelete())
        return 0;

    // ensure only one thread at a time can do deletes, otherwise they'll conflict.
    // Constructed unlocked; each branch below decides how (and whether) to acquire it.
    boost::unique_lock<boost::timed_mutex> lock(_cappedDeleterMutex, boost::defer_lock);  // NOLINT

    if (_cappedMaxDocs != -1) {
        lock.lock();  // Max docs has to be exact, so have to check every time.
    } else if (_hasBackgroundThread) {
        // We are foreground, and there is a background thread,
        // Check if we need some back pressure.
        if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack) {
            return 0;
        }

        // Back pressure needed!
        // We're not actually going to delete anything, but we're going to syncronize
        // on the deleter thread.
        // Don't wait forever: we're in a transaction, we could block eviction.
        if (!lock.try_lock()) {
            // Bounded wait; whether or not we get the lock we just record the
            // stall in the sleep counters and return — the background thread
            // does the actual cleanup.
            Date_t before = Date_t::now();
            (void)lock.try_lock_for(boost::chrono::milliseconds(200));  // NOLINT
            auto delay = boost::chrono::milliseconds(  // NOLINT
                durationCount<Milliseconds>(Date_t::now() - before));
            _cappedSleep.fetchAndAdd(1);
            _cappedSleepMS.fetchAndAdd(delay.count());
        }
        return 0;
    } else {
        // No background deleter: foreground threads race to do the cleanup.
        if (!lock.try_lock()) {
            // Someone else is deleting old records. Apply back-pressure if too far behind,
            // otherwise continue.
            if ((_dataSize.load() - _cappedMaxSize) < _cappedMaxSizeSlack)
                return 0;

            // Don't wait forever: we're in a transaction, we could block eviction.
            Date_t before = Date_t::now();
            bool gotLock = lock.try_lock_for(boost::chrono::milliseconds(200));  // NOLINT
            auto delay = boost::chrono::milliseconds(  // NOLINT
                durationCount<Milliseconds>(Date_t::now() - before));
            _cappedSleep.fetchAndAdd(1);
            _cappedSleepMS.fetchAndAdd(delay.count());
            if (!gotLock)
                return 0;

            // If we already waited, let someone else do cleanup unless we are significantly
            // over the limit.
            if ((_dataSize.load() - _cappedMaxSize) < (2 * _cappedMaxSizeSlack))
                return 0;
        }
    }

    // Lock is held on every path that reaches here; the _inlock suffix
    // indicates the helper relies on that.
    return cappedDeleteAsNeeded_inlock(txn, justInserted);
}