Status RocksRecordStore::updateWithDamages( OperationContext* txn,
                                            const DiskLoc& loc,
                                            const char* damageSource,
                                            const mutablebson::DamageVector& damages ) {
    RocksRecoveryUnit* ru = _getRecoveryUnit( txn );
    rocksdb::Slice key = _makeKey( loc );

    // get the original value
    std::string value;
    rocksdb::Status status = _db->Get( _readOptions( txn ), _columnFamily, key, &value );

    if ( !status.ok() ) {
        if ( status.IsNotFound() )
            return Status( ErrorCodes::InternalError, "doc not found for in-place update" );

        log() << "rocks Get failed, blowing up: " << status.ToString();
        invariant( false );
    }

    // apply the changes to our copy
    for ( size_t i = 0; i < damages.size(); i++ ) {
        mutablebson::DamageEvent event = damages[i];
        const char* sourcePtr = damageSource + event.sourceOffset;

        // a damage may run up to and including the last byte, so <= rather than <
        invariant( event.targetOffset + event.size <= value.length() );
        value.replace( event.targetOffset, event.size, sourcePtr, event.size );
    }

    // write the patched value back
    ru->writeBatch()->Put( _columnFamily, key, value );

    return Status::OK();
}
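For orientation, here is a minimal, hypothetical caller of updateWithDamages. Only the DamageEvent fields (sourceOffset, targetOffset, size) and the function's signature come from the code above; the offsets, the patch buffer, and the rs/txn/loc variables are invented for illustration.

// Hypothetical: overwrite 4 bytes at offset 16 of the stored document
// with bytes 0..3 of `patch`.
mutablebson::DamageVector damages;
mutablebson::DamageEvent event;
event.sourceOffset = 0;   // where the replacement bytes start in `patch`
event.targetOffset = 16;  // where they are written in the stored value
event.size = 4;           // how many bytes to overwrite
damages.push_back( event );

const char patch[4] = { 0x2a, 0x00, 0x00, 0x00 };
Status s = rs->updateWithDamages( txn, loc, patch, damages );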
StatusWith<DiskLoc> RocksRecordStore::updateRecord( OperationContext* txn,
                                                    const DiskLoc& loc,
                                                    const char* data,
                                                    int len,
                                                    bool enforceQuota,
                                                    UpdateMoveNotifier* notifier ) {
    RocksRecoveryUnit* ru = _getRecoveryUnit( txn );

    std::string old_value;
    // XXX Be sure to also first query the write batch once Facebook implements that
    rocksdb::Status status = _db->Get( _readOptions( txn ),
                                       _columnFamily,
                                       _makeKey( loc ),
                                       &old_value );

    if ( !status.ok() ) {
        return StatusWith<DiskLoc>( ErrorCodes::InternalError, status.ToString() );
    }

    int old_length = old_value.size();

    ru->writeBatch()->Put( _columnFamily, _makeKey( loc ), rocksdb::Slice( data, len ) );

    _increaseDataSize( txn, len - old_length );
    cappedDeleteAsNeeded( txn );

    return StatusWith<DiskLoc>( loc );
}
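The XXX above flags a real gap: the read goes straight to the database, so it cannot see writes still pending in the recovery unit's batch. RocksDB has since shipped rocksdb::WriteBatchWithIndex, whose GetFromBatchAndDB resolves a key against the pending batch before falling back to the DB. A sketch of the read under the assumption that writeBatch() returned a WriteBatchWithIndex* (which the code above does not guarantee):

// Sketch only: consult the pending batch first, then the DB.
// Assumes ru->writeBatch() is a rocksdb::WriteBatchWithIndex*.
std::string old_value;
rocksdb::Status status = ru->writeBatch()->GetFromBatchAndDB(
    _db, _readOptions( txn ), _columnFamily, _makeKey( loc ), &old_value );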
Status RocksSortedDataImpl::insert(OperationContext* txn,
                                   const BSONObj& key,
                                   const DiskLoc& loc,
                                   bool dupsAllowed) {
    if (key.objsize() >= kTempKeyMaxSize) {
        string msg = mongoutils::str::stream()
            << "RocksSortedDataImpl::insert: key too large to index, failing "
            << key.objsize() << ' ' << key;
        return Status(ErrorCodes::KeyTooLong, msg);
    }

    RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);

    if (!dupsAllowed) {
        // TODO need key locking to support unique indexes.
        Status status = dupKeyCheck(txn, key, loc);
        if (!status.isOK()) {
            return status;
        }
    }

    ru->registerChange(new ChangeNumEntries(&_numEntries, true));
    ru->writeBatch()->Put(_columnFamily.get(), makeString(key, loc), emptyByteSlice);

    return Status::OK();
}
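registerChange hands the recovery unit a RecoveryUnit::Change, whose commit() and rollback() hooks fire when the unit of work resolves. The implementation of ChangeNumEntries is not shown in this section; a plausible shape, inferred from its two-argument construction above rather than taken from the source, is:

// Sketch of ChangeNumEntries, inferred from its use; the real class may differ.
// The counter moves only on commit, so an aborted insert leaves it untouched.
class ChangeNumEntries : public RecoveryUnit::Change {
public:
    ChangeNumEntries(long long* numEntries, bool increment)
        : _numEntries(numEntries), _increment(increment) {}

    virtual void commit() {
        if (_increment)
            (*_numEntries)++;
        else
            (*_numEntries)--;
    }

    virtual void rollback() {}  // nothing to undo: commit never ran

private:
    long long* _numEntries;
    bool _increment;
};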
void RocksRecordStore::dropRsMetaData( OperationContext* opCtx ) {
    RocksRecoveryUnit* ru = _getRecoveryUnit( opCtx );

    boost::mutex::scoped_lock dataSizeLk( _dataSizeLock );
    ru->writeBatch()->Delete( _metadataColumnFamily, _dataSizeKey );

    boost::mutex::scoped_lock numRecordsLk( _numRecordsLock );
    ru->writeBatch()->Delete( _metadataColumnFamily, _numRecordsKey );
}
void RocksRecordStore::_increaseDataSize( OperationContext* txn, int amount ) {
    boost::mutex::scoped_lock lk( _dataSizeLock );
    _dataSize += amount;

    RocksRecoveryUnit* ru = _getRecoveryUnit( txn );
    const char* ds_ptr = reinterpret_cast<const char*>( &_dataSize );

    ru->writeBatch()->Put( _metadataColumnFamily,
                           rocksdb::Slice( _dataSizeKey ),
                           rocksdb::Slice( ds_ptr, sizeof(long long) ) );
}
void RocksRecordStore::deleteRecord( OperationContext* txn, const DiskLoc& dl ) {
    RocksRecoveryUnit* ru = _getRecoveryUnit( txn );

    std::string oldValue;
    rocksdb::Status status = _db->Get( _readOptions( txn ), _columnFamily,
                                       _makeKey( dl ), &oldValue );
    // the record must exist; otherwise oldValue is empty and the size
    // accounting below silently drifts
    invariant( status.ok() );
    int oldLength = oldValue.size();

    ru->writeBatch()->Delete( _columnFamily, _makeKey( dl ) );

    _changeNumRecords( txn, false );
    _increaseDataSize( txn, -oldLength );
}
// XXX make sure these work with rollbacks (I don't think they will)
void RocksRecordStore::_changeNumRecords( OperationContext* txn, bool insert ) {
    boost::mutex::scoped_lock lk( _numRecordsLock );
    if ( insert ) {
        _numRecords++;
    }
    else {
        _numRecords--;
    }

    RocksRecoveryUnit* ru = _getRecoveryUnit( txn );
    const char* nr_ptr = reinterpret_cast<const char*>( &_numRecords );

    ru->writeBatch()->Put( _metadataColumnFamily,
                           rocksdb::Slice( _numRecordsKey ),
                           rocksdb::Slice( nr_ptr, sizeof(long long) ) );
}
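Note the contrast with ChangeNumEntries above: here _numRecords is mutated immediately, before the batch commits, which is exactly what the XXX is worried about. The persisted form is the counter's raw host-endian bytes, so reading it back is a memcpy. A hypothetical startup helper (the name _loadNumRecords and its existence are assumptions, not part of the original code; needs <cstring> for memcpy):

void RocksRecordStore::_loadNumRecords( OperationContext* txn ) {
    std::string value;
    rocksdb::Status status = _db->Get( _readOptions( txn ), _metadataColumnFamily,
                                       rocksdb::Slice( _numRecordsKey ), &value );
    if ( status.IsNotFound() ) {
        _numRecords = 0;  // brand-new record store, counter never persisted
        return;
    }
    invariant( status.ok() );
    invariant( value.size() == sizeof(long long) );
    // written as raw host-endian bytes in _changeNumRecords, so read it back the same way
    memcpy( &_numRecords, value.data(), sizeof(long long) );
}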
bool RocksSortedDataImpl::unindex(OperationContext* txn,
                                  const BSONObj& key,
                                  const DiskLoc& loc) {
    RocksRecoveryUnit* ru = RocksRecoveryUnit::getRocksRecoveryUnit(txn);

    const string keyData = makeString(key, loc);

    string dummy;
    if (ru->Get(_columnFamily.get(), keyData, &dummy).IsNotFound()) {
        return false;
    }

    ru->registerChange(new ChangeNumEntries(&_numEntries, false));
    ru->writeBatch()->Delete(_columnFamily.get(), keyData);
    return true;
}
StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
                                                    const char* data,
                                                    int len,
                                                    bool enforceQuota ) {
    if ( _isCapped && len > _cappedMaxSize ) {
        return StatusWith<DiskLoc>( ErrorCodes::BadValue,
                                    "object to insert exceeds cappedMaxSize" );
    }

    RocksRecoveryUnit* ru = _getRecoveryUnit( txn );
    DiskLoc loc = _nextId();

    ru->writeBatch()->Put( _columnFamily, _makeKey( loc ), rocksdb::Slice( data, len ) );

    _changeNumRecords( txn, true );
    _increaseDataSize( txn, len );
    cappedDeleteAsNeeded( txn );

    return StatusWith<DiskLoc>( loc );
}
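insertRecord leans on two helpers not shown in this section: _nextId(), which must hand out monotonically increasing DiskLocs, and _makeKey(), which must map them to RocksDB keys that sort in the same order, since cappedDeleteAsNeeded has to find the oldest records first. A plausible _makeKey, inferred from its call sites rather than taken from the source, simply views the DiskLoc's bytes:

// Sketch only: expose the DiskLoc's own bytes as the key. The returned Slice
// borrows the caller's loc, so loc must outlive any use of the key. Byte-wise
// key ordering matches insertion order only if the encoding is big-endian-like,
// which this naive view does not guarantee on little-endian hosts.
rocksdb::Slice RocksRecordStore::_makeKey( const DiskLoc& loc ) {
    return rocksdb::Slice( reinterpret_cast<const char*>( &loc ), sizeof( loc ) );
}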