Example no. 1
    StatusWith<DiskLoc> HeapRecordStore::insertRecord(OperationContext* txn,
                                                      const DocWriter* doc,
                                                      bool enforceQuota) {
        const int len = doc->documentSize();
        if (_isCapped && len > _cappedMaxSize) {
            // We use dataSize for capped rollover and we don't want to delete everything if we know
            // this won't fit.
            return StatusWith<DiskLoc>(ErrorCodes::BadValue,
                                       "object to insert exceeds cappedMaxSize");
        }

        // TODO padding?
        const int lengthWithHeaders = len + HeapRecord::HeaderSize;
        boost::shared_array<char> buf(new char[lengthWithHeaders]);
        HeapRecord* rec = reinterpret_cast<HeapRecord*>(buf.get());
        rec->lengthWithHeaders() = lengthWithHeaders;
        doc->writeDocument(rec->data());

        const DiskLoc loc = allocateLoc();
        _records[loc] = buf;
        _dataSize += len;

        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>(loc);
    }
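
The snippet above lays the document out as a single heap allocation: a small header followed by the payload, addressed through a reinterpret_cast. Below is a minimal, self-contained sketch of that header-plus-payload layout; the Rec struct and makeRecord helper are hypothetical stand-ins, not MongoDB's HeapRecord.

    // Sketch only: a simplified header-plus-payload record, not MongoDB's HeapRecord.
    #include <cstring>
    #include <memory>
    #include <string>

    struct Rec {
        static const int HeaderSize = sizeof(int);
        int lengthWithHeaders;                          // total allocation size, header included
        char* data() { return reinterpret_cast<char*>(this) + HeaderSize; }
    };

    std::shared_ptr<char> makeRecord(const std::string& payload) {
        const int lengthWithHeaders = static_cast<int>(payload.size()) + Rec::HeaderSize;
        std::shared_ptr<char> buf(new char[lengthWithHeaders], std::default_delete<char[]>());
        Rec* rec = reinterpret_cast<Rec*>(buf.get());
        rec->lengthWithHeaders = lengthWithHeaders;     // fill the header first
        std::memcpy(rec->data(), payload.data(), payload.size());  // then the document body
        return buf;
    }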
Example no. 2
    StatusWith<DiskLoc> HeapRecordStore::updateRecord(OperationContext* txn,
                                                      const DiskLoc& loc,
                                                      const char* data,
                                                      int len,
                                                      bool enforceQuota,
                                                      UpdateMoveNotifier* notifier ) {
        HeapRecord* oldRecord = recordFor( loc );
        int oldLen = oldRecord->size;

        if (_isCapped && len > oldLen) {
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "failing update: objects in a capped ns cannot grow",
                                        10003 );
        }

        HeapRecord newRecord(len);
        memcpy(newRecord.data.get(), data, len);

        txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
        _data->dataSize += len - oldLen;
        *oldRecord = newRecord;

        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>(loc);
    }
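
The key safety step here is txn->recoveryUnit()->registerChange(new RemoveChange(...)): the old record is captured before it is overwritten, so the mutation can be undone if the enclosing unit of work aborts. A stripped-down sketch of that change-registration pattern follows; the Change, RecoveryUnit, and RemoveChange classes below are simplified stand-ins, not MongoDB's interfaces.

    // Sketch of the register-a-change-for-rollback pattern (simplified, not MongoDB's API).
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    class Change {
    public:
        virtual ~Change() {}
        virtual void rollback() = 0;   // undo the pending mutation
        virtual void commit() {}       // nothing extra to do on success in this sketch
    };

    class RecoveryUnit {
    public:
        void registerChange(Change* c) { _changes.emplace_back(c); }   // takes ownership
        void rollbackAll() {           // undo in reverse registration order
            for (auto it = _changes.rbegin(); it != _changes.rend(); ++it) (*it)->rollback();
            _changes.clear();
        }
    private:
        std::vector<std::unique_ptr<Change>> _changes;
    };

    // Restores the previous value of a key if the unit of work aborts.
    class RemoveChange : public Change {
    public:
        RemoveChange(std::map<int, std::string>* records, int loc, std::string old)
            : _records(records), _loc(loc), _old(std::move(old)) {}
        void rollback() override { (*_records)[_loc] = _old; }
    private:
        std::map<int, std::string>* _records;
        int _loc;
        std::string _old;
    };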
Example no. 3
    Status HeapRecordStore::updateWithDamages( OperationContext* txn,
                                               const DiskLoc& loc,
                                               const RecordData& oldRec,
                                               const char* damageSource,
                                               const mutablebson::DamageVector& damages ) {
        HeapRecord* oldRecord = recordFor( loc );
        const int len = oldRecord->size;

        HeapRecord newRecord(len);
        memcpy(newRecord.data.get(), oldRecord->data.get(), len);

        txn->recoveryUnit()->registerChange(new RemoveChange(_data, loc, *oldRecord));
        *oldRecord = newRecord;

        cappedDeleteAsNeeded(txn);

        char* root = newRecord.data.get();
        mutablebson::DamageVector::const_iterator where = damages.begin();
        const mutablebson::DamageVector::const_iterator end = damages.end();
        for( ; where != end; ++where ) {
            const char* sourcePtr = damageSource + where->sourceOffset;
            char* targetPtr = root + where->targetOffset;
            std::memcpy(targetPtr, sourcePtr, where->size);
        }

        *oldRecord = newRecord;

        return Status::OK();
    }
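
A damaged update copies the old record, then applies each (sourceOffset, targetOffset, size) entry of the DamageVector as a raw memcpy into the copy. The loop below isolates that byte-patching step; the Damage struct is a hypothetical stand-in for mutablebson::DamageEvent.

    // Sketch: applying a vector of byte-range patches to a buffer (simplified DamageVector).
    #include <cstring>
    #include <vector>

    struct Damage {
        size_t sourceOffset;   // where the replacement bytes start in the damage source
        size_t targetOffset;   // where they land in the record being patched
        size_t size;           // number of bytes to copy
    };

    void applyDamages(char* recordData, const char* damageSource,
                      const std::vector<Damage>& damages) {
        for (const Damage& d : damages) {
            std::memcpy(recordData + d.targetOffset, damageSource + d.sourceOffset, d.size);
        }
    }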
Example no. 4
    StatusWith<DiskLoc> HeapRecordStore::insertRecord(OperationContext* txn,
                                                      const DocWriter* doc,
                                                      bool enforceQuota) {
        const int len = doc->documentSize();
        if (_isCapped && len > _cappedMaxSize) {
            // We use dataSize for capped rollover and we don't want to delete everything if we know
            // this won't fit.
            return StatusWith<DiskLoc>(ErrorCodes::BadValue,
                                       "object to insert exceeds cappedMaxSize");
        }

        HeapRecord rec(len);
        doc->writeDocument(rec.data.get());

        DiskLoc loc;
        if (_data->isOplog) {
            StatusWith<DiskLoc> status = extractAndCheckLocForOplog(rec.data.get(), len);
            if (!status.isOK())
                return status;
            loc = status.getValue();
        }
        else {
            loc = allocateLoc();
        }

        txn->recoveryUnit()->registerChange(new InsertChange(_data, loc));
        _data->dataSize += len;
        _data->records[loc] = rec;

        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>(loc);
    }
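
Note how the oplog branch propagates failure: extractAndCheckLocForOplog returns a StatusWith<DiskLoc>, which is checked with isOK(), unwrapped with getValue(), and returned unchanged on error. A toy version of that value-or-error type is sketched below; it only illustrates the pattern and is not MongoDB's mongo::StatusWith (the toy also requires T to be default-constructible).

    // Toy StatusWith<T>: carries either a value or an error code plus message (simplified).
    #include <string>
    #include <utility>

    template <typename T>
    class StatusWith {
    public:
        explicit StatusWith(T value) : _ok(true), _value(std::move(value)) {}
        StatusWith(int errorCode, std::string reason)
            : _ok(false), _value(), _code(errorCode), _reason(std::move(reason)) {}

        bool isOK() const { return _ok; }
        const T& getValue() const { return _value; }      // only meaningful when isOK()
        int code() const { return _code; }
        const std::string& reason() const { return _reason; }

    private:
        bool _ok;
        T _value;
        int _code = 0;
        std::string _reason;
    };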
Example no. 5
StatusWith<RecordId> WiredTigerRecordStore::updateRecord(OperationContext* txn,
                                                         const RecordId& loc,
                                                         const char* data,
                                                         int len,
                                                         bool enforceQuota,
                                                         UpdateNotifier* notifier) {
    WiredTigerCursor curwrap(_uri, _tableId, true, txn);
    curwrap.assertInActiveTxn();
    WT_CURSOR* c = curwrap.get();
    invariant(c);
    c->set_key(c, _makeKey(loc));
    int ret = WT_OP_CHECK(c->search(c));
    invariantWTOK(ret);

    WT_ITEM old_value;
    ret = c->get_value(c, &old_value);
    invariantWTOK(ret);

    int old_length = old_value.size;

    c->set_key(c, _makeKey(loc));
    WiredTigerItem value(data, len);
    c->set_value(c, value.Get());
    ret = WT_OP_CHECK(c->insert(c));
    invariantWTOK(ret);

    _increaseDataSize(txn, len - old_length);

    cappedDeleteAsNeeded(txn, loc);

    return StatusWith<RecordId>(loc);
}
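
In the WiredTiger store an update is just a positioned search (to learn the old value's size for the statistics delta) followed by an insert on the same key, which replaces the stored value because cursors default to overwrite=true. A minimal sketch of that cursor sequence against a plain WiredTiger table follows; it assumes an already-open WT_SESSION and a table created with key_format=q,value_format=u, and it leaves out MongoDB's WiredTigerCursor and invariant wrappers.

// Sketch: search-then-insert on the same key to overwrite a value in WiredTiger.
// Assumes an open WT_SESSION* and a table created with "key_format=q,value_format=u".
#include <wiredtiger.h>
#include <cstdint>

int overwriteRecord(WT_SESSION* session, int64_t key, const char* data, size_t len) {
    WT_CURSOR* c;
    int ret = session->open_cursor(session, "table:records", NULL, NULL, &c);
    if (ret != 0) return ret;

    c->set_key(c, key);
    ret = c->search(c);                       // position on the existing record
    if (ret == 0) {
        WT_ITEM oldValue;
        c->get_value(c, &oldValue);           // oldValue.size could feed a size-delta statistic
    }

    WT_ITEM newValue = {};
    newValue.data = data;
    newValue.size = len;
    c->set_key(c, key);
    c->set_value(c, &newValue);
    ret = c->insert(c);                       // default overwrite=true replaces the value

    c->close(c);
    return ret;
}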
Example no. 6
    StatusWith<DiskLoc> RocksRecordStore::updateRecord( OperationContext* txn,
                                                        const DiskLoc& loc,
                                                        const char* data,
                                                        int len,
                                                        bool enforceQuota,
                                                        UpdateMoveNotifier* notifier ) {
        RocksRecoveryUnit* ru = _getRecoveryUnit( txn );

        std::string old_value;
        // XXX Be sure to also first query the write batch once Facebook implements that
        rocksdb::Status status = _db->Get( _readOptions( txn ),
                                           _columnFamily,
                                           _makeKey( loc ),
                                           &old_value );

        if ( !status.ok() ) {
            return StatusWith<DiskLoc>( ErrorCodes::InternalError, status.ToString() );
        }

        int old_length = old_value.size();

        ru->writeBatch()->Put( _columnFamily, _makeKey( loc ), rocksdb::Slice( data, len ) );

        _increaseDataSize(txn, len - old_length);

        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>( loc );
    }
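
Here the update is staged in the recovery unit's rocksdb::WriteBatch and only reaches the database when the unit of work commits; the Get reads directly from the DB, which is why the comment notes that the write batch is not yet consulted. A minimal sketch of the batch-then-write pattern with the stock RocksDB API follows; it uses the default column family and leaves out MongoDB's recovery-unit plumbing.

    // Sketch: read the old value, stage the new one in a WriteBatch, then apply it atomically.
    // Uses the default column family; error handling is reduced to returning the Status.
    #include <rocksdb/db.h>
    #include <rocksdb/write_batch.h>
    #include <string>

    rocksdb::Status updateValue(rocksdb::DB* db, const std::string& key,
                                const char* data, size_t len, int* sizeDelta) {
        std::string oldValue;
        rocksdb::Status status = db->Get(rocksdb::ReadOptions(), key, &oldValue);
        if (!status.ok()) {
            return status;                          // the key must already exist for an update
        }

        rocksdb::WriteBatch batch;
        batch.Put(key, rocksdb::Slice(data, len));  // staged only, not yet visible

        *sizeDelta = static_cast<int>(len) - static_cast<int>(oldValue.size());
        return db->Write(rocksdb::WriteOptions(), &batch);   // applies the batch atomically
    }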
Example no. 7
StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
                                                         const char* data,
                                                         int len,
                                                         bool enforceQuota) {
    if (_isCapped && len > _cappedMaxSize) {
        return StatusWith<RecordId>(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
    }

    RecordId loc;
    if (_useOplogHack) {
        StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
        if (!status.isOK())
            return status;
        loc = status.getValue();
        if (loc > _oplog_highestSeen) {
            stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
            if (loc > _oplog_highestSeen) {
                _oplog_highestSeen = loc;
            }
        }
    } else if (_isCapped) {
        stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
        loc = _nextId();
        _addUncommitedDiskLoc_inlock(txn, loc);
    } else {
        loc = _nextId();
    }

    WiredTigerCursor curwrap(_uri, _tableId, true, txn);
    curwrap.assertInActiveTxn();
    WT_CURSOR* c = curwrap.get();
    invariant(c);

    c->set_key(c, _makeKey(loc));
    WiredTigerItem value(data, len);
    c->set_value(c, value.Get());
    int ret = WT_OP_CHECK(c->insert(c));
    if (ret) {
        return StatusWith<RecordId>(wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord"));
    }

    _changeNumRecords(txn, 1);
    _increaseDataSize(txn, len);

    cappedDeleteAsNeeded(txn, loc);

    return StatusWith<RecordId>(loc);
}
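
The oplog branch above guards _oplog_highestSeen with a cheap unlocked comparison, then re-checks after taking _uncommittedDiskLocsMutex before writing. The same check-lock-recheck idiom is isolated below; the HighWaterMark class and its names are placeholders, and this sketch uses std::atomic for the unlocked read, a detail the original code handles through its own synchronization.

// Sketch of the check-lock-recheck idiom used for _oplog_highestSeen (names are placeholders).
#include <atomic>
#include <cstdint>
#include <mutex>

class HighWaterMark {
public:
    void advanceTo(int64_t candidate) {
        if (candidate > _highest.load()) {             // cheap pre-check without the lock
            std::lock_guard<std::mutex> lk(_mutex);
            if (candidate > _highest.load()) {         // re-check while holding the lock
                _highest.store(candidate);
            }
        }
    }
    int64_t highest() const { return _highest.load(); }

private:
    std::mutex _mutex;
    std::atomic<int64_t> _highest{0};
};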
Example no. 8
    StatusWith<DiskLoc> HeapRecordStore::updateRecord(OperationContext* txn,
                                                      const DiskLoc& oldLocation,
                                                      const char* data,
                                                      int len,
                                                      bool enforceQuota,
                                                      UpdateMoveNotifier* notifier ) {
        HeapRecord* oldRecord = recordFor( oldLocation );
        int oldLen = oldRecord->netLength();

        // If the length of the new data is <= the length of the old data then just
        // memcopy into the old space
        if ( len <= oldLen) {
            memcpy(oldRecord->data(), data, len);
            _dataSize += len - oldLen;
            return StatusWith<DiskLoc>(oldLocation);
        }

        if ( _isCapped ) {
            return StatusWith<DiskLoc>( ErrorCodes::InternalError,
                                        "failing update: objects in a capped ns cannot grow",
                                        10003 );
        }

        // If the length of the new data exceeds the size of the old Record, we need to allocate
        // a new Record, and delete the old one

        const int lengthWithHeaders = len + HeapRecord::HeaderSize;
        boost::shared_array<char> buf(new char[lengthWithHeaders]);
        HeapRecord* rec = reinterpret_cast<HeapRecord*>(buf.get());
        rec->lengthWithHeaders() = lengthWithHeaders;
        memcpy(rec->data(), data, len);

        _records[oldLocation] = buf;
        _dataSize += len - oldLen;

        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>(oldLocation);
    }
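
This variant first tries to satisfy the update in place: when the new payload is no larger than the old record it simply memcpys into the existing allocation, and it only reallocates (refusing growth for capped collections) when the data grew. The same decision is sketched below with a plain std::vector standing in for the record buffer.

    // Sketch: update in place when the new data fits, otherwise reallocate
    // (capped stores refuse growth, mirroring the error path above).
    #include <cstring>
    #include <stdexcept>
    #include <vector>

    void updateRecord(std::vector<char>& record, const char* data, size_t len, bool isCapped) {
        if (len <= record.size()) {
            std::memcpy(record.data(), data, len);   // fits: overwrite the existing space
            record.resize(len);                      // shrink to the new logical length
            return;
        }
        if (isCapped) {
            throw std::runtime_error("objects in a capped ns cannot grow");
        }
        record.assign(data, data + len);             // larger: allocate fresh storage
    }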
Example no. 9
    StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
                                                        const char* data,
                                                        int len,
                                                        bool enforceQuota ) {
        if ( _isCapped && len > _cappedMaxSize ) {
            return StatusWith<DiskLoc>( ErrorCodes::BadValue,
                                       "object to insert exceeds cappedMaxSize" );
        }

        RocksRecoveryUnit* ru = _getRecoveryUnit( txn );

        DiskLoc loc = _nextId();

        ru->writeBatch()->Put( _columnFamily, _makeKey( loc ), rocksdb::Slice( data, len ) );

        _changeNumRecords( txn, true );
        _increaseDataSize( txn, len );

        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>( loc );
    }
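
Insertion mirrors the update in Example no. 6: check the capped size limit up front, allocate the next location, and stage the Put in a write batch so it becomes visible only on commit. A compact sketch with the stock RocksDB API is below; the incrementing int64 key and its fixed-width big-endian encoding are illustrative choices, not the store's actual _makeKey scheme.

    // Sketch: capped-size check, next-id allocation, and a staged Put (illustrative key scheme).
    #include <rocksdb/db.h>
    #include <rocksdb/write_batch.h>
    #include <atomic>
    #include <cstdint>
    #include <string>

    static std::atomic<int64_t> g_nextId{1};

    rocksdb::Status insertRecord(rocksdb::DB* db, const char* data, size_t len,
                                 bool isCapped, size_t cappedMaxSize, int64_t* outLoc) {
        if (isCapped && len > cappedMaxSize) {
            return rocksdb::Status::InvalidArgument("object to insert exceeds cappedMaxSize");
        }

        const int64_t loc = g_nextId.fetch_add(1);
        // Fixed-width big-endian key so lexicographic order matches allocation order.
        std::string key(8, '\0');
        for (int i = 0; i < 8; ++i) {
            key[i] = static_cast<char>((loc >> (8 * (7 - i))) & 0xff);
        }

        rocksdb::WriteBatch batch;
        batch.Put(key, rocksdb::Slice(data, len));  // staged until the batch is written
        rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch);
        if (status.ok()) {
            *outLoc = loc;
        }
        return status;
    }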