StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
                                                         const char* data,
                                                         int len,
                                                         bool enforceQuota) {
    if (_isCapped && len > _cappedMaxSize) {
        return StatusWith<RecordId>(ErrorCodes::BadValue, "object to insert exceeds cappedMaxSize");
    }

    RecordId loc;
    if (_useOplogHack) {
        // The oplog derives its RecordId from the timestamp embedded in the
        // document, so the id is extracted from the record rather than generated.
        StatusWith<RecordId> status = extractAndCheckLocForOplog(data, len);
        if (!status.isOK())
            return status;
        loc = status.getValue();
        if (loc > _oplog_highestSeen) {
            // Double-checked update: re-test under the mutex before advancing
            // the highest oplog location seen so far.
            stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
            if (loc > _oplog_highestSeen) {
                _oplog_highestSeen = loc;
            }
        }
    } else if (_isCapped) {
        // Other capped collections allocate the next id and track it as an
        // in-flight (uncommitted) location under the same mutex.
        stdx::lock_guard<stdx::mutex> lk(_uncommittedDiskLocsMutex);
        loc = _nextId();
        _addUncommitedDiskLoc_inlock(txn, loc);
    } else {
        loc = _nextId();
    }

    // Obtain a WiredTiger cursor for this table; the write must happen inside
    // an already-active WiredTiger transaction.
    WiredTigerCursor curwrap(_uri, _tableId, true, txn);
    curwrap.assertInActiveTxn();
    WT_CURSOR* c = curwrap.get();
    invariant(c);

    c->set_key(c, _makeKey(loc));
    WiredTigerItem value(data, len);
    c->set_value(c, value.Get());
    int ret = WT_OP_CHECK(c->insert(c));
    if (ret) {
        return StatusWith<RecordId>(wtRCToStatus(ret, "WiredTigerRecordStore::insertRecord"));
    }

    // Update the in-memory record count and data size, then trim old records
    // if this insert pushed a capped collection over its limits.
    _changeNumRecords(txn, 1);
    _increaseDataSize(txn, len);

    cappedDeleteAsNeeded(txn, loc);

    return StatusWith<RecordId>(loc);
}
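For context, a minimal caller sketch, assuming the 3.x-era RecordStore and WriteUnitOfWork interfaces (the helper name insertDocument is illustrative and not taken from the sources above): insertRecord expects to run inside a write unit of work, so the record write and the size/count bookkeeping commit or roll back together.

Status insertDocument(OperationContext* txn, RecordStore* rs, const BSONObj& doc) {
    WriteUnitOfWork wuow(txn);
    StatusWith<RecordId> res = rs->insertRecord(txn, doc.objdata(), doc.objsize(), false);
    if (!res.isOK()) {
        return res.getStatus();  // the unit of work rolls back when wuow goes out of scope
    }
    wuow.commit();  // makes the insert and the metadata updates visible together
    return Status::OK();
}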
Status MobileRecordStore::insertRecords(OperationContext* opCtx,
                                        std::vector<Record>* inOutRecords,
                                        const std::vector<Timestamp>& timestamps) {
    // Inserts record into SQLite table (or replaces if duplicate record id).
    MobileSession* session = MobileRecoveryUnit::get(opCtx)->getSession(opCtx, false);

    SqliteStatement insertStmt(
        *session, "INSERT OR REPLACE INTO \"", _ident, "\"(rec_id, data) VALUES(?, ?);");

    for (auto& record : *inOutRecords) {
        const auto data = record.data.data();
        const auto len = record.data.size();

        _changeNumRecs(opCtx, 1);
        _changeDataSize(opCtx, len);

        // Bind the generated id and the raw record bytes, then execute the
        // prepared insert; step(SQLITE_DONE) expects the statement to run to completion.
        RecordId recId = _nextId();
        insertStmt.bindInt(0, recId.repr());
        insertStmt.bindBlob(1, data, len);
        insertStmt.step(SQLITE_DONE);

        // Report the assigned id back to the caller and reset the statement so
        // it can be reused for the next record in the batch.
        record.id = recId;
        insertStmt.reset();
    }

    return Status::OK();
}
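The mobile variant uses the newer batch-oriented signature: callers pass a vector of Record structs with the data already filled in, and the store writes each assigned id back into the same vector. A minimal caller sketch (illustrative only; docs, opCtx, and recordStore are assumed to exist in the surrounding code):

std::vector<Record> records;
for (const BSONObj& doc : docs) {
    Record rec;
    rec.data = RecordData(doc.objdata(), doc.objsize());  // non-owning view of the document bytes
    records.push_back(rec);
}
std::vector<Timestamp> timestamps(records.size(), Timestamp());
Status s = recordStore->insertRecords(opCtx, &records, timestamps);
// On success, records[i].id holds the RecordId assigned to docs[i].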
    StatusWith<RecordId> KVRecordStore::insertRecord(OperationContext* txn,
                                                     const char* data,
                                                     int len,
                                                     bool enforceQuota) {
        // Allocate the next RecordId, then hand the engine-specific write off
        // to _insertRecord(); only id allocation and status plumbing live here.
        const RecordId id = _nextId();
        const Slice value(data, len);

        const Status status = _insertRecord(txn, id, value);
        if (!status.isOK()) {
            return StatusWith<RecordId>(status);
        }

        return StatusWith<RecordId>(id);
    }
Example #4
    StatusWith<DiskLoc> RocksRecordStore::insertRecord( OperationContext* txn,
                                                        const char* data,
                                                        int len,
                                                        bool enforceQuota ) {
        if ( _isCapped && len > _cappedMaxSize ) {
            return StatusWith<DiskLoc>( ErrorCodes::BadValue,
                                       "object to insert exceeds cappedMaxSize" );
        }

        RocksRecoveryUnit* ru = _getRecoveryUnit( txn );

        DiskLoc loc = _nextId();

        // Queue the write in the recovery unit's RocksDB WriteBatch; the batch
        // is applied to the database when the unit of work commits.
        ru->writeBatch()->Put( _columnFamily, _makeKey( loc ), rocksdb::Slice( data, len ) );

        _changeNumRecords( txn, true );
        _increaseDataSize( txn, len );

        // Trim old records if this insert pushed a capped collection past its limits.
        cappedDeleteAsNeeded(txn);

        return StatusWith<DiskLoc>( loc );
    }
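Unlike the WiredTiger version, nothing is written to storage here directly: the Put only queues the key/value pair in the recovery unit's WriteBatch. Conceptually, the queued writes are applied atomically at commit time; the sketch below illustrates that pattern with the public RocksDB API and is not the engine's actual commit path (db, cf, key, data, and len are assumed inputs).

#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>

// Illustration only: apply a batch of queued writes in one atomic commit.
rocksdb::Status commitQueuedWrites(rocksdb::DB* db,
                                   rocksdb::ColumnFamilyHandle* cf,
                                   const rocksdb::Slice& key,
                                   const char* data, int len) {
    rocksdb::WriteBatch batch;
    batch.Put(cf, key, rocksdb::Slice(data, len));   // queued during the unit of work
    // ...further Puts/Deletes from the same unit of work...
    return db->Write(rocksdb::WriteOptions(), &batch);  // applied atomically
}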