StatusWith<RecordData> KVRecordStore::updateWithDamages( OperationContext* txn,
                                                         const RecordId& id,
                                                         const RecordData& oldRec,
                                                         const char* damageSource,
                                                         const mutablebson::DamageVector& damages ) {
    const KeyString key(id);
    const Slice oldValue(oldRec.data(), oldRec.size());
    const KVUpdateWithDamagesMessage message(damageSource, damages);

    // updateWithDamages can't change the number or size of records, so we don't need to update
    // stats.
    const Status s = _db->update(txn, Slice::of(key), oldValue, message);
    if (!s.isOK()) {
        return StatusWith<RecordData>(s);
    }

    // We also need to reach in and screw with the old doc's data so that the update system gets
    // the new image, because the update system is assuming mmapv1's behavior. Sigh.
    for (mutablebson::DamageVector::const_iterator it = damages.begin();
         it != damages.end(); it++) {
        const mutablebson::DamageEvent& event = *it;
        invariant(event.targetOffset + event.size < static_cast<uint32_t>(oldRec.size()));
        std::copy(damageSource + event.sourceOffset,
                  damageSource + event.sourceOffset + event.size,
                  /* eek */
                  const_cast<char*>(oldRec.data()) + event.targetOffset);
    }

    return StatusWith<RecordData>(oldRec);
}
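// Illustration only, not part of the storage engine: a minimal, self-contained sketch of the
// damage-application loop above, using a local DamageSketch struct that mirrors the
// sourceOffset / targetOffset / size fields of mutablebson::DamageEvent. It shows how each
// damage event copies a span of bytes from the damage source into the existing record image
// without changing the record's size; the names here are hypothetical.
#include <cassert>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

struct DamageSketch {
    size_t sourceOffset;
    size_t targetOffset;
    size_t size;
};

static void applyDamagesSketch(std::string* record,
                               const char* damageSource,
                               const std::vector<DamageSketch>& damages) {
    for (const DamageSketch& d : damages) {
        // Each event patches record[targetOffset, targetOffset + size) in place;
        // the overall record size never changes.
        assert(d.targetOffset + d.size <= record->size());
        std::memcpy(&(*record)[d.targetOffset], damageSource + d.sourceOffset, d.size);
    }
}

int main() {
    const std::string source = "00010111";  // damage source (same bytes as the test below)
    std::string record = "00010111";        // stored record image, patched in place

    std::vector<DamageSketch> dv = {{5, 0, 2}, {3, 2, 3}, {0, 5, 3}};
    applyDamagesSketch(&record, source.c_str(), dv);

    std::cout << record << std::endl;  // prints "11101000"
    return 0;
}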
// Insert a record and try to perform an in-place update on it.
TEST( RecordStoreTestHarness, UpdateWithDamages ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    string data = "00010111";
    DiskLoc loc;
    const RecordData rec(data.c_str(), data.size() + 1);
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<DiskLoc> res = rs->insertRecord( opCtx.get(),
                                                        rec.data(),
                                                        rec.size(),
                                                        false );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            // Each damage event copies bytes from the source buffer into the stored record,
            // so the stored "00010111" becomes "11101000".
            mutablebson::DamageVector dv( 3 );
            dv[0].sourceOffset = 5;
            dv[0].targetOffset = 0;
            dv[0].size = 2;
            dv[1].sourceOffset = 3;
            dv[1].targetOffset = 2;
            dv[1].size = 3;
            dv[2].sourceOffset = 0;
            dv[2].targetOffset = 5;
            dv[2].size = 3;

            WriteUnitOfWork uow( opCtx.get() );
            ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, data.c_str(), dv ) );
            uow.commit();
        }
    }

    data = "11101000";
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            RecordData record = rs->dataFor( opCtx.get(), loc );
            ASSERT_EQUALS( data, record.data() );
        }
    }
}
TEST( RecordStoreTestHarness, Cursor1 ) {
    const int N = 10;

    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            for ( int i = 0; i < N; i++ ) {
                string s = str::stream() << "eliot" << i;
                ASSERT_OK( rs->insertRecord( opCtx.get(), s.c_str(), s.size() + 1,
                                             false ).getStatus() );
            }
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( N, rs->numRecords( opCtx.get() ) );
    }

    {
        int x = 0;
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get() ) );
        while ( !it->isEOF() ) {
            DiskLoc loc = it->getNext();
            RecordData data = it->dataFor( loc );
            string s = str::stream() << "eliot" << x++;
            ASSERT_EQUALS( s, data.data() );
        }
        ASSERT_EQUALS( N, x );
    }

    {
        int x = N;
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        scoped_ptr<RecordIterator> it( rs->getIterator( opCtx.get(), DiskLoc(), false,
                                                        CollectionScanParams::BACKWARD ) );
        while ( !it->isEOF() ) {
            DiskLoc loc = it->getNext();
            RecordData data = it->dataFor( loc );
            string s = str::stream() << "eliot" << --x;
            ASSERT_EQUALS( s, data.data() );
        }
        ASSERT_EQUALS( 0, x );
    }
}
TEST(RecordStoreTestHarness, Simple1) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    string s = "eliot was here";

    RecordId loc1;

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
            ASSERT_OK(res.getStatus());
            loc1 = res.getValue();
            uow.commit();
        }

        ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data());
        ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));

        RecordData rd;
        ASSERT(!rs->findRecord(opCtx.get(), RecordId(111, 17), &rd));
        ASSERT(rd.data() == NULL);

        ASSERT(rs->findRecord(opCtx.get(), loc1, &rd));
        ASSERT_EQUALS(s, rd.data());
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, false);
            ASSERT_OK(res.getStatus());
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(2, rs->numRecords(opCtx.get()));
    }
}
// Insert a record and try to call updateWithDamages() with an empty DamageVector.
TEST( RecordStoreTestHarness, UpdateWithNoDamages ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    if (!rs->updateWithDamagesSupported())
        return;

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 0, rs->numRecords( opCtx.get() ) );
    }

    string data = "my record";
    RecordId loc;
    const RecordData rec(data.c_str(), data.size() + 1);
    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
                                                         rec.data(),
                                                         rec.size(),
                                                         false );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( 1, rs->numRecords( opCtx.get() ) );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            mutablebson::DamageVector dv;

            WriteUnitOfWork uow( opCtx.get() );
            ASSERT_OK( rs->updateWithDamages( opCtx.get(), loc, rec, "", dv ) );
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            RecordData record = rs->dataFor( opCtx.get(), loc );
            ASSERT_EQUALS( data, record.data() );
        }
    }
}
void Database::replace(const RecordID& id, const RecordData& record) {
    Dbt key(id.data(), id.size());

    const std::string str = record.data();
    Dbt data(const_cast<char*>(str.c_str()), str.size());

    // A plain put (flags 0) overwrites whatever value is currently stored under this key.
    dbMain_.put(nullptr, &key, &data, /*flags*/ 0);
}
bool KVRecordStore::findRecord( OperationContext* txn,
                                const RecordId& loc,
                                RecordData* out,
                                bool skipPessimisticLocking ) const {
    RecordData rd = _getDataFor(_db.get(), txn, loc, skipPessimisticLocking);

    if (rd.data() == NULL) {
        return false;
    }

    *out = rd;
    return true;
}
TEST( RecordStoreTestHarness, UpdateInPlace1 ) {
    scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
    scoped_ptr<RecordStore> rs( harnessHelper->newNonCappedRecordStore() );

    if (!rs->updateWithDamagesSupported())
        return;

    string s1 = "aaa111bbb";
    string s2 = "aaa222bbb";

    RecordId loc;
    const RecordData s1Rec(s1.c_str(), s1.size() + 1);

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            StatusWith<RecordId> res = rs->insertRecord( opCtx.get(),
                                                         s1Rec.data(),
                                                         s1Rec.size(),
                                                         -1 );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s1, rs->dataFor( opCtx.get(), loc ).data() );
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        {
            WriteUnitOfWork uow( opCtx.get() );
            const char* damageSource = "222";

            // Replace the three bytes at offset 3 ("111") with "222".
            mutablebson::DamageVector dv;
            dv.push_back( mutablebson::DamageEvent() );
            dv[0].sourceOffset = 0;
            dv[0].targetOffset = 3;
            dv[0].size = 3;

            Status res = rs->updateWithDamages( opCtx.get(), loc, s1Rec, damageSource, dv );
            ASSERT_OK( res );
            uow.commit();
        }
    }

    {
        scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
        ASSERT_EQUALS( s2, rs->dataFor( opCtx.get(), loc ).data() );
    }
}
TEST( RocksRecordStoreTest, Snapshots1 ) {
    unittest::TempDir td( _rocksRecordStoreTestDir );
    scoped_ptr<rocksdb::DB> db( getDB( td.path() ) );

    DiskLoc loc;
    int size = -1;

    {
        RocksRecordStore rs( "foo.bar", db.get(), db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );
        string s = "test string";
        size = s.length() + 1;

        MyOperationContext opCtx( db.get() );
        {
            WriteUnitOfWork uow( opCtx.recoveryUnit() );
            StatusWith<DiskLoc> res = rs.insertRecord( &opCtx, s.c_str(), s.size() + 1, -1 );
            ASSERT_OK( res.getStatus() );
            loc = res.getValue();
        }
    }

    {
        MyOperationContext opCtx( db.get() );
        MyOperationContext opCtx2( db.get() );

        RocksRecordStore rs( "foo.bar", db.get(), db->DefaultColumnFamily(),
                             db->DefaultColumnFamily() );

        rs.deleteRecord( &opCtx, loc );

        RecordData recData = rs.dataFor( loc/*, &opCtx */ );
        ASSERT( !recData.data() && recData.size() == 0 );

        // XXX this test doesn't yet work, but there should be some notion of snapshots,
        // and the op context that doesn't see the deletion shouldn't know that this data
        // has been deleted
        RecordData recData2 = rs.dataFor( loc/*, &opCtx2 */ );
        ASSERT( recData2.data() && recData2.size() == size );
    }
}
void KVCatalog::init( OperationContext* opCtx ) {
    scoped_ptr<RecordIterator> it( _rs->getIterator( opCtx ) );
    while ( !it->isEOF() ) {
        DiskLoc loc = it->getNext();
        RecordData data = it->dataFor( loc );
        BSONObj obj( data.data() );

        // no locking needed since this can only be called from one thread
        string ns = obj["ns"].String();
        string ident = obj["ident"].String();
        _idents[ns] = Entry( ident, loc );
    }
}
RecordID Database::add(const RecordData& record) {
    // The key uses DB_DBT_USERMEM backed by newId, so the record number that the database
    // assigns via DB_APPEND is written directly into newId.
    RecordID newId;
    Dbt key(newId.data(), 0);
    key.set_flags(DB_DBT_USERMEM);
    key.set_ulen(RecordID::size());

    const std::string str = record.data();
    Dbt data(const_cast<char*>(str.c_str()), str.size());

    const int err = dbMain_.put(nullptr, &key, &data, DB_APPEND);
    assert(err == 0);
    assert(key.get_size() == RecordID::size());

    return newId;
}
// Insert multiple records and verify their contents by calling dataFor()
// on each of the returned RecordIds.
TEST(RecordStoreTestHarness, DataForMultiple) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    const int nToInsert = 10;
    RecordId locs[nToInsert];
    for (int i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record----" << i;
            string data = ss.str();

            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            locs[i] = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(nToInsert, rs->numRecords(opCtx.get()));
    }

    for (int i = 0; i < nToInsert; i++) {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            stringstream ss;
            ss << "record----" << i;
            string data = ss.str();

            RecordData record = rs->dataFor(opCtx.get(), locs[i]);
            ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size()));
            ASSERT_EQUALS(data, record.data());
        }
    }
}
void KVCatalog::init(OperationContext* opCtx) {
    // No locking needed since called single threaded.
    scoped_ptr<RecordIterator> it(_rs->getIterator(opCtx));
    while (!it->isEOF()) {
        RecordId loc = it->getNext();
        RecordData data = it->dataFor(loc);
        BSONObj obj(data.data());

        // No locking needed since can only be called from one thread.
        // No rollback since this is just loading already committed data.
        string ns = obj["ns"].String();
        string ident = obj["ident"].String();
        _idents[ns] = Entry(ident, loc);
    }

    // In the unlikely event that we have used this _rand before, generate a new one.
    while (_hasEntryCollidingWithRand()) {
        _rand = _newRand();
    }
}
// Insert a record and verify its contents by calling dataFor()
// on the returned RecordId.
TEST(RecordStoreTestHarness, DataFor) {
    unique_ptr<HarnessHelper> harnessHelper(newHarnessHelper());
    unique_ptr<RecordStore> rs(harnessHelper->newNonCappedRecordStore());

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(0, rs->numRecords(opCtx.get()));
    }

    string data = "record-";
    RecordId loc;
    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            WriteUnitOfWork uow(opCtx.get());
            StatusWith<RecordId> res =
                rs->insertRecord(opCtx.get(), data.c_str(), data.size() + 1, false);
            ASSERT_OK(res.getStatus());
            loc = res.getValue();
            uow.commit();
        }
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        ASSERT_EQUALS(1, rs->numRecords(opCtx.get()));
    }

    {
        unique_ptr<OperationContext> opCtx(harnessHelper->newOperationContext());
        {
            RecordData record = rs->dataFor(opCtx.get(), loc);
            ASSERT_EQUALS(data.size() + 1, static_cast<size_t>(record.size()));
            ASSERT_EQUALS(data, record.data());
        }
    }
}
std::vector<std::string> KVCatalog::getAllIdents(OperationContext* opCtx) const {
    std::vector<std::string> v;

    scoped_ptr<RecordIterator> it(_rs->getIterator(opCtx));
    while (!it->isEOF()) {
        RecordId loc = it->getNext();
        RecordData data = it->dataFor(loc);
        BSONObj obj(data.data());
        v.push_back(obj["ident"].String());

        BSONElement e = obj["idxIdent"];
        if (!e.isABSONObj())
            continue;
        BSONObj idxIdent = e.Obj();

        BSONObjIterator sub(idxIdent);
        while (sub.more()) {
            BSONElement e = sub.next();
            v.push_back(e.String());
        }
    }

    return v;
}