/* Redo an allocation (or the abort of a dealloc): re-create the slot and,
 * when the log entry carries a preimage after the alloc_arg header,
 * restore the record's bytes from it. */
static int op_alloc(const LogEntry* e, Page* p) {
  assert(e->update.arg_size >= sizeof(alloc_arg));
  const alloc_arg* a = stasis_log_entry_update_args_cptr(e);

  recordid rid = { p->id, a->slot, a->type };
  int rc = operate_helper(e->xid, p, rid);

  int64_t reclen = stasis_record_length_read(e->xid, p, rid);
  if(e->update.arg_size == sizeof(alloc_arg) + reclen) {
    // Aborting a dealloc: a sane preimage follows the header; apply it.
    rid.size = reclen;
    stasis_record_write(e->xid, p, rid, (const byte*)(a+1));
    rid.size = a->type;
  } else {
    // Otherwise there must be no preimage at all (plain alloc).
    assert(e->update.arg_size == sizeof(alloc_arg));
  }
  return rc;
}
/* Apply an increment: the log payload is the slot id of an int-sized
 * record; read it, add one, and write it back. */
static int op_increment(const LogEntry* e, Page* p) {
  assert(e->update.arg_size == sizeof(slotid_t));

  slotid_t slot = *(const slotid_t*)stasis_log_entry_update_args_cptr(e);
  recordid rid = { p->id, slot, sizeof(int) };

  int val;
  stasis_record_read(e->xid, p, rid, (byte*)&val);
  val = val + 1;
  stasis_record_write(e->xid, p, rid, (byte*)&val);
  return 0;
}
/* Replay a linked-list remove. p must be NULL; the list code resolves
 * its own pages. The key bytes immediately follow the log header. */
static int op_linked_list_nta_remove(const LogEntry *e, Page* p) {
  assert(!p);
  const stasis_linked_list_remove_log * entry =
      (const stasis_linked_list_remove_log *)stasis_log_entry_update_args_cptr(e);

  int klen = entry->keySize;
  byte * k = (byte*)(entry+1);

  pthread_mutex_lock(&stasis_linked_list_mutex);
  // printf("Operate remove called: %d\n", *(int*)key);
  // Call the helper directly: we must not write an undo entry here.
  stasis_linked_list_remove_helper(e->xid, entry->list, k, klen);
  pthread_mutex_unlock(&stasis_linked_list_mutex);
  return 0;
}
/* Redo a dealloc: free the slot named by the log entry's alloc_arg and
 * verify it really became invalid. */
static int op_dealloc(const LogEntry* e, Page* p) {
  assert(e->update.arg_size >= sizeof(alloc_arg));
  const alloc_arg* a = stasis_log_entry_update_args_cptr(e);
  recordid rid = { p->id, a->slot, a->type };

  // The entry carries either a full preimage of the record, or no
  // payload at all (we're aborting a talloc).
  int64_t reclen = stasis_record_length_read(e->xid, p, rid);
  assert(e->update.arg_size == sizeof(alloc_arg) + reclen ||
         e->update.arg_size == sizeof(alloc_arg));

  stasis_record_free(e->xid, p, rid);
  assert(stasis_record_type_read(e->xid, p, rid) == INVALID_SLOT);
  return 0;
}
/**
 * Replay the on-disk log into the tree after a restart.
 *
 * Scans forward from the last truncation point recorded in tbl_header
 * (or from the start of the log if none), re-inserting the tuple from
 * every UPDATELOG entry. INTERNALLOG entries carry nothing to replay.
 * Clears the `recovering` flag when the scan completes.
 *
 * NOTE(review): assumes single-threaded use during recovery — no
 * locking around insertTuple here; confirm callers guarantee this.
 */
void bLSM::replayLog() {
  // Resume from the truncation LSN if one was persisted; otherwise scan
  // the whole log from its beginning.
  lsn_t start = tbl_header.log_trunc;
  LogHandle * lh = start ? getLSNHandle(log_file, start) : getLogHandle(log_file);
  const LogEntry * e;
  while((e = nextInLog(lh))) {
    switch(e->type) {
    case UPDATELOG: {
      // Payload is a serialized dataTuple; rebuild it and reinsert.
      dataTuple * tup = dataTuple::from_bytes((byte*)stasis_log_entry_update_args_cptr(e));
      insertTuple(tup);
      dataTuple::freetuple(tup);
    } break;
    case INTERNALLOG: {
      // Internal bookkeeping record: nothing to apply.
    } break;
    default:
      // Unexpected record type: the assert fails loudly in debug
      // builds; abort() covers NDEBUG builds.
      assert(e->type == UPDATELOG);
      abort();
    }
  }
  freeLogHandle(lh);
  recovering = false;
  printf("\nLog replay complete.\n");
}
/* Replay a linked-list insert. p must be NULL; the helper resolves its
 * own pages. Payload layout: header, then keySize key bytes, then
 * valueSize value bytes. */
static int op_linked_list_nta_insert(const LogEntry* e, Page* p) {
  assert(!p);
  const stasis_linked_list_remove_log * entry =
      (const stasis_linked_list_remove_log *)stasis_log_entry_update_args_cptr(e);

  int klen = entry->keySize;
  int vlen = entry->valueSize;
  byte * k = (byte*)(entry+1);
  byte * v = k + klen;

  pthread_mutex_lock(&stasis_linked_list_mutex);
  // printf("Operate insert called: rid.page = %d keysize = %d valuesize = %d %d {%d %d %d}\n", rid.page, log->keySize, log->valueSize, *(int*)key, value->page, value->slot, value->size);
  // Skip writing the undo! Recovery will write a CLR after we're done,
  // effectively wrapping this in a nested top action, so we needn't
  // worry about that either.
  stasis_linked_list_insert_helper(e->xid, entry->list, k, klen, v, vlen);
  pthread_mutex_unlock(&stasis_linked_list_mutex);
  return 0;
}
/* Redo a realloc: the slot must currently be free; re-create it, then
 * restore the record contents from the preimage stored after the
 * alloc_arg header in the log entry. */
static int op_realloc(const LogEntry* e, Page* p) {
  assert(e->update.arg_size >= sizeof(alloc_arg));
  const alloc_arg* a = stasis_log_entry_update_args_cptr(e);
  recordid rid = { p->id, a->slot, a->type };

  assert(stasis_record_type_read(e->xid, p, rid) == INVALID_SLOT);
  int rc = operate_helper(e->xid, p, rid);

  // A realloc entry always logs the full record image.
  int64_t reclen = stasis_record_length_read(e->xid, p, rid);
  assert(e->update.arg_size == sizeof(alloc_arg) + reclen);

  rid.size = reclen;
  byte * dst = stasis_record_write_begin(e->xid, p, rid);
  memcpy(dst, a+1, reclen);
  stasis_record_write_done(e->xid, p, rid, dst);
  rid.size = a->type;
  return rc;
}