/**
 * Append up to `remaining` bytes from buf at the current write offset.
 *
 * Writes at most one chunk (the contiguous run returned by
 * calc_chunk_from_offset() for write_offset_, i.e. it never crosses a page
 * boundary in a single call); callers loop to write more.
 *
 * @param buf       Source bytes.
 * @param remaining Number of bytes the caller still wants written.
 * @param latch_p   Optional out-param: if non-NULL, the page that was written
 *                  is returned write-latched (caller must unlock + release);
 *                  otherwise the page is released before returning.
 * @return Number of bytes actually written; 0 means this dataPage is out of
 *         space (the offset fell past first_page_ + page_count_).
 */
size_t dataPage::write_bytes(const byte * buf, ssize_t remaining, Page ** latch_p) {
  if(latch_p) { *latch_p = NULL; }
  recordid chunk = calc_chunk_from_offset(write_offset_);
  // Clamp the chunk to what the caller actually has left to write.
  if(chunk.size > remaining) { chunk.size = remaining; }
  if(chunk.page >= first_page_ + page_count_) {
    chunk.size = 0; // no space (should not happen)
  } else {
    Page *p = alloc_ ? alloc_->load_page(xid_, chunk.page) : loadPage(xid_, chunk.page);
    assert(chunk.size);
    memcpy(data_at_offset_ptr(p, chunk.slot), buf, chunk.size);
    // Stamp the page with the allocator's current LSN so it is marked dirty.
    stasis_page_lsn_write(xid_, p, alloc_->get_lsn(xid_));
    if(latch_p && !*latch_p) {
      // Hand the page back to the caller, write-latched, instead of
      // releasing it. (*latch_p was NULLed above, so this branch is taken
      // whenever the caller asked for the latch.)
      writelock(p->rwlatch,0);
      *latch_p = p;
    } else {
      releasePage(p);
    }
    write_offset_ += chunk.size;
  }
  return chunk.size;
}
/**
 * Initialize a fresh page belonging to this dataPage: set its page type,
 * zero the payload (which arranges for null padding), mark it as the last
 * page of the chain for now, and write a zero length for the first record.
 *
 * @param pageid The page to initialize. Normally loaded with
 *               loadUninitializedPage() (prior on-disk contents ignored);
 *               under CHECK_FOR_SCRIBBLING it is read back so that a
 *               double-allocation of a DATA_PAGE can be detected.
 */
void dataPage::initialize_page(pageid_t pageid) {
  // Load the first page.
  Page *p;
#ifdef CHECK_FOR_SCRIBBLING
  // Bug fix: this read 'alloc->load_page', but the member is 'alloc_'
  // (see write_bytes()/initialize_next_page()); it failed to compile
  // whenever CHECK_FOR_SCRIBBLING was defined.
  p = alloc_ ? alloc_->load_page(xid_, pageid) : loadPage(xid_, pageid);
  if(*stasis_page_type_ptr(p) == DATA_PAGE) {
    // Two dataPages were handed the same backing page; fail loudly.
    printf("Collision on page %lld\n", (long long)pageid); fflush(stdout);
    assert(*stasis_page_type_ptr(p) != DATA_PAGE);
  }
#else
  p = loadUninitializedPage(xid_, pageid);
#endif
  DEBUG("\t\t\t\t\t\t->%lld\n", pageid);

  // Initialize header.
  p->pageType = DATA_PAGE;

  // Clear page (arranges for null-padding).
  // XXX null pad more carefully and use sentinel value instead?
  memset(p->memAddr, 0, PAGE_SIZE);

  // We're the last page for now.
  *is_another_page_ptr(p) = 0;

  // Write 0 to first data size.
  *length_at_offset_ptr(p, calc_chunk_from_offset(write_offset_).slot) = 0;

  // Set the page dirty.
  stasis_page_lsn_write(xid_, p, alloc_->get_lsn(xid_));

  releasePage(p);
}
/**
 * Undo the operation recorded in log entry e.
 *
 * Logical undos (entries whose page is INVALID_PAGE, SEGMENT_PAGEID, or
 * MULTI_PAGEID) are executed unconditionally.  Physical undos are applied
 * only if the page's LSN is older than effective_lsn, and the page is then
 * stamped with effective_lsn so the undo is not re-applied.
 * NOTE(review): effective_lsn is presumably the LSN of the compensation
 * record for this undo — confirm against the recovery caller.
 *
 * @param e             Must be an UPDATELOG entry with a valid funcID whose
 *                      table entry has a valid undo slot.
 * @param effective_lsn LSN to compare against / stamp onto the page.
 * @param p             The page named by e->update.page; ignored (may be
 *                      anything) for logical/segment undos.
 */
void stasis_operation_undo(const LogEntry * e, lsn_t effective_lsn, Page * p) {
  // Only handle update entries
  assert(e->type == UPDATELOG);
  assert(e->update.funcID != OPERATION_INVALID);
  // Look up which operation implements this entry's undo.
  int undo = stasis_operation_table[e->update.funcID].undo;
  assert(undo != OPERATION_INVALID);
  if(e->update.page == INVALID_PAGE
     || e->update.page == SEGMENT_PAGEID
     || e->update.page == MULTI_PAGEID) {
    // logical undos are executed unconditionally, as are segment-based undos
    DEBUG("OPERATION xid %d FuncID %d Undo, %d LSN %lld {logical}\n",
          e->xid, e->update.funcID, undo, e->LSN);
    stasis_operation_table[undo].run(e,0);
  } else {
    // Physical undo: the caller must hand us the page the entry refers to.
    assert(p->id == e->update.page);
    if(stasis_page_lsn_read(p) < effective_lsn) {
      DEBUG("OPERATION xid %d Undo, %lld {%lld:%lld}\n",
            e->xid, e->LSN, e->update.page, stasis_page_lsn_read(p));
      // Run the undo, then advance the page LSN so it won't be undone twice.
      stasis_operation_table[undo].run(e,p);
      stasis_page_lsn_write(e->xid, p, effective_lsn);
    } else {
      // Page already reflects this undo (or something newer); skip it.
      DEBUG("OPERATION xid %d skip undo, %lld {%lld:%lld}\n",
            e->xid, e->LSN, e->update.page, stasis_page_lsn_read(p));
    }
  }
}
void stasis_operation_redo(const LogEntry * e, Page * p) { // Only handle update log entries assert(e->type == UPDATELOG); // If this is a logical operation, something is broken assert(e->update.page != INVALID_PAGE); assert(e->update.funcID != OPERATION_INVALID); assert(stasis_operation_table[e->update.funcID].redo != OPERATION_INVALID); if(stasis_operation_table[e->update.funcID].redo == OPERATION_NOOP) { return; } if((!p) || stasis_page_lsn_read(p) < e->LSN || e->update.funcID == OPERATION_SET_LSN_FREE || e->update.funcID == OPERATION_SET_LSN_FREE_INVERSE) { DEBUG("OPERATION xid %d Redo, %lld {%lld:%lld}\n", e->xid, e->LSN, e->update.page, stasis_page_lsn_read(p)); // Need to check the id field to find out what the REDO_action // is for this log type. // contrast with stasis_operation_do(), which doesn't check the .redo field stasis_operation_table[stasis_operation_table[e->update.funcID].redo] .run(e,p); if(p) stasis_page_lsn_write(e->xid, p, e->LSN); } else { DEBUG("OPERATION xid %d skip redo, %lld {%lld:%lld}\n", e->xid, e->LSN, e->update.page, stasis_page_lsn_read(p)); } }
void stasis_operation_do(const LogEntry * e, Page * p) { if(p) assertlocked(p->rwlatch); assert(e->update.funcID != OPERATION_INVALID); stasis_operation_table[e->update.funcID].run(e, p); DEBUG("OPERATION xid %d Do, %lld {%lld:%lld}\n", e->xid, e->LSN, e->update.page, p ? stasis_page_lsn_read(p) : -1); if(p) stasis_page_lsn_write(e->xid, p, e->LSN); }
/**
 * Grow this dataPage by one backing page and initialize it.
 *
 * Must be called exactly at a page boundary (the current write offset maps
 * to slot 0 of the page one past the end); any other state aborts.  The
 * previous page's "is another page" flag is updated to point at the new one
 * before the new page is initialized.
 *
 * @return true on success, false if the underlying region cannot grow.
 */
bool dataPage::initialize_next_page() {
  recordid rid = calc_chunk_from_offset(write_offset_);
  assert(rid.slot == 0);  // caller must be exactly at a page boundary
  DEBUG("\t\t%lld\n", (long long)rid.page);
  if(rid.page >= first_page_ + page_count_) {
    // We should be exactly one past the last allocated page, never further.
    assert(rid.page == first_page_ + page_count_);
    if(alloc_->grow_extent(1)) {
      page_count_++;
    } else {
      return false; // The region is full
    }
  } else {
    // Offset points inside the already-allocated range: invariant violated.
    abort();
  }

  // Link the previous page to the new one.  NOTE(review): 2 appears to mark
  // "successor of the first page" and 1 a successor of any later page —
  // confirm against is_another_page_ptr()'s readers.
  Page *p = alloc_ ? alloc_->load_page(xid_, rid.page-1) : loadPage(xid_, rid.page-1);
  *is_another_page_ptr(p) = (rid.page-1 == first_page_) ? 2 : 1;
  // Stamp the LSN so the link update is marked dirty.
  stasis_page_lsn_write(xid_, p, alloc_->get_lsn(xid_));
  releasePage(p);

  initialize_page(rid.page);
  return true;
}