inline void D(int x) {
    splay(root, x);                    // bring x to the root
    Par(Lch(x)) = Par(Rch(x)) = 0;     // detach both subtrees
    root = Tjoin(Lch(x), Rch(x));      // join them into the new tree
    Tupdate(root);
}
// Left rotation: x is the right child of y; x moves up, y becomes its left child.
inline void zag(int x) {
    int y = Par(x), z = Par(y);
    Rch(y) = Lch(x);      // y adopts x's left subtree as its right child
    Par(Rch(y)) = y;      // (node 0 is the null sentinel, so this write is safe)
    Lch(x) = y;
    if (Lch(z) == y) Lch(z) = x; else Rch(z) = x;
    Par(x) = z, Par(y) = x;
    Tupdate(y);           // y moved down; x is refreshed at the end of splay()
}
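// Hedged sketch (not part of the original excerpt): zig is the exact mirror
// of zag above -- a right rotation where x is the LEFT child of y. It is
// reconstructed here only so splay() below is self-contained; the original
// zig may differ in minor details.
inline void zig(int x) {
    int y = Par(x), z = Par(y);
    Lch(y) = Rch(x);      // y adopts x's right subtree as its left child
    Par(Lch(y)) = y;
    Rch(x) = y;
    if (Lch(z) == y) Lch(z) = x; else Rch(z) = x;
    Par(x) = z, Par(y) = x;
    Tupdate(y);
}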
inline void splay(int &root, int x) {
    for (int y, z; Par(x); ) {
        y = Par(x), z = Par(y);
        if (!z) {                              // y is the root: single rotation
            if (Lch(y) == x) zig(x); else zag(x);
        } else if (Lch(z) == y) {
            if (Lch(y) == x) zig(y), zig(x);   // zig-zig
            else             zag(x), zig(x);   // zig-zag
        } else {
            if (Rch(y) == x) zag(y), zag(x);   // zag-zag
            else             zig(x), zag(x);   // zag-zig
        }
    }
    Tupdate(root = x);
}
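// Hedged sketch (not part of the original excerpt): a conventional Tjoin as
// used by D() above -- splay the maximum of the left tree to its root, then
// hang the right tree under it. The real Tjoin may be implemented differently.
inline int Tjoin(int l, int r) {
    if (!l) return r;
    if (!r) return l;
    int x = l;
    while (Rch(x)) x = Rch(x);   // maximum of the left subtree
    splay(l, x);                 // now x == l and Rch(x) == 0
    Rch(x) = r, Par(r) = x;
    Tupdate(x);
    return x;
}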
recordid TallocFromPage(int xid, pageid_t page, unsigned long size) {
  stasis_alloc_t* alloc = stasis_runtime_alloc_state();
  short type;
  if(size >= BLOB_THRESHOLD_SIZE) {
    type = BLOB_SLOT;
  } else {
    assert(size > 0);
    type = size;
  }

  pthread_mutex_lock(&alloc->mut);
  if(!stasis_allocation_policy_can_xid_alloc_from_page(alloc->allocPolicy, xid, page)) {
    pthread_mutex_unlock(&alloc->mut);
    return NULLRID;
  }
  Page * p = loadPage(xid, page);
  writelock(p->rwlatch, 0);
  recordid rid = stasis_record_alloc_begin(xid, p, type);
  if(rid.size != INVALID_SLOT) {
    stasis_record_alloc_done(xid, p, rid);
    stasis_allocation_policy_alloced_from_page(alloc->allocPolicy, xid, page);
    unlock(p->rwlatch);

    alloc_arg a = { rid.slot, type };
    Tupdate(xid, rid.page, &a, sizeof(a), OPERATION_ALLOC);

    if(type == BLOB_SLOT) {
      rid.size = size;
      stasis_blob_alloc(xid, rid);
    }
  } else {
    // Slot allocation failed; the caller sees rid.size == INVALID_SLOT.
    unlock(p->rwlatch);
  }
  releasePage(p);
  pthread_mutex_unlock(&alloc->mut);
  stasis_transaction_table_set_argument(alloc->xact_table, xid, alloc->callback_id,
                                        AT_COMMIT, alloc);
  return rid;
}
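/* Hedged usage sketch (not from the Stasis sources): co-locating a second
   record on the page that already holds an existing one. TallocFromPage
   fails either when the allocation policy refuses the page (NULLRID) or when
   the page has no suitable slot (rid.size == INVALID_SLOT); collapsing both
   into one size check assumes NULLRID.size == INVALID_SLOT, which should be
   verified against your Stasis version. */
recordid first  = Talloc(xid, sizeof(int));
recordid second = TallocFromPage(xid, first.page, sizeof(int));
if(second.size == INVALID_SLOT) {
  second = Talloc(xid, sizeof(int));   /* fall back to any page */
}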
void Tincrement(int xid, recordid rid) {
  Tupdate(xid, rid.page, &rid.slot, sizeof(rid.slot), OPERATION_INCREMENT);
}
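/* Hedged usage sketch (not from the Stasis sources): maintaining a logically
   logged counter. Because OPERATION_INCREMENT is logged as "increment" rather
   than as the resulting value, concurrent transactions can bump the counter
   without read-modify-write conflicts. Tset/Tread are the standard Stasis
   record write/read calls; a symmetric Tdecrement is assumed to exist. */
recordid counter = Talloc(xid, sizeof(int));
int zero = 0;
Tset(xid, counter, &zero);
Tincrement(xid, counter);
Tincrement(xid, counter);
int val;
Tread(xid, counter, &val);   /* val == 2 */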
void Tdealloc(int xid, recordid rid) {
  stasis_alloc_t* alloc = stasis_runtime_alloc_state();

  // @todo this needs to garbage collect empty storage regions.

  pthread_mutex_lock(&alloc->mut);

  Page * p = loadPage(xid, rid.page);
  readlock(p->rwlatch, 0);

  recordid newrid = stasis_record_dereference(xid, p, rid);
  stasis_allocation_policy_dealloced_from_page(alloc->allocPolicy, xid, newrid.page);

  int64_t size = stasis_record_length_read(xid, p, rid);
  int64_t type = stasis_record_type_read(xid, p, rid);

  if(type == NORMAL_SLOT) { type = size; }

  byte * preimage = malloc(sizeof(alloc_arg) + size);

  ((alloc_arg*)preimage)->slot = rid.slot;
  ((alloc_arg*)preimage)->type = type;

  // stasis_record_read() wants rid to carry its raw size, to keep code that
  // doesn't know about record types from introducing memory bugs.
  rid.size = size;
  stasis_record_read(xid, p, rid, preimage + sizeof(alloc_arg));
  // Restore rid to a valid state.
  rid.size = type;

  // OK to release the latch; the page is still pinned (so no WAL problems).
  // allocationPolicy protects us from running out of space due to concurrent
  // xacts.
  //
  // Also, there can be no reordering of allocations / deallocations, since
  // we're holding alloc->mut. However, we might reorder a Tset() and a
  // Tdealloc() or Talloc() on the same page. If this happens, it's an unsafe
  // race in the application, and not technically our problem.
  //
  // @todo Tupdate forces allocation to release a latch, leading to
  // potentially nasty application bugs. Perhaps this is the wrong API!
  // @todo application-level allocation races can lead to unrecoverable logs.
  unlock(p->rwlatch);

  Tupdate(xid, rid.page, preimage, sizeof(alloc_arg) + size, OPERATION_DEALLOC);

  releasePage(p);
  pthread_mutex_unlock(&alloc->mut);

  if(type == BLOB_SLOT) {
    stasis_blob_dealloc(xid, (blob_record_t*)(preimage + sizeof(alloc_arg)));
  }

  free(preimage);

  stasis_transaction_table_set_argument(alloc->xact_table, xid, alloc->callback_id,
                                        AT_COMMIT, alloc);
}
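/* Illustration (inferred from the code above, not from Stasis documentation):
   the OPERATION_DEALLOC log payload built in Tdealloc is the undo information,
   laid out as

       [ alloc_arg { slot, type } ][ `size` bytes of the record's preimage ]

   so that on abort/undo the slot can be re-allocated and the old bytes
   written back. */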
recordid Talloc(int xid, unsigned long size) {
  stasis_alloc_t* alloc = stasis_runtime_alloc_state();
  short type;
  if(size >= BLOB_THRESHOLD_SIZE) {
    type = BLOB_SLOT;
  } else {
    assert(size > 0);
    type = size;
  }

  recordid rid;

  pthread_mutex_lock(&alloc->mut);

  pageid_t pageid = stasis_allocation_policy_pick_suitable_page(alloc->allocPolicy, xid,
                                                                stasis_record_type_to_size(type));
  if(pageid == INVALID_PAGE) {
    stasis_alloc_reserve_new_region(alloc, xid);
    pageid = stasis_allocation_policy_pick_suitable_page(alloc->allocPolicy, xid,
                                                         stasis_record_type_to_size(type));
  }
  alloc->lastFreepage = pageid;

  Page * p = loadPage(xid, alloc->lastFreepage);
  writelock(p->rwlatch, 0);

  int rec_size = stasis_record_type_to_size(type);
  if(rec_size < 4) { rec_size = 4; }

  // Keep compacting / switching pages until we find room for the record.
  while(stasis_record_freespace(xid, p) < rec_size) {
    stasis_record_compact(p);
    int newFreespace = stasis_record_freespace(xid, p);
    if(newFreespace >= rec_size) { break; }

    unlock(p->rwlatch);
    stasis_allocation_policy_update_freespace(alloc->allocPolicy, pageid, newFreespace);
    releasePage(p);

    pageid = stasis_allocation_policy_pick_suitable_page(alloc->allocPolicy, xid, rec_size);
    if(pageid == INVALID_PAGE) {
      stasis_alloc_reserve_new_region(alloc, xid);
      pageid = stasis_allocation_policy_pick_suitable_page(alloc->allocPolicy, xid, rec_size);
    }
    alloc->lastFreepage = pageid;

    p = loadPage(xid, alloc->lastFreepage);
    writelock(p->rwlatch, 0);
  }

  rid = stasis_record_alloc_begin(xid, p, type);
  assert(rid.size != INVALID_SLOT);
  stasis_record_alloc_done(xid, p, rid);
  int newFreespace = stasis_record_freespace(xid, p);
  stasis_allocation_policy_alloced_from_page(alloc->allocPolicy, xid, pageid);
  stasis_allocation_policy_update_freespace(alloc->allocPolicy, pageid, newFreespace);
  unlock(p->rwlatch);

  alloc_arg a = { rid.slot, type };
  Tupdate(xid, rid.page, &a, sizeof(a), OPERATION_ALLOC);

  if(type == BLOB_SLOT) {
    rid.size = size;
    stasis_blob_alloc(xid, rid);
  }

  releasePage(p);
  pthread_mutex_unlock(&alloc->mut);
  stasis_transaction_table_set_argument(alloc->xact_table, xid, alloc->callback_id,
                                        AT_COMMIT, alloc);
  return rid;  // TODO return NULLRID on error
}
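/* Hedged end-to-end sketch (not from the Stasis sources): allocate, write,
   read, and free a record inside one transaction. Tinit/Tbegin/Tset/Tread/
   Tcommit/Tdeinit are the usual Stasis entry points, but check your version's
   transactional.h for the exact signatures. */
#include <assert.h>
#include <stasis/transactional.h>

int main(void) {
  Tinit();

  int xid = Tbegin();
  recordid rid = Talloc(xid, sizeof(int));

  int in = 42, out = 0;
  Tset(xid, rid, &in);      /* logged write */
  Tread(xid, rid, &out);
  assert(out == 42);

  Tdealloc(xid, rid);       /* logs the preimage so abort can restore it */
  Tcommit(xid);

  Tdeinit();
  return 0;
}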