Example 1
/* Operation callback for a logical increment: the log entry's argument is
 * the slot id of an int record on page p; read it, add one, write it back. */
static int op_increment(const LogEntry* e, Page* p) {
  int i;

  assert(e->update.arg_size == sizeof(slotid_t));
  recordid r = {p->id, *(const slotid_t*)stasis_log_entry_update_args_cptr(e), sizeof(int)};

  stasis_record_read(e->xid, p, r, (byte*)&i);
  i++;
  stasis_record_write(e->xid, p, r, (byte*)&i);

  return 0;
}
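
A caller never invokes op_increment() directly; it runs when Tupdate() logs and applies the operation (and again during recovery replay). Below is a minimal sketch of the call site. OPERATION_INCREMENT is a hypothetical operation id that would have to be registered to dispatch to op_increment; the Tupdate() signature matches its use in Example 5.

// Hypothetical wrapper; OPERATION_INCREMENT is an assumed, pre-registered id.
void increment_int_record(int xid, pageid_t page, slotid_t slot) {
  // Tupdate() writes a log entry whose argument is the slot id, then runs
  // the operation; op_increment's assert checks exactly this argument size.
  Tupdate(xid, page, &slot, sizeof(slot), OPERATION_INCREMENT);
}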
Example 2
int TrecordSize(int xid, recordid rid) {
  int ret;
  Page * p;
  p = loadPage(xid, rid.page);
  readlock(p->rwlatch, 0);
  rid.size = stasis_record_length_read(xid, p, rid);
  if(stasis_record_type_read(xid, p, rid) == BLOB_SLOT) {
    // For blobs the slot stores a blob_record_t; the logical size lives
    // inside that header, not in the slot length itself.
    blob_record_t r;
    stasis_record_read(xid, p, rid, (byte*)&r);
    ret = r.size;
  } else {
    ret = rid.size;
  }
  unlock(p->rwlatch);
  releasePage(p);
  return ret;
}
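
For a plain record the function simply returns the slot length; only blobs take the extra indirection through blob_record_t. A hedged usage sketch, assuming the standard Stasis top-level API (Tbegin/Talloc/Tcommit from stasis/transactional.h):

#include <assert.h>
#include <stasis/transactional.h>

void record_size_demo(void) {
  int xid = Tbegin();
  recordid rid = Talloc(xid, sizeof(int));       // small record: NORMAL_SLOT
  assert(TrecordSize(xid, rid) == sizeof(int));  // no blob indirection here
  Tcommit(xid);
}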
Example 3
void stasis_blob_read(int xid, Page * p, recordid rid, byte * buf) {
  pageid_t chunk;
  recordid rawRid = rid;
  rawRid.size = BLOB_SLOT;
  byte * pbuf = alloca(PAGE_SIZE);
  blob_record_t rec;
  stasis_record_read(xid, p, rawRid, (byte*)&rec);

  // Copy every full chunk; the blob occupies consecutive pages starting at
  // rec.offset, each carrying USABLE_SIZE_OF_PAGE payload bytes.
  for(chunk = 0; (chunk+1) * USABLE_SIZE_OF_PAGE < rid.size; chunk++) {
    DEBUG("Chunk = %lld->%lld\n", (long long)chunk, (long long)(rec.offset+chunk));
    TpageGet(xid, rec.offset+chunk, pbuf);
    memcpy(buf + (chunk * USABLE_SIZE_OF_PAGE), pbuf, USABLE_SIZE_OF_PAGE);
  }

  // Copy the (possibly partial) final chunk.  Computing the tail length by
  // subtraction (rather than rid.size % USABLE_SIZE_OF_PAGE) also handles
  // blobs whose size is an exact multiple of the chunk size.
  TpageGet(xid, rec.offset+chunk, pbuf);
  memcpy(buf + (chunk * USABLE_SIZE_OF_PAGE), pbuf,
         rid.size - (chunk * USABLE_SIZE_OF_PAGE));
  DEBUG("Chunk = %lld->%lld\n", (long long)chunk, (long long)(rec.offset+chunk));
}
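
The loop bound encodes the blob layout: all chunks except the last are full pages. A small helper (hypothetical, named here only for illustration) that computes the page count this implies:

#include <stdint.h>

// ceil(size / USABLE_SIZE_OF_PAGE): pages touched by stasis_blob_read().
static pageid_t blob_chunk_count(uint64_t size) {
  return (pageid_t)((size + USABLE_SIZE_OF_PAGE - 1) / USABLE_SIZE_OF_PAGE);
}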
Example 4
void stasis_blob_write(int xid, Page * p, recordid rid, const byte* dat) {
  blob_record_t rec;
  recordid r = rid;
  r.size = sizeof(blob_record_t);
  stasis_record_read(xid, p, r, (byte*)&rec);

  assert(rec.offset);
  pageid_t chunk = 0;
  // Don't need to do any latching on the page range, since racing writes
  // have undefined semantics.
  for(; (chunk+1) * USABLE_SIZE_OF_PAGE < rid.size; chunk++) {
    // TODO: assert(page->pageType == BLOB_PAGE) in TpageSetRange?
    TpageSetRange(xid, rec.offset+chunk, 0,
                  dat + (chunk * USABLE_SIZE_OF_PAGE), USABLE_SIZE_OF_PAGE);
  }
  // Painful; allocate a buffer to zero-pad the final chunk out to a full
  // page.  TODO: Remove zero padding?  As in stasis_blob_read(), computing
  // the tail length by subtraction handles exact-multiple blob sizes.
  byte * buf = calloc(1, USABLE_SIZE_OF_PAGE);
  memcpy(buf, dat + (chunk * USABLE_SIZE_OF_PAGE),
         rid.size - (chunk * USABLE_SIZE_OF_PAGE));
  TpageSetRange(xid, rec.offset+chunk, 0, buf, USABLE_SIZE_OF_PAGE);
  free(buf);
}
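
Applications do not call stasis_blob_write() directly; it sits behind the generic record interface. A hedged round trip, assuming Talloc() transparently creates a blob for records larger than a page, and that Tset()/Tread() route BLOB_SLOT records to stasis_blob_write()/stasis_blob_read():

#include <assert.h>
#include <string.h>
#include <stasis/transactional.h>

void blob_round_trip(void) {
  int xid = Tbegin();
  byte data[3 * PAGE_SIZE];               // too big for one page => blob
  memset(data, 0xAB, sizeof(data));
  recordid rid = Talloc(xid, sizeof(data));
  Tset(xid, rid, data);                   // assumed to reach stasis_blob_write()
  byte check[sizeof(data)];
  Tread(xid, rid, check);                 // assumed to reach stasis_blob_read()
  assert(!memcmp(data, check, sizeof(data)));
  Tcommit(xid);
}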
Example 5
void Tdealloc(int xid, recordid rid) {
  stasis_alloc_t* alloc = stasis_runtime_alloc_state();

  // @todo this needs to garbage collect empty storage regions.

  pthread_mutex_lock(&alloc->mut);
  Page * p = loadPage(xid, rid.page);

  readlock(p->rwlatch, 0);

  recordid newrid = stasis_record_dereference(xid, p, rid);
  stasis_allocation_policy_dealloced_from_page(alloc->allocPolicy, xid, newrid.page);

  int64_t size = stasis_record_length_read(xid,p,rid);
  int64_t type = stasis_record_type_read(xid,p,rid);

  if(type == NORMAL_SLOT) { type = size; }

  byte * preimage = malloc(sizeof(alloc_arg)+size);

  ((alloc_arg*)preimage)->slot = rid.slot;
  ((alloc_arg*)preimage)->type = type;

  // stasis_record_read() wants rid to have its raw size to prevent
  // code that doesn't know about record types from introducing memory
  // bugs.
  rid.size = size;
  stasis_record_read(xid, p, rid, preimage+sizeof(alloc_arg));
  // restore rid to valid state.
  rid.size = type;

  // Ok to release latch; page is still pinned (so no WAL problems).
  // allocationPolicy protects us from running out of space due to concurrent
  // xacts.

  // Also, there can be no reordering of allocations / deallocations,
  // since we're holding alloc->mut.  However, we might reorder a Tset()
  // and a Tdealloc() or Talloc() on the same page.  If this happens,
  // it's an unsafe race in the application, and not technically our problem.

  // @todo  Tupdate forces allocation to release a latch, leading to potentially nasty application bugs.  Perhaps this is the wrong API!

  // @todo application-level allocation races can lead to unrecoverable logs.
  unlock(p->rwlatch);

  Tupdate(xid, rid.page, preimage,
          sizeof(alloc_arg)+size, OPERATION_DEALLOC);

  releasePage(p);

  pthread_mutex_unlock(&alloc->mut);

  if(type==BLOB_SLOT) {
    stasis_blob_dealloc(xid,(blob_record_t*)(preimage+sizeof(alloc_arg)));
  }

  free(preimage);

  stasis_transaction_table_set_argument(alloc->xact_table, xid, alloc->callback_id,
                                        AT_COMMIT, alloc);

}
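
Because the preimage (slot, type, and record bytes) is logged before the slot is freed, an abort can undo the deallocation byte-for-byte. A minimal usage sketch under the same top-level API assumptions as above:

void dealloc_demo(void) {
  int xid = Tbegin();
  recordid rid = Talloc(xid, sizeof(int));
  int v = 42;
  Tset(xid, rid, &v);
  Tdealloc(xid, rid);  // logs preimage {slot, type, bytes of v} for undo
  // Tabort(xid) here would restore the record; commit makes it permanent.
  Tcommit(xid);
}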