Example #1
static int op_alloc(const LogEntry* e, Page* p) {
  assert(e->update.arg_size >= sizeof(alloc_arg));

  const alloc_arg* arg = stasis_log_entry_update_args_cptr(e);
  recordid rid = {
    p->id,
    arg->slot,
    arg->type
  };

  int ret = operate_helper(e->xid,p,rid);

  int64_t size = stasis_record_length_read(e->xid,p,rid);

  if(e->update.arg_size == sizeof(alloc_arg) + size) {
    // if we're aborting a dealloc we better have a sane preimage to apply
    rid.size = size;
    stasis_record_write(e->xid,p,rid,(const byte*)(arg+1));
    rid.size = arg->type;
  } else {
    // otherwise, no preimage
    assert(e->update.arg_size == sizeof(alloc_arg));
  }
  return ret;
}
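For orientation, the two update-argument layouts that op_alloc() distinguishes can be sketched as follows. This is only an illustration of the branch on e->update.arg_size above, not additional Stasis API:

/* Illustrative layout of the log entry's update arguments, as handled by
 * op_alloc() above (not a definition from the Stasis headers):
 *
 *   plain alloc:           [ alloc_arg ]
 *   abort of a dealloc:    [ alloc_arg ][ preimage, `size` bytes ]
 *
 * When the preimage is present, it starts at (const byte*)(arg + 1) and is
 * written back into the record with stasis_record_write(). */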
Example #2
/* Return a pointer for writing the record's bytes in place. Dispatches to the
 * page-type-specific recordWrite() implementation, after checking that the
 * stored record length matches the length implied by rid.size (latch-free
 * slotted pages skip that check). */
byte * stasis_record_write_begin(int xid, Page * p, recordid rid) {
  int page_type = p->pageType;
  assert(page_type);
  if(p->pageType != SLOTTED_LATCH_FREE_PAGE) {
    assert(stasis_record_length_read(xid, p, rid) ==  stasis_record_type_to_size(rid.size));
  }
  return page_impls[page_type].recordWrite(xid, p, rid);
}
Example #3
/* Copy the record's bytes into buf; the caller must supply a buffer of at
 * least stasis_record_length_read() bytes. */
int stasis_record_read(int xid, Page * p, recordid rid, byte *buf) {
  assert(rid.page == p->id);
  assert(rid.size <= BLOB_THRESHOLD_SIZE);

  const byte * dat = stasis_record_read_begin(xid,p,rid);
  memcpy(buf, dat, stasis_record_length_read(xid,p,rid));
  stasis_record_read_done(xid,p,rid,dat);

  return 0;
}
Example #4
/* Called by op_alloc() and op_realloc(): allocate the slot if it is currently
 * INVALID_SLOT, then sanity-check its length and type. */
static int operate_helper(int xid, Page * p, recordid rid) {

  if(stasis_record_type_read(xid, p, rid) == INVALID_SLOT) {
    stasis_record_alloc_done(xid, p, rid);
  }

  assert(stasis_record_length_read(xid, p, rid) == stasis_record_type_to_size(rid.size));
  if(rid.size < 0) {
    assert(stasis_record_type_read(xid,p,rid) == rid.size);
  }
  return 0;
}
Example #5
/* Return the logical size of a record. For blob records, read the blob header
 * to find the full blob length; otherwise return the stored slot length. */
int TrecordSize(int xid, recordid rid) {
  int ret;
  Page * p;
  p = loadPage(xid, rid.page);
  readlock(p->rwlatch,0);
  rid.size = stasis_record_length_read(xid, p, rid);
  if(stasis_record_type_read(xid,p,rid) == BLOB_SLOT) {
    blob_record_t r;
    stasis_record_read(xid,p,rid,(byte*)&r);
    ret = r.size;
  } else {
    ret = rid.size;
  }
  unlock(p->rwlatch);
  releasePage(p);
  return ret;
}
Example #6
static int op_dealloc(const LogEntry* e, Page* p) {
  assert(e->update.arg_size >= sizeof(alloc_arg));
  const alloc_arg* arg = stasis_log_entry_update_args_cptr(e);
  recordid rid = {
    p->id,
    arg->slot,
    arg->type
  };
  // assert that we've got a sane preimage or we're aborting a talloc (no preimage)
  int64_t size = stasis_record_length_read(e->xid,p,rid);
  assert(e->update.arg_size == sizeof(alloc_arg) + size ||
         e->update.arg_size == sizeof(alloc_arg));

  stasis_record_free(e->xid, p, rid);
  assert(stasis_record_type_read(e->xid, p, rid) == INVALID_SLOT);
  return 0;
}
Example #7
static int op_realloc(const LogEntry* e, Page* p) {
  assert(e->update.arg_size >= sizeof(alloc_arg));
  const alloc_arg* arg = stasis_log_entry_update_args_cptr(e);

  recordid rid = {
    p->id,
    arg->slot,
    arg->type
  };
  assert(stasis_record_type_read(e->xid, p, rid) == INVALID_SLOT);
  int ret = operate_helper(e->xid, p, rid);

  int64_t size = stasis_record_length_read(e->xid,p,rid);

  assert(e->update.arg_size == sizeof(alloc_arg) + size);
  rid.size = size;
  byte * buf = stasis_record_write_begin(e->xid,p,rid);
  memcpy(buf, arg+1, size);
  stasis_record_write_done(e->xid,p,rid,buf);
  rid.size = arg->type;
  return ret;
}
Example #8
void Tdealloc(int xid, recordid rid) {
  stasis_alloc_t* alloc = stasis_runtime_alloc_state();

  // @todo this needs to garbage collect empty storage regions.

  pthread_mutex_lock(&alloc->mut);
  Page * p = loadPage(xid, rid.page);

  readlock(p->rwlatch,0);

  recordid newrid = stasis_record_dereference(xid, p, rid);
  stasis_allocation_policy_dealloced_from_page(alloc->allocPolicy, xid, newrid.page);

  int64_t size = stasis_record_length_read(xid,p,rid);
  int64_t type = stasis_record_type_read(xid,p,rid);

  if(type == NORMAL_SLOT) { type = size; }

  byte * preimage = malloc(sizeof(alloc_arg)+size);

  ((alloc_arg*)preimage)->slot = rid.slot;
  ((alloc_arg*)preimage)->type = type;

  // stasis_record_read() wants rid to have its raw size to prevent
  // code that doesn't know about record types from introducing memory
  // bugs.
  rid.size = size;
  stasis_record_read(xid, p, rid, preimage+sizeof(alloc_arg));
  // restore rid to valid state.
  rid.size = type;

  // Ok to release latch; page is still pinned (so no WAL problems).
  // allocationPolicy protects us from running out of space due to concurrent
  // xacts.

  // Also, there can be no reordering of allocations / deallocations,
  // since we're holding alloc->mut.  However, we might reorder a Tset()
  // with a Tdealloc() or Talloc() on the same page.  If this happens,
  // it's an unsafe race in the application, and not technically our problem.

  // @todo  Tupdate forces allocation to release a latch, leading to potentially nasty application bugs.  Perhaps this is the wrong API!

  // @todo application-level allocation races can lead to unrecoverable logs.
  unlock(p->rwlatch);

  Tupdate(xid, rid.page, preimage,
          sizeof(alloc_arg)+size, OPERATION_DEALLOC);

  releasePage(p);

  pthread_mutex_unlock(&alloc->mut);

  if(type==BLOB_SLOT) {
    stasis_blob_dealloc(xid,(blob_record_t*)(preimage+sizeof(alloc_arg)));
  }

  free(preimage);

  stasis_transaction_table_set_argument(alloc->xact_table, xid, alloc->callback_id,
					AT_COMMIT, alloc);

}
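The examples above share one calling pattern around stasis_record_length_read(): the page is pinned with loadPage(), read-latched, queried, then unlatched and released. Below is a minimal sketch of that pattern, distilled from Example #5 and assuming the usual Stasis headers; record_physical_size() is a hypothetical helper name used only for illustration, not part of the Stasis API.

static int64_t record_physical_size(int xid, recordid rid) {
  // Pin the page that holds the record, then take the shared latch.
  Page * p = loadPage(xid, rid.page);
  readlock(p->rwlatch, 0);

  // Physical length of the slot, as stored on the page.
  int64_t size = stasis_record_length_read(xid, p, rid);

  // Release the latch before unpinning, mirroring TrecordSize().
  unlock(p->rwlatch);
  releasePage(p);
  return size;
}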