RC Row_maat::read(TxnManager * txn) {
  assert (CC_ALG == MAAT);
  RC rc = RCOK;

  uint64_t mtx_wait_starttime = get_sys_clock();
  while(!ATOM_CAS(maat_avail,true,false)) { }
  INC_STATS(txn->get_thd_id(),mtx[30],get_sys_clock() - mtx_wait_starttime);
  DEBUG("READ %ld -- %ld: lw %ld\n",txn->get_txn_id(),_row->get_primary_key(),timestamp_last_write);

  // Copy uncommitted writes
  for(auto it = uncommitted_writes->begin(); it != uncommitted_writes->end(); it++) {
    uint64_t txn_id = *it;
    txn->uncommitted_writes->insert(txn_id);
    DEBUG("    UW %ld -- %ld: %ld\n",txn->get_txn_id(),_row->get_primary_key(),txn_id);
  }

  // Copy write timestamp
  if(txn->greatest_write_timestamp < timestamp_last_write)
    txn->greatest_write_timestamp = timestamp_last_write;

  // Add to uncommitted reads (soft lock)
  uncommitted_reads->insert(txn->get_txn_id());

  ATOM_CAS(maat_avail,false,true);
  return rc;
}
void TxnTable::restart_txn(uint64_t thd_id, uint64_t txn_id, uint64_t batch_id){
  uint64_t pool_id = txn_id % pool_size;
  // set modify bit for this pool: txn_id % pool_size
  while(!ATOM_CAS(pool[pool_id]->modify,false,true)) { };

  txn_node_t t_node = pool[pool_id]->head;
  while (t_node != NULL) {
    if(is_matching_txn_node(t_node,txn_id,batch_id)) {
#if CC_ALG == CALVIN
      work_queue.enqueue(thd_id,Message::create_message(t_node->txn_man,RTXN),false);
#else
      if(IS_LOCAL(txn_id))
        work_queue.enqueue(thd_id,Message::create_message(t_node->txn_man,RTXN_CONT),false);
      else
        work_queue.enqueue(thd_id,Message::create_message(t_node->txn_man,RQRY_CONT),false);
#endif
      break;
    }
    t_node = t_node->next;
  }

  // unset modify bit for this pool: txn_id % pool_size
  ATOM_CAS(pool[pool_id]->modify,true,false);
}
bool index_btree::latch_node(bt_node * node, latch_t latch_type) {
    // TODO latch is disabled
    if (!ENABLE_LATCH)
        return true;
    bool success = false;
    // if ( g_cc_alg != HSTORE )
    while ( !ATOM_CAS(node->latch, false, true) ) {}
    // pthread_mutex_lock(&node->locked);

    latch_t node_latch = node->latch_type;
    if (node_latch == LATCH_NONE ||
        (node_latch == LATCH_SH && latch_type == LATCH_SH)) {
        node->latch_type = latch_type;
        if (node_latch == LATCH_NONE)
            M_ASSERT( (node->share_cnt == 0), "share cnt none 0!" );
        if (node->latch_type == LATCH_SH)
            node->share_cnt ++;
        success = true;
    }
    else // latch_type incompatible
        success = false;

    // if ( g_cc_alg != HSTORE )
    bool ok = ATOM_CAS(node->latch, true, false);
    assert(ok);
    // pthread_mutex_unlock(&node->locked);
    // assert(ATOM_CAS(node->locked, true, false));
    return success;
}
RC index_btree::upgrade_latch(bt_node * node) {
    if (!ENABLE_LATCH)
        return RCOK;
    bool success = false;
    // if ( g_cc_alg != HSTORE )
    while ( !ATOM_CAS(node->latch, false, true) ) {}
    // pthread_mutex_lock(&node->locked);
    // while (!ATOM_CAS(node->locked, false, true)) {}

    M_ASSERT( (node->latch_type == LATCH_SH), "Error" );
    if (node->share_cnt > 1)
        success = false;
    else { // share_cnt == 1
        success = true;
        node->latch_type = LATCH_EX;
        node->share_cnt = 0;
    }

    // if ( g_cc_alg != HSTORE )
    bool ok = ATOM_CAS(node->latch, true, false);
    assert(ok);
    // pthread_mutex_unlock(&node->locked);
    // assert( ATOM_CAS(node->locked, true, false) );

    if (success) return RCOK;
    else return Abort;
}
void TxnTable::update_min_ts(uint64_t thd_id, uint64_t txn_id, uint64_t batch_id, uint64_t ts){
  uint64_t pool_id = txn_id % pool_size;
  while(!ATOM_CAS(pool[pool_id]->modify,false,true)) { };
  if(ts < pool[pool_id]->min_ts)
    pool[pool_id]->min_ts = ts;
  ATOM_CAS(pool[pool_id]->modify,true,false);
}
bool SimManager::is_warmup_done() {
  if(warmup)
    return true;
  bool done = ((get_sys_clock() - run_starttime) >= g_warmup_timer);
  if(done) {
    ATOM_CAS(warmup_end_time,0,get_sys_clock());
    ATOM_CAS(warmup,false,true);
  }
  return done;
}
void SimManager::set_done() {
  if(ATOM_CAS(sim_done, false, true)) {
    if(warmup_end_time == 0)
      warmup_end_time = run_starttime;
    SET_STATS(0, total_runtime, get_sys_clock() - warmup_end_time);
  }
}
// Allocate an id by incrementing a counter, hash it to a slot, and return the
// id of the reserved socket slot.
static int reserve_id(struct socket_server *ss) {
    int i;
    for (i=0;i<MAX_SOCKET;i++) {
        // increment the counter
        int id = ATOM_INC(&(ss->alloc_id));
        if (id < 0) {
            // wrap around
            id = ATOM_AND(&(ss->alloc_id), 0x7fffffff);
        }
        // hash to a slot
        struct socket *s = &ss->slot[HASH_ID(id)];
        if (s->type == SOCKET_TYPE_INVALID) {
            // if the slot is still in its initial state, mark it as reserved
            if (ATOM_CAS(&s->type, SOCKET_TYPE_INVALID, SOCKET_TYPE_RESERVE)) {
                s->id = id;
                s->fd = -1;
                return id;
            } else {
                // otherwise another thread took the slot concurrently; retry
                --i;
            }
        }
    }
    return -1;
}
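/* Tiny demo of the wrap-around step above. Assumption (not shown in this
 * file): ATOM_INC returns the incremented value and ATOM_AND returns the new
 * value after the bitwise AND, like the GCC __sync_*_and_fetch builtins.
 * When the 32-bit counter passes INT_MAX and goes negative, masking with
 * 0x7fffffff clears the sign bit, so ids stay non-negative and keep cycling
 * through the slot table. */
#include <limits.h>
#include <stdio.h>

int main(void) {
    int alloc_id = INT_MAX;                               /* counter about to wrap */
    int id = __sync_add_and_fetch(&alloc_id, 1);          /* wraps to INT_MIN in practice */
    if (id < 0)
        id = __sync_and_and_fetch(&alloc_id, 0x7fffffff); /* INT_MIN & 0x7fffffff == 0 */
    printf("id after wrap: %d\n", id);                    /* prints 0 */
    return 0;
}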
void SimManager::set_starttime(uint64_t starttime) {
  if(ATOM_CAS(start_set, false, true)) {
    run_starttime = starttime;
    last_worker_epoch_time = starttime;
    sim_done = false;
    printf("Starttime set to %ld\n",run_starttime);
  }
}
static ssize_t* get_allocated_field(uint32_t handle) {
    int h = (int)(handle & (SLOT_SIZE - 1));
    mem_data *data = &mem_stats[h];
    uint32_t old_handle = data->handle;
    ssize_t old_alloc = data->allocated;
    if(old_handle == 0 || old_alloc <= 0) {
        // data->allocated may be less than zero, because allocations made
        // before tracking started were not counted.
        if(!ATOM_CAS(&data->handle, old_handle, handle)) {
            return 0;
        }
        if (old_alloc < 0) {
            ATOM_CAS(&data->allocated, old_alloc, 0);
        }
    }
    if(data->handle != handle) {
        return 0;
    }
    return &data->allocated;
}
RC Row_maat::abort(access_t type, TxnManager * txn) {
  uint64_t mtx_wait_starttime = get_sys_clock();
  while(!ATOM_CAS(maat_avail,true,false)) { }
  INC_STATS(txn->get_thd_id(),mtx[32],get_sys_clock() - mtx_wait_starttime);
  DEBUG("Maat Abort %ld: %d -- %ld\n",txn->get_txn_id(),type,_row->get_primary_key());
#if WORKLOAD == TPCC
  uncommitted_reads->erase(txn->get_txn_id());
  uncommitted_writes->erase(txn->get_txn_id());
#else
  if(type == RD) {
    uncommitted_reads->erase(txn->get_txn_id());
  }
  if(type == WR) {
    uncommitted_writes->erase(txn->get_txn_id());
  }
#endif
  ATOM_CAS(maat_avail,false,true);
  return Abort;
}
latch_t index_btree::release_latch(bt_node * node) {
    if (!ENABLE_LATCH)
        return LATCH_SH;
    latch_t type = node->latch_type;
    // if ( g_cc_alg != HSTORE )
    while ( !ATOM_CAS(node->latch, false, true) ) {}
    // pthread_mutex_lock(&node->locked);
    // while (!ATOM_CAS(node->locked, false, true)) {}

    M_ASSERT((node->latch_type != LATCH_NONE), "release latch fault");
    if (node->latch_type == LATCH_EX)
        node->latch_type = LATCH_NONE;
    else if (node->latch_type == LATCH_SH) {
        node->share_cnt --;
        if (node->share_cnt == 0)
            node->latch_type = LATCH_NONE;
    }

    // if ( g_cc_alg != HSTORE )
    bool ok = ATOM_CAS(node->latch, true, false);
    assert(ok);
    // pthread_mutex_unlock(&node->locked);
    // assert(ATOM_CAS(node->locked, true, false));
    return type;
}
RC YCSBTxnManager::acquire_locks() {
  uint64_t starttime = get_sys_clock();
  assert(CC_ALG == CALVIN);
  YCSBQuery* ycsb_query = (YCSBQuery*) query;
  locking_done = false;
  RC rc = RCOK;
  incr_lr();
  assert(ycsb_query->requests.size() == g_req_per_query);
  assert(phase == CALVIN_RW_ANALYSIS);

  for (uint32_t rid = 0; rid < ycsb_query->requests.size(); rid ++) {
    ycsb_request * req = ycsb_query->requests[rid];
    uint64_t part_id = _wl->key_to_part( req->key );
    DEBUG("LK Acquire (%ld,%ld) %d,%ld -> %ld\n",get_txn_id(),get_batch_id(),req->acctype,req->key,GET_NODE_ID(part_id));
    if(GET_NODE_ID(part_id) != g_node_id)
      continue;
    INDEX * index = _wl->the_index;
    itemid_t * item;
    item = index_read(index, req->key, part_id);
    row_t * row = ((row_t *)item->location);
    RC rc2 = get_lock(row,req->acctype);
    if(rc2 != RCOK) {
      rc = rc2;
    }
  }

  if(decr_lr() == 0) {
    if(ATOM_CAS(lock_ready,false,true))
      rc = RCOK;
  }
  txn_stats.wait_starttime = get_sys_clock();
  /*
  if(rc == WAIT && lock_ready_cnt == 0) {
    if(ATOM_CAS(lock_ready,false,true))
      //lock_ready = true;
      rc = RCOK;
  }
  */
  INC_STATS(get_thd_id(),calvin_sched_time,get_sys_clock() - starttime);
  locking_done = true;
  return rc;
}
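/* Sketch of the "remaining requests + once-only ready flag" pattern used at
 * the end of acquire_locks() above. The names here (LockState, pending, ready)
 * are illustrative and are not the TxnManager members
 * (incr_lr / decr_lr / lock_ready); the point is that whichever thread drives
 * the pending count to zero flips the flag exactly once via compare-and-swap,
 * so the transaction is scheduled to resume only a single time. */
#include <cstdint>

struct LockState {
  volatile int64_t pending = 0;   // outstanding lock requests
  volatile bool    ready   = false;

  int64_t incr() { return __sync_add_and_fetch(&pending, 1); }
  int64_t decr() { return __sync_sub_and_fetch(&pending, 1); }

  // Returns true for exactly one caller.
  bool mark_ready() {
    return __sync_bool_compare_and_swap(&ready, false, true);
  }
};

// Usage, mirroring the end of acquire_locks():
//   ls.incr();                  // before issuing this batch of lock requests
//   ... request locks ...
//   if (ls.decr() == 0 && ls.mark_ready())
//     /* all locks granted synchronously; resume the transaction here */;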
RC Row_mvcc::access(txn_man * txn, TsType type, row_t * row) {
    RC rc = RCOK;
    ts_t ts = txn->get_ts();

    uint64_t t1 = get_sys_clock();
    if (g_central_man)
        glob_manager->lock_row(_row);
    else
        while (!ATOM_CAS(blatch, false, true))
            PAUSE
    //pthread_mutex_lock( latch );
    uint64_t t2 = get_sys_clock();
    INC_STATS(txn->get_thd_id(), debug4, t2 - t1);

#if DEBUG_CC
    for (uint32_t i = 0; i < _req_len; i++)
        if (_requests[i].valid) {
            assert(_requests[i].ts > _latest_wts);
            if (_exists_prewrite)
                assert(_prewrite_ts < _requests[i].ts);
        }
#endif
    if (type == R_REQ) {
        if (ts < _oldest_wts)
            // the version was already recycled... This should be very rare
            rc = Abort;
        else if (ts > _latest_wts) {
            if (_exists_prewrite && _prewrite_ts < ts) {
                // exists a pending prewrite request before the current read. should wait.
                rc = WAIT;
                buffer_req(R_REQ, txn, false);
                txn->ts_ready = false;
            } else {
                // should just read
                rc = RCOK;
                txn->cur_row = _latest_row;
                if (ts > _max_served_rts)
                    _max_served_rts = ts;
            }
        } else {
            rc = RCOK;
            // ts is between _oldest_wts and _latest_wts, should find the correct version
            uint32_t the_ts = 0;
            uint32_t the_i = _his_len;
            for (uint32_t i = 0; i < _his_len; i++) {
                if (_write_history[i].valid
                    && _write_history[i].ts < ts
                    && _write_history[i].ts > the_ts) {
                    the_ts = _write_history[i].ts;
                    the_i = i;
                }
            }
            if (the_i == _his_len)
                txn->cur_row = _row;
            else
                txn->cur_row = _write_history[the_i].row;
        }
    } else if (type == P_REQ) {
        if (ts < _latest_wts || ts < _max_served_rts
            || (_exists_prewrite && _prewrite_ts > ts))
            rc = Abort;
        else if (_exists_prewrite) {  // _prewrite_ts < ts
            rc = WAIT;
            buffer_req(P_REQ, txn, false);
            txn->ts_ready = false;
        } else {
            rc = RCOK;
            row_t * res_row = reserveRow(ts, txn);
            assert(res_row);
            res_row->copy(_latest_row);
            txn->cur_row = res_row;
        }
    } else if (type == W_REQ) {
        rc = RCOK;
        assert(ts > _latest_wts);
        assert(row == _write_history[_prewrite_his_id].row);
        _write_history[_prewrite_his_id].valid = true;
        _write_history[_prewrite_his_id].ts = ts;
        _latest_wts = ts;
        _latest_row = row;
        _exists_prewrite = false;
        _num_versions ++;
        update_buffer(txn, W_REQ);
    } else if (type == XP_REQ) {
        assert(row == _write_history[_prewrite_his_id].row);
        _write_history[_prewrite_his_id].valid = false;
        _write_history[_prewrite_his_id].reserved = false;
        _exists_prewrite = false;
        update_buffer(txn, XP_REQ);
    } else
        assert(false);

    INC_STATS(txn->get_thd_id(), debug3, get_sys_clock() - t2);

    if (g_central_man)
        glob_manager->release_row(_row);
    else
        blatch = false;
    //pthread_mutex_unlock( latch );
    return rc;
}
void SimManager::set_setup_done() { ATOM_CAS(sim_init_done, false, true); }
RC TPCCTxnManager::acquire_locks() {
  uint64_t starttime = get_sys_clock();
  assert(CC_ALG == CALVIN);
  locking_done = false;
  RC rc = RCOK;
  RC rc2;
  INDEX * index;
  itemid_t * item;
  row_t * row;
  uint64_t key;
  incr_lr();

  TPCCQuery* tpcc_query = (TPCCQuery*) query;
  uint64_t w_id = tpcc_query->w_id;
  uint64_t d_id = tpcc_query->d_id;
  uint64_t c_id = tpcc_query->c_id;
  uint64_t d_w_id = tpcc_query->d_w_id;
  uint64_t c_w_id = tpcc_query->c_w_id;
  uint64_t c_d_id = tpcc_query->c_d_id;
  char * c_last = tpcc_query->c_last;
  uint64_t part_id_w = wh_to_part(w_id);
  uint64_t part_id_c_w = wh_to_part(c_w_id);

  switch(tpcc_query->txn_type) {
    case TPCC_PAYMENT:
      if(GET_NODE_ID(part_id_w) == g_node_id) {
        // WH
        index = _wl->i_warehouse;
        item = index_read(index, w_id, part_id_w);
        row = ((row_t *)item->location);
        rc2 = get_lock(row, g_wh_update ? WR : RD);
        if(rc2 != RCOK)
          rc = rc2;
        // Dist
        key = distKey(d_id, d_w_id);
        item = index_read(_wl->i_district, key, part_id_w);
        row = ((row_t *)item->location);
        rc2 = get_lock(row, WR);
        if(rc2 != RCOK)
          rc = rc2;
      }
      if(GET_NODE_ID(part_id_c_w) == g_node_id) {
        // Cust
        if (tpcc_query->by_last_name) {
          key = custNPKey(c_last, c_d_id, c_w_id);
          index = _wl->i_customer_last;
          item = index_read(index, key, part_id_c_w);
          int cnt = 0;
          itemid_t * it = item;
          itemid_t * mid = item;
          while (it != NULL) {
            cnt ++;
            it = it->next;
            if (cnt % 2 == 0)
              mid = mid->next;
          }
          row = ((row_t *)mid->location);
        } else {
          key = custKey(c_id, c_d_id, c_w_id);
          index = _wl->i_customer_id;
          item = index_read(index, key, part_id_c_w);
          row = (row_t *) item->location;
        }
        rc2 = get_lock(row, WR);
        if(rc2 != RCOK)
          rc = rc2;
      }
      break;
    case TPCC_NEW_ORDER:
      if(GET_NODE_ID(part_id_w) == g_node_id) {
        // WH
        index = _wl->i_warehouse;
        item = index_read(index, w_id, part_id_w);
        row = ((row_t *)item->location);
        rc2 = get_lock(row, RD);
        if(rc2 != RCOK)
          rc = rc2;
        // Cust
        index = _wl->i_customer_id;
        key = custKey(c_id, d_id, w_id);
        item = index_read(index, key, wh_to_part(w_id));
        row = (row_t *) item->location;
        rc2 = get_lock(row, RD);
        if(rc2 != RCOK)
          rc = rc2;
        // Dist
        key = distKey(d_id, w_id);
        item = index_read(_wl->i_district, key, wh_to_part(w_id));
        row = ((row_t *)item->location);
        rc2 = get_lock(row, WR);
        if(rc2 != RCOK)
          rc = rc2;
      }
      // Items
      for(uint64_t i = 0; i < tpcc_query->ol_cnt; i++) {
        if(GET_NODE_ID(wh_to_part(tpcc_query->items[i]->ol_supply_w_id)) != g_node_id)
          continue;
        key = tpcc_query->items[i]->ol_i_id;
        item = index_read(_wl->i_item, key, 0);
        row = ((row_t *)item->location);
        rc2 = get_lock(row, RD);
        if(rc2 != RCOK)
          rc = rc2;
        key = stockKey(tpcc_query->items[i]->ol_i_id, tpcc_query->items[i]->ol_supply_w_id);
        index = _wl->i_stock;
        item = index_read(index, key, wh_to_part(tpcc_query->items[i]->ol_supply_w_id));
        row = ((row_t *)item->location);
        rc2 = get_lock(row, WR);
        if(rc2 != RCOK)
          rc = rc2;
      }
      break;
    default:
      assert(false);
  }

  if(decr_lr() == 0) {
    if(ATOM_CAS(lock_ready,false,true))
      rc = RCOK;
  }
  txn_stats.wait_starttime = get_sys_clock();
  locking_done = true;
  INC_STATS(get_thd_id(),calvin_sched_time,get_sys_clock() - starttime);
  return rc;
}
TxnManager * TxnTable::get_transaction_manager(uint64_t thd_id, uint64_t txn_id, uint64_t batch_id){
  DEBUG("TxnTable::get_txn_manager %ld / %ld\n",txn_id,pool_size);
  uint64_t starttime = get_sys_clock();
  uint64_t pool_id = txn_id % pool_size;
  uint64_t mtx_starttime = starttime;
  // set modify bit for this pool: txn_id % pool_size
  while(!ATOM_CAS(pool[pool_id]->modify,false,true)) { };
  INC_STATS(thd_id,mtx[7],get_sys_clock()-mtx_starttime);

  txn_node_t t_node = pool[pool_id]->head;
  TxnManager * txn_man = NULL;

  uint64_t prof_starttime = get_sys_clock();
  while (t_node != NULL) {
    if(is_matching_txn_node(t_node,txn_id,batch_id)) {
      txn_man = t_node->txn_man;
      break;
    }
    t_node = t_node->next;
  }
  INC_STATS(thd_id,mtx[20],get_sys_clock()-prof_starttime);

  if(!txn_man) {
    prof_starttime = get_sys_clock();
    txn_table_pool.get(thd_id,t_node);
    INC_STATS(thd_id,mtx[21],get_sys_clock()-prof_starttime);
    prof_starttime = get_sys_clock();
    txn_man_pool.get(thd_id,txn_man);
    INC_STATS(thd_id,mtx[22],get_sys_clock()-prof_starttime);
    prof_starttime = get_sys_clock();
    txn_man->set_txn_id(txn_id);
    txn_man->set_batch_id(batch_id);
    t_node->txn_man = txn_man;
    txn_man->txn_stats.starttime = get_sys_clock();
    txn_man->txn_stats.restart_starttime = txn_man->txn_stats.starttime;
    LIST_PUT_TAIL(pool[pool_id]->head,pool[pool_id]->tail,t_node);
    INC_STATS(thd_id,mtx[23],get_sys_clock()-prof_starttime);
    prof_starttime = get_sys_clock();
    ++pool[pool_id]->cnt;
    if(pool[pool_id]->cnt > 1) {
      INC_STATS(thd_id,txn_table_cflt_cnt,1);
      INC_STATS(thd_id,txn_table_cflt_size,pool[pool_id]->cnt-1);
    }
    INC_STATS(thd_id,txn_table_new_cnt,1);
    INC_STATS(thd_id,mtx[24],get_sys_clock()-prof_starttime);
  }

#if CC_ALG == MVCC
  if(txn_man->get_timestamp() < pool[pool_id]->min_ts)
    pool[pool_id]->min_ts = txn_man->get_timestamp();
#endif

  // unset modify bit for this pool: txn_id % pool_size
  ATOM_CAS(pool[pool_id]->modify,true,false);

  INC_STATS(thd_id,txn_table_get_time,get_sys_clock() - starttime);
  INC_STATS(thd_id,txn_table_get_cnt,1);
  return txn_man;
}
void TxnTable::release_transaction_manager(uint64_t thd_id, uint64_t txn_id, uint64_t batch_id){
  uint64_t starttime = get_sys_clock();
  uint64_t pool_id = txn_id % pool_size;
  uint64_t mtx_starttime = starttime;
  // set modify bit for this pool: txn_id % pool_size
  while(!ATOM_CAS(pool[pool_id]->modify,false,true)) { };
  INC_STATS(thd_id,mtx[8],get_sys_clock()-mtx_starttime);

  txn_node_t t_node = pool[pool_id]->head;
#if CC_ALG == MVCC
  uint64_t min_ts = UINT64_MAX;
  txn_node_t saved_t_node = NULL;
#endif

  uint64_t prof_starttime = get_sys_clock();
  while (t_node != NULL) {
    if(is_matching_txn_node(t_node,txn_id,batch_id)) {
      LIST_REMOVE_HT(t_node,pool[txn_id % pool_size]->head,pool[txn_id % pool_size]->tail);
      --pool[pool_id]->cnt;
#if CC_ALG == MVCC
      saved_t_node = t_node;
      t_node = t_node->next;
      continue;
#else
      break;
#endif
    }
#if CC_ALG == MVCC
    if(t_node->txn_man->get_timestamp() < min_ts)
      min_ts = t_node->txn_man->get_timestamp();
#endif
    t_node = t_node->next;
  }
  INC_STATS(thd_id,mtx[25],get_sys_clock()-prof_starttime);
  prof_starttime = get_sys_clock();

#if CC_ALG == MVCC
  t_node = saved_t_node;
  pool[pool_id]->min_ts = min_ts;
#endif

  // unset modify bit for this pool: txn_id % pool_size
  ATOM_CAS(pool[pool_id]->modify,true,false);

  prof_starttime = get_sys_clock();
  assert(t_node);
  assert(t_node->txn_man);
  txn_man_pool.put(thd_id,t_node->txn_man);
  INC_STATS(thd_id,mtx[26],get_sys_clock()-prof_starttime);
  prof_starttime = get_sys_clock();
  txn_table_pool.put(thd_id,t_node);
  INC_STATS(thd_id,mtx[27],get_sys_clock()-prof_starttime);

  INC_STATS(thd_id,txn_table_release_time,get_sys_clock() - starttime);
  INC_STATS(thd_id,txn_table_release_cnt,1);
}
RC Row_maat::commit(access_t type, TxnManager * txn, row_t * data) {
  uint64_t mtx_wait_starttime = get_sys_clock();
  while(!ATOM_CAS(maat_avail,true,false)) { }
  INC_STATS(txn->get_thd_id(),mtx[33],get_sys_clock() - mtx_wait_starttime);
  DEBUG("Maat Commit %ld: %d,%lu -- %ld\n",txn->get_txn_id(),type,txn->get_commit_timestamp(),_row->get_primary_key());

#if WORKLOAD == TPCC
  if(txn->get_commit_timestamp() > timestamp_last_read)
    timestamp_last_read = txn->get_commit_timestamp();
  uncommitted_reads->erase(txn->get_txn_id());
  if(txn->get_commit_timestamp() > timestamp_last_write)
    timestamp_last_write = txn->get_commit_timestamp();
  uncommitted_writes->erase(txn->get_txn_id());

  // Apply write to DB
  write(data);

  uint64_t txn_commit_ts = txn->get_commit_timestamp();
  // Forward validation
  // Check uncommitted writes against this txn's
  for(auto it = uncommitted_writes->begin(); it != uncommitted_writes->end(); it++) {
    if(txn->uncommitted_writes->count(*it) == 0) {
      // apply timestamps
      // these write txns need to come AFTER this txn
      uint64_t it_lower = time_table.get_lower(txn->get_thd_id(),*it);
      if(it_lower <= txn_commit_ts) {
        time_table.set_lower(txn->get_thd_id(),*it,txn_commit_ts+1);
        DEBUG("MAAT forward val set lower %ld: %lu\n",*it,txn_commit_ts+1);
      }
    }
  }

  uint64_t lower = time_table.get_lower(txn->get_thd_id(),txn->get_txn_id());
  for(auto it = uncommitted_writes->begin(); it != uncommitted_writes->end(); it++) {
    if(txn->uncommitted_writes_y->count(*it) == 0) {
      // apply timestamps
      // these write txns need to come BEFORE this txn
      uint64_t it_upper = time_table.get_upper(txn->get_thd_id(),*it);
      if(it_upper >= txn_commit_ts) {
        time_table.set_upper(txn->get_thd_id(),*it,txn_commit_ts-1);
        DEBUG("MAAT forward val set upper %ld: %lu\n",*it,txn_commit_ts-1);
      }
    }
  }

  for(auto it = uncommitted_reads->begin(); it != uncommitted_reads->end(); it++) {
    if(txn->uncommitted_reads->count(*it) == 0) {
      // apply timestamps
      // these reading txns need to come BEFORE this txn
      uint64_t it_upper = time_table.get_upper(txn->get_thd_id(),*it);
      if(it_upper >= lower) {
        time_table.set_upper(txn->get_thd_id(),*it,lower-1);
        DEBUG("MAAT forward val set upper %ld: %lu\n",*it,lower-1);
      }
    }
  }
#else
  uint64_t txn_commit_ts = txn->get_commit_timestamp();

  if(type == RD) {
    if(txn_commit_ts > timestamp_last_read)
      timestamp_last_read = txn_commit_ts;
    uncommitted_reads->erase(txn->get_txn_id());

    // Forward validation
    // Check uncommitted writes against this txn's
    for(auto it = uncommitted_writes->begin(); it != uncommitted_writes->end(); it++) {
      if(txn->uncommitted_writes->count(*it) == 0) {
        // apply timestamps
        // these write txns need to come AFTER this txn
        uint64_t it_lower = time_table.get_lower(txn->get_thd_id(),*it);
        if(it_lower <= txn_commit_ts) {
          time_table.set_lower(txn->get_thd_id(),*it,txn_commit_ts+1);
          DEBUG("MAAT forward val set lower %ld: %lu\n",*it,txn_commit_ts+1);
        }
      }
    }
  }

  /*
  #if WORKLOAD == TPCC
    if(txn_commit_ts > timestamp_last_read)
      timestamp_last_read = txn_commit_ts;
  #endif
  */

  if(type == WR) {
    if(txn_commit_ts > timestamp_last_write)
      timestamp_last_write = txn_commit_ts;
    uncommitted_writes->erase(txn->get_txn_id());

    // Apply write to DB
    write(data);

    uint64_t lower = time_table.get_lower(txn->get_thd_id(),txn->get_txn_id());
    for(auto it = uncommitted_writes->begin(); it != uncommitted_writes->end(); it++) {
      if(txn->uncommitted_writes_y->count(*it) == 0) {
        // apply timestamps
        // these write txns need to come BEFORE this txn
        uint64_t it_upper = time_table.get_upper(txn->get_thd_id(),*it);
        if(it_upper >= txn_commit_ts) {
          time_table.set_upper(txn->get_thd_id(),*it,txn_commit_ts-1);
          DEBUG("MAAT forward val set upper %ld: %lu\n",*it,txn_commit_ts-1);
        }
      }
    }

    for(auto it = uncommitted_reads->begin(); it != uncommitted_reads->end(); it++) {
      if(txn->uncommitted_reads->count(*it) == 0) {
        // apply timestamps
        // these reading txns need to come BEFORE this txn
        uint64_t it_upper = time_table.get_upper(txn->get_thd_id(),*it);
        if(it_upper >= lower) {
          time_table.set_upper(txn->get_thd_id(),*it,lower-1);
          DEBUG("MAAT forward val set upper %ld: %lu\n",*it,lower-1);
        }
      }
    }
  }
#endif

  ATOM_CAS(maat_avail,false,true);
  return RCOK;
}
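/* Sketch of the interval check that the lower/upper adjustments above feed
 * into. In MaaT-style validation a transaction may commit only if its
 * [lower, upper] interval is still non-empty after conflicting transactions
 * have shrunk it; a commit timestamp is then chosen inside the interval.
 * The names below (TimeBounds, pick_commit_ts) are illustrative and are not
 * the time_table API used above. */
#include <stdbool.h>
#include <stdint.h>

struct TimeBounds {
  uint64_t lower;  /* earliest timestamp this txn may commit at */
  uint64_t upper;  /* latest timestamp this txn may commit at */
};

/* Returns false (abort) if the interval was emptied; otherwise picks the
 * smallest legal commit timestamp. */
static bool pick_commit_ts(const struct TimeBounds *b, uint64_t *commit_ts) {
  if (b->lower > b->upper)
    return false;
  *commit_ts = b->lower;
  return true;
}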
void IndexHash::get_latch(BucketHeader * bucket) { while (!ATOM_CAS(bucket->locked, false, true)) {} }
void IndexHash::release_latch(BucketHeader * bucket) { bool ok = ATOM_CAS(bucket->locked, true, false); assert(ok); }
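/* A minimal, self-contained sketch of the spin-latch idiom shared by the
 * snippets above: acquire by CASing a flag from "free" to "held", spin on
 * failure, and release by CASing it back, asserting that the latch was
 * actually held. Assumption (the real macro definition is not shown in this
 * file): ATOM_CAS wraps the GCC/Clang builtin __sync_bool_compare_and_swap on
 * the address of its first argument and returns whether the swap happened. */
#include <assert.h>
#include <stdbool.h>

#define ATOM_CAS(dest, oldval, newval) \
  __sync_bool_compare_and_swap(&(dest), (oldval), (newval))

static volatile bool demo_latch = false;  /* hypothetical latch flag */

static void demo_lock(void) {
  /* Spin until the flag flips from false (free) to true (held). */
  while (!ATOM_CAS(demo_latch, false, true)) {}
}

static void demo_unlock(void) {
  /* Mirrors IndexHash::release_latch: the release CAS must succeed. */
  bool ok = ATOM_CAS(demo_latch, true, false);
  assert(ok);
}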