/*
 * Store a word-sized value in a unit transaction.
 *
 * A unit transaction updates a single memory word outside of a regular
 * transaction: acquire the word's versioned lock, store the value, advance
 * the global clock, and release the lock stamped with the new timestamp.
 *
 * If 'timestamp' is non-NULL it acts as an upper bound: when the lock's
 * current timestamp is already newer than *timestamp, the write is refused,
 * *timestamp is updated to the lock's current timestamp, and 0 is returned.
 * On success, *timestamp (if non-NULL) receives the new commit timestamp
 * and 1 is returned.
 *
 * NOTE(review): 'mask' is only used in the debug trace below; the store
 * writes the full word regardless of mask — confirm callers always pass a
 * full-word mask here.
 */
static INLINE int stm_unit_write(volatile stm_word_t *addr, stm_word_t value, stm_word_t mask, stm_word_t *timestamp)
{
#ifdef UNIT_TX
  volatile stm_word_t *lock;
  stm_word_t l;

  PRINT_DEBUG2("==> stm_unit_write(a=%p,d=%p-%lu,m=0x%lx)\n",
               addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  /* Get reference to lock protecting this address */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
  if (LOCK_GET_OWNED(l)) {
    /* Locked by another transaction: spin until the lock is free */
#ifdef WAIT_YIELD
    sched_yield();
#endif /* WAIT_YIELD */
    goto restart;
  }
  /* Not locked */
  if (timestamp != NULL && LOCK_GET_TIMESTAMP(l) > *timestamp) {
    /* Location has a newer version than the caller expects: report it and refuse */
    *timestamp = LOCK_GET_TIMESTAMP(l);
    return 0;
  }
  /* TODO: would need to store thread ID to be able to kill it (for wait freedom) */
  /* CAS from the unowned value we read to the special "unit" lock; on a race, retry */
  if (ATOMIC_CAS_FULL(lock, l, LOCK_UNIT) == 0)
    goto restart;
  /* We own the lock: install the new value */
  ATOMIC_STORE(addr, value);
  /* Update timestamp with newer value (may exceed VERSION_MAX by up to MAX_THREADS) */
  l = FETCH_INC_CLOCK + 1;
  if (timestamp != NULL)
    *timestamp = l;
  /* Make sure that lock release (with the new timestamp) becomes visible */
  ATOMIC_STORE_REL(lock, LOCK_SET_TIMESTAMP(l));
  if (unlikely(l >= VERSION_MAX)) {
    /* Block all transactions and reset clock (current thread is not in active transaction) */
    stm_quiesce_barrier(NULL, rollover_clock, NULL);
  }
  return 1;
#else /* ! UNIT_TX */
  /* Feature compiled out: fail loudly rather than silently doing nothing */
  fprintf(stderr, "Unit transaction is not enabled\n");
  exit(-1);
  return 1;
#endif /* ! UNIT_TX */
}
/*
 * Commit a hybrid (hardware-assisted, ASF) transaction.
 *
 * If the transaction is irrevocable, the global irrevocability flag is
 * cleared (and quiescence released when bit 0x08 is set) and commit
 * completes immediately.  Otherwise a new commit timestamp is fetched from
 * the global clock, written into every lock covered by the write set via
 * ASF-protected stores, and the hardware transaction is committed.
 *
 * NOTE(review): the timestamp stores use asf_lock_store64 before
 * asf_commit — presumably they only become globally visible atomically
 * when the hardware transaction commits; confirm against the ASF spec.
 *
 * Returns 1 (always succeeds from the caller's point of view; a hardware
 * abort would not return here).
 */
int hytm_commit(TXPARAM)
{
  stm_word_t t;
  w_entry_t *w;
  int i;
  TX_GET;

  /* Release irrevocability */
#ifdef IRREVOCABLE_ENABLED
  if (tx->irrevocable) {
    ATOMIC_STORE(&_tinystm.irrevocable, 0);
    if ((tx->irrevocable & 0x08) != 0)
      stm_quiesce_release(tx);
    tx->irrevocable = 0;
    goto commit_end;
  }
#endif /* IRREVOCABLE_ENABLED */

  /* Get commit timestamp from the global clock */
  t = FETCH_INC_CLOCK + 1;

  /* Set new timestamp in locks */
  w = tx->w_set.entries;
  for (i = tx->w_set.nb_entries; i > 0; i--, w++) {
    /* XXX Maybe no duplicate entries can improve perf? */
    asf_lock_store64((long unsigned int *)w->lock, LOCK_SET_TIMESTAMP(t));
  }
  /* Commit the hytm transaction */
  asf_commit();

 commit_end:
  tx->retries = 0;
  /* Set status to COMMITTED */
  SET_STATUS(tx->status, TX_COMMITTED);

  /* TODO statistics */
  return 1;
}
/*
 * Rollback transaction.
 *
 * Releases all locks held by the write set (restoring each lock's previous
 * version number), updates abort statistics, invokes registered abort
 * callbacks, marks the transaction ABORTED, and — unless the transaction
 * attributes request no retry — restarts the transaction by jumping back
 * to its sigsetjmp point with 'reason' as the returned value.
 */
static inline void stm_rollback(stm_tx_t *tx, int reason)
{
  w_entry_t *w;
  int i;

  PRINT_DEBUG("==> stm_rollback(%p[%lu-%lu])\n", tx,
              (unsigned long)tx->start, (unsigned long)tx->end);

  assert(IS_ACTIVE(tx->status));

  /* Drop locks */
  i = tx->w_set.nb_entries;
  if (i > 0) {
    w = tx->w_set.entries;
    for (; i > 0; i--, w++) {
      if (w->next == NULL) {
        /* Only drop lock for last covered address in write set
         * (entries for the same lock are chained via 'next') */
        ATOMIC_STORE(w->lock, LOCK_SET_TIMESTAMP(w->version));
      }
    }
    /* Make sure that all lock releases become visible */
    ATOMIC_MB_WRITE;
  }

  /* Abort statistics: total aborts, aborts at retry 1 and 2, max retries */
  tx->retries++;
  tx->aborts++;
  if (tx->retries == 1)
    tx->aborts_1++;
  else if (tx->retries == 2)
    tx->aborts_2++;
  if (tx->max_retries < tx->retries)
    tx->max_retries = tx->retries;

  /* Callbacks */
  if (nb_abort_cb != 0) {
    int cb;
    for (cb = 0; cb < nb_abort_cb; cb++)
      abort_cb[cb].f(TXARGS abort_cb[cb].arg);
  }

  /* Set status to ABORTED */
  SET_STATUS(tx->status, TX_ABORTED);

  /* Reset nesting level */
  tx->nesting = 1;

  /* Wait until contended lock is free (avoids immediately re-aborting on it) */
  if (tx->c_lock != NULL) {
    /* Busy waiting (yielding is expensive) */
    while (LOCK_GET_OWNED(ATOMIC_LOAD(tx->c_lock))) {
      sched_yield();
    }
    tx->c_lock = NULL;
  }

  /* Reset fields to restart transaction */
  stm_prepare(tx);

  /* Jump back to transaction start */
  if (tx->attr == NULL || !tx->attr->no_retry)
    siglongjmp(tx->env, reason);
}
/*
 * Called by the CURRENT thread to commit a transaction.
 *
 * Decrements the nesting level and returns immediately for nested commits.
 * Read-only transactions (empty write set) commit without touching the
 * clock.  Update transactions fetch a commit timestamp, validate the read
 * set if any other transaction committed since this one started, then
 * install the buffered writes and release all write locks stamped with the
 * commit timestamp.  Registered commit callbacks run on every successful
 * outermost commit.
 *
 * Returns 1 on success; on validation failure the transaction is rolled
 * back (which normally longjmps back to the transaction start) and 0 is
 * returned.
 */
int stm_commit(TXPARAM)
{
  w_entry_t *w;
  stm_word_t t;
  int i;
  TX_GET;

  PRINT_DEBUG("==> stm_commit(%p[%lu-%lu])\n", tx,
              (unsigned long)tx->start, (unsigned long)tx->end);

  /* Decrement nesting level: only the outermost commit does real work */
  if (--tx->nesting > 0)
    return 1;

  assert(IS_ACTIVE(tx->status));

  /* A read-only transaction can commit immediately */
  if (tx->w_set.nb_entries == 0)
    goto end;

  /* Update transaction */

  /* Get commit timestamp (may exceed VERSION_MAX by up to MAX_THREADS) */
  t = FETCH_INC_CLOCK + 1;

  /* Try to validate (only if a concurrent transaction has committed since tx->start) */
  if (tx->start != t - 1 && !stm_validate(tx)) {
    /* Cannot commit */
    tx->aborts_validate_commit++;
    stm_rollback(tx, STM_ABORT_VALIDATE);
    return 0;
  }

  /* Install new versions, drop locks and set new timestamp */
  w = tx->w_set.entries;
  for (i = tx->w_set.nb_entries; i > 0; i--, w++) {
    if (w->mask != 0)
      ATOMIC_STORE(w->addr, w->value);
    /* Only drop lock for last covered address in write set
     * (release ordering ensures the value store above is visible first) */
    if (w->next == NULL)
      ATOMIC_STORE_REL(w->lock, LOCK_SET_TIMESTAMP(t));
  }

 end:
  tx->retries = 0;

  /* Callbacks */
  if (nb_commit_cb != 0) {
    int cb;
    for (cb = 0; cb < nb_commit_cb; cb++)
      commit_cb[cb].f(TXARGS commit_cb[cb].arg);
  }

  /* Set status to COMMITTED */
  SET_STATUS(tx->status, TX_COMMITTED);

  return 1;
}