/*
 * Wait for all transactions to be out of their current transaction.
 *
 * Returns 0 when quiescence is reached (the caller then owns
 * quiesce_mutex; if "block" is non-zero it must later call
 * stm_quiesce_release()), or 1 when an active caller could not obtain
 * the mutex and must abort/retry instead of blocking.
 */
static inline int stm_quiesce(stm_tx_t *tx, int block)
{
  stm_tx_t *t;

  PRINT_DEBUG("==> stm_quiesce(%p)\n", tx);

  if (IS_ACTIVE(tx->status)) {
    /* Only one active transaction can quiesce at a time, others must abort */
    if (pthread_mutex_trylock(&quiesce_mutex) != 0)
      return 1;
  } else {
    /* We can safely block because we are inactive */
    pthread_mutex_lock(&quiesce_mutex);
  }
  /* We own the lock at this point */
  if (block)
    ATOMIC_STORE_REL(&quiesce, 2);  /* presumably 2 = "block new transactions" — confirm against stm_wait()/enter path */
  /* Make sure we read latest status data */
  ATOMIC_MB_FULL;
  /* Not optimal as we check transaction sequentially and might miss some inactivity states */
  for (t = threads; t != NULL; t = t->next) {
    if (t == tx)
      continue;
    /* Wait for all other transactions to become inactive */
    while (IS_ACTIVE(t->status))
      ;  /* busy-wait; relies on status being updated by other threads */
  }
  if (!block)
    pthread_mutex_unlock(&quiesce_mutex);
  return 0;
}
/*
 * Store a word-sized value in a unit transaction.
 *
 * Acquires the versioned lock covering "addr", performs the store, and
 * releases the lock with a fresh commit timestamp.  If "timestamp" is
 * non-NULL and the location's current version is newer than
 * *timestamp, the write is NOT performed: *timestamp is updated to the
 * current version and 0 is returned.  Returns 1 on success; on
 * success *timestamp (if non-NULL) receives the new version.
 */
static INLINE int stm_unit_write(volatile stm_word_t *addr, stm_word_t value, stm_word_t mask, stm_word_t *timestamp)
{
#ifdef UNIT_TX
  volatile stm_word_t *lock;
  stm_word_t l;

  PRINT_DEBUG2("==> stm_unit_write(a=%p,d=%p-%lu,m=0x%lx)\n",
               addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
  if (LOCK_GET_OWNED(l)) {
    /* Locked: wait until lock is free */
#ifdef WAIT_YIELD
    sched_yield();
#endif /* WAIT_YIELD */
    goto restart;
  }
  /* Not locked */
  if (timestamp != NULL && LOCK_GET_TIMESTAMP(l) > *timestamp) {
    /* Location has been written since caller's snapshot: fail and
     * return current timestamp */
    *timestamp = LOCK_GET_TIMESTAMP(l);
    return 0;
  }
  /* TODO: would need to store thread ID to be able to kill it (for wait freedom) */
  if (ATOMIC_CAS_FULL(lock, l, LOCK_UNIT) == 0)
    goto restart;  /* CAS lost a race: another thread changed the lock word */
  ATOMIC_STORE(addr, value);
  /* Update timestamp with newer value (may exceed VERSION_MAX by up to MAX_THREADS) */
  l = FETCH_INC_CLOCK + 1;
  if (timestamp != NULL)
    *timestamp = l;
  /* Make sure that lock release becomes visible */
  ATOMIC_STORE_REL(lock, LOCK_SET_TIMESTAMP(l));
  if (unlikely(l >= VERSION_MAX)) {
    /* Block all transactions and reset clock (current thread is not in active transaction) */
    stm_quiesce_barrier(NULL, rollover_clock, NULL);
  }
  return 1;
#else /* ! UNIT_TX */
  fprintf(stderr, "Unit transaction is not enabled\n");
  exit(-1);
  return 1;
#endif /* ! UNIT_TX */
}
/*
 * (Re)allocate write set entries.
 *
 * If "extend" is non-zero, double the capacity of the transaction's
 * write set, preserving the used entries, their intra-set "next"
 * links, and any per-address locks that currently point into the old
 * array.  Otherwise allocate a fresh array of tx->w_set.size entries.
 * Exits the process on allocation failure.
 */
static inline void stm_allocate_ws_entries(stm_tx_t *tx, int extend)
{
  PRINT_DEBUG("==> stm_allocate_ws_entries(%p[%lu-%lu],%d)\n", tx,
              (unsigned long)tx->start, (unsigned long)tx->end, extend);

  if (extend) {
    /* Extend write set */
    int j;
    w_entry_t *ows, *nws;

    /* Allocate new write set (twice the current capacity) */
    ows = tx->w_set.entries;
    if ((nws = (w_entry_t *)malloc(tx->w_set.size * 2 * sizeof(w_entry_t))) == NULL) {
      perror("malloc write set");
      exit(1);
    }
    /* Copy only the entries in use: slots beyond nb_entries hold
     * indeterminate data, so copying the full capacity would read
     * uninitialized memory for no benefit. */
    memcpy(nws, ows, tx->w_set.nb_entries * sizeof(w_entry_t));
    /* Retarget intra-set "next" pointers from the old array to the new one */
    for (j = 0; j < tx->w_set.nb_entries; j++) {
      if (ows[j].next != NULL)
        nws[j].next = nws + (ows[j].next - ows);
    }
    /* Locks we own embed the address of their write-set entry: re-point
     * them to the corresponding entry in the new array */
    for (j = 0; j < tx->w_set.nb_entries; j++) {
      if (ows[j].lock == GET_LOCK(ows[j].addr))
        ATOMIC_STORE_REL(ows[j].lock, LOCK_SET_ADDR_WRITE((stm_word_t)&nws[j]));
    }
    tx->w_set.entries = nws;
    tx->w_set.size *= 2;
    free(ows);
  } else {
    /* Allocate write set */
    if ((tx->w_set.entries = (w_entry_t *)malloc(tx->w_set.size * sizeof(w_entry_t))) == NULL) {
      perror("malloc write set");
      exit(1);
    }
  }
}
/*
 * Release threads blocked after quiescence.
 *
 * Clears the quiesce flag (with release semantics, so prior writes by
 * the quiescing thread become visible first) and then drops the mutex
 * acquired by stm_quiesce(..., block=1).  "tx" is currently unused.
 */
static inline void stm_quiesce_release(stm_tx_t *tx)
{
  ATOMIC_STORE_REL(&quiesce, 0);
  pthread_mutex_unlock(&quiesce_mutex);
}
/*
 * Called by the CURRENT thread to commit a transaction.
 *
 * Returns 1 on successful commit (or when merely leaving a nested
 * level), 0 if commit-time validation failed and the transaction was
 * rolled back.
 */
int stm_commit(TXPARAM)
{
  w_entry_t *w;
  stm_word_t t;
  int i;
  TX_GET;

  PRINT_DEBUG("==> stm_commit(%p[%lu-%lu])\n", tx,
              (unsigned long)tx->start, (unsigned long)tx->end);

  /* Decrement nesting level: only the outermost commit does real work */
  if (--tx->nesting > 0)
    return 1;

  assert(IS_ACTIVE(tx->status));

  /* A read-only transaction can commit immediately */
  if (tx->w_set.nb_entries == 0)
    goto end;

  /* Update transaction */

  /* Get commit timestamp (may exceed VERSION_MAX by up to MAX_THREADS) */
  t = FETCH_INC_CLOCK + 1;

  /* Try to validate (only if a concurrent transaction has committed since tx->start) */
  if (tx->start != t - 1 && !stm_validate(tx)) {
    /* Cannot commit */
    tx->aborts_validate_commit++;
    stm_rollback(tx, STM_ABORT_VALIDATE);
    return 0;
  }

  /* Install new versions, drop locks and set new timestamp */
  w = tx->w_set.entries;
  for (i = tx->w_set.nb_entries; i > 0; i--, w++) {
    if (w->mask != 0)
      ATOMIC_STORE(w->addr, w->value);
    /* Only drop lock for last covered address in write set: several
     * entries can share one lock, chained through "next" */
    if (w->next == NULL)
      ATOMIC_STORE_REL(w->lock, LOCK_SET_TIMESTAMP(t));
  }

 end:
  tx->retries = 0;

  /* Callbacks */
  if (nb_commit_cb != 0) {
    int cb;
    for (cb = 0; cb < nb_commit_cb; cb++)
      commit_cb[cb].f(TXARGS commit_cb[cb].arg);
  }

  /* Set status to COMMITTED */
  SET_STATUS(tx->status, TX_COMMITTED);

  return 1;
}