/*
 * Called by the CURRENT thread to load a word-sized value in a unit transaction.
 */
stm_word_t stm_unit_load(volatile stm_word_t *addr, stm_word_t *timestamp)
{
  volatile stm_word_t *lock_ptr;
  stm_word_t snapshot, recheck, data;

  PRINT_DEBUG2("==> stm_unit_load(a=%p)\n", addr);

  /* Locate the versioned lock that covers this address */
  lock_ptr = GET_LOCK(addr);

  /* Consistency protocol: read lock, read data, re-read lock */
  snapshot = ATOMIC_LOAD_ACQ(lock_ptr);
  for (;;) {
    if (LOCK_GET_OWNED(snapshot)) {
      /* A writer owns the lock: back off and retry with a fresh lock value */
      sched_yield();
      snapshot = ATOMIC_LOAD_ACQ(lock_ptr);
      continue;
    }
    /* Lock is free: snapshot the data, then confirm the lock did not move */
    data = ATOMIC_LOAD_ACQ(addr);
    recheck = ATOMIC_LOAD_ACQ(lock_ptr);
    if (snapshot == recheck)
      break;
    /* Lock changed under us: re-examine the new value (no reload needed) */
    snapshot = recheck;
  }
  /* Optionally report the version under which the value was read */
  if (timestamp != NULL)
    *timestamp = LOCK_GET_TIMESTAMP(snapshot);
  return data;
}
static INLINE void abi_init(void) { /* thread safe */ reload: if (ATOMIC_LOAD_ACQ(&global_abi.status) == ABI_NOT_INITIALIZED) { if (ATOMIC_CAS_FULL(&global_abi.status, ABI_NOT_INITIALIZED, ABI_INITIALIZING) != 0) { /* TODO temporary to be sure to use tinySTM */ printf("TinySTM-ABI v%s.\n", _ITM_libraryVersion()); atexit((void (*)(void))(_ITM_finalizeProcess)); /* TinySTM initialization */ stm_init(); mod_mem_init(0); mod_alloc_cpp(); mod_log_init(); mod_cb_init(); ATOMIC_STORE(&global_abi.status, ABI_INITIALIZED); /* Also initialize thread as specify in the specification */ abi_init_thread(); return; } else { goto reload; } } else if (ATOMIC_LOAD_ACQ(&global_abi.status) != ABI_INITIALIZED) { /* Wait the end of the initialization */ goto reload; } return; }
static INLINE void abi_exit(void) { TX_GET; char * statistics; abi_exit_thread(tx); /* Ensure thread safety */ reload: if (ATOMIC_LOAD_ACQ(&global_abi.status) == ABI_INITIALIZED) { if (ATOMIC_CAS_FULL(&global_abi.status, ABI_INITIALIZED, ABI_FINALIZING) == 0) goto reload; } else { return; } if ((statistics = getenv("ITM_STATISTICS")) != NULL) { FILE * f; int i = 0; stats_t * ts; if (statistics[0] == '-') f = stdout; else if ((f = fopen("itm.log", "w")) == NULL) { fprintf(stderr, "can't open itm.log for writing\n"); goto finishing; } fprintf(f, "STATS REPORT\n"); fprintf(f, "THREAD TOTALS\n"); while (1) { do { ts = (stats_t *)ATOMIC_LOAD(&thread_stats); if (ts == NULL) goto no_more_stat; } while(ATOMIC_CAS_FULL(&thread_stats, ts, ts->next) == 0); /* Skip stats if not a transactional thread */ if (ts->nb_commits == 0) continue; fprintf(f, "Thread %-4i : %12s %12s %12s %12s\n", i, "Min", "Mean", "Max", "Total"); fprintf(f, " Transactions : %12lu\n", ts->nb_commits); fprintf(f, " %-25s: %12lu %12.2f %12lu %12lu\n", "Retries", ts->nb_retries_min, ts->nb_retries_avg, ts->nb_retries_max, ts->nb_aborts); fprintf(f,"\n"); /* Free the thread stats structure */ free(ts); i++; } no_more_stat: if (f != stdout) { fclose(f); } } finishing: stm_exit(); ATOMIC_STORE(&global_abi.status, ABI_NOT_INITIALIZED); }
/*
 * Check if transaction must block.  Returns 1 if the transaction waited for
 * a pending quiescence request, 0 if it could proceed immediately.
 */
static inline int stm_check_quiesce(stm_tx_t *tx)
{
  stm_word_t saved_status;

  /* Must be called upon start (while already active but before acquiring any lock) */
  assert(IS_ACTIVE(tx->status));
  ATOMIC_MB_FULL;
  if (ATOMIC_LOAD_ACQ(&quiesce) != 2)
    return 0;
  /* A quiescence request is pending: park as IDLE until it completes */
  saved_status = ATOMIC_LOAD(&tx->status);
  SET_STATUS(tx->status, TX_IDLE);
  while (ATOMIC_LOAD_ACQ(&quiesce) == 2) {
    sched_yield();
  }
  /* Restore the status we had before blocking */
  SET_STATUS(tx->status, GET_STATUS(saved_status));
  return 1;
}
/*
 * Called by the CURRENT thread to load a word-sized value in a unit transaction.
 */
_CALLCONV stm_word_t stm_unit_load(volatile stm_word_t *addr, stm_word_t *timestamp)
{
#ifdef UNIT_TX
  volatile stm_word_t *lock;
  stm_word_t cur, confirm, result;

  PRINT_DEBUG2("==> stm_unit_load(a=%p)\n", addr);

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Consistency protocol: read lock, read value, re-read lock */
  cur = ATOMIC_LOAD_ACQ(lock);
  for (;;) {
    if (LOCK_GET_OWNED(cur)) {
      /* A writer holds the lock: wait for it to be released */
#ifdef WAIT_YIELD
      sched_yield();
#endif /* WAIT_YIELD */
      cur = ATOMIC_LOAD_ACQ(lock);
      continue;
    }
    /* Lock is free: snapshot the value, then confirm the lock is unchanged */
    result = ATOMIC_LOAD_ACQ(addr);
    confirm = ATOMIC_LOAD_ACQ(lock);
    if (cur == confirm)
      break;
    /* Lock changed: re-check ownership on the new value (no reload needed) */
    cur = confirm;
  }
  /* Optionally report the version under which the value was read */
  if (timestamp != NULL)
    *timestamp = LOCK_GET_TIMESTAMP(cur);
  return result;
#else /* ! UNIT_TX */
  fprintf(stderr, "Unit transaction is not enabled\n");
  exit(-1);
  return 1;
#endif /* ! UNIT_TX */
}
/*
 * Store a word-sized value in a unit transaction.
 *
 * addr:      target word to write.
 * value:     new value; only bits selected by mask are stored.
 * mask:      bitmask of the bits to update.
 * timestamp: in/out; if non-NULL and the word's current version is newer
 *            than *timestamp, the store is refused and *timestamp is set
 *            to the current version.  On success, *timestamp receives the
 *            new version.
 * Returns 1 on success, 0 if refused due to a newer timestamp.
 */
static INLINE int stm_unit_write(volatile stm_word_t *addr, stm_word_t value, stm_word_t mask, stm_word_t *timestamp)
{
#ifdef UNIT_TX
  volatile stm_word_t *lock;
  stm_word_t l;

  PRINT_DEBUG2("==> stm_unit_write(a=%p,d=%p-%lu,m=0x%lx)\n", addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
  if (LOCK_GET_OWNED(l)) {
    /* Locked: wait until lock is free */
#ifdef WAIT_YIELD
    sched_yield();
#endif /* WAIT_YIELD */
    goto restart;
  }
  /* Not locked */
  if (timestamp != NULL && LOCK_GET_TIMESTAMP(l) > *timestamp) {
    /* The word was written since the caller's timestamp: refuse the store.
     * Return current timestamp */
    *timestamp = LOCK_GET_TIMESTAMP(l);
    return 0;
  }
  /* TODO: would need to store thread ID to be able to kill it (for wait freedom) */
  /* Acquire the lock as a unit store; on CAS failure another thread won it */
  if (ATOMIC_CAS_FULL(lock, l, LOCK_UNIT) == 0)
    goto restart;
  ATOMIC_STORE(addr, value);
  /* Update timestamp with newer value (may exceed VERSION_MAX by up to MAX_THREADS) */
  l = FETCH_INC_CLOCK + 1;
  if (timestamp != NULL)
    *timestamp = l;
  /* Make sure that lock release becomes visible */
  ATOMIC_STORE_REL(lock, LOCK_SET_TIMESTAMP(l));
  if (unlikely(l >= VERSION_MAX)) {
    /* Block all transactions and reset clock (current thread is not in active transaction) */
    stm_quiesce_barrier(NULL, rollover_clock, NULL);
  }
  return 1;
#else /* ! UNIT_TX */
  fprintf(stderr, "Unit transaction is not enabled\n");
  exit(-1);
  return 1;
#endif /* ! UNIT_TX */
}
/*
 * Store a word-sized value (return write set entry or NULL).
 *
 * Encounter-time-locking (ETL) write path: the per-stripe lock is acquired
 * eagerly via CAS and the new value is buffered in the transaction's write
 * set until commit.  Aborts the transaction (and returns NULL) on read-only
 * violation, write-write conflict, or failed validation.
 *
 * tx:    current transaction (must be active).
 * addr:  target word.
 * value: new value; only bits selected by mask take effect.
 * mask:  bitmask of bits to update (0 = reserve the entry, write nothing).
 */
static inline w_entry_t *stm_write(stm_tx_t *tx, volatile stm_word_t *addr, stm_word_t value, stm_word_t mask)
{
  volatile stm_word_t *lock;
  stm_word_t l, version;
  w_entry_t *w;
  w_entry_t *prev = NULL;

  PRINT_DEBUG2("==> stm_write(t=%p[%lu-%lu],a=%p,d=%p-%lu,m=0x%lx)\n", tx, (unsigned long)tx->start, (unsigned long)tx->end, addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  assert(IS_ACTIVE(tx->status));

  if (tx->ro) {
    /* Writes are not allowed in a read-only transaction.
     * Disable read-only and abort */
    assert(tx->attr != NULL);
    /* Update attributes to inform the caller */
    tx->attr->read_only = 0;
    tx->aborts_ro++;
    stm_rollback(tx, STM_ABORT_RO_WRITE);
    return NULL;
  }

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
 restart_no_load:
  if (LOCK_GET_OWNED(l)) {
    /* Locked */
    if (l == LOCK_UNIT) {
      /* Data modified by a unit store: should not last long => retry */
      goto restart;
    }
    /* Do we own the lock?  The lock word encodes a pointer to the owner's
     * write-set entry. */
    w = (w_entry_t *)LOCK_GET_ADDR(l);
    /* Simply check if address falls inside our write set (avoids non-faulting load) */
    if (tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries) {
      /* Yes */
      if (mask == 0) {
        /* No need to insert new entry or modify existing one */
        return w;
      }
      prev = w;
      /* Did we previously write the same address?  Walk the chain of
       * entries covered by this lock. */
      while (1) {
        if (addr == prev->addr) {
          /* No need to add to write set: merge into the existing entry.
           * For a partial write, fetch the untouched bits (from memory if
           * the entry never carried a value). */
          if (mask != ~(stm_word_t)0) {
            if (prev->mask == 0)
              prev->value = ATOMIC_LOAD(addr);
            value = (prev->value & ~mask) | (value & mask);
          }
          prev->value = value;
          prev->mask |= mask;
          return prev;
        }
        if (prev->next == NULL) {
          /* Remember last entry in linked list (for adding new entry) */
          break;
        }
        prev = prev->next;
      }
      /* Get version from previous write set entry (all entries in linked list have same version) */
      version = prev->version;
      /* Must add to write set */
      if (tx->w_set.nb_entries == tx->w_set.size)
        stm_allocate_ws_entries(tx, 1);
      w = &tx->w_set.entries[tx->w_set.nb_entries];
      goto do_write;
    }
    /* Conflict: CM kicks in.  Record the contended lock for the contention
     * manager. */
    tx->c_lock = lock;
    /* Abort */
    tx->aborts_locked_write++;
    stm_rollback(tx, STM_ABORT_WW_CONFLICT);
    return NULL;
  }
  /* Not locked */
  /* Handle write after reads (before CAS) */
  version = LOCK_GET_TIMESTAMP(l);
  /* NOTE(review): no goto targets `acquire` in this view — presumably used
   * by conditionally compiled code elsewhere; confirm before removing. */
 acquire:
  if (version > tx->end) {
    /* We might have read an older version previously */
    if (!tx->can_extend || stm_has_read(tx, lock) != NULL) {
      /* Read version must be older (otherwise, tx->end >= version) */
      /* Not much we can do: abort */
      tx->aborts_validate_write++;
      stm_rollback(tx, STM_ABORT_VAL_WRITE);
      return NULL;
    }
  }
  /* Acquire lock (ETL): reserve the write-set slot first so its address can
   * be stored in the lock word. */
  if (tx->w_set.nb_entries == tx->w_set.size)
    stm_allocate_ws_entries(tx, 1);
  w = &tx->w_set.entries[tx->w_set.nb_entries];
  if (ATOMIC_CAS_FULL(lock, l, LOCK_SET_ADDR_WRITE((stm_word_t)w)) == 0)
    goto restart;
  /* We own the lock here (ETL) */
do_write:
  /* Add address to write set */
  w->addr = addr;
  w->mask = mask;
  w->lock = lock;
  if (mask == 0) {
    /* Do not write anything */
#ifndef NDEBUG
    w->value = 0;
#endif /* ! NDEBUG */
  } else {
    /* Remember new value; for a partial write, merge with current memory */
    if (mask != ~(stm_word_t)0)
      value = (ATOMIC_LOAD(addr) & ~mask) | (value & mask);
    w->value = value;
  }
  w->version = version;
  w->next = NULL;
  if (prev != NULL) {
    /* Link new entry in list */
    prev->next = w;
  }
  tx->w_set.nb_entries++;
  tx->w_set.has_writes++;
  return w;
}
/*
 * Load a word-sized value (invisible read).
 *
 * Reads a consistent (value, version) snapshot using the lock-value-lock
 * protocol, serving the value from the transaction's own write set when it
 * holds the lock.  If the version is too recent, tries to extend the
 * snapshot; on failure the transaction is rolled back and 0 is returned.
 * Update transactions log the read in the read set for later validation.
 */
static inline stm_word_t stm_read_invisible(stm_tx_t *tx, volatile stm_word_t *addr)
{
  volatile stm_word_t *lock;
  stm_word_t l, l2, value, version;
  r_entry_t *r;
  w_entry_t *w;

  PRINT_DEBUG2("==> stm_read_invisible(t=%p[%lu-%lu],a=%p)\n", tx, (unsigned long)tx->start, (unsigned long)tx->end, addr);

  assert(IS_ACTIVE(tx->status));

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Note: we could check for duplicate reads and get value from read set */

  /* Read lock, value, lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
 restart_no_load:
  if (LOCK_GET_WRITE(l)) {
    /* Locked */
    if (l == LOCK_UNIT) {
      /* Data modified by a unit store: should not last long => retry */
      goto restart;
    }
    /* Do we own the lock? */
    w = (w_entry_t *)LOCK_GET_ADDR(l);
    /* Simply check if address falls inside our write set (avoids non-faulting load) */
    if (tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries) {
      /* Yes: did we previously write the same address? */
      while (1) {
        if (addr == w->addr) {
          /* Yes: get value from write set (or from memory if mask was empty) */
          value = (w->mask == 0 ? ATOMIC_LOAD(addr) : w->value);
          break;
        }
        if (w->next == NULL) {
          /* No: get value from memory */
          value = ATOMIC_LOAD(addr);
          break;
        }
        w = w->next;
      }
      /* No need to add to read set (will remain valid) */
      return value;
    }
    /* Conflict: CM kicks in (we could also check for duplicate reads and get value from read set) */
    tx->c_lock = lock;
    /* Abort */
    tx->aborts_locked_read++;
    stm_rollback(tx, STM_ABORT_RW_CONFLICT);
    return 0;
  } else {
    /* Not locked */
    value = ATOMIC_LOAD_ACQ(addr);
    l2 = ATOMIC_LOAD_ACQ(lock);
    if (l != l2) {
      /* Lock changed between the two loads: re-examine the fresh value */
      l = l2;
      goto restart_no_load;
    }
    /* Check timestamp */
    version = LOCK_GET_TIMESTAMP(l);
    /* Valid version? */
    if (version > tx->end) {
      /* No: try to extend first (except for read-only transactions: no read set) */
      if (tx->ro || !tx->can_extend || !stm_extend(tx)) {
        /* Not much we can do: abort */
        tx->aborts_validate_read++;
        stm_rollback(tx, STM_ABORT_VAL_READ);
        return 0;
      }
      /* Verify that version has not been overwritten (read value has not
       * yet been added to read set and may have not been checked during
       * extend) */
      l = ATOMIC_LOAD_ACQ(lock);
      if (l != l2) {
        /* BUGFIX: here `l` holds the FRESH lock value and `l2` the stale
         * snapshot; the original reassigned `l = l2` before retrying, which
         * re-validated against outdated state and forced an extra loop
         * iteration to converge.  Keep the fresh value and retry. */
        goto restart_no_load;
      }
      /* Worked: we now have a good version (version <= tx->end) */
    }
  }
  /* We have a good version: add to read set (update transactions) and return value */
  if (!tx->ro) {
    /* Add address and version to read set */
    if (tx->r_set.nb_entries == tx->r_set.size)
      stm_allocate_rs_entries(tx, 1);
    r = &tx->r_set.entries[tx->r_set.nb_entries++];
    r->version = version;
    r->lock = lock;
  }
  return value;
}