/*
 * Called upon thread deletion: merge this thread's local statistics
 * into the global aggregate, then release the per-thread structure.
 */
static void mod_stats_on_thread_exit(TXPARAMS void *arg)
{
  mod_stats_data_t *local;
  unsigned long cur;

  local = (mod_stats_data_t *)stm_get_specific(TXARGS mod_stats_key);
  assert(local != NULL);

  /* Plain sums merge with atomic additions. */
  ATOMIC_FETCH_ADD_FULL(&mod_stats_global.commits, local->commits);
  ATOMIC_FETCH_ADD_FULL(&mod_stats_global.retries_cnt, local->retries_cnt);
  ATOMIC_FETCH_ADD_FULL(&mod_stats_global.retries_acc, local->retries_acc);

  /* Global maximum: CAS-retry until published or no longer larger. */
  do {
    cur = ATOMIC_LOAD(&mod_stats_global.retries_max);
    if (local->retries_max <= cur)
      break;
  } while (ATOMIC_CAS_FULL(&mod_stats_global.retries_max, cur, local->retries_max) == 0);

  /* Global minimum: same scheme, other direction. */
  do {
    cur = ATOMIC_LOAD(&mod_stats_global.retries_min);
    if (local->retries_min >= cur)
      break;
  } while (ATOMIC_CAS_FULL(&mod_stats_global.retries_min, cur, local->retries_min) == 0);

  free(local);
}
static INLINE void abi_exit(void) { TX_GET; char * statistics; abi_exit_thread(tx); /* Ensure thread safety */ reload: if (ATOMIC_LOAD_ACQ(&global_abi.status) == ABI_INITIALIZED) { if (ATOMIC_CAS_FULL(&global_abi.status, ABI_INITIALIZED, ABI_FINALIZING) == 0) goto reload; } else { return; } if ((statistics = getenv("ITM_STATISTICS")) != NULL) { FILE * f; int i = 0; stats_t * ts; if (statistics[0] == '-') f = stdout; else if ((f = fopen("itm.log", "w")) == NULL) { fprintf(stderr, "can't open itm.log for writing\n"); goto finishing; } fprintf(f, "STATS REPORT\n"); fprintf(f, "THREAD TOTALS\n"); while (1) { do { ts = (stats_t *)ATOMIC_LOAD(&thread_stats); if (ts == NULL) goto no_more_stat; } while(ATOMIC_CAS_FULL(&thread_stats, ts, ts->next) == 0); /* Skip stats if not a transactional thread */ if (ts->nb_commits == 0) continue; fprintf(f, "Thread %-4i : %12s %12s %12s %12s\n", i, "Min", "Mean", "Max", "Total"); fprintf(f, " Transactions : %12lu\n", ts->nb_commits); fprintf(f, " %-25s: %12lu %12.2f %12lu %12lu\n", "Retries", ts->nb_retries_min, ts->nb_retries_avg, ts->nb_retries_max, ts->nb_aborts); fprintf(f,"\n"); /* Free the thread stats structure */ free(ts); i++; } no_more_stat: if (f != stdout) { fclose(f); } } finishing: stm_exit(); ATOMIC_STORE(&global_abi.status, ABI_NOT_INITIALIZED); }
static INLINE void abi_init(void) { /* thread safe */ reload: if (ATOMIC_LOAD_ACQ(&global_abi.status) == ABI_NOT_INITIALIZED) { if (ATOMIC_CAS_FULL(&global_abi.status, ABI_NOT_INITIALIZED, ABI_INITIALIZING) != 0) { /* TODO temporary to be sure to use tinySTM */ printf("TinySTM-ABI v%s.\n", _ITM_libraryVersion()); atexit((void (*)(void))(_ITM_finalizeProcess)); /* TinySTM initialization */ stm_init(); mod_mem_init(0); mod_alloc_cpp(); mod_log_init(); mod_cb_init(); ATOMIC_STORE(&global_abi.status, ABI_INITIALIZED); /* Also initialize thread as specify in the specification */ abi_init_thread(); return; } else { goto reload; } } else if (ATOMIC_LOAD_ACQ(&global_abi.status) != ABI_INITIALIZED) { /* Wait the end of the initialization */ goto reload; } return; }
/*
 * Admission control: wait until the number of running transactions is
 * below the configured maximum, then atomically reserve a slot. The
 * collector thread additionally accounts no-tx and spin time.
 */
inline void stm_wait(int id)
{
  TX_GET;
  int active_txs, max_txs, entered = 0;
  stm_time_t start_spin_time = 0; /* only read on the collector thread */

  /* Fast path: try once to grab a slot without waiting. */
  active_txs = running_transactions;
  max_txs = max_allowed_running_transactions;
  if (active_txs < max_txs) {
    if (ATOMIC_CAS_FULL(&running_transactions, active_txs, active_txs + 1) != 0) {
      if (tx->i_am_the_collector_thread == 1) {
        /* BUGFIX: was "start_spin_timepuppet", an undeclared identifier
         * (corrupted token) that could not compile. */
        tx->first_start_tx_time = tx->last_start_tx_time = start_spin_time = STM_TIMER_READ();
        tx->total_no_tx_time += start_spin_time - tx->start_no_tx_time;
      }
      entered = 1;
    }
  }
  if (entered == 0) {
    if (tx->i_am_the_collector_thread == 1) {
      start_spin_time = STM_TIMER_READ();
      tx->total_no_tx_time += start_spin_time - tx->start_no_tx_time;
    }
    int cycle = 300000, i = 1;
    /* Slow path: retry the reservation, backing off with a bounded
     * busy-wait that another thread can cut short by clearing i_am_waiting. */
    while (1) {
      active_txs = running_transactions;
      max_txs = max_allowed_running_transactions;
      if (active_txs < max_txs)
        if (ATOMIC_CAS_FULL(&running_transactions, active_txs, active_txs + 1) != 0)
          break;
      tx->i_am_waiting = 1;
      /* usleep(1); */
      for (i = 0; i < cycle; i++) {
        if (tx->i_am_waiting == 0)
          break;
      }
      tx->i_am_waiting = 0;
    }
  }
  /* Initialization of timing parameters (collector thread only). */
  if (tx->i_am_the_collector_thread == 1) {
    if (entered == 0)
      tx->first_start_tx_time = tx->last_start_tx_time = STM_TIMER_READ();
    tx->start_no_tx_time = 0;
    tx->total_spin_time += tx->first_start_tx_time - start_spin_time;
  }
}
/*
 * Store a word-sized value in a unit transaction.
 *
 * Spins until the lock covering 'addr' is free, then acquires it with a
 * CAS, performs the store, and releases the lock with a fresh timestamp
 * from the global clock.
 *
 * If 'timestamp' is non-NULL it is used as a precondition: when the
 * location's current timestamp is newer than *timestamp, the write is
 * refused, *timestamp is updated to the current value, and 0 is returned.
 * On success *timestamp receives the new (post-write) timestamp and the
 * function returns 1.
 *
 * NOTE(review): 'mask' only appears in the debug trace; the full word is
 * stored unconditionally — confirm callers always pass ~0 here.
 */
static INLINE int stm_unit_write(volatile stm_word_t *addr, stm_word_t value, stm_word_t mask, stm_word_t *timestamp)
{
#ifdef UNIT_TX
  volatile stm_word_t *lock;
  stm_word_t l;

  PRINT_DEBUG2("==> stm_unit_write(a=%p,d=%p-%lu,m=0x%lx)\n",
               addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
  if (LOCK_GET_OWNED(l)) {
    /* Locked: wait until lock is free */
#ifdef WAIT_YIELD
    sched_yield();
#endif /* WAIT_YIELD */
    goto restart;
  }
  /* Not locked */
  if (timestamp != NULL && LOCK_GET_TIMESTAMP(l) > *timestamp) {
    /* Location is newer than the caller's snapshot: return current timestamp */
    *timestamp = LOCK_GET_TIMESTAMP(l);
    return 0;
  }
  /* TODO: would need to store thread ID to be able to kill it (for wait freedom) */
  if (ATOMIC_CAS_FULL(lock, l, LOCK_UNIT) == 0)
    goto restart;
  ATOMIC_STORE(addr, value);
  /* Update timestamp with newer value (may exceed VERSION_MAX by up to MAX_THREADS) */
  l = FETCH_INC_CLOCK + 1;
  if (timestamp != NULL)
    *timestamp = l;
  /* Make sure that lock release becomes visible */
  ATOMIC_STORE_REL(lock, LOCK_SET_TIMESTAMP(l));
  if (unlikely(l >= VERSION_MAX)) {
    /* Block all transactions and reset clock (current thread is not in active transaction) */
    stm_quiesce_barrier(NULL, rollover_clock, NULL);
  }
  return 1;
#else /* ! UNIT_TX */
  fprintf(stderr, "Unit transaction is not enabled\n");
  exit(-1);
  return 1;
#endif /* ! UNIT_TX */
}
/*
 * Per-thread ABI teardown: would publish this thread's statistics to the
 * global list (currently compiled out with #if 0), then terminates the
 * STM thread context and, under DTMC, frees the saved stack.
 */
static void abi_exit_thread(struct stm_tx *tx)
{
  /* Nothing to do if the thread never initialized a transaction context. */
  if (tx == NULL)
    return;

#if 0 /* FIXME disable during refactoring */
  if (getenv("ITM_STATISTICS") != NULL) {
    stats_t * ts = malloc(sizeof(stats_t));
#ifdef TLS
    thread_abi_t *t = thread_abi;
#else /* ! TLS */
    thread_abi_t *t = pthread_getspecific(thread_abi);
#endif /* ! TLS */
    ts->thread_id = t->thread_id;
    stm_get_local_stats("nb_commits", &ts->nb_commits);
    stm_get_local_stats("nb_aborts", &ts->nb_aborts);
    stm_get_local_stats("nb_retries_avg", &ts->nb_retries_avg);
    stm_get_local_stats("nb_retries_min", &ts->nb_retries_min);
    stm_get_local_stats("nb_retries_max", &ts->nb_retries_max);
    /* Register thread-statistics to global (lock-free list push) */
    do {
      ts->next = (stats_t *)ATOMIC_LOAD(&thread_stats);
    } while (ATOMIC_CAS_FULL(&thread_stats, ts->next, ts) == 0);
    /* ts will be freed on _ITM_finalizeProcess. */
#ifdef TLS
    thread_abi = NULL;
#else /* ! TLS */
    pthread_setspecific(thread_abi, NULL);
#endif
    /* Free thread_abi_t structure. */
    free(t);
  }
#endif
  stm_exit_thread();
#ifdef TM_DTMC
  /* Free the saved stack */
  tanger_stm_free_stack();
#endif
}
/*
 * Set the CURRENT transaction as irrevocable.
 *
 * 'serial' requests serial-irrevocable mode (all other threads stopped);
 * serial == -1 marks a request coming from an abort handler. Returns 1 on
 * success; returns 0 when the transaction must be restarted (it will
 * re-acquire irrevocability on the next attempt via tx->irrevocable).
 *
 * tx->irrevocable encoding: low 3 bits are the acquisition stage
 * (1 = requested, 2 = lock held, 3 = fully irrevocable); bit 0x08 flags
 * serial mode.
 */
static INLINE int int_stm_set_irrevocable(stm_tx_t *tx, int serial)
{
#ifdef IRREVOCABLE_ENABLED
# if CM == CM_MODULAR
  stm_word_t t;
# endif /* CM == CM_MODULAR */

  if (!IS_ACTIVE(tx->status) && serial != -1) {
    /* Request irrevocability outside of a transaction or in abort handler (for next execution) */
    tx->irrevocable = 1 + (serial ? 0x08 : 0);
    return 0;
  }

  /* Are we already in irrevocable mode? */
  if ((tx->irrevocable & 0x07) == 3) {
    return 1;
  }

  if (tx->irrevocable == 0) {
    /* Acquire irrevocability for the first time */
    tx->irrevocable = 1 + (serial ? 0x08 : 0);
#ifdef HYBRID_ASF
    /* TODO: we shouldn't use pthread_mutex/cond since it could use syscall. */
    if (tx->software == 0) {
      asf_abort(ASF_RETRY_IRREVOCABLE);
      return 0;
    }
#endif /* HYBRID_ASF */
    /* Try acquiring global lock */
    if (_tinystm.irrevocable == 1 || ATOMIC_CAS_FULL(&_tinystm.irrevocable, 0, 1) == 0) {
      /* Transaction will acquire irrevocability after rollback */
      stm_rollback(tx, STM_ABORT_IRREVOCABLE);
      return 0;
    }
    /* Success: remember we have the lock */
    tx->irrevocable++;
    /* Try validating transaction */
#if DESIGN == WRITE_BACK_ETL
    if (!stm_wbetl_validate(tx)) {
      stm_rollback(tx, STM_ABORT_VALIDATE);
      return 0;
    }
#elif DESIGN == WRITE_BACK_CTL
    if (!stm_wbctl_validate(tx)) {
      stm_rollback(tx, STM_ABORT_VALIDATE);
      return 0;
    }
#elif DESIGN == WRITE_THROUGH
    if (!stm_wt_validate(tx)) {
      stm_rollback(tx, STM_ABORT_VALIDATE);
      return 0;
    }
#elif DESIGN == MODULAR
    /* BUGFIX: the validation results were not negated, so a SUCCESSFUL
     * validation triggered a rollback and a failed one was accepted —
     * the inverse of the other DESIGN branches. Roll back only when the
     * design-specific validation fails. */
    if ((tx->attr.id == WRITE_BACK_CTL && !stm_wbctl_validate(tx))
        || (tx->attr.id == WRITE_THROUGH && !stm_wt_validate(tx))
        || (tx->attr.id != WRITE_BACK_CTL && tx->attr.id != WRITE_THROUGH
            && !stm_wbetl_validate(tx))) {
      stm_rollback(tx, STM_ABORT_VALIDATE);
      return 0;
    }
#endif /* DESIGN == MODULAR */
# if CM == CM_MODULAR
    /* We might still abort if we cannot set status (e.g., we are being killed) */
    t = tx->status;
    if (GET_STATUS(t) != TX_ACTIVE || ATOMIC_CAS_FULL(&tx->status, t, t + (TX_IRREVOCABLE - TX_ACTIVE)) == 0) {
      stm_rollback(tx, STM_ABORT_KILLED);
      return 0;
    }
# endif /* CM == CM_MODULAR */
    if (serial && tx->w_set.nb_entries != 0) {
      /* TODO: or commit the transaction when we have the irrevocability. */
      /* Don't mix transactional and direct accesses => restart with direct accesses */
      stm_rollback(tx, STM_ABORT_IRREVOCABLE);
      return 0;
    }
  } else if ((tx->irrevocable & 0x07) == 1) {
    /* Acquire irrevocability after restart (no need to validate) */
    while (_tinystm.irrevocable == 1 || ATOMIC_CAS_FULL(&_tinystm.irrevocable, 0, 1) == 0)
      ;
    /* Success: remember we have the lock */
    tx->irrevocable++;
  }
  assert((tx->irrevocable & 0x07) == 2);

  /* Are we in serial irrevocable mode? */
  if ((tx->irrevocable & 0x08) != 0) {
    /* Stop all other threads */
    if (stm_quiesce(tx, 1) != 0) {
      /* Another thread is quiescing and we are active (trying to acquire irrevocability) */
      assert(serial != -1);
      stm_rollback(tx, STM_ABORT_IRREVOCABLE);
      return 0;
    }
  }

  /* We are in irrevocable mode */
  tx->irrevocable++;

#else /* ! IRREVOCABLE_ENABLED */
  fprintf(stderr, "Irrevocability is not supported in this configuration\n");
  exit(-1);
#endif /* ! IRREVOCABLE_ENABLED */
  return 1;
}
/*
 * Store a word-sized value (return write set entry or NULL).
 *
 * Encounter-time-locking (ETL) write: acquires the versioned lock that
 * covers 'addr' and records the store in the transaction's write set.
 * Only the bits selected by 'mask' are written; mask == 0 reserves the
 * entry without storing a value. Returns the write-set entry on success,
 * or NULL after calling stm_rollback() (read-only violation, write-write
 * conflict, or failed validation on a newer version).
 */
static inline w_entry_t *stm_write(stm_tx_t *tx, volatile stm_word_t *addr, stm_word_t value, stm_word_t mask)
{
  volatile stm_word_t *lock;
  stm_word_t l, version;
  w_entry_t *w;
  w_entry_t *prev = NULL;

  PRINT_DEBUG2("==> stm_write(t=%p[%lu-%lu],a=%p,d=%p-%lu,m=0x%lx)\n",
               tx, (unsigned long)tx->start, (unsigned long)tx->end, addr, (void *)value, (unsigned long)value, (unsigned long)mask);

  assert(IS_ACTIVE(tx->status));

  if (tx->ro) {
    /* Disable read-only and abort */
    assert(tx->attr != NULL);
    /* Update attributes to inform the caller */
    tx->attr->read_only = 0;
    tx->aborts_ro++;
    stm_rollback(tx, STM_ABORT_RO_WRITE);
    return NULL;
  }

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Try to acquire lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
 restart_no_load:
  if (LOCK_GET_OWNED(l)) {
    /* Locked */
    if (l == LOCK_UNIT) {
      /* Data modified by a unit store: should not last long => retry */
      goto restart;
    }
    /* Do we own the lock? */
    w = (w_entry_t *)LOCK_GET_ADDR(l);
    /* Simply check if address falls inside our write set (avoids non-faulting load) */
    if (tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries) {
      /* Yes */
      if (mask == 0) {
        /* No need to insert new entry or modify existing one */
        return w;
      }
      prev = w;
      /* Did we previously write the same address? */
      while (1) {
        if (addr == prev->addr) {
          /* No need to add to write set: merge the new bits into the entry */
          if (mask != ~(stm_word_t)0) {
            if (prev->mask == 0)
              prev->value = ATOMIC_LOAD(addr);
            value = (prev->value & ~mask) | (value & mask);
          }
          prev->value = value;
          prev->mask |= mask;
          return prev;
        }
        if (prev->next == NULL) {
          /* Remember last entry in linked list (for adding new entry) */
          break;
        }
        prev = prev->next;
      }
      /* Get version from previous write set entry (all entries in linked list have same version) */
      version = prev->version;
      /* Must add to write set */
      if (tx->w_set.nb_entries == tx->w_set.size)
        stm_allocate_ws_entries(tx, 1);
      w = &tx->w_set.entries[tx->w_set.nb_entries];
      goto do_write;
    }
    /* Conflict: CM kicks in */
    tx->c_lock = lock;
    /* Abort */
    tx->aborts_locked_write++;
    stm_rollback(tx, STM_ABORT_WW_CONFLICT);
    return NULL;
  }
  /* Not locked */
  /* Handle write after reads (before CAS) */
  version = LOCK_GET_TIMESTAMP(l);
 acquire:
  if (version > tx->end) {
    /* We might have read an older version previously */
    if (!tx->can_extend || stm_has_read(tx, lock) != NULL) {
      /* Read version must be older (otherwise, tx->end >= version) */
      /* Not much we can do: abort */
      tx->aborts_validate_write++;
      stm_rollback(tx, STM_ABORT_VAL_WRITE);
      return NULL;
    }
  }
  /* Acquire lock (ETL) */
  if (tx->w_set.nb_entries == tx->w_set.size)
    stm_allocate_ws_entries(tx, 1);
  w = &tx->w_set.entries[tx->w_set.nb_entries];
  if (ATOMIC_CAS_FULL(lock, l, LOCK_SET_ADDR_WRITE((stm_word_t)w)) == 0)
    goto restart;
  /* We own the lock here (ETL) */
 do_write:
  /* Add address to write set */
  w->addr = addr;
  w->mask = mask;
  w->lock = lock;
  if (mask == 0) {
    /* Do not write anything */
#ifndef NDEBUG
    w->value = 0;
#endif /* ! NDEBUG */
  } else {
    /* Remember new value */
    if (mask != ~(stm_word_t)0)
      value = (ATOMIC_LOAD(addr) & ~mask) | (value & mask);
    w->value = value;
  }
  w->version = version;
  w->next = NULL;
  if (prev != NULL) {
    /* Link new entry in list */
    prev->next = w;
  }
  tx->w_set.nb_entries++;
  tx->w_set.has_writes++;

  return w;
}
inline void stm_wait(int id) { TX_GET; int active_txs, max_txs; int entered = 0; stm_time_t start_spin_time = 0; tx->CAS_executed = 0; //check whether executing CAS if (max_concurrent_threads<=max_allowed_running_transactions) { entered = 1; } else { while (1) { active_txs = running_transactions; max_txs = max_allowed_running_transactions; if (active_txs < max_txs) { if (ATOMIC_CAS_FULL(&running_transactions, active_txs, active_txs + 1) != 0) { if (tx->i_am_the_collector_thread == 1) { tx->first_start_tx_time = tx->last_start_tx_time =STM_TIMER_READ(); tx->total_no_tx_time += tx->last_start_tx_time - tx->start_no_tx_time; } entered = 1; tx->CAS_executed = 1; break; } } else break; } } if (entered == 1) { if (tx->i_am_the_collector_thread == 1) { tx->first_start_tx_time = tx->last_start_tx_time = STM_TIMER_READ(); tx->total_no_tx_time += tx->last_start_tx_time - tx->start_no_tx_time; } } else { if(tx->i_am_the_collector_thread==1){ //collect statistics start_spin_time=STM_TIMER_READ(); tx->total_no_tx_time+=start_spin_time - tx->start_no_tx_time; } //stm_time_t start; //start = STM_TIMER_READ(); //usleep(10000); //printf("\nSleep time: %llu", STM_TIMER_READ()-start); int i, max_cycles=500000; while(1){ active_txs=running_transactions; max_txs=max_allowed_running_transactions; if(active_txs<max_txs) if (ATOMIC_CAS_FULL(&running_transactions, active_txs, active_txs+1)!= 0) { tx->CAS_executed=1; break; } tx->i_am_waiting=1; for(i=0;i<max_cycles;i++) { if(tx->i_am_waiting==0)break; } tx->i_am_waiting=0; } if (tx->i_am_the_collector_thread==1) { tx->first_start_tx_time=tx->last_start_tx_time=STM_TIMER_READ(); tx->total_spin_time+=tx->first_start_tx_time-start_spin_time; } } if (tx->i_am_the_collector_thread==1) { tx->start_no_tx_time=0; } }