stm_word_t hytm_load(TXPARAMS volatile stm_word_t *addr)
{
  volatile stm_word_t *lock;
  stm_word_t l;

  lock = GET_LOCK(addr);
  /* Load descriptor using ASF */
  l = asf_lock_load64((long unsigned int *)lock);
  if (unlikely(LOCK_GET_WRITE(l))) {
    /* A software transaction is currently using this descriptor */
    asf_abort(ASF_RETRY);
    /* Unreachable */
    return 0;
  }
  /* The read may return an inconsistent value, but the ASF transaction will then abort after a few cycles */
  return *addr;
}
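/*
 * Illustrative sketch only (not the definition used in this file): GET_LOCK()
 * typically hashes an application address into a global array of versioned
 * lock words/descriptors.  The array size, shift and the global `locks`
 * array below are assumptions made for the sake of the example.
 */
#define SKETCH_LOCK_ARRAY_LOG_SIZE  20    /* assumed number of lock bits */
#define SKETCH_LOCK_SHIFT           2     /* assumed word granularity */
#define SKETCH_LOCK_IDX(a)          (((stm_word_t)(a) >> SKETCH_LOCK_SHIFT) & \
                                     ((1 << SKETCH_LOCK_ARRAY_LOG_SIZE) - 1))
#define SKETCH_GET_LOCK(a)          (locks + SKETCH_LOCK_IDX(a))  /* `locks`: assumed global array of stm_word_t */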
void hytm_store(TXPARAMS volatile stm_word_t *addr, stm_word_t value)
{
  TX_GET;
  volatile stm_word_t *lock;
  stm_word_t l;

  lock = GET_LOCK(addr);
  /* Load descriptor using ASF */
  l = asf_lock_load64((long unsigned int *)lock);
  if (unlikely(LOCK_GET_WRITE(l))) {
    /* A software transaction is currently using this descriptor: the ASF transaction has to give up */
    asf_abort(ASF_RETRY);
    /* XXX mark as unreachable */
    return;
  }
  /* Write the value using ASF */
  asf_lock_store64((long unsigned int *)addr, value);
  /* Add to write set so the locks can be updated when we acquire the timestamp (TS) */
  /* XXX This could overflow if there are many writes to the same address. */
  tx->w_set.entries[tx->w_set.nb_entries++].lock = lock;
}
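/*
 * Illustrative sketch only: how the write set recorded above could be
 * consumed at commit time in this hybrid scheme.  This is NOT the commit
 * routine of this file; hytm_commit_sketch(), FETCH_INC_CLOCK,
 * LOCK_SET_TIMESTAMP() and asf_commit() are assumed names used purely to
 * illustrate the "update the locks when we acquire TS" step.
 */
static int hytm_commit_sketch(TXPARAMS_ALONE)           /* hypothetical */
{
  TX_GET;
  stm_word_t t;
  unsigned int i;

  /* Acquire a commit timestamp from the global version clock (assumed macro) */
  t = FETCH_INC_CLOCK + 1;
  /* Publish the new version in every lock touched by hytm_store() */
  for (i = 0; i < tx->w_set.nb_entries; i++)
    asf_lock_store64((long unsigned int *)tx->w_set.entries[i].lock,
                     LOCK_SET_TIMESTAMP(t));             /* assumed lock encoding */
  /* The enclosing ASF speculative region then commits atomically (assumed primitive) */
  asf_commit();
  return 1;
}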
/*
 * Load a word-sized value (invisible read).
 */
static inline stm_word_t stm_read_invisible(stm_tx_t *tx, volatile stm_word_t *addr)
{
  volatile stm_word_t *lock;
  stm_word_t l, l2, value, version;
  r_entry_t *r;
  w_entry_t *w;

  PRINT_DEBUG2("==> stm_read_invisible(t=%p[%lu-%lu],a=%p)\n",
               tx, (unsigned long)tx->start, (unsigned long)tx->end, addr);

  assert(IS_ACTIVE(tx->status));

  /* Get reference to lock */
  lock = GET_LOCK(addr);

  /* Note: we could check for duplicate reads and get value from read set */

  /* Read lock, value, lock */
 restart:
  l = ATOMIC_LOAD_ACQ(lock);
 restart_no_load:
  if (LOCK_GET_WRITE(l)) {
    /* Locked */
    if (l == LOCK_UNIT) {
      /* Data modified by a unit store: should not last long => retry */
      goto restart;
    }
    /* Do we own the lock? */
    w = (w_entry_t *)LOCK_GET_ADDR(l);
    /* Simply check if address falls inside our write set (avoids non-faulting load) */
    if (tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries) {
      /* Yes: did we previously write the same address? */
      while (1) {
        if (addr == w->addr) {
          /* Yes: get value from write set (or from memory if mask was empty) */
          value = (w->mask == 0 ? ATOMIC_LOAD(addr) : w->value);
          break;
        }
        if (w->next == NULL) {
          /* No: get value from memory */
          value = ATOMIC_LOAD(addr);
          break;
        }
        w = w->next;
      }
      /* No need to add to read set (will remain valid) */
      return value;
    }
    /* Conflict: CM kicks in (we could also check for duplicate reads and get value from read set) */
    tx->c_lock = lock;
    /* Abort */
    tx->aborts_locked_read++;
    stm_rollback(tx, STM_ABORT_RW_CONFLICT);
    return 0;
  } else {
    /* Not locked */
    value = ATOMIC_LOAD_ACQ(addr);
    l2 = ATOMIC_LOAD_ACQ(lock);
    if (l != l2) {
      l = l2;
      goto restart_no_load;
    }
    /* Check timestamp */
    version = LOCK_GET_TIMESTAMP(l);
    /* Valid version? */
    if (version > tx->end) {
      /* No: try to extend first (except for read-only transactions: no read set) */
      if (tx->ro || !tx->can_extend || !stm_extend(tx)) {
        /* Not much we can do: abort */
        tx->aborts_validate_read++;
        stm_rollback(tx, STM_ABORT_VAL_READ);
        return 0;
      }
      /* Verify that version has not been overwritten (read value has not
       * yet been added to read set and may not have been checked during
       * extend) */
      l = ATOMIC_LOAD_ACQ(lock);
      if (l != l2) {
        l = l2;
        goto restart_no_load;
      }
      /* Worked: we now have a good version (version <= tx->end) */
    }
  }
  /* We have a good version: add to read set (update transactions) and return value */
  if (!tx->ro) {
    /* Add address and version to read set */
    if (tx->r_set.nb_entries == tx->r_set.size)
      stm_allocate_rs_entries(tx, 1);
    r = &tx->r_set.entries[tx->r_set.nb_entries++];
    r->version = version;
    r->lock = lock;
  }
  return value;
}
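/*
 * Illustrative sketch only: what the stm_extend() call used in the read
 * path above is expected to do in this timestamp-based design.  It is not
 * the implementation from this file; GET_CLOCK is an assumed accessor for
 * the global version clock, and locks owned by the transaction itself are
 * ignored for simplicity.
 */
static inline int stm_extend_sketch(stm_tx_t *tx)       /* hypothetical */
{
  stm_word_t now = GET_CLOCK;   /* assumed: current value of the global clock */
  r_entry_t *r;
  unsigned int i;

  /* Re-validate every read: each lock must be unlocked and its timestamp
   * must still match the version recorded when the read was performed */
  for (i = 0, r = tx->r_set.entries; i < tx->r_set.nb_entries; i++, r++) {
    stm_word_t l = ATOMIC_LOAD(r->lock);
    if (LOCK_GET_WRITE(l) || LOCK_GET_TIMESTAMP(l) != r->version)
      return 0;                 /* snapshot no longer consistent */
  }
  /* All reads still valid: the snapshot can be extended up to "now" */
  tx->end = now;
  return 1;
}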