/**
 *  Runs when a ProfileTM transaction commits: ask the current policy's
 *  'decider' which algorithm to run next, then set up metadata and switch.
 */
void profile_oncomplete(TxThread* tx)
{
    // Subtle invariant: install_algorithm() requires that its caller has
    // already installed begin_blocker, otherwise it can race with TxThread
    // constructors (when we switched /to/ ProfileTM, begin_blocker was
    // installed before the algorithm change and removed afterward).
    //
    // Since no transactions can start while ProfileTM is active, we need
    // not wait for in-flight transactions to commit/abort.  The CAS loop
    // below handles the case where tmbegin is /already/ begin_blocker due
    // to a concurrent set_policy() or TxThread() call.
    while (!bcasptr(&TxThread::tmbegin, stms[curr_policy.ALG_ID].begin,
                    &begin_blocker))
        spin64();

    // Let the active policy choose the successor algorithm
    uint32_t next_alg = pols[curr_policy.POL_ID].decider();

    // Re-tune thresholds for the chosen algorithm
    adjust_thresholds(next_alg, curr_policy.PREPROFILE_ALG);

    // Set the instrumentation level and make the switch
    install_algorithm(next_alg, tx);
}
/**
 *  TML requires this to be called before every write.
 *
 *  Acquires the global TML sequence lock by CAS-ing the timestamp from the
 *  (even) value observed at begin time to that value plus one (odd == held).
 *  A failed CAS means another writer intervened, so the transaction aborts.
 *
 *  NOTE(review): the original text here was corrupted by an HTML-entity
 *  mangle ("&times;tamp.val"); restored to "&timestamp.val", matching the
 *  standard TML acquire sequence.
 *
 *  @param tx  the calling thread's transaction descriptor
 */
inline void beforewrite_TML(TxThread* tx)
{
    // acquire the lock, abort on failure
    if (!bcasptr(&timestamp.val, tx->start_time, tx->start_time + 1))
        tx->tmabort(tx);
    // track the new (odd) lock value so commit can release it
    ++tx->start_time;
    tx->tmlHasLock = true;
}
/**
 *  Set a bit in this rrec.
 *
 *  Fast path: if the bit is already set, return without any atomic op.
 *  Otherwise set it atomically.  On x86/GCC we use a single fetch_and_or,
 *  which makes this consistent with setif() and unsetbit(); elsewhere we
 *  fall back to a CAS retry loop.
 *
 *  @param slot  index of the bit to set
 */
inline void rrec_t::setbit(unsigned slot)
{
    uint32_t bucket = slot / BITS;
    uintptr_t mask = 1lu << (slot % BITS);
    uintptr_t oldval = bits[bucket];
    // already set: nothing to do
    if (oldval & mask)
        return;
#if defined(STM_CPU_X86) && defined(STM_CC_GCC)
    // single atomic OR, same fast path as setif()/unsetbit()
    __sync_fetch_and_or(&bits[bucket], mask);
#else
    // portable fallback: CAS until the OR takes effect
    while (true) {
        if (bcasptr(&bits[bucket], oldval, (oldval | mask)))
            return;
        oldval = bits[bucket];
    }
#endif
}
/**
 *  MCS lock release.
 *
 *  If a successor is already linked, just hand the lock to it.  If we
 *  appear to be alone, try to swing the tail pointer back to NULL; when
 *  that CAS fails, a new arrival is mid-enqueue, so spin until it links
 *  itself and then hand off as usual.
 */
inline void mcs_release(mcs_qnode_t** lock, mcs_qnode_t* mine)
{
    if (mine->next == 0) {
        // looks like nobody is waiting: try to clear the tail and be done
        if (bcasptr(lock, mine, static_cast<mcs_qnode_t*>(NULL)))
            return;
        // CAS failed: an arriver grabbed the tail; wait for its link
        while (mine->next == 0) { } // spin
    }
    // successor exists: release it by clearing its wait flag
    mine->next->flag = false;
}
/**
 *  On abort, try to grab the hourglass token once aborts exceed a
 *  threshold.
 *
 *  @param tx  the aborting thread's transaction descriptor
 */
static void onAbort(TxThread* tx)
{
    // already holding the hourglass: just record the abort
    if (tx->strong_HG) {
        tx->abort_hist.onHGAbort();
        return;
    }
    // past the consecutive-abort threshold, try (once) to take the token
    if (tx->consec_aborts > ABORT_THRESHOLD &&
        bcasptr(&fcm_timestamp.val, 0ul, 1ul))
    {
        tx->strong_HG = true;
    }
    // NB: as before, some counting opportunities here
}
/**
 *  On abort, try to grab the hourglass token once aborts exceed a
 *  threshold; below the threshold, back off instead.
 *
 *  @param tx  the aborting thread's transaction descriptor
 */
static void onAbort(TxThread* tx)
{
    // already holding the hourglass: just record the abort
    if (tx->strong_HG) {
        tx->abort_hist.onHGAbort();
        return;
    }
    // below the consecutive-abort threshold: randomized exponential backoff
    if (tx->consec_aborts <= ABORT_THRESHOLD) {
        exp_backoff(tx);
        return;
    }
    // past the threshold: one attempt to take the hourglass token
    if (bcasptr(&fcm_timestamp.val, 0ul, 1ul))
        tx->strong_HG = true;
}
/*** unset a bit */ inline void rrec_t::unsetbit(unsigned slot) { uint32_t bucket = slot / BITS; uintptr_t mask = 1lu<<(slot % BITS); uintptr_t unmask = ~mask; uintptr_t oldval = bits[bucket]; if (!(oldval & mask)) return; // NB: this GCC-specific code #if defined(STM_CPU_X86) && defined(STM_CC_GCC) __sync_fetch_and_and(&bits[bucket], unmask); #else while (true) { if (bcasptr(&bits[bucket], oldval, (oldval & unmask))) return; oldval = bits[bucket]; } #endif }
/*** combine test and set */ inline bool rrec_t::setif(unsigned slot) { uint32_t bucket = slot / BITS; uintptr_t mask = 1lu<<(slot % BITS); uintptr_t oldval = bits[bucket]; if (oldval & mask) return false; // NB: We don't have suncc fetch_and_or, so there is an ifdef here that // falls back to a costly CAS-based atomic or #if defined(STM_CPU_X86) && defined(STM_CC_GCC) /* little endian */ __sync_fetch_and_or(&bits[bucket], mask); return true; #else while (true) { if (bcasptr(&bits[bucket], oldval, oldval | mask)) return true; oldval = bits[bucket]; } #endif }
/**
 *  On abort, once aborts exceed a threshold, keep trying to grab the
 *  hourglass token, spinning while another thread holds it.
 *
 *  @param tx  the aborting thread's transaction descriptor
 */
static void onAbort(TxThread* tx)
{
    // already holding the hourglass: just record the abort
    if (tx->strong_HG) {
        tx->abort_hist.onHGAbort();
        return;
    }
    // not yet past the consecutive-abort threshold: nothing to do
    if (tx->consec_aborts <= ABORT_THRESHOLD)
        return;
    // retry until we own the token: attempt the CAS, and when the token
    // is held elsewhere, spin until it is released before retrying
    for (;;) {
        if (bcasptr(&fcm_timestamp.val, 0ul, 1ul)) {
            tx->strong_HG = true;
            return;
        }
        while (fcm_timestamp.val) { } // spin
    }
    // NB: It would be good to explore what happens if I have a
    // strong_HG already?  Can we count how many times I abort with
    // strong_HG?
}