/**
 * Set the filter bit corresponding to |val|, with strong ordering
 * guarantees.
 *
 * The address is hashed to a bit index, which is split into a word
 * (block) and a bit position (offset) within that word.
 *
 * On x86 the update is performed via atomicswapptr, whose underlying
 * xchg carries a full fence, so the store is globally ordered.  On
 * other CPUs a plain read-modify-write is followed by a WBR
 * (write-before-read) barrier.
 *
 * NOTE(review): on the x86 path the read of word_filter[block] is NOT
 * part of the swap, so two concurrent setters can each read the old
 * word and one set bit can be lost — presumably callers serialize
 * writers or tolerate this; confirm against call sites.
 */
ALWAYS_INLINE void atomic_add(const void* const val) volatile
{
    // hash the address down to a bit index, then split into word + bit
    const uint32_t index = hash(val);
    const uint32_t block = index / WORD_SIZE;
    const uint32_t offset = index % WORD_SIZE;
#if defined(STM_CPU_X86)
    // swap provides the fence; the stored value is old-word OR new-bit
    atomicswapptr(&word_filter[block],
                  word_filter[block] | (1u << offset));
#else
    word_filter[block] |= (1u << offset);
    WBR; // order the store before subsequent reads by this thread
#endif
}
/**
 * MCS lock acquire.  Counts how many iterations we spin so that callers
 * can detect unusually long delays.
 *
 * @param lock  tail pointer of the MCS queue
 * @param mine  caller-owned qnode to enqueue behind the current tail
 * @return      the number of spin iterations spent waiting for our flag
 *              to be cleared by our predecessor (0 if the lock was free)
 */
inline int mcs_acquire(mcs_qnode_t** lock, mcs_qnode_t* mine)
{
    // Prepare our node, then atomically install it as the new queue tail;
    // the swap returns whoever was the tail before us.
    mine->next = 0;
    mcs_qnode_t* predecessor = (mcs_qnode_t*)atomicswapptr(lock, mine);

    int spins = 0;
    if (predecessor != 0) {
        // Queue was non-empty: raise our flag *before* linking ourselves
        // behind the predecessor, then spin until it hands the lock off
        // by clearing our flag.
        mine->flag = true;
        predecessor->next = mine;
        while (mine->flag) {
            spins++;
        }
    }
    return spins;
}