// Slow path of shared (reader) lock acquisition; entered after the caller's
// fast path failed. `val` is the lock word value the caller observed.
void shared_mutex::imp_lock_shared(u32 val)
{
	// Project assertion idiom: verify(...) returns a checker object and an
	// overloaded comma operator evaluates the condition that follows.
	// Guards against the observed value having entered the error range.
	verify("shared_mutex underflow" HERE), val < c_err;

	// Short spin phase: retry the cheap fast path a few times before
	// committing to the blocking protocol below.
	for (int i = 0; i < 10; i++)
	{
		busy_wait();

		if (try_lock_shared())
		{
			return;
		}
	}

	// Acquire writer lock and downgrade
	const u32 old = m_value.fetch_add(c_one);

	if (old == 0)
	{
		// Lock word was zero: we obtained exclusive ownership outright;
		// convert it to shared ownership.
		lock_downgrade();
		return;
	}

	// Adding c_one must not carry into the signal field of the lock word
	// (c_sig is presumably the signal-bit boundary — see constant defs).
	verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig;

	// NOTE(review): imp_wait() presumably blocks until ownership is handed
	// over by an unlocker; on return we hold the lock exclusively and
	// downgrade to shared — confirm against imp_wait/unlock implementation.
	imp_wait();
	lock_downgrade();
}
void lock_shared() noexcept { for (;;) { if (try_lock_shared()) { return; } uintreg value; do { value = _bits.load(std::memory_order_acquire); } while ((value & WRITER) != 0); } }
// Blocking shared (reader) lock acquisition. Fast path is try_lock_shared();
// otherwise falls back to the mutex/condvar protocol below. Note: the wait
// predicates have side effects (they claim counters atomically), which is
// what makes the hand-off work.
void shared_mutex_t::lock_shared()
{
	if (!try_lock_shared())
	{
		std::unique_lock<std::mutex> lock(m_mutex);

		// Register as a waiting reader. If the waiting_readers counter is
		// saturated at UINT16_MAX, block on m_wrcv until a slot frees up.
		m_wrcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
		{
			if (info.waiting_readers < UINT16_MAX)
			{
				info.waiting_readers++;
				return true;
			}

			return false;
		})));

		// Wait until no writer holds or awaits the lock (writers have
		// priority here) and there is room for another reader, then claim
		// a reader slot.
		m_rcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
		{
			if (!info.writers && !info.waiting_writers && info.readers < MAX_READERS)
			{
				info.readers++;
				return true;
			}

			return false;
		})));

		// Deregister from the waiting-reader count; a decrement past zero
		// would indicate corrupted bookkeeping.
		const auto info = m_info.atomic_op([](ownership_info_t& info)
		{
			if (!info.waiting_readers--)
			{
				throw EXCEPTION("Invalid value");
			}
		});

		// NOTE(review): `info` presumably snapshots the pre-decrement state;
		// if waiting_readers was saturated (UINT16_MAX), our decrement just
		// freed a slot, so wake one reader blocked on m_wrcv above — confirm
		// which value atomic_op returns for a void-returning functor.
		if (info.waiting_readers == UINT16_MAX)
		{
			m_wrcv.notify_one();
		}
	}
}