// Releases the write (exclusive) side of the lock.
// Must be called by the same thread that acquired it via Write(); nested
// acquisitions are unwound one level at a time, and only the outermost
// release returns the mask to the unlocked state.
void ReadWriteLock::Release(WriteGuard&)
{
    // Only the owning thread may release, and the re-entrancy count must
    // never have gone negative.
    KS_ASSERT(mWriteEntryThread == GetCurrentThreadId() && mReentrancyCount >= 0);
    // If re-entered, drop one nesting level. atomic_decrement appears to
    // return the post-decrement value (see the >= 0 assert in the
    // ReadGuard release) -- TODO confirm; "+ 1" then yields the
    // pre-decrement count, which is > 0 on this branch, so the outer
    // release below is skipped whenever nesting remains.
    int reentrants = mReentrancyCount > 0 ? atomic_decrement((u32*)&mReentrancyCount) + 1 : 0;
    if (reentrants == 0)
    {
        // Outermost release: clear the recorded owner before opening the
        // mask so no thread can see the lock free while a stale owner id
        // is still published.
        mWriteEntryThread = 0;
        const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
        // Swap the exclusive-write key back to the fully-unlocked state.
        // A mismatch means the mask did not hold the write key, i.e. a
        // double release / release-without-acquire.
        if (atomic_compare_and_swap(&mMutualExclusivityMask, lock_key, exclusive_none) != lock_key)
        {
            KS_ASSERT( ! "ReadWriteLockException::eAlreadyUnlocked" );
        }
    }
}
// Acquires the lock in write (exclusive) mode.
// Re-entrant for the thread that already holds the write lock: nested calls
// only bump mReentrancyCount instead of deadlocking. Blocks (via cond_wait)
// while any readers or another writer hold the lock.
WriteGuard ReadWriteLock::Write()
{
    const ThreadID threadID = GetCurrentThreadId();
    const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
    // Attempt to move the mask from the fully-unlocked state straight to
    // the exclusive-write key; anything else in the mask (readers or a
    // writer) makes the CAS fail.
    while (atomic_compare_and_swap(&mMutualExclusivityMask, exclusive_none, lock_key) != exclusive_none)
    {
        if (threadID == mWriteEntryThread)
        {
            // We already own the write lock -- record the re-entry and
            // stop contending.
            atomic_increment((u32*)&mReentrancyCount);
            break;
        }
        // Held by someone else: wait and retry. cond_wait is defined
        // elsewhere -- NOTE(review): the loop tolerates spurious wakeups
        // since the CAS is re-checked every pass; confirm its semantics.
        cond_wait();
    }
    // On first acquisition the recorded owner must be clear; on re-entry
    // it must already be this thread.
    KS_ASSERT(mWriteEntryThread == 0 || mWriteEntryThread == threadID);
    mWriteEntryThread = threadID;
    return WriteGuard(this);
}
// Releases one read (shared) hold on the lock.
// Normal path: atomically decrement the reader count packed in the mask.
// Special path: a read taken while this thread holds the write lock is
// tracked in mReentrancyCount instead of the mask, and is released there.
void ReadWriteLock::Release(ReadGuard&)
{
    const u32 lock_key = EXCL_ENCODE(exclusive_write, 0);
    u32 mutual_mask = mMutualExclusivityMask;        // snapshot of packed lock state
    u32 current_key = mutual_mask & READ_COUNT_MASK; // reader-count bits only
    u32 exit_key = current_key - 1;                  // same state, one fewer reader
    // The CAS compares against the reader bits alone, so it fails not only
    // on a racing reader update but also whenever a writer's bits are set
    // in the mask -- that second case is how the re-entrant branch below
    // gets reached.
    while (atomic_compare_and_swap(&mMutualExclusivityMask, current_key, exit_key) != current_key )
    {
        if (mutual_mask == lock_key && mWriteEntryThread == GetCurrentThreadId()) // it's a re-entrant read
        {
            // Read was acquired under our own write lock: undo the
            // re-entrancy bump rather than touching the mask.
            // atomic_decrement appears to return the post-decrement value
            // -- TODO confirm.
            int rentrants = atomic_decrement((u32*)&mReentrancyCount);
            KS_ASSERT(rentrants >= 0);
            break;
        }
        // Contended: wait, then re-read the mask and recompute the
        // expected/target values before retrying.
        cond_wait();
        mutual_mask = mMutualExclusivityMask;
        current_key = mutual_mask & READ_COUNT_MASK;
        exit_key = current_key - 1;
    }
}
/// A ReadWriteLock must be fully released before destruction: any non-zero
/// mask means a ReadGuard or WriteGuard is still alive somewhere.
ReadWriteLock::~ReadWriteLock()
{
    KS_ASSERT(0 == mMutualExclusivityMask);
}
/// Map the leading byte_size bytes of this buffer for CPU access.
/// @param byte_size  number of bytes to map; must not exceed the buffer's
///                   allocated size (m_size)
/// @param access     GL access bitfield (declaration default:
///                   GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)
/// @return pointer to the mapped range, or null if glMapBufferRange fails
void *GPUBuffer::map(unsigned byte_size, unsigned access /*= GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT*/)
{
    KS_ASSERT(byte_size <= m_size);
    // glMapBufferRange operates on the currently bound buffer object, so
    // the buffer must be bound to m_target first.
    bind();
    void *mapped = glMapBufferRange(m_target, 0, byte_size, access);
    return mapped;
}