///////////////////////////////////////////////////////////////////////////////
// UpdateInterestBits - Update the interest (desired notifications) for the given observer
Error CSubject::UpdateInterestBits( IObserver* pObserver, u32 uInInterestBits )
{
    // No need to check pObserver for NULL, since the find below works correctly in any case
    Error curError = Errors::Failure;

#if SUPPORT_CONCURRENT_ATTACH_DETACH_TO_SUBJECTS
    SpinWait::Lock lock(m_observerListMutex);
#endif

    // Find the given observer in our observer list
    ObserverList::iterator it = std::find(m_observerList.begin(), m_observerList.end(), pObserver);
    if ( it != m_observerList.end() )
    {
#if SUPPORT_CONCURRENT_ATTACH_DETACH_TO_SUBJECTS
        // We are under the lock in this case
        it->m_interestBits |= uInInterestBits;
#else
        // No lock is used, but updates can happen concurrently, so use an interlocked
        // operation and recompute the new value on every retry
        long prevBits, newBits;
        do
        {
            prevBits = it->m_interestBits;
            newBits  = long(prevBits | uInInterestBits);
        } while ( _InterlockedCompareExchange((volatile long*)&it->m_interestBits, newBits, prevBits) != prevBits );
#endif
        curError = Errors::Success;
    }
    return curError;
}
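// A minimal sketch (not part of the original source) of the general
// compare-exchange read-modify-write pattern used in the lock-free branch
// above: re-read the current value and recompute the desired value on
// every iteration, so a concurrent update never publishes stale bits.
// AtomicOrBits is a hypothetical helper name.
#include <intrin.h>

static long AtomicOrBits(volatile long* target, long bits)
{
    long prev, next;
    do {
        prev = *target;               // snapshot the current value
        next = prev | bits;           // recompute from the snapshot
    } while (_InterlockedCompareExchange(target, next, prev) != prev);
    return next;                      // the value actually stored
}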
// Adds a task set to the work queue
VOID TaskScheduler::AddTaskSet( TASKSETHANDLE hSet, INT iTaskCount )
{
    // Increase the task count before adding the tasks to keep the
    // workers from going to sleep during this process
    _InterlockedExchangeAdd((LONG*)&miTaskCount, iTaskCount);

    // Looks for an open slot starting at the end of the queue
    INT iWriter = miWriter;
    do
    {
        while(mhActiveTaskSets[iWriter] != TASKSETHANDLE_INVALID)
            iWriter = (iWriter + 1) & (MAX_TASKSETS - 1);
        // Verify that another thread hasn't already written to this slot
    } while(_InterlockedCompareExchange((LONG*)&mhActiveTaskSets[iWriter], hSet, TASKSETHANDLE_INVALID) != TASKSETHANDLE_INVALID);

    // Wake up all suspended threads
    LONG sleep_count = 0;
    ReleaseSemaphore(mhTaskAvailable, 1, &sleep_count);
    INT iCountToWake = iTaskCount < (miThreadCount - sleep_count - 1)
                           ? iTaskCount
                           : miThreadCount - sleep_count - 1;
    ReleaseSemaphore(mhTaskAvailable, iCountToWake, &sleep_count);

    // Update the cached end of the queue
    miWriter = iWriter;
}
//178
int sys_spu_thread_group_join(u32 id, mem32_t cause, mem32_t status)
{
    sc_spu.Warning("sys_spu_thread_group_join(id=0x%x, cause_addr=0x%x, status_addr=0x%x)",
        id, cause.GetAddr(), status.GetAddr());

    SpuGroupInfo* group_info;
    if(!Emu.GetIdManager().GetIDData(id, group_info))
    {
        return CELL_ESRCH;
    }

    if (_InterlockedCompareExchange(&group_info->lock, 1, 0)) // acquire lock
    {
        return CELL_EBUSY;
    }

    cause = SYS_SPU_THREAD_GROUP_JOIN_ALL_THREADS_EXIT;
    status = 0; // unspecified because of ALL_THREADS_EXIT

    for(int i = 0; i < g_spu_group_thr_count; i++)
    {
        if(group_info->threads[i])
        {
            while (!group_info->threads[i]->IsStopped()) Sleep(1);
        }
    }

    _InterlockedExchange(&group_info->lock, 0); // release lock
    return CELL_OK;
}
int _pthread_once_raw(pthread_once_t *o, void (*func)(void))
{
    long state = *o;

    _ReadWriteBarrier();

    while (state != 1)
    {
        if (!state)
        {
            if (!_InterlockedCompareExchange(o, 2, 0))
            {
                /* Success */
                func();

                /* Mark as done */
                *o = 1;

                return 0;
            }
        }

        YieldProcessor();
        _ReadWriteBarrier();
        state = *o;
    }

    /* Done */
    return 0;
}
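/*
 * Hypothetical usage sketch for the once-routine above (names are
 * illustrative, not from the source). The control variable starts at 0;
 * the CAS 0 -> 2 elects exactly one initializer, and everyone else spins
 * until it stores 1.
 */
static pthread_once_t g_once; /* zero-initialized == not yet run (assumption) */

static void init_tables(void)
{
    /* runs exactly once, even when many threads race here */
}

void use_tables(void)
{
    _pthread_once_raw(&g_once, init_tables);
    /* tables are guaranteed initialized past this point */
}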
int thread_once(thread_control_t *control, void (*callback)(void))
{
#ifdef ACE_WINDOWS
    int state = (int)(*control);

    _ReadWriteBarrier();

    while (state != 1)
    {
        if ((!state) && (!_InterlockedCompareExchange(control, 2, 0)))
        {
            callback();
            *control = 1;
            return 0;
        }

        YieldProcessor();
        _ReadWriteBarrier();
        state = (int)(*control);
    }

    return 0;
#else
    return pthread_once(control, callback);
#endif
}
/*
 * @implemented
 */
LONG
WINAPI
InterlockedCompareExchange(IN OUT LONG volatile *Destination,
                           IN LONG Exchange,
                           IN LONG Comparand)
{
    return _InterlockedCompareExchange(Destination, Exchange, Comparand);
}
long atomic_compare_exchange(volatile long* value, long new_val, long old_val)
{
#ifdef __GNUC__
    return __sync_val_compare_and_swap(value, old_val, new_val);
#else
    return _InterlockedCompareExchange(value, new_val, old_val);
#endif
}
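// Illustrative (hypothetical) caller for the portable wrapper above: a
// CAS-based saturating increment. The wrapper returns the value the
// location held before the call, so success is "returned == expected".
static long saturating_increment(volatile long* counter, long limit)
{
    for (;;) {
        long observed = *counter;
        if (observed >= limit)
            return observed;                 // already saturated
        if (atomic_compare_exchange(counter, observed + 1, observed) == observed)
            return observed + 1;             // our CAS won
        // otherwise another thread moved the counter; retry
    }
}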
NTKERNELAPI
LONG
WINAPI
InterlockedCompareExchange(
    LONG volatile *Destination,
    LONG Exchange,
    LONG Comparand)
{
    return _InterlockedCompareExchange(Destination, Exchange, Comparand);
}
int atomic_compareAndExchange(volatile int *value, int expected, int newVal)
{
#if LOOM_COMPILER == LOOM_COMPILER_MSVC
    return _InterlockedCompareExchange((volatile long *)value, newVal, expected);
#else
    return __sync_val_compare_and_swap(value, expected, newVal);
#endif
}
UINT32
EFIAPI
InternalSyncCompareExchange32 (
  IN volatile UINT32  *Value,
  IN UINT32           CompareValue,
  IN UINT32           ExchangeValue
  )
{
  return _InterlockedCompareExchange ((volatile long *)Value, ExchangeValue, CompareValue);
}
int32_t Interlocked::cas (int32_t volatile &dest, int32_t excg, int32_t comp)
{
    assert (is_ptr_aligned_nz (&dest));
    return (_InterlockedCompareExchange (
        reinterpret_cast <volatile long *> (&dest), excg, comp));
}
LONG
WINAPI
redirect_InterlockedCompareExchange (
    __inout __drv_interlocked LONG volatile *Destination,
    __in LONG Exchange,
    __in LONG Comparand
    )
{
    return _InterlockedCompareExchange(Destination, Exchange, Comparand);
}
int
pthread_barrier_wait (pthread_barrier_t * barrier)
{
  int result;
  int step;
  pthread_barrier_t b;

  if (barrier == NULL || *barrier == (pthread_barrier_t) PTW32_OBJECT_INVALID)
    {
      return EINVAL;
    }

  b = *barrier;
  step = b->iStep;

  if (0 == InterlockedDecrement ((long *) &(b->nCurrentBarrierHeight)))
    {
      /* Must be done before posting the semaphore. */
      b->nCurrentBarrierHeight = b->nInitialBarrierHeight;

      /*
       * There is no race condition between the semaphore wait and post
       * because we are using two alternating semas and all threads have
       * entered barrier_wait and checked nCurrentBarrierHeight before this
       * barrier's sema can be posted. Any threads that have not quite
       * entered sem_wait below when the multiple_post has completed
       * will nevertheless continue through the semaphore (barrier)
       * and will not be left stranded.
       */
      result = (b->nInitialBarrierHeight > 1
                ? sem_post_multiple (&(b->semBarrierBreeched[step]),
                                     b->nInitialBarrierHeight - 1)
                : 0);
    }
  else
    {
      /*
       * Use the non-cancelable version of sem_wait().
       */
      result = ptw32_semwait (&(b->semBarrierBreeched[step]));
    }

  /*
   * The first thread across will be the PTHREAD_BARRIER_SERIAL_THREAD.
   * This also sets up the alternate semaphore as the next barrier.
   */
  if (0 == result)
    {
      result = (step == _InterlockedCompareExchange (&(b->iStep),
                                                     (1L - step),
                                                     step)
                ? PTHREAD_BARRIER_SERIAL_THREAD
                : 0);
    }

  return (result);
}
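/*
 * Usage sketch for the barrier above, using the standard POSIX API (not
 * code from this file): N workers rendezvous between two phases, and
 * exactly one of them observes PTHREAD_BARRIER_SERIAL_THREAD.
 */
#include <pthread.h>

void *
worker (void *arg)
{
  pthread_barrier_t *barrier = (pthread_barrier_t *) arg;

  /* ... phase 1 work ... */

  int rc = pthread_barrier_wait (barrier);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    {
      /* exactly one thread runs the single-threaded step here */
    }

  /* ... phase 2 work; every thread sees phase 1 complete ... */
  return 0;
}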
int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    while (true)
    {
        LONG value = static_cast<int32_t const volatile&>(*addr);
        if (value == 0)
        {
            return 0;
        }
        if (_InterlockedCompareExchange(reinterpret_cast<LONG*>(addr), value + 1, value) == value)
        {
            return value;
        }
    }
}
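// Sketch of the pattern sk_atomic_conditional_inc supports (an assumed
// usage, not from the source): taking a new reference to a shared object
// only if its reference count has not already dropped to zero.
bool try_acquire_ref(int32_t* refCount)
{
    // The helper returns the pre-increment value; 0 means the object is
    // already dead and must not be revived.
    return sk_atomic_conditional_inc(refCount) != 0;
}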
template< typename T >
KOKKOS_INLINE_FUNCTION
T atomic_compare_exchange(volatile T * const dest, const T & compare,
    typename Kokkos::Impl::enable_if< sizeof(T) == sizeof(LONG), const T & >::type val)
{
    union U {
        LONG i;
        T t;
        KOKKOS_INLINE_FUNCTION U() {};
    } tmp;

    tmp.i = _InterlockedCompareExchange((LONG*)dest, *((LONG*)&val), *((LONG*)&compare));
    return tmp.t;
}
/*
=======================================
    InitOnceExecuteOnce
=======================================
*/
int __stdcall
myInitOnceExecuteOnce (
  __CR_IN__ void*   InitOnce,
  __CR_IN__ void*   InitFn,
  __CR_IN__ void*   Parameter,
  __CR_IN__ void**  Context
    )
{
    init_once_t func = (init_once_t)InitFn;

    if (!_InterlockedCompareExchange((volatile long*)InitOnce, TRUE, FALSE))
        func(InitOnce, Parameter, Context);
    return (TRUE);
}
uint32_t CpuTicks::now()
{
    do {
        uint32_t hiResOk = CpuTicks_hiResOk;

        if (hiResOk == 1)
        {
            LARGE_INTEGER now;
            if (!::QueryPerformanceCounter(&now))
                break;
            return static_cast<uint32_t>(
                static_cast<int64_t>(double(now.QuadPart) / CpuTicks_hiResFreq) & 0xFFFFFFFF);
        }

        if (hiResOk == 0)
        {
            LARGE_INTEGER qpf;
            if (!::QueryPerformanceFrequency(&qpf))
            {
                _InterlockedCompareExchange((LONG*)&CpuTicks_hiResOk, 0xFFFFFFFF, 0);
                break;
            }

            LARGE_INTEGER now;
            if (!::QueryPerformanceCounter(&now))
            {
                _InterlockedCompareExchange((LONG*)&CpuTicks_hiResOk, 0xFFFFFFFF, 0);
                break;
            }

            double freqDouble = double(qpf.QuadPart) / 1000.0;

            CpuTicks_hiResFreq = freqDouble;
            _InterlockedCompareExchange((LONG*)&CpuTicks_hiResOk, 1, 0);

            return static_cast<uint32_t>(
                static_cast<int64_t>(double(now.QuadPart) / freqDouble) & 0xFFFFFFFF);
        }
    } while (0);

    // Fall back to the less precise GetTickCount().
    return ::GetTickCount();
}
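// Illustrative timing usage (an assumption, not from the source). Note
// that racing initializers in now() all publish the same frequency, so
// an unsynchronized first call is benign; the CAS only arbitrates who
// flips CpuTicks_hiResOk.
static uint32_t measure_ms(void (*work)())
{
    uint32_t start = CpuTicks::now();
    work();
    return CpuTicks::now() - start;  // wraps safely via unsigned math
}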
static ETHR_INLINE int
wait(ethr_event *e, int spincount)
{
    LONG state;
    DWORD code;
    int sc, res, until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;

    if (spincount < 0)
        ETHR_FATAL_ERROR__(EINVAL);

    sc = spincount;

    while (1) {
        while (1) {
#if ETHR_READ_AND_SET_WITHOUT_INTERLOCKED_OP__
            state = e->state;
#else
            state = _InterlockedExchangeAdd(&e->state, (LONG) 0);
#endif
            if (state == ETHR_EVENT_ON__)
                return 0;
            if (sc == 0)
                break;
            sc--;
            ETHR_SPIN_BODY;
            if (--until_yield == 0) {
                until_yield = ETHR_YIELD_AFTER_BUSY_LOOPS;
                res = ETHR_YIELD();
                if (res != 0)
                    ETHR_FATAL_ERROR__(res);
            }
        }

        if (state != ETHR_EVENT_OFF_WAITER__) {
            state = _InterlockedCompareExchange(&e->state,
                                                ETHR_EVENT_OFF_WAITER__,
                                                ETHR_EVENT_OFF__);
            if (state == ETHR_EVENT_ON__)
                return 0;
            ETHR_ASSERT(state == ETHR_EVENT_OFF__);
        }

        code = WaitForSingleObject(e->handle, INFINITE);
        if (code != WAIT_OBJECT_0)
            ETHR_FATAL_ERROR__(ethr_win_get_errno__());
    }
}
int
pthread_spin_destroy (pthread_spinlock_t * lock)
{
  register pthread_spinlock_t s;
  int result = 0;

  if (lock == NULL || *lock == NULL)
    return EINVAL;

  if ((s = *lock) != PTHREAD_SPINLOCK_INITIALIZER)
    {
      if (s->interlock == PTW32_SPIN_USE_MUTEX)
        result = pthread_mutex_destroy (&(s->u.mutex));
      else if (PTW32_SPIN_UNLOCKED !=
               _InterlockedCompareExchange (&(s->interlock),
                                            PTW32_OBJECT_INVALID,
                                            PTW32_SPIN_UNLOCKED))
        result = EINVAL;

      if (0 == result)
        {
          /*
           * We are relying on the application to ensure that all other threads
           * have finished with the spinlock before destroying it.
           */
          *lock = NULL;
          (void) free (s);
        }
    }
  else
    {
      /*
       * See notes in ptw32_spinlock_check_need_init() above also.
       */
      EnterCriticalSection (&ptw32_spinlock_test_init_lock);

      /*
       * Check again.
       */
      if (*lock == PTHREAD_SPINLOCK_INITIALIZER)
        {
          /*
           * This is all we need to do to destroy a statically
           * initialised spinlock that has not yet been used (initialised).
           * If we get to here, another thread waiting to initialise this
           * spinlock will get an EINVAL.
           */
          *lock = NULL;
        }
      else
        {
          /*
           * The spinlock has been initialised while we were waiting
           * so assume it's in use.
           */
          result = EBUSY;
        }

      LeaveCriticalSection (&ptw32_spinlock_test_init_lock);
    }

  return (result);
}
bool MSYS_Compare_And_Swap_Long(long * address, const long oldValToCheckFor, const long newValToWrite)
{
    /* Init vars. */
    bool ret = false;    /* The result of this function. */

#ifdef _MSC_FULL_VER
    /* _InterlockedCompareExchange(long volatile * DESTINATION, long EXCHANGE, long COMPARAND) */
    if (_InterlockedCompareExchange(address, newValToWrite, oldValToCheckFor) == oldValToCheckFor)
    {
        ret = true;
    }
#endif /* _MSC_FULL_VER */

    /* Return the result. */
    return ret;
}
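/*
 * Hypothetical caller illustrating the bool-returning wrapper above:
 * a one-shot "claim" flag where only the first thread wins. The names
 * g_claimed and claim_resource are illustrative, not from the source.
 */
static long g_claimed = 0;

bool claim_resource(void)
{
    /* true only for the single thread that flips 0 -> 1 */
    return MSYS_Compare_And_Swap_Long(&g_claimed, 0, 1);
}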
/* _Atomic_compare_exchange_weak_4, _Atomic_compare_exchange_strong_4 */
static int _Compare_exchange_seq_cst_4(volatile _Uint4_t *_Tgt, _Uint4_t *_Exp, _Uint4_t _Value)
{
    /* compare and exchange values atomically with sequentially consistent memory order */
    _Uint1_t res;

    _Uint4_t prev = _InterlockedCompareExchange((volatile long *)_Tgt, _Value, *_Exp);

    if (prev == *_Exp)
        res = 1;
    else
    {
        /* copy old value */
        res = 0;
        *_Exp = prev;
    }

    return (res);
}
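/* Sketch of the caller-visible contract (an assumed example mirroring the
 * C11 atomic_compare_exchange_strong semantics the helper implements):
 * on failure *_Exp is updated to the observed value, so a retry loop
 * never needs a separate reload. fetch_or_4 is a hypothetical name. */
_Uint4_t fetch_or_4(volatile _Uint4_t *tgt, _Uint4_t bits)
{
    _Uint4_t expected = *tgt;
    while (!_Compare_exchange_seq_cst_4(tgt, &expected, expected | bits))
    {
        /* 'expected' now holds the latest observed value; retry */
    }
    return expected;   /* value before our successful update */
}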
// Locks all other processors and returns an exclusivity pointer. This function
// should never be called before the last exclusivity is released.
_Use_decl_annotations_ EXTERN_C void *ExclGainExclusivity()
{
    NT_ASSERT(InterlockedAdd(&g_ExclpNumberOfLockedProcessors, 0) == 0);
    _InterlockedAnd(&g_ExclpReleaseAllProcessors, 0);

    const auto numberOfProcessors = KeQueryActiveProcessorCount(nullptr);

    // Allocate DPCs for all processors.
    auto context = reinterpret_cast<ExclusivityContext *>(ExAllocatePoolWithTag(
        NonPagedPoolNx, sizeof(void *) + (numberOfProcessors * sizeof(KDPC)),
        EXCLP_POOL_TAG));
    if (!context)
    {
        return nullptr;
    }

    // Execute a lock DPC on all processors but this one.
    context->OldIrql = KeRaiseIrqlToDpcLevel();
    const auto currentCpu = KeGetCurrentProcessorNumber();
    for (auto i = 0ul; i < numberOfProcessors; i++)
    {
        if (i == currentCpu)
        {
            continue;
        }

        // Queue a lock DPC.
        KeInitializeDpc(&context->Dpcs[i], ExclpRaiseIrqlAndWaitDpc, nullptr);
        KeSetTargetProcessorDpc(&context->Dpcs[i], static_cast<CCHAR>(i));
        KeInsertQueueDpc(&context->Dpcs[i], nullptr, nullptr);
    }

    // Wait until all other processors have been halted.
    const auto needToBeLocked = numberOfProcessors - 1;
    while (_InterlockedCompareExchange(&g_ExclpNumberOfLockedProcessors,
                                       needToBeLocked, needToBeLocked) !=
           static_cast<LONG>(needToBeLocked))
    {
        KeStallExecutionProcessor(10);
    }

    return context;
}
// Main loop for the worker threads
VOID TaskScheduler::ExecuteTasks()
{
    // Get the ID for the thread
    const UINT iContextId = _InterlockedIncrement((LONG*)&muContextId);

    // Start reading from the beginning of the work queue
    INT iReader = 0;

    // The thread keeps receiving and executing tasks until it is terminated
    while(mbAlive == TRUE)
    {
        // Get a handle from the work queue
        TASKSETHANDLE handle = mhActiveTaskSets[iReader];

        // If there is a TaskSet in the slot, execute a task
        if(handle != TASKSETHANDLE_INVALID)
        {
            TaskMgrSS::TaskSet *pSet = &gTaskMgrSS.mSets[handle];
            if(pSet->muCompletionCount > 0 && pSet->muTaskId >= 0)
            {
                pSet->Execute(iContextId);
            }
            else
            {
                // The set is drained; clear the slot and move on
                _InterlockedCompareExchange((LONG*)&mhActiveTaskSets[iReader], TASKSETHANDLE_INVALID, handle);
                iReader = (iReader + 1) & (MAX_TASKSETS - 1);
            }
        }
        // Otherwise keep looking for work
        else if(miTaskCount > 0)
        {
            iReader = (iReader + 1) & (MAX_TASKSETS - 1);
        }
        // Or sleep if all of the work has been completed
        else
        {
            if(miTaskCount <= 0)
            {
                WaitForSingleObject(mhTaskAvailable, INFINITE);
            }
        }
    }
}
//-------------------------------------------------------------------------
static VOID EnterSpinLock(VOID)
{
    SIZE_T spinCount = 0;

    // Spin until we atomically flip the flag from FALSE to TRUE.
    while (_InterlockedCompareExchange(&g_isLocked, TRUE, FALSE) != FALSE)
    {
        _ReadWriteBarrier();

        // Prevent the loop from being too busy.
        if (spinCount < 16)
            _mm_pause();
        else if (spinCount < 32)
            Sleep(0);
        else
            Sleep(1);

        spinCount++;
    }
}
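//-------------------------------------------------------------------------
// A minimal counterpart sketch (an assumption; the release side is not
// shown in this file). A plain interlocked store is enough to publish
// the unlock, matching the CAS acquire above.
static VOID LeaveSpinLock(VOID)
{
    _InterlockedExchange(&g_isLocked, FALSE);   // full-barrier release
}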
_Use_decl_annotations_ EXTERN_C void ExclReleaseExclusivity(void *Exclusivity)
{
    if (!Exclusivity)
    {
        return;
    }

    // Tell the other processors that they can be unlocked by changing the value.
    _InterlockedIncrement(&g_ExclpReleaseAllProcessors);

    // Wait until all other processors have been unlocked.
    while (_InterlockedCompareExchange(&g_ExclpNumberOfLockedProcessors, 0, 0))
    {
        KeStallExecutionProcessor(10);
    }

    auto context = static_cast<ExclusivityContext *>(Exclusivity);
    KeLowerIrql(context->OldIrql);
    ExFreePoolWithTag(Exclusivity, EXCLP_POOL_TAG);
}
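// Hedged usage sketch pairing ExclGainExclusivity/ExclReleaseExclusivity
// from the routines above; the function name and the critical work are
// illustrative, not from the source.
void PatchWhileOtherProcessorsSpin()
{
    void *exclusivity = ExclGainExclusivity();
    if (!exclusivity)
    {
        return;   // allocation failed; nothing was locked
    }

    // All other processors are spinning at DPC level here.
    // ... perform the code patch / global state change ...

    ExclReleaseExclusivity(exclusivity);
}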
// Yields the main thread to the scheduler when it needs to wait for a Task Set to be completed
VOID TaskScheduler::WaitForFlag( volatile BOOL *pFlag )
{
    // Start at the end of the work queue
    int iReader = miWriter;

    // The condition for exiting this loop is changed externally to the function,
    // possibly in another thread. The loop will break with no more than one task
    // being executed, returning the main thread as soon as possible.
    while(*pFlag == FALSE)
    {
        TASKSETHANDLE handle = mhActiveTaskSets[iReader];

        if(handle != TASKSETHANDLE_INVALID)
        {
            TaskMgrSS::TaskSet *pSet = &gTaskMgrSS.mSets[handle];
            if(pSet->muCompletionCount > 0 && pSet->muTaskId >= 0)
            {
                // The context ID for the main thread is 0.
                pSet->Execute(0);
            }
            else
            {
                _InterlockedCompareExchange((LONG*)&mhActiveTaskSets[iReader], TASKSETHANDLE_INVALID, handle);
                iReader = (iReader + 1) & (MAX_TASKSETS - 1);
            }
        }
        else if(miTaskCount > 0)
        {
            iReader = (iReader + 1) & (MAX_TASKSETS - 1);
        }
        else
        {
            // Worker threads get suspended, but the main thread needs to stay alert,
            // so it spins until the condition is met or more work is added.
            while(miTaskCount == 0 && *pFlag == FALSE);
        }
    }
}
// Locks this processor until g_ExclpReleaseAllProcessors becomes 1.
_Use_decl_annotations_ EXTERN_C static void ExclpRaiseIrqlAndWaitDpc(
    PKDPC Dpc, PVOID DeferredContext, PVOID SystemArgument1,
    PVOID SystemArgument2)
{
    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(DeferredContext);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);

    // Increase the number of locked processors.
    _InterlockedIncrement(&g_ExclpNumberOfLockedProcessors);

    // Wait until g_ExclpReleaseAllProcessors becomes 1.
    while (!_InterlockedCompareExchange(&g_ExclpReleaseAllProcessors, 1, 1))
    {
        KeStallExecutionProcessor(10);
    }

    // Decrease the number of locked processors.
    _InterlockedDecrement(&g_ExclpNumberOfLockedProcessors);
}
static inline int32_t fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
{
    return _InterlockedCompareExchange((volatile long *)ptr, desired, expected);
}
static inline int32_t fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired)
{
    return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), desired, expected);
}
bool btSpinMutex::tryLock()
{
    volatile long* aDest = reinterpret_cast<long*>(&mLock);
    return (0 == _InterlockedCompareExchange(aDest, 1, 0));
}
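// Sketch (an assumption, assuming btSpinMutex also declares lock() and
// unlock()) of the blocking companions to tryLock() above, in the same
// CAS-on-a-long style; the real implementations may differ.
void btSpinMutex::lock()
{
    while (!tryLock())
    {
        // spin; a pause instruction here would reduce bus traffic
    }
}

void btSpinMutex::unlock()
{
    // interlocked store publishes the release with a full barrier
    _InterlockedExchange(reinterpret_cast<volatile long*>(&mLock), 0);
}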