/* _Atomic_fetch_or_8 */
/* NOTE(review): despite the name, _Uint8_t is presumably an 8-BYTE (64-bit)
   unsigned type -- both the cmpxchg8b sequence and the _InterlockedOr64 path
   operate on eight bytes; confirm against the typedef elsewhere in the file. */
_Uint8_t _Fetch_or_8(volatile _Uint8_t *_Tgt, _Uint8_t _Value)
	{	/* or _Value with *_Tgt atomically with sequentially consistent memory order */
	/* Returns the value *_Tgt held BEFORE the OR (fetch semantics): on the
	   32-bit path edx:eax holds the matched comparand after a successful
	   cmpxchg8b, and _InterlockedOr64 likewise returns the original value. */
 #if _MS_32
	/* x86-32 has no 64-bit interlocked OR instruction, so emulate one with a
	   lock cmpxchg8b compare-and-swap retry loop.  The compiler barriers keep
	   surrounding memory accesses from being reordered across the operation;
	   the lock prefix provides the hardware ordering. */
	_Compiler_barrier();
	__asm
		{
		mov esi, _Tgt;				/* esi = address of the target */
		mov eax, [esi];				/* edx:eax = current 64-bit value (expected) */
		mov edx, 4[esi];
	again:
		mov ecx, edx;				/* ecx:ebx = copy of the expected value */
		mov ebx, eax;
		or ebx, dword ptr _Value;	/* ecx:ebx |= _Value (the desired new value) */
		or ecx, dword ptr _Value[4];
		lock cmpxchg8b [esi];		/* if [esi] == edx:eax store ecx:ebx,
									   else reload edx:eax from [esi] */
		jnz again;					/* lost a race with another writer: retry
									   with the freshly loaded expected value */
		mov dword ptr _Value, eax;	/* reuse _Value to carry the OLD value out */
		mov dword ptr _Value[4], edx;
		}
	_Compiler_barrier();
 #elif _MS_64
	/* x64: the interlocked-OR intrinsic already returns the prior value. */
	_Value = _InterlockedOr64((volatile _LONGLONG *)_Tgt, _Value);
 #endif /* _MS_32 */
	return (_Value);
	}
/* _Atomic_load_8 */
static _Uint8_t _Load_seq_cst_8(volatile _Uint8_t *_Tgt)
	{	/* load from *_Tgt atomically with sequentially consistent memory order */
	_Uint8_t _Value;

 #if _MS_32
	/* A plain 64-bit load is not atomic on x86-32, so the load is done with a
	   single lock cmpxchg8b.  The comparand edx:eax is whatever garbage those
	   registers happen to contain; ecx:ebx is made EQUAL to it, so:
	     - if the compare happens to succeed, the store writes back the value
	       already there (no visible change), and edx:eax == that value;
	     - if it fails, cmpxchg8b loads the current contents into edx:eax.
	   Either way edx:eax ends up holding the current 64-bit value of *_Tgt,
	   read atomically. */
	_Compiler_barrier();
	__asm
		{
		mov esi, _Tgt;				/* esi = address of the target */
		mov ecx, edx;				/* make replacement ecx:ebx == comparand edx:eax */
		mov ebx, eax;
		lock cmpxchg8b [esi];		/* atomically read (and possibly rewrite) *_Tgt */
		mov dword ptr _Value[4], edx;	/* edx:eax now holds the loaded value */
		mov dword ptr _Value, eax;
		}
	_Compiler_barrier();
 #elif _MS_64
	/* x64: OR with 0 leaves the value unchanged but performs a fully fenced
	   atomic read-modify-write, giving a sequentially consistent load. */
	_Value = _InterlockedOr64((volatile _LONGLONG *)_Tgt, 0);
 #endif /* _MS_32 */
	return (_Value);
	}
/* InterlockedOr64: exported __stdcall shim over the _InterlockedOr64 compiler
   intrinsic.  Atomically ORs Value into *Destination and returns the value
   *Destination held before the operation. */
__int64 __stdcall InterlockedOr64(__int64 volatile *Destination, __int64 Value)
	{
	__int64 _Prev;

	_Prev = _InterlockedOr64(Destination, Value);
	return (_Prev);
	}