/* _Atomic_fetch_and_2 */
_Uint2_t _Fetch_and_seq_cst_2(volatile _Uint2_t *_Tgt, _Uint2_t _Value)
	{	/* atomically AND _Value into *_Tgt with sequentially consistent
		   memory order; returns the previous value of *_Tgt */
	return ((_Uint2_t)_InterlockedAnd16((volatile short *)_Tgt, (short)_Value));
	}
//	acquire a shared read lock on the latch.
//	Spins (yielding the CPU each retry) until the lock is granted:
//	first grabs the latch's Mutex bit, then, if no exclusive holder
//	or exclusive request is pending, bumps the Share count.
//	NOTE(review): assumes the Mutex/Share/exclusive/pending bit fields
//	all live in the first ushort of *latch — confirm BtLatch layout.

void bt_readlock(BtLatch *latch)
{
ushort prev;

  do {
	// obtain latch mutex bit; retry if another thread holds it
#ifdef unix
	if( __sync_fetch_and_or((ushort *)latch, Mutex) & Mutex )
		continue;
#else
	if( (prev = _InterlockedOr16((ushort *)latch, Mutex) & Mutex) )
		continue;
#endif
	//  see if exclusive request is granted or pending;
	//	prev != 0 means the shared lock may be taken now
	if( (prev = !(latch->exclusive | latch->pending)) )
#ifdef unix
		__sync_fetch_and_add((ushort *)latch, Share);
#else
		_InterlockedExchangeAdd16 ((ushort *)latch, Share);
#endif
	// release the latch mutex bit
#ifdef unix
	__sync_fetch_and_and ((ushort *)latch, ~Mutex);
#else
	_InterlockedAnd16((ushort *)latch, ~Mutex);
#endif
	if( prev )
		return;
	// lock not granted: yield and spin again
#ifdef unix
  } while( sched_yield(), 1 );
#else
  } while( SwitchToThread(), 1 );
#endif
}
/* thin wrapper exercising the _InterlockedAnd16 intrinsic:
   atomically ANDs mask into *value, returning the prior contents */
short test_InterlockedAnd16(short volatile *value, short mask)
{
	short previous = _InterlockedAnd16(value, mask);
	return previous;
}
/* Atomically AND mask into *var; returns the previous value of *var.
   (Parameter renamed from the misleading `add` — the intrinsic performs
   a bitwise AND, not an addition; C callers are unaffected by the rename.) */
Int16 KInterlockedAnd16(Int16 volatile* var, Int16 mask)
{
	return _InterlockedAnd16(var, mask);
}