// Two-phase spin barrier for the (up to) 4 hardware threads of one core.
// threadState[m][tid] holds one byte per thread; 'mode' ping-pongs between
// two state arrays so back-to-back barriers cannot interfere with each other.
// The 0x01010101 compare and the 32-bit store below rely on the four
// per-thread bytes being contiguous and word-aligned.
void LockStepTaskScheduler4ThreadsLocalCore::syncThreads(const size_t localThreadID) {
  const unsigned int m = mode;
  if (localThreadID == 0) {
    __memory_barrier();
    threadState[m][localThreadID] = 1;  // mark thread 0 as arrived
    __memory_barrier();
    // Gather phase: spin until all four per-thread bytes read 1, i.e. the
    // whole 32-bit word equals 0x01010101 — every thread has arrived.
    while( (*(volatile unsigned int*)&threadState[m][0]) != 0x01010101 )
      __pause(WAIT_CYCLES);
    mode = 1 - mode;  // flip to the other state array for the next barrier
    __memory_barrier();
    // Release phase: clear all four bytes with a single 32-bit store; each
    // waiting thread sees its own byte drop to 0 and proceeds.
    *(volatile unsigned int*)&threadState[m][0] = 0;
  } else {
    __memory_barrier();
    threadState[m][localThreadID] = 1;  // mark this thread as arrived
    __memory_barrier();
    // Spin until thread 0 clears our byte, signalling the barrier is open.
    // NOTE(review): this poll is a plain (non-volatile) member read — it
    // appears to rely on __memory_barrier() preventing the compiler from
    // hoisting the load out of the loop; confirm that guarantee holds.
    while (threadState[m][localThreadID] == 1)
      __pause(WAIT_CYCLES);
  }
}
/*
 * Demo driver for the linked-list stack: push three values, show the
 * stack and its size, pop one, and show the result again.  __pause()
 * waits for the user between the two displays.
 *
 * Fix: return 0 on success — the original returned 1, which the shell
 * interprets as failure even though the program ran to completion.
 */
int main(void) {
    Stack *top = NULL;                      /* empty stack */

    system("clear");                        /* clear terminal; POSIX-only */

    _push(&top, 1);
    _push(&top, 2);
    _push(&top, 3);

    printf("\n\n %i \n\n", sizeofStack(top));
    printStack(top);
    __pause();

    _pop(&top);
    printStack(top);
    printf("\n\n %i \n\n", sizeofStack(top));
    __pause();

    return 0;
}
// Acquire the spinlock using the test-and-test-and-set pattern: first spin
// on a plain read of 'flag' (keeps the cache line in shared state while the
// lock is contended), and only attempt the expensive atomic exchange once
// the lock looks free.
void MutexActive::lock () {
  while (1) {
    while (flag == 1) __pause(1023); // read without atomic op first
    // Presumably cmpxchg(dst, newVal, cmpVal) returns the previous value of
    // 'dst' — so a result of 0 means we installed the 1 and now own the
    // lock; otherwise another thread beat us and we go back to spinning.
    // TODO(review): confirm against the cmpxchg definition.
    if (cmpxchg(flag, 1, 0) == 0) break;
  }
  __memory_barrier(); // compiler must not schedule loads and stores around this point
}
/*
 * Cancellation-point wrapper for pause(2): mark the thread as being in a
 * cancellation point for the duration of the blocking __pause() call, then
 * leave the cancellation point and hand back the syscall's result.
 */
int
_pause(void)
{
	int result;

	_thread_enter_cancellation_point();
	result = __pause();
	_thread_leave_cancellation_point();

	return (result);
}
/*
 * pause(2) wrapper for the threading library: the blocking __pause() call
 * is bracketed by _thr_cancel_enter()/_thr_cancel_leave() so that a
 * cancellation request targeting this thread can take effect while it
 * sleeps.  (The final argument 1 presumably tells _thr_cancel_leave to act
 * on a pending cancel — confirm against its definition.)
 */
int
_pause(void)
{
	struct pthread *self = _get_curthread();
	int result;

	_thr_cancel_enter(self);
	result = __pause();
	_thr_cancel_leave(self, 1);

	return (result);
}
/* Kernel-facing entry point: delegates directly to the low-level __pause(). */
void
kernel_pause(void)
{
	__pause();
}
/// Busy-wait hint for spin loops: forwards to the project's __pause() helper.
static inline void spinPause()
{
    __pause();
}