void
runtime·notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(runtime·atomicload(&n->key) != 0)
		return;

	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		runtime·futexsleep(&n->key, 0, ns);
		if(runtime·atomicload(&n->key) != 0)
			break;
		now = runtime·nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime·setprof(true);
}
void
runtime·notesleep(Note *n)
{
	if(m->profilehz > 0)
		runtime·setprof(false);
	while(runtime·atomicload(&n->key) == 0)
		runtime·futexsleep(&n->key, 0, -1);
	if(m->profilehz > 0)
		runtime·setprof(true);
}
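Both sleep paths above spin until n->key becomes nonzero, so the wakeup side has to publish a nonzero key and then kick the futex. A minimal sketch of that counterpart, assuming a runtime·futexwakeup(addr, cnt) helper (not shown in this excerpt) that wakes up to cnt threads parked on the address:

// Sketch only: the wakeup implied by the sleep loops above.
// runtime·futexwakeup is an assumed helper that wakes up to
// the given number of threads parked on the address.
void
runtime·notewakeup(Note *n)
{
	// Mark the note as signaled; a second wakeup is a bug.
	if(runtime·xchg(&n->key, 1))
		runtime·throw("notewakeup - double wakeup");
	runtime·futexwakeup(&n->key, 1);
}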
// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect the mutex's state.
void
runtime·lock(Lock *l)
{
	uint32 i, v, wait, spin;

	if(m->locks++ < 0)
		runtime·throw("runtime·lock: lock count");

	// Speculative grab for lock.
	v = runtime·xchg(&l->key, MUTEX_LOCKED);
	if(v == MUTEX_UNLOCKED)
		return;

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex.  If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait = v;

	// On uniprocessors, there is no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime·ncpu > 1)
		spin = ACTIVE_SPIN;

	for(;;) {
		// Try for lock, spinning.
		for(i = 0; i < spin; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime·cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime·procyield(ACTIVE_SPIN_CNT);
		}

		// Try for lock, rescheduling.
		for(i = 0; i < PASSIVE_SPIN; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime·cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime·osyield();
		}

		// Sleep.
		v = runtime·xchg(&l->key, MUTEX_SLEEPING);
		if(v == MUTEX_UNLOCKED)
			return;
		wait = MUTEX_SLEEPING;
		runtime·futexsleep(&l->key, MUTEX_SLEEPING, -1);
	}
}
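The comment on wait spells out the unlock contract: whichever thread releases the lock and finds MUTEX_SLEEPING in the key must issue a futex wakeup so a sleeper can retry. A sketch of that counterpart, under the same assumption about a runtime·futexwakeup helper:

// Sketch only: release the mutex and honor the wakeup contract
// described in runtime·lock above.
void
runtime·unlock(Lock *l)
{
	uint32 v;

	if(--m->locks < 0)
		runtime·throw("runtime·unlock: lock count");

	// Hand back the lock; the old value tells us whether anyone may be asleep.
	v = runtime·xchg(&l->key, MUTEX_UNLOCKED);
	if(v == MUTEX_UNLOCKED)
		runtime·throw("unlock of unlocked lock");
	if(v == MUTEX_SLEEPING)
		runtime·futexwakeup(&l->key, 1);	// assumed helper, wakes one waiter
}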
static void
futexlock(Lock *l)
{
	uint32 v;

again:
	v = l->key;
	if((v&1) == 0){
		if(cas(&l->key, v, v|1)){
			// Lock wasn't held; we grabbed it.
			return;
		}
		goto again;
	}

	// Lock was held; try to add ourselves to the waiter count.
	if(!cas(&l->key, v, v+2))
		goto again;

	// We're accounted for, now sleep in the kernel.
	//
	// We avoid the obvious lock/unlock race because
	// the kernel won't put us to sleep if l->key has
	// changed underfoot and is no longer v+2.
	//
	// We only really care that (v&1) == 1 (the lock is held),
	// and in fact there is a futex variant that could
	// accommodate that check, but let's not get carried away.
	futexsleep(&l->key, v+2);

	// We're awake: remove ourselves from the count.
	for(;;){
		v = l->key;
		if(v < 2)
			throw("bad lock key");
		if(cas(&l->key, v, v-2))
			break;
	}

	// Try for the lock again.
	goto again;
}
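In this older scheme bit 0 of the key is the held flag and the rest of the word counts waiters, so the matching unlock just clears bit 0 and, if the count is nonzero, wakes someone up to retry. A sketch, assuming a futexwakeup(addr) helper that wakes a single waiter:

// Sketch only: counterpart to futexlock under the bit-0/waiter-count layout.
static void
futexunlock(Lock *l)
{
	uint32 v;

again:
	v = l->key;
	if((v&1) == 0)
		throw("unlock of unlocked lock");
	// Clear the held bit, leaving the waiter count untouched.
	if(!cas(&l->key, v, v&~1))
		goto again;

	// If the count says someone is asleep (or about to be), wake one.
	if(v>>1 != 0)
		futexwakeup(&l->key);	// assumed helper, wakes one waiter
}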
void
runtime·notesleep(Note *n)
{
	while(runtime·atomicload(&n->state) == 0)
		futexsleep(&n->state, 0);
}
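The same pattern applies to this older, state-based Note: the wakeup side stores a nonzero state and then wakes the futex so the loop above falls through. A very small sketch, again assuming an unprefixed futexwakeup helper to match the futexsleep call here:

// Sketch only: wakeup for the state-based Note variant above.
void
runtime·notewakeup(Note *n)
{
	runtime·xchg(&n->state, 1);	// publish the signal
	futexwakeup(&n->state);		// assumed helper, wakes a waiter
}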