void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i < spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i < spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}
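// For context, a minimal sketch of the matching unlock path (not part of
// the excerpt above; it assumes the same LOCKED flag, the waiter list
// chained through m->nextwaitm, and a runtime_semawakeup helper that posts
// the target M's semaphore). It shows how an unlocker either clears the
// key outright or dequeues one waiting M and grants it the semaphore; the
// woken M then retries acquisition in the loop of runtime_lock above.
void
runtime_unlock(Lock *l)
{
	M *m;
	M *mp;
	uintptr v;

	m = runtime_m();
	if(--m->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			// No waiters: just release the lock.
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue the head of the list and wake it.
			mp = (M*)(v&~LOCKED);
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}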
void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued. Sleep.
	if(m->profilehz > 0)
		runtime_setprof(false);
	runtime_semasleep(-1);
	if(m->profilehz > 0)
		runtime_setprof(true);
}
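// The other side of the handshake on n->key, sketched for reference (again
// not part of the excerpt; it assumes the same LOCKED sentinel and the
// runtime_semawakeup helper). A wakeup atomically swings n->key to LOCKED;
// if an M had already registered itself, it is granted the semaphore.
void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp((void**)&n->key);
	while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

	// Successfully set n->key to LOCKED. What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime_semawakeup(mp);
	}
}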
void
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// Register for wakeup on n->key.
	if(!runtime_casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}

	if(m->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime_setprof(true);
			return;
		}

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived. Keep sleeping.
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime_setprof(true);

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
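// Usage sketch: a Note is one-shot. The lifecycle implied by the checks
// above (an assumption about the surrounding runtime, not shown in this
// excerpt) is: clear the note, then exactly one notesleep/notetsleep
// paired with exactly one notewakeup; reusing the note requires clearing
// it again. A minimal clear consistent with the nil CAS above:
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}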