// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

void
runtime_lock(Lock *l)
{
	uint32 i, v, wait, spin;

	if(runtime_m()->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	v = runtime_xchg(&l->key, MUTEX_LOCKED);
	if(v == MUTEX_UNLOCKED)
		return;

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait = v;

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(;;) {
		// Try for lock, spinning.
		for(i = 0; i < spin; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_procyield(ACTIVE_SPIN_CNT);
		}

		// Try for lock, rescheduling.
		for(i = 0; i < PASSIVE_SPIN; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_osyield();
		}

		// Sleep.
		v = runtime_xchg(&l->key, MUTEX_SLEEPING);
		if(v == MUTEX_UNLOCKED)
			return;
		wait = MUTEX_SLEEPING;
		runtime_futexsleep(&l->key, MUTEX_SLEEPING, -1);
	}
}
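For comparison, here is a minimal standalone sketch of the same three-state futex mutex, written against C11 atomics and the raw Linux futex syscall. It is not the runtime's code: the names SketchLock, sketch_lock and sketch_unlock are made up, and the spin count is arbitrary. The unlock side shows the obligation the MUTEX_SLEEPING comment above protects: whenever the old state was SLEEPING, the releaser must issue a futex wake.

#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

enum { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

typedef struct { _Atomic uint32_t key; } SketchLock;

static long futex(_Atomic uint32_t *addr, int op, uint32_t val)
{
	return syscall(SYS_futex, addr, op, val, NULL, NULL, 0);
}

static void sketch_lock(SketchLock *l)
{
	// Speculative grab, as in runtime_lock.
	uint32_t v = atomic_exchange(&l->key, LOCKED);
	if(v == UNLOCKED)
		return;
	// wait is LOCKED or SLEEPING: once a sleeper may exist, every value
	// we store back must be SLEEPING so its wakeup cannot be lost.
	uint32_t wait = v;
	for(;;) {
		// Brief spin: try to take the lock while it looks free.
		for(int i = 0; i < 4; i++) {
			uint32_t expect = UNLOCKED;
			if(atomic_compare_exchange_strong(&l->key, &expect, wait))
				return;
		}
		// Announce that we are going to sleep, then sleep until the
		// word changes away from SLEEPING.
		v = atomic_exchange(&l->key, SLEEPING);
		if(v == UNLOCKED)
			return;
		wait = SLEEPING;
		futex(&l->key, FUTEX_WAIT, SLEEPING);
	}
}

static void sketch_unlock(SketchLock *l)
{
	uint32_t v = atomic_exchange(&l->key, UNLOCKED);
	// If the previous state was SLEEPING, at least one thread may be
	// parked in FUTEX_WAIT and must be woken.
	if(v == SLEEPING)
		futex(&l->key, FUTEX_WAKE, 1);
}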
void
runtime_stoptheworld(void)
{
	uint32 v;

	schedlock();
	runtime_gcwaiting = 1;

	setmcpumax(1);

	// while mcpu > 1
	for(;;) {
		v = runtime_sched.atomic;
		if(atomic_mcpu(v) <= 1)
			break;

		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection.
		runtime_noteclear(&runtime_sched.stopped);
		if(atomic_waitstop(v))
			runtime_throw("invalid waitstop");

		// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
		// still being true.
		if(!runtime_cas(&runtime_sched.atomic, v, v+(1<<waitstopShift)))
			continue;

		schedunlock();
		runtime_notesleep(&runtime_sched.stopped);
		schedlock();
	}
	runtime_singleproc = runtime_gomaxprocs == 1;
	schedunlock();
}
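The waitstop step above follows a general pattern: a "stopper is waiting" flag is set with a CAS that is predicated on the very value that was just checked, so a worker cannot slip out in between and leave the stopper asleep forever. Below is a minimal sketch of that handshake under assumed names (sched_word, WAITSTOP, a pthread-based stand-in for the runtime's note); it is not the scheduler's code, only the shape of the protocol.

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>

#define WAITSTOP  (1u << 31)          // stopper parked, needs a wakeup
#define COUNT(v)  ((v) & ~WAITSTOP)   // low bits: number of running workers

static _Atomic uint32_t sched_word;
static pthread_mutex_t note_mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  note_cv = PTHREAD_COND_INITIALIZER;
static bool note_set;

// Stopper: wait until at most one worker is running.
static void stop_the_world_sketch(void)
{
	for(;;) {
		uint32_t v = atomic_load(&sched_word);
		if(COUNT(v) <= 1)
			break;
		// Set WAITSTOP only if the word (and hence the count we just
		// inspected) is unchanged; otherwise re-examine.
		if(!atomic_compare_exchange_strong(&sched_word, &v, v | WAITSTOP))
			continue;
		pthread_mutex_lock(&note_mu);
		while(!note_set)
			pthread_cond_wait(&note_cv, &note_mu);
		note_set = false;
		pthread_mutex_unlock(&note_mu);
	}
}

// Worker: decrement the count; wake the stopper if we are the last one out.
static void worker_exit_sketch(void)
{
	for(;;) {
		uint32_t v = atomic_load(&sched_word);
		uint32_t w = (COUNT(v) - 1) | (v & WAITSTOP);
		if(COUNT(w) <= 1 && (v & WAITSTOP))
			w &= ~WAITSTOP;   // we will deliver the wakeup ourselves
		if(!atomic_compare_exchange_strong(&sched_word, &v, w))
			continue;
		if((v & WAITSTOP) && !(w & WAITSTOP)) {
			pthread_mutex_lock(&note_mu);
			note_set = true;
			pthread_cond_signal(&note_cv);
			pthread_mutex_unlock(&note_mu);
		}
		return;
	}
}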
// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			runtime_unlock(h);
			n += runtime_MSpan_Sweep(s);
			runtime_lock(h);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by the background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}
	return n;
}
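The sweepgen tests above follow the runtime's span-sweeping convention: relative to the heap's current sweepgen sg, a span whose own sweepgen is sg-2 still needs sweeping, sg-1 means someone is sweeping it right now, and sg means it has already been swept this cycle. The CAS from sg-2 to sg-1 is what guarantees each span is swept by exactly one thread. A tiny sketch of that claim step, with made-up names:

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct {
	_Atomic uint32_t sweepgen;
	// ... span payload elided ...
} SpanSketch;

// Try to claim s for sweeping against the heap's current generation sg.
// Exactly one caller can win the sg-2 -> sg-1 transition.
static bool claim_for_sweep(SpanSketch *s, uint32_t sg)
{
	uint32_t want = sg - 2;
	return atomic_compare_exchange_strong(&s->sweepgen, &want, sg - 1);
}

// Called by the winner once sweeping is finished: publish "swept".
static void finish_sweep(SpanSketch *s, uint32_t sg)
{
	atomic_store(&s->sweepgen, sg);
}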
// Try to increment mcpu. Report whether succeeded.
static bool
canaddmcpu(void)
{
	uint32 v;

	for(;;) {
		v = runtime_sched.atomic;
		if(atomic_mcpu(v) >= atomic_mcpumax(v))
			return 0;
		if(runtime_cas(&runtime_sched.atomic, v, v+(1<<mcpuShift)))
			return 1;
	}
}
void
setmcpumax(uint32 n)
{
	uint32 v, w;

	for(;;) {
		v = runtime_sched.atomic;
		w = v;
		w &= ~(mcpuMask<<mcpumaxShift);
		w |= n<<mcpumaxShift;
		if(runtime_cas(&runtime_sched.atomic, v, w))
			break;
	}
}
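canaddmcpu and setmcpumax both operate on a single packed atomic word, so one CAS can update one field while every other field is simply reread and carried along unchanged. A small sketch of that mask/shift scheme follows, with illustrative constants and names (try_add_cpu, set_cpumax); the real field layout and widths are not reproduced here.

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

enum {
	fieldBits   = 15,
	fieldMask   = (1 << fieldBits) - 1,
	cpuShift    = 0,          // current count
	cpumaxShift = fieldBits,  // configured limit
};

static _Atomic uint32_t sched_atomic;

static uint32_t get_cpu(uint32_t v)    { return (v >> cpuShift) & fieldMask; }
static uint32_t get_cpumax(uint32_t v) { return (v >> cpumaxShift) & fieldMask; }

// Bounded increment: succeed only while count < limit, as in canaddmcpu.
static bool try_add_cpu(void)
{
	for(;;) {
		uint32_t v = atomic_load(&sched_atomic);
		if(get_cpu(v) >= get_cpumax(v))
			return false;
		if(atomic_compare_exchange_strong(&sched_atomic, &v,
						  v + (1u << cpuShift)))
			return true;
	}
}

// Field store: clear the limit field and OR in the new value, as in setmcpumax.
static void set_cpumax(uint32_t n)
{
	for(;;) {
		uint32_t v = atomic_load(&sched_atomic);
		uint32_t w = (v & ~((uint32_t)fieldMask << cpumaxShift))
			   | (n << cpumaxShift);
		if(atomic_compare_exchange_strong(&sched_atomic, &v, w))
			return;
	}
}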
void
syscall_cgocall ()
{
  M* m;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  runtime_lockOSThread();
  m = runtime_m ();
  ++m->ncgocall;
  ++m->ncgo;
  runtime_entersyscall (0);
}
void
syscall_cgocall ()
{
  M* m;
  G* g;

  if (runtime_needextram && runtime_cas (&runtime_needextram, 1, 0))
    runtime_newextram ();

  m = runtime_m ();
  ++m->ncgocall;
  g = runtime_g ();
  ++g->ncgo;
  runtime_entersyscall ();
}
// Allocate a list of objects from the central free list.
// Return the number of objects allocated.
// The objects are linked together by their first words.
// On return, *pfirst points at the first object.
int32
runtime_MCentral_AllocList(MCentral *c, MLink **pfirst)
{
	MSpan *s;
	int32 cap, n;
	uint32 sg;

	runtime_lock(c);
	sg = runtime_mheap.sweepgen;
retry:
	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_unlock(c);
			runtime_MSpan_Sweep(s);
			runtime_lock(c);
			// the span could have been moved to heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		goto havespan;
	}

	for(s = c->empty.next; s != &c->empty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(&c->empty, s);
			runtime_unlock(c);
			runtime_MSpan_Sweep(s);
			runtime_lock(c);
			// the span could be moved to nonempty or heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}

	// Replenish central list if empty.
	if(!MCentral_Grow(c)) {
		runtime_unlock(c);
		*pfirst = nil;
		return 0;
	}
	s = c->nonempty.next;

havespan:
	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	*pfirst = s->freelist;
	s->freelist = nil;
	s->ref += n;
	c->nfree -= n;
	runtime_MSpanList_Remove(s);
	runtime_MSpanList_InsertBack(&c->empty, s);
	runtime_unlock(c);
	return n;
}
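To make the havespan arithmetic concrete with assumed numbers: if pages are 8 KB and a one-page span holds 64-byte objects, then cap = 8192/64 = 128 object slots. With s->ref == 40 objects already accounted for, the caller receives the remaining n = 88 free objects as one linked list through *pfirst, s->ref jumps to cap, and the span is moved to the empty list because it has no free objects left to hand out.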