void
runtime·notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(runtime·atomicload(&n->key) != 0)
		return;

	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		runtime·futexsleep(&n->key, 0, ns);
		if(runtime·atomicload(&n->key) != 0)
			break;
		now = runtime·nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime·setprof(true);
}
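The loop above tolerates spurious futex wakeups by recomputing the remaining sleep from a fixed deadline. Below is a minimal standalone sketch of the same idea against the raw Linux futex syscall, assuming GCC atomics; note_t, note_tsleep, and nanotime are illustrative names, not the runtime's.

#include <stdint.h>
#include <time.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct { uint32_t key; } note_t;	// key: 0 = not yet signaled

static int64_t
nanotime(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec*1000000000 + ts.tv_nsec;
}

// Sleep until the note is signaled or ns nanoseconds elapse, tolerating
// spurious futex wakeups by recomputing the remaining time each iteration.
static void
note_tsleep(note_t *n, int64_t ns)
{
	int64_t deadline, now;
	struct timespec ts;

	deadline = nanotime() + ns;
	while(__atomic_load_n(&n->key, __ATOMIC_SEQ_CST) == 0) {
		ts.tv_sec = ns/1000000000;
		ts.tv_nsec = ns%1000000000;
		syscall(SYS_futex, &n->key, FUTEX_WAIT, 0, &ts, NULL, 0);
		now = nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
}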
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
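The fast path above works because sched.atomic packs the scheduler's counters and flags into one word, so a single xadd can decrement mcpu and hand back the flag bits to test. Here is a sketch of a plausible layout and the accessor macros, assuming the 15-bit field widths of the Go 1.0-era proc.c; the exact widths and bit order are an assumption here, not taken from the code shown.

// Sketch of a packed scheduler word: two 15-bit counters plus two flag bits,
// so one atomic add can adjust mcpu and observe the flags in the same shot.
// Field widths and order are assumed, not verified against the real proc.c.
enum {
	mcpuWidth     = 15,
	mcpuMask      = (1<<mcpuWidth) - 1,
	mcpuShift     = 0,				// m's currently running Go code
	mcpumaxShift  = mcpuShift + mcpuWidth,		// current GOMAXPROCS limit
	waitstopShift = mcpumaxShift + mcpuWidth,	// stoptheworld is waiting
	gwaitingShift = waitstopShift + 1,		// global g queue is non-empty
};

#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)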
void
runtime·notesleep(Note *n)
{
	if(m->profilehz > 0)
		runtime·setprof(false);
	while(runtime·atomicload(&n->key) == 0)
		runtime·futexsleep(&n->key, 0, -1);
	if(m->profilehz > 0)
		runtime·setprof(true);
}
Func*
runtime·findfunc(uintptr addr)
{
	Func *f;
	int32 nf, n;

	// Use atomic double-checked locking,
	// because when called from pprof signal
	// handler, findfunc must run without
	// grabbing any locks.
	// (Before enabling the signal handler,
	// SetCPUProfileRate calls findfunc to trigger
	// the initialization outside the handler.)
	// Avoid deadlock on fault during malloc
	// by not calling buildfuncs if we're already in malloc.
	if(!m->mallocing && !m->gcing) {
		if(runtime·atomicload(&funcinit) == 0) {
			runtime·lock(&funclock);
			if(funcinit == 0) {
				buildfuncs();
				runtime·atomicstore(&funcinit, 1);
			}
			runtime·unlock(&funclock);
		}
	}

	if(nfunc == 0)
		return nil;
	if(addr < func[0].entry || addr >= func[nfunc].entry)
		return nil;

	// binary search to find func with entry <= addr.
	f = func;
	nf = nfunc;
	while(nf > 0) {
		n = nf/2;
		if(f[n].entry <= addr && addr < f[n+1].entry)
			return &f[n];
		else if(addr < f[n].entry)
			nf = n;
		else {
			f += n+1;
			nf -= n+1;
		}
	}

	// can't get here -- we already checked above
	// that the address was in the table bounds.
	// this can only happen if the table isn't sorted
	// by address or if the binary search above is buggy.
	runtime·prints("findfunc unreachable\n");
	return nil;
}
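The funcinit/funclock dance is ordinary atomic double-checked locking: an atomic load skips the lock on the common path, and a second check under the lock ensures only one thread runs buildfuncs. A minimal standalone sketch of the same pattern follows, assuming C11 atomics and pthreads in place of the runtime's lock and atomicstore; ensure_init and init_tables are illustrative names.

#include <pthread.h>
#include <stdatomic.h>

static atomic_int initialized;		// 0 until init_tables has run
static pthread_mutex_t initlock = PTHREAD_MUTEX_INITIALIZER;

static void
init_tables(void)
{
	// expensive one-time setup goes here
}

static void
ensure_init(void)
{
	// Fast path: once initialization is visible, no lock is taken.
	if(atomic_load(&initialized))
		return;
	pthread_mutex_lock(&initlock);
	// Re-check under the lock so only one thread initializes.
	if(!atomic_load(&initialized)) {
		init_tables();
		atomic_store(&initialized, 1);	// publish only after the tables are built
	}
	pthread_mutex_unlock(&initlock);
}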
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;
	uint32 v;

	schedlock();
	ret = runtime·gomaxprocs;
	if(n <= 0)
		n = ret;
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·gomaxprocs = n;
	if(runtime·gomaxprocs > 1)
		runtime·singleproc = false;
	if(runtime·gcwaiting != 0) {
		if(atomic_mcpumax(runtime·sched.atomic) != 1)
			runtime·throw("invalid mcpumax during gc");
		schedunlock();
		return ret;
	}

	setmcpumax(n);

	// If there are now fewer allowed procs
	// than procs running, stop.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_mcpu(v) > n) {
		schedunlock();
		runtime·gosched();
		return ret;
	}
	// handle more procs
	matchmg();
	schedunlock();
	return ret;
}
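setmcpumax has to rewrite only the mcpumax field of the packed word while other threads keep xadd-ing mcpu, which calls for a compare-and-swap loop. Below is a standalone sketch of that kind of in-word field update, assuming C11 atomics and an illustrative 15-bit field at an arbitrary shift; this is not the runtime's actual setmcpumax.

#include <stdatomic.h>
#include <stdint.h>

enum { fieldWidth = 15, fieldMask = (1<<fieldWidth) - 1, fieldShift = 15 };

// Replace the 15-bit field at fieldShift inside *word with n, leaving the
// other bits untouched even while other threads update them concurrently.
static void
set_field(_Atomic uint32_t *word, uint32_t n)
{
	uint32_t old, new;

	do {
		old = atomic_load(word);
		new = (old & ~((uint32_t)fieldMask << fieldShift)) | (n << fieldShift);
	} while(!atomic_compare_exchange_weak(word, &old, new));
}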
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax). We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax. Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock. So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock. It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change. Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime·main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime·sched.grunning == 0) ||
	   (scvg != nil && runtime·sched.grunning == 1 && runtime·sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
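The tail of the function parks this m on m->havenextg and later reads m->nextg; the producer side stores the g and wakes the note. Here is a minimal standalone sketch of that hand-off, using a pthread mutex and condition variable in place of the runtime's Note; worker_t, hand_off, and wait_for_g are illustrative names, not the runtime's.

#include <pthread.h>
#include <stddef.h>

typedef struct {
	pthread_mutex_t	mu;
	pthread_cond_t	havenext;
	void	*nextg;		// work item being handed to this worker
	int	waitnext;	// worker is parked waiting for nextg
} worker_t;

// Producer side: give gp to w, waking it if it is parked.
static void
hand_off(worker_t *w, void *gp)
{
	pthread_mutex_lock(&w->mu);
	w->nextg = gp;
	if(w->waitnext) {
		w->waitnext = 0;
		pthread_cond_signal(&w->havenext);
	}
	pthread_mutex_unlock(&w->mu);
}

// Consumer side: park until a work item has been handed over, then take it.
static void*
wait_for_g(worker_t *w)
{
	void *gp;

	pthread_mutex_lock(&w->mu);
	w->waitnext = 1;
	while(w->nextg == NULL)
		pthread_cond_wait(&w->havenext, &w->mu);
	gp = w->nextg;
	w->nextg = NULL;
	w->waitnext = 0;
	pthread_mutex_unlock(&w->mu);
	return gp;
}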
void
runtime·notesleep(Note *n)
{
	while(runtime·atomicload(&n->state) == 0)
		futexsleep(&n->state, 0);
}
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax). We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax. Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock. So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation: one single active g which happens to be scvg.
	if(runtime·sched.grunning == 1 && runtime·sched.gwait == 0) {
		if(scvg->status == Grunning || scvg->status == Gsyscall)
			runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}