// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil && canaddmcpu()) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime·printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id, g->idlem->idleg->goid, g->goid);
			runtime·throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	g->schedlink = nil;
	if(runtime·sched.ghead == nil)
		runtime·sched.ghead = g;
	else
		runtime·sched.gtail->schedlink = g;
	runtime·sched.gtail = g;

	// increment gwait.
	// if it transitions to nonzero, set atomic gwaiting bit.
	if(runtime·sched.gwait++ == 0)
		runtime·xadd(&runtime·sched.atomic, 1<<gwaitingShift);
}
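// The gput above relies on canaddmcpu and on a packed runtime·sched.atomic
// word that this excerpt does not define.  A minimal sketch of both, assuming
// a bit layout consistent with the gwaitingShift/waitstopShift uses in this
// file (the widths and names here are illustrative reconstructions, not
// taken from this excerpt):
//
//	[15 bits] mcpu     number of m's executing Go code
//	[15 bits] mcpumax  max number of m's allowed to execute Go code
//	[1 bit]   waitstop some g is waiting on stopped
//	[1 bit]   gwaiting gwait != 0
//
enum {
	mcpuWidth = 15,
	mcpuMask = (1<<mcpuWidth) - 1,
	mcpuShift = 0,
	mcpumaxShift = mcpuShift + mcpuWidth,
	waitstopShift = mcpumaxShift + mcpuWidth,
	gwaitingShift = waitstopShift + 1,
};

#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)

// Try to increment mcpu, failing once mcpu would exceed mcpumax.
// Loops on compare-and-swap because entersyscall/exitsyscall may be
// mutating the word concurrently, without holding the sched lock.
static bool
canaddmcpu(void)
{
	uint32 v;

	for(;;) {
		v = runtime·sched.atomic;
		if(atomic_mcpu(v) >= atomic_mcpumax(v))
			return 0;
		if(runtime·cas(&runtime·sched.atomic, v, v+(1<<mcpuShift)))
			return 1;
	}
}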
// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if(runtime·sched.mcpu < runtime·sched.mcpumax && (m = g->lockedm) != nil) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime·printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id, g->idlem->idleg->goid, g->goid);
			runtime·throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	g->schedlink = nil;
	if(runtime·sched.ghead == nil)
		runtime·sched.ghead = g;
	else
		runtime·sched.gtail->schedlink = g;
	runtime·sched.gtail = g;
	runtime·sched.gwait++;
}
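// Both versions of gput above hand a wired g to its m via mnextg, which is
// not shown in this excerpt.  A sketch of that handoff, under the assumption
// that wakeups are deferred to schedunlock through a pending-m variable so
// that notewakeup is never issued while the sched lock is held.  The sketch
// omits the per-version run-count bookkeeping (mcpu++ in the older
// scheduler, grunning++ in the newer one):
static M *mwakeup;	// pending wakeup; protected by the sched lock

// Pass gp to mp to run next.  Sched must be locked.
static void
mnextg(M *mp, G *gp)
{
	mp->nextg = gp;
	if(mp->waitnextg) {
		mp->waitnextg = 0;
		if(mwakeup != nil)
			runtime·notewakeup(&mwakeup->havenextg);
		mwakeup = mp;
	}
}

// Unlock sched, then deliver any wakeup deferred by mnextg.
static void
schedunlock(void)
{
	M *mp;

	mp = mwakeup;
	mwakeup = nil;
	runtime·unlock(&runtime·sched);
	if(mp != nil)
		runtime·notewakeup(&mp->havenextg);
}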
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;

	if(runtime·sched.mcpu < 0)
		runtime·throw("negative runtime·sched.mcpu");

	// If there is a g waiting as m->nextg,
	// mnextg took care of the runtime·sched.mcpu++.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0)
			matchmg();
	} else {
		// Look for work on global queue.
		while(runtime·sched.mcpu < runtime·sched.mcpumax && (gp = gget()) != nil) {
			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.mcpu++;	// this m will run gp
			schedunlock();
			return gp;
		}
		// Otherwise, wait on global m queue.
		mput(m);
	}

	if(runtime·sched.mcpu == 0 && runtime·sched.msyscall == 0)
		runtime·throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);
	if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		runtime·sched.waitstop = 0;
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
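// When nextgandunlock finds no runnable g, it parks this m on the global
// m queue with mput; matchmg later pairs queued m's with g's via mget.
// A minimal sketch of that queue, assuming mhead/mwait fields in
// runtime·sched analogous to the ghead/gwait fields used by gput above:

// Put mp on `m' queue.  Sched must be locked.
static void
mput(M *mp)
{
	mp->schedlink = runtime·sched.mhead;
	runtime·sched.mhead = mp;
	runtime·sched.mwait++;
}

// Get an m from `m' queue.  Sched must be locked.
static M*
mget(G *gp)
{
	M *mp;

	// If gp is wired to an m, that m is the only candidate.
	if(gp && (mp = gp->lockedm) != nil)
		return mp;

	// Otherwise take any idle m from the pool.
	if((mp = runtime·sched.mhead) != nil) {
		runtime·sched.mhead = mp->schedlink;
		runtime·sched.mwait--;
	}
	return mp;
}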
// Kick off new m's as needed (up to mcpumax).
// There are already `other' other cpus that will
// start looking for goroutines shortly.
// Sched is locked.
static void
matchmg(void)
{
	G *g;

	if(m->mallocing || m->gcing)
		return;
	while(runtime·sched.mcpu < runtime·sched.mcpumax && (g = gget()) != nil) {
		M *m;

		// Find the m that will run g.
		if((m = mget(g)) == nil) {
			m = runtime·malloc(sizeof(M));
			// Add to runtime·allm so garbage collector doesn't free m
			// when it is just in a register or thread-local storage.
			m->alllink = runtime·allm;
			runtime·allm = m;
			m->id = runtime·sched.mcount++;

			if(runtime·iscgo) {
				CgoThreadStart ts;

				if(libcgo_thread_start == nil)
					runtime·throw("libcgo_thread_start missing");
				// pthread_create will make us a stack.
				m->g0 = runtime·malg(-1);
				ts.m = m;
				ts.g = m->g0;
				ts.fn = runtime·mstart;
				runtime·asmcgocall(libcgo_thread_start, &ts);
			} else {
				if(Windows)
					// windows will layout sched stack on os stack
					m->g0 = runtime·malg(-1);
				else
					m->g0 = runtime·malg(8192);
				runtime·newosproc(m, m->g0, m->g0->stackbase, runtime·mstart);
			}
		}
		mnextg(m, g);
	}
}
// Kick off new m's as needed (up to mcpumax).
// Sched is locked.
static void
matchmg(void)
{
	G *gp;
	M *mp;

	if(m->mallocing || m->gcing)
		return;

	while(haveg() && canaddmcpu()) {
		gp = gget();
		if(gp == nil)
			runtime·throw("gget inconsistency");

		// Find the m that will run gp.
		if((mp = mget(gp)) == nil)
			mp = runtime·newm();
		mnextg(mp, gp);
	}
}
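// matchmg's loop condition and its "gget inconsistency" throw only hold
// together if haveg answers exactly the question "would gget return
// non-nil?".  A sketch of both helpers, assuming the ghead/gtail/gwait
// fields used by gput above plus a per-m idleg slot filled in by gput's
// idlem case:

// Report whether gget would return something.
static bool
haveg(void)
{
	return runtime·sched.ghead != nil || m->idleg != nil;
}

// Get from `g' queue.  Sched must be locked.
static G*
gget(void)
{
	G *gp;

	gp = runtime·sched.ghead;
	if(gp) {
		runtime·sched.ghead = gp->schedlink;
		if(runtime·sched.ghead == nil)
			runtime·sched.gtail = nil;
		// decrement gwait.
		// if it transitions to zero, clear atomic gwaiting bit
		// (mirrors the increment in gput).
		if(--runtime·sched.gwait == 0)
			runtime·xadd(&runtime·sched.atomic, -1<<gwaitingShift);
	} else if(m->idleg != nil) {
		gp = m->idleg;
		m->idleg = nil;
	}
	return gp;
}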
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}

			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime·main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime·sched.grunning == 0) ||
	   (scvg != nil && runtime·sched.grunning == 1 && runtime·sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
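// The waitstop handshake above pairs with stoptheworld on the other side.
// An illustrative, simplified sketch of that side, assuming a schedlock()
// counterpart to schedunlock() and a setmcpumax() helper, neither shown in
// this excerpt (the real routine carries more bookkeeping):
static void
stoptheworld_sketch(void)
{
	uint32 v;

	schedlock();
	setmcpumax(1);	// no new m may start running Go code

	for(;;) {
		v = runtime·atomicload(&runtime·sched.atomic);
		if(atomic_mcpu(v) <= 1)
			break;	// everyone else has stopped

		// atomic { waitstop = 1 }, predicated on the mcpu value
		// observed above still being current.
		runtime·noteclear(&runtime·sched.stopped);
		if(!runtime·cas(&runtime·sched.atomic, v, v+(1<<waitstopShift)))
			continue;

		// nextgandunlock (or entersyscall) clears waitstop and
		// wakes us once mcpu drops to mcpumax.
		schedunlock();
		runtime·notesleep(&runtime·sched.stopped);
		schedlock();
	}
	schedunlock();
}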
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}

			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	v = runtime_atomicload(&runtime_sched.atomic);
	if(runtime_sched.grunning == 0)
		runtime_throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}