void
runtime_entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime_setprof(false);

	// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
	g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
				       &g->gcnext_segment, &g->gcnext_sp,
				       &g->gcinitial_sp);
#else
	g->gcnext_sp = (byte *) &v;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	// We could use getcontext here, but setjmp is more efficient
	// because it doesn't need to save the signal mask.
	setjmp(g->gcregs);

	g->status = Gsyscall;

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime_atomicload(&runtime_sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}

	schedunlock();
}
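The gccgo variant above uses setjmp to spill the callee-saved registers into g->gcregs, so pointers held only in registers stay visible to the collector while the goroutine sits in a system call. A minimal, self-contained sketch of that idea (hypothetical names, not runtime code):

	#include <setjmp.h>
	#include <stdio.h>

	/* Hypothetical illustration: setjmp dumps the callee-saved
	 * registers into a buffer that a conservative scanner could walk.
	 * Unlike getcontext, it does not save or restore the signal mask,
	 * which is why the comment above calls it the cheaper choice. */
	static jmp_buf gcregs;

	int
	main(void)
	{
		if(setjmp(gcregs) == 0)
			printf("captured %zu bytes of register state\n", sizeof gcregs);
		return 0;
	}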
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
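Both entersyscall variants lean on the same trick: mcpu, mcpumax, gwaiting, and waitstop are packed into the single word runtime·sched.atomic, so xadd(&sched.atomic, -1<<mcpuShift) decrements mcpu and returns the updated word, giving all four fields in one atomic step. A hedged sketch of such a packing (the field widths are an assumption for illustration, not copied from proc.c):

	/* Hypothetical layout of the scheduler state word; the real
	 * shifts and widths live in proc.c and may differ. */
	enum {
		mcpuWidth     = 15,
		mcpuMask      = (1<<mcpuWidth) - 1,
		mcpuShift     = 0,
		mcpumaxShift  = mcpuShift + mcpuWidth,
		gwaitingShift = mcpumaxShift + mcpuWidth,
		waitstopShift = gwaitingShift + 1,
	};

	#define atomic_mcpu(v)     (((v)>>mcpuShift) & mcpuMask)
	#define atomic_mcpumax(v)  (((v)>>mcpumaxShift) & mcpuMask)
	#define atomic_gwaiting(v) (((v)>>gwaitingShift) & 1)
	#define atomic_waitstop(v) (((v)>>waitstopShift) & 1)

With a layout along these lines, the fast path's single xadd performs the mcpu decrement and yields the state needed for the gwaiting/waitstop tests, so the sched lock is only taken when one of those bits demands it.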
void
runtime·stoptheworld(void)
{
	uint32 v;

	schedlock();
	runtime·gcwaiting = 1;

	setmcpumax(1);

	// while mcpu > 1
	for(;;) {
		v = runtime·sched.atomic;
		if(atomic_mcpu(v) <= 1)
			break;

		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection.
		runtime·noteclear(&runtime·sched.stopped);
		if(atomic_waitstop(v))
			runtime·throw("invalid waitstop");

		// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
		// still being true.
		if(!runtime·cas(&runtime·sched.atomic, v, v+(1<<waitstopShift)))
			continue;

		schedunlock();
		runtime·notesleep(&runtime·sched.stopped);
		schedlock();
	}
	runtime·singleproc = runtime·gomaxprocs == 1;
	schedunlock();
}
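The waitstop bit is set with a compare-and-swap against the exact word that was just inspected, so the mcpu <= 1 test and the bit-set are atomic with respect to concurrent entersyscall/exitsyscall traffic. A generic sketch of that predicated-CAS idiom using C11 atomics (hypothetical helper, not the runtime's cas):

	#include <stdatomic.h>
	#include <stdint.h>

	/* Set `bit` in *word only if pred() still holds for the value we
	 * observed; retry if another thread changed the word in between. */
	static int
	set_bit_if(_Atomic uint32_t *word, uint32_t bit, int (*pred)(uint32_t))
	{
		uint32_t old = atomic_load(word);
		while(pred(old)) {
			if(atomic_compare_exchange_weak(word, &old, old|bit))
				return 1;	/* bit set while pred held */
			/* old now holds the current value; re-test pred */
		}
		return 0;	/* predicate no longer true */
	}

stoptheworld follows the same shape: if the CAS fails it loops, re-reads the atomic word, and re-checks mcpu before sleeping on the stopped note.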
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime·main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime·sched.grunning == 0) ||
	   (scvg != nil && runtime·sched.grunning == 1 && runtime·sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	v = runtime_atomicload(&runtime_sched.atomic);
	if(runtime_sched.grunning == 0)
		runtime_throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
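In both versions the idle m parks on its havenextg note: waitnextg is set under the sched lock, the note is cleared, and the m sleeps until another thread hands it a goroutine via m->nextg and wakes the note. A self-contained sketch of that one-shot handoff pattern, built on a mutex and condition variable for illustration only (the runtime's Note is lower level, and the names here are hypothetical):

	#include <pthread.h>

	/* One-shot event in the spirit of noteclear/notesleep/notewakeup.
	 * mu and cv must be initialized (e.g. PTHREAD_MUTEX_INITIALIZER,
	 * PTHREAD_COND_INITIALIZER) before use. */
	typedef struct {
		pthread_mutex_t mu;
		pthread_cond_t  cv;
		int             set;
	} Note;

	/* Caller clears only while no one is sleeping on the note,
	 * mirroring the protocol in nextgandunlock. */
	static void noteclear(Note *n) { n->set = 0; }

	static void
	notewakeup(Note *n)
	{
		pthread_mutex_lock(&n->mu);
		n->set = 1;
		pthread_cond_signal(&n->cv);
		pthread_mutex_unlock(&n->mu);
	}

	static void
	notesleep(Note *n)
	{
		pthread_mutex_lock(&n->mu);
		while(!n->set)
			pthread_cond_wait(&n->cv, &n->mu);
		pthread_mutex_unlock(&n->mu);
	}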