void
runtime·stoptheworld(void)
{
	uint32 v;

	schedlock();
	runtime·gcwaiting = 1;

	setmcpumax(1);

	// while mcpu > 1
	for(;;) {
		v = runtime·sched.atomic;
		if(atomic_mcpu(v) <= 1)
			break;

		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection.
		runtime·noteclear(&runtime·sched.stopped);
		if(atomic_waitstop(v))
			runtime·throw("invalid waitstop");

		// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
		// still being true.
		if(!runtime·cas(&runtime·sched.atomic, v, v+(1<<waitstopShift)))
			continue;

		schedunlock();
		runtime·notesleep(&runtime·sched.stopped);
		schedlock();
	}
	runtime·singleproc = runtime·gomaxprocs == 1;
	schedunlock();
}
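// For reference, a minimal sketch of how the packed runtime·sched.atomic
// word and its accessors could be laid out. The names (mcpuShift,
// waitstopShift, atomic_mcpu, setmcpumax, maxgomaxprocs) come from the
// code in this section; the exact field widths here are an assumption.
enum {
	mcpuWidth	= 15,
	mcpuMask	= (1<<mcpuWidth) - 1,
	mcpuShift	= 0,				// low bits: mcpu
	mcpumaxShift	= mcpuShift + mcpuWidth,	// next bits: mcpumax
	waitstopShift	= mcpumaxShift + mcpuWidth,	// one bit: waitstop
	gwaitingShift	= waitstopShift + 1,		// one bit: gwaiting

	// Reserve a few values below the field maximum so that an
	// accidental decrement past zero is detectable (assumption).
	maxgomaxprocs	= mcpuMask - 10,
};

#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)

// setmcpumax could then be a CAS loop that rewrites only the mcpumax field:
static void
setmcpumax(uint32 n)
{
	uint32 v, w;

	for(;;) {
		v = runtime·sched.atomic;
		w = v;
		w &= ~(mcpuMask<<mcpumaxShift);
		w |= n<<mcpumaxShift;
		if(runtime·cas(&runtime·sched.atomic, v, w))
			break;
	}
}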
G*
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G * volatile newg;	// volatile to avoid longjmp warning

	schedlock();

	if((newg = gfget()) != nil) {
#ifdef USING_SPLIT_STACK
		int dont_block_signals = 0;

		sp = __splitstack_resetcontext(&newg->stack_context[0],
					       &spsize);
		__splitstack_block_signals_context(&newg->stack_context[0],
						   &dont_block_signals, nil);
#else
		sp = newg->gcinitial_sp;
		spsize = newg->gcstack_size;
		if(spsize == 0)
			runtime_throw("bad spsize in __go_go");
		newg->gcnext_sp = sp;
#endif
	} else {
		newg = runtime_malg(StackMin, &sp, &spsize);
		if(runtime_lastg == nil)
			runtime_allg = newg;
		else
			runtime_lastg->alllink = newg;
		runtime_lastg = newg;
	}

	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	newg->entry = (byte*)fn;
	newg->param = arg;
	newg->gopc = (uintptr)__builtin_return_address(0);

	runtime_sched.gcount++;
	runtime_sched.goidgen++;
	newg->goid = runtime_sched.goidgen;

	if(sp == nil)
		runtime_throw("nil g->stack0");

	getcontext(&newg->context);
	newg->context.uc_stack.ss_sp = sp;
#ifdef MAKECONTEXT_STACK_TOP
	newg->context.uc_stack.ss_sp += spsize;
#endif
	newg->context.uc_stack.ss_size = spsize;
	makecontext(&newg->context, kickoff, 0);

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
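// Hedged sketch: makecontext above takes a zero-argument entry point, so a
// small trampoline is needed to recover fn and arg once the new context is
// switched to. Something like the following kickoff (an assumption; the
// real trampoline may differ in detail):
static void
kickoff(void)
{
	void (*fn)(void*);

	// g->entry and g->param were filled in by __go_go before the
	// context switch landed us here on the new goroutine's stack.
	fn = (void (*)(void*))g->entry;
	fn(g->param);
	runtime_goexit();
}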
// delete when scheduler is stronger
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	schedlock();
	ret = runtime·gomaxprocs;
	if(n <= 0)
		n = ret;
	runtime·gomaxprocs = n;
	if(runtime·gcwaiting != 0) {
		if(runtime·sched.mcpumax != 1)
			runtime·throw("invalid runtime·sched.mcpumax during gc");
		schedunlock();
		return ret;
	}
	runtime·sched.mcpumax = n;

	// handle fewer procs?
	if(runtime·sched.mcpu > runtime·sched.mcpumax) {
		schedunlock();
		// just give up the cpu.
		// we'll only get rescheduled once the
		// number has come down.
		runtime·gosched();
		return ret;
	}

	// handle more procs
	matchmg();
	schedunlock();

	return ret;
}
void
runtime·starttheworld(bool extra)
{
	M *m;

	schedlock();
	runtime·gcwaiting = 0;
	setmcpumax(runtime·gomaxprocs);
	matchmg();
	if(extra && canaddmcpu()) {
		// Start a new m that will (we hope) be idle
		// and so available to help when the next
		// garbage collection happens.
		// canaddmcpu above did mcpu++
		// (necessary, because m will be doing various
		// initialization work so is definitely running),
		// but m is not running a specific goroutine,
		// so set the helpgc flag as a signal to m's
		// first schedule(nil) to mcpu-- and grunning--.
		m = runtime·newm();
		m->helpgc = 1;
		runtime·sched.grunning++;
	}
	schedunlock();
}
void
runtime·entersyscall(void)
{
	if(runtime·sched.predawn)
		return;
	schedlock();
	g->status = Gsyscall;
	runtime·sched.mcpu--;
	runtime·sched.msyscall++;
	if(runtime·sched.gwait != 0)
		matchmg();

	if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		runtime·sched.waitstop = 0;
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Leave SP around for gc and traceback.
	// Do before schedunlock so that gc
	// never sees Gsyscall with wrong stack.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
			g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}
	schedunlock();
}
// Mark g ready to run.
void
runtime·ready(G *g)
{
	schedlock();
	readylocked(g);
	schedunlock();
}
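// Hedged sketch of readylocked, the locked helper that runtime·ready wraps
// and that schedule() also calls for readyonstop goroutines (an assumption;
// the real helper may differ in detail):
static void
readylocked(G *gp)
{
	if(gp->m) {
		// Running on another machine.
		// Ready it when it stops.
		gp->readyonstop = 1;
		return;
	}

	// Mark runnable.
	if(gp->status == Grunnable || gp->status == Grunning)
		runtime·throw("bad g->status in ready");
	gp->status = Grunnable;

	// Queue it and, if a cpu is free, start an m to run it.
	gput(gp);
	matchmg();
}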
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	int32 siz;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	schedlock();

	if((newg = gfget()) != nil) {
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		if(runtime·lastg == nil)
			runtime·allg = newg;
		else
			runtime·lastg->alllink = newg;
		runtime·lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	sp = newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	newg->sched.sp = sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;

	runtime·sched.gcount++;
	runtime·sched.goidgen++;
	newg->goid = runtime·sched.goidgen;

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
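// Hedged sketch of the public entry point that could sit in front of
// newproc1, computing the argument pointer from the varargs frame (an
// assumption; the real wrapper may differ). On ARM (thechar == '5') the
// caller's saved LR sits between fn and the arguments.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	if(thechar == '5')
		argp = (byte*)(&fn+2);	// skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}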
// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
runtime·starttheworld(void)
{
	schedlock();
	runtime·gcwaiting = 0;
	runtime·sched.mcpumax = runtime·gomaxprocs;
	matchmg();
	schedunlock();
}
// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
runtime·stoptheworld(void)
{
	schedlock();
	runtime·gcwaiting = 1;
	runtime·sched.mcpumax = 1;
	while(runtime·sched.mcpu > 1) {
		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection,
		// so this is okay.
		runtime·noteclear(&runtime·sched.stopped);
		runtime·sched.waitstop = 1;
		schedunlock();
		runtime·notesleep(&runtime·sched.stopped);
		schedlock();
	}
	schedunlock();
}
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	int32 siz;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;
	if(siz > 1024)
		runtime·throw("runtime.newproc: too many args");

	schedlock();

	if((newg = gfget()) != nil) {
		newg->status = Gwaiting;
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		newg->status = Gwaiting;
		newg->alllink = runtime·allg;
		runtime·allg = newg;
	}

	sp = newg->stackbase;
	sp -= siz;
	runtime·mcpy(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	newg->sched.sp = sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;

	runtime·sched.gcount++;
	runtime·goidgen++;
	newg->goid = runtime·goidgen;

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
// Called after main·init_function; main·main will be called on return.
void
runtime·initdone(void)
{
	// Let's go.
	runtime·sched.predawn = 0;
	mstats.enablegc = 1;

	// If main·init_function started other goroutines,
	// kick off new ms to handle them, like ready
	// would have, had it not been pre-dawn.
	schedlock();
	matchmg();
	schedunlock();
}
void
runtime_entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime_setprof(false);

	// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
	g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
				       &g->gcnext_segment, &g->gcnext_sp,
				       &g->gcinitial_sp);
#else
	g->gcnext_sp = (byte *) &v;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	// We could use getcontext here, but setjmp is more efficient
	// because it doesn't need to save the signal mask.
	setjmp(g->gcregs);

	g->status = Gsyscall;

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime_atomicload(&runtime_sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}

	schedunlock();
}
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
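// Worked example (hedged, using the field layout assumed in the sketch
// near the top of this section): because mcpu occupies the low bits of the
// atomic word, the single
//
//	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
//
// decrements only the mcpu field and returns the whole updated word, so the
// fast path can test all three slow-path conditions from v without taking
// schedlock. For instance, with mcpu=2, mcpumax=2, waitstop=1, gwaiting=0
// before the call: after the add, mcpu=1 <= mcpumax and waitstop is set, so
// the fast-return condition !gwaiting && (!waitstop || mcpu > mcpumax)
// fails, and entersyscall takes the slow path to wake the stopped note.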
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime·exitsyscall(void)
{
	if(runtime·sched.predawn)
		return;

	schedlock();
	runtime·sched.msyscall--;
	runtime·sched.mcpu++;

	// Fast path - if there's room for this m, we're done.
	if(m->profilehz == runtime·sched.profilehz && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		// There's a cpu for us, so we can run.
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = nil;
		schedunlock();
		return;
	}

	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	g->readyonstop = 1;
	schedunlock();

	// Slow path - all the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime·sched.mcpu++ above.
	runtime·gosched();

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = nil;
}
void
runtime·starttheworld(void)
{
	M *mp;
	int32 max;

	// Figure out how many CPUs GC could possibly use.
	max = runtime·gomaxprocs;
	if(max > runtime·ncpu)
		max = runtime·ncpu;
	if(max > MaxGcproc)
		max = MaxGcproc;

	schedlock();
	runtime·gcwaiting = 0;
	setmcpumax(runtime·gomaxprocs);
	matchmg();
	if(runtime·gcprocs() < max && canaddmcpu()) {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate. This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		//
		// canaddmcpu above did mcpu++
		// (necessary, because m will be doing various
		// initialization work so is definitely running),
		// but m is not running a specific goroutine,
		// so set the helpgc flag as a signal to m's
		// first schedule(nil) to mcpu-- and grunning--.
		mp = runtime·newm();
		mp->helpgc = 1;
		runtime·sched.grunning++;
	}
	schedunlock();
}
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;
	uint32 v;

	schedlock();
	ret = runtime·gomaxprocs;
	if(n <= 0)
		n = ret;
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·gomaxprocs = n;
	if(runtime·gomaxprocs > 1)
		runtime·singleproc = false;
	if(runtime·gcwaiting != 0) {
		if(atomic_mcpumax(runtime·sched.atomic) != 1)
			runtime·throw("invalid mcpumax during gc");
		schedunlock();
		return ret;
	}

	setmcpumax(n);

	// If there are now fewer allowed procs
	// than procs running, stop.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_mcpu(v) > n) {
		schedunlock();
		runtime·gosched();
		return ret;
	}

	// handle more procs
	matchmg();
	schedunlock();

	return ret;
}
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;
	uint32 v;

	schedlock();
	if(gp != nil) {
		// Just finished running gp.
		gp->m = nil;
		runtime·sched.grunning--;

		// atomic { mcpu-- }
		v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime·throw("negative mcpu in scheduler");

		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime·throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			unwindstack(gp, nil);
			gfput(gp);
			if(--runtime·sched.gcount == 0)
				runtime·exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime·throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime·sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else {
		runtime·throw("invalid m state in scheduler");
	}

	// Find (or wait for) g to run.  Unlocks runtime·sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime·sched.profilehz;
	if(m->profilehz != hz)
		runtime·resetcpuprofiler(hz);

	if(gp->sched.pc == (byte*)runtime·goexit) {	// kickoff
		runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
	}
	runtime·gogo(&gp->sched, 0);
}
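// Hedged sketch of the run-queue helpers gput/gget used above, as a plain
// FIFO on a schedlink field. The ghead/gtail field names are assumptions,
// and the real helpers also handle lockedm hand-off and gwait bookkeeping,
// omitted here:
static void
gput(G *gp)
{
	// Append to the tail of the global run queue.  Sched must be locked.
	gp->schedlink = nil;
	if(runtime·sched.ghead == nil)
		runtime·sched.ghead = gp;
	else
		runtime·sched.gtail->schedlink = gp;
	runtime·sched.gtail = gp;
}

static G*
gget(void)
{
	G *gp;

	// Pop from the head of the global run queue.  Sched must be locked.
	gp = runtime·sched.ghead;
	if(gp) {
		runtime·sched.ghead = gp->schedlink;
		if(runtime·sched.ghead == nil)
			runtime·sched.gtail = nil;
	}
	return gp;
}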
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;

	schedlock();
	if(gp != nil) {
		if(runtime·sched.predawn)
			runtime·throw("init rescheduling");

		// Just finished running gp.
		gp->m = nil;
		runtime·sched.mcpu--;
		if(runtime·sched.mcpu < 0)
			runtime·throw("runtime·sched.mcpu < 0 in scheduler");
		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime·throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			unwindstack(gp, nil);
			gfput(gp);
			if(--runtime·sched.gcount == 0)
				runtime·exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	}

	// Find (or wait for) g to run.  Unlocks runtime·sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime·sched.profilehz;
	if(m->profilehz != hz)
		runtime·resetcpuprofiler(hz);

	if(gp->sched.pc == (byte*)runtime·goexit) {	// kickoff
		runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
	}
	runtime·gogo(&gp->sched, 0);
}