void
runtime_entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime_setprof(false);

	// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
	g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
				       &g->gcnext_segment, &g->gcnext_sp,
				       &g->gcinitial_sp);
#else
	g->gcnext_sp = (byte *) &v;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	// We could use getcontext here, but setjmp is more efficient
	// because it doesn't need to save the signal mask.
	setjmp(g->gcregs);

	g->status = Gsyscall;

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime_atomicload(&runtime_sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}

	schedunlock();
}
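/* The setjmp call above spills the callee-saved registers into g->gcregs
   so that a conservative collector can scan them as roots while the
   goroutine sits in a syscall.  Below is a minimal standalone sketch of
   that idea, assuming a hypothetical scan_words root scanner; neither the
   function nor the printing loop is part of either runtime. */

#include <setjmp.h>
#include <stdint.h>
#include <stdio.h>

static jmp_buf gcregs;	/* stand-in for g->gcregs */

/* Hypothetical scanner: a real conservative GC would test each word
   against the heap bounds instead of printing it. */
static void
scan_words(const void *base, size_t len)
{
	const uintptr_t *p = base;
	size_t i;

	for(i = 0; i < len/sizeof(*p); i++)
		printf("root candidate %zu: %#lx\n", i, (unsigned long)p[i]);
}

int
main(void)
{
	/* setjmp saves the callee-saved registers (plus SP and PC) into
	   the jmp_buf, so any pointer that was live only in a register
	   is now in memory where the scanner can find it. */
	setjmp(gcregs);
	scan_words(&gcregs, sizeof gcregs);
	return 0;
}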
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
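/* Both versions share the same fast path: the scheduler counters live in
   bitfields of one packed word, so a single atomic add both decrements
   mcpu and returns the entire scheduler state for inspection, with no
   lock taken.  A minimal C11 sketch of that packing follows; the field
   layout (mcpu in bits 16..31, a gwaiting flag in bit 0) is an assumption
   chosen for illustration, not the runtime's actual encoding. */

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum {
	gwaitingBit = 1u<<0,	/* hypothetical flag position */
	mcpuShift = 16,
	mcpuMask = 0xffff,
};

static _Atomic uint32_t sched_atomic;

static uint32_t
mcpu(uint32_t v)
{
	return (v>>mcpuShift) & mcpuMask;
}

int
main(void)
{
	uint32_t delta, v;

	atomic_store(&sched_atomic, 3u<<mcpuShift);	/* three running M's */

	/* One atomic op decrements mcpu and yields the new packed word,
	   like runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift). */
	delta = 1u<<mcpuShift;
	v = atomic_fetch_sub(&sched_atomic, delta) - delta;

	if(!(v & gwaitingBit))	/* nothing waiting: skip the lock */
		printf("fast path, mcpu now %u\n", mcpu(v));
	return 0;
}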