// Unwind the stack after a deferred function calls recover
// after a panic.  Then arrange to continue running as though
// the caller of the deferred function returned normally.
static void
recovery(G *gp)
{
	void *argp;
	uintptr pc;

	// Info about defer passed in G struct.
	argp = (void*)gp->sigcode0;
	pc = (uintptr)gp->sigcode1;

	// Unwind to the stack frame with d's arguments in it.
	runtime·unwindstack(gp, argp);

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	// The -2*sizeof(uintptr) makes up for the
	// two extra words that are on the stack at
	// each call to deferproc.
	// (The pc we're returning to does pop pop
	// before it tests the return value.)
	// On the arm there are 2 saved LRs mixed in too.
	if(thechar == '5')
		gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr);
	else
		gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr);
	gp->sched.pc = pc;
	gp->sched.lr = 0;
	gp->sched.ret = 1;
	runtime·gogo(&gp->sched);
}
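// For context, a minimal sketch (not runtime source) of the calling
// convention recovery() relies on: the compiler emits a test of
// deferproc's return value at every call site, roughly
//
//	if(runtime·deferproc(siz, fn, args) != 0)	// returns 1 after a recover
//		goto return_epilogue;			// run deferreturn, then return
//
// so restoring gp->sched to the deferproc call site with ret = 1
// drops the recovering frame straight into its return epilogue.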
// Unwind the stack after a deferred function calls recover
// after a panic.  Then arrange to continue running as though
// the caller of the deferred function returned normally.
static void
recovery(G *gp)
{
	Defer *d;

	// Rewind gp's stack; we're running on m->g0's stack.
	d = gp->defer;
	gp->defer = d->link;

	// Unwind to the stack frame with d's arguments in it.
	unwindstack(gp, d->argp);

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	// The -2*sizeof(uintptr) makes up for the
	// two extra words that are on the stack at
	// each call to deferproc.
	// (The pc we're returning to does pop pop
	// before it tests the return value.)
	// On the arm there are 2 saved LRs mixed in too.
	if(thechar == '5')
		gp->sched.sp = (byte*)d->argp - 4*sizeof(uintptr);
	else
		gp->sched.sp = (byte*)d->argp - 2*sizeof(uintptr);
	gp->sched.pc = d->pc;
	if(!d->nofree)
		runtime·free(d);
	runtime·gogo(&gp->sched, 1);
}
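// A simplified sketch of the Defer record this older version consumes
// (field meanings assumed from the uses above; the real runtime struct
// carries more bookkeeping, such as the argument size and a copy of
// the arguments):
//
//	struct Defer
//	{
//		byte*	argp;	// d's arguments on the goroutine stack
//		byte*	pc;	// pc of the deferproc call site
//		Defer*	link;	// next record in gp's defer list
//		bool	nofree;	// allocated on the stack; must not be freed
//	};
//
// recovery pops one record off gp->defer and frees it unless it lives
// on the goroutine stack (nofree), which the unwind reclaims anyway.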
/* Unwind until stop frame. Optionally cleanup frames. */
static void *err_unwind(lua_State *L, void *stopcf, int errcode)
{
  TValue *frame = L->base-1;
  void *cf = L->cframe;
  while (cf) {
    int32_t nres = cframe_nres(cframe_raw(cf));
    if (nres < 0) {  /* C frame without Lua frame? */
      TValue *top = restorestack(L, -nres);
      if (frame < top) {  /* Frame reached? */
        if (errcode) {
          L->cframe = cframe_prev(cf);
          L->base = frame+1;
          unwindstack(L, top);
        }
        return cf;
      }
    }
    if (frame <= tvref(L->stack))
      break;
    switch (frame_typep(frame)) {
    case FRAME_LUA:  /* Lua frame. */
    case FRAME_LUAP:
      frame = frame_prevl(frame);
      break;
    case FRAME_C:  /* C frame. */
#if LJ_UNWIND_EXT
      if (errcode) {
        L->cframe = cframe_prev(cf);
        L->base = frame_prevd(frame) + 1;
        unwindstack(L, frame);
      } else if (cf != stopcf) {
        cf = cframe_prev(cf);
        frame = frame_prevd(frame);
        break;
      }
      return NULL;  /* Continue unwinding. */
#else
      UNUSED(stopcf);
      cf = cframe_prev(cf);
      frame = frame_prevd(frame);
      break;
#endif
    case FRAME_CP:  /* Protected C frame. */
      if (cframe_canyield(cf)) {  /* Resume? */
        if (errcode) {
          L->cframe = NULL;
          L->status = (uint8_t)errcode;
        }
        return cf;
      }
      if (errcode) {
        L->cframe = cframe_prev(cf);
        L->base = frame_prevd(frame) + 1;
        unwindstack(L, frame);
      }
      return cf;
    case FRAME_CONT:  /* Continuation frame. */
    case FRAME_VARG:  /* Vararg frame. */
      frame = frame_prevd(frame);
      break;
    case FRAME_PCALL:  /* FF pcall() frame. */
    case FRAME_PCALLH:  /* FF pcall() frame inside hook. */
      if (errcode) {
        if (errcode == LUA_YIELD) {
          frame = frame_prevd(frame);
          break;
        }
        if (frame_typep(frame) == FRAME_PCALL)
          hook_leave(G(L));
        L->cframe = cf;
        L->base = frame_prevd(frame) + 1;
        unwindstack(L, L->base);
      }
      return (void *)((intptr_t)cf | CFRAME_UNWIND_FF);
    }
  }
  /* No C frame. */
  if (errcode) {
    L->cframe = NULL;
    L->base = tvref(L->stack)+1;
    unwindstack(L, L->base);
    if (G(L)->panic)
      G(L)->panic(L);
    exit(EXIT_FAILURE);
  }
  return L;  /* Anything non-NULL will do. */
}
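/* A self-contained sketch (assumed names, not LuaJIT source) of the
** pointer-tagging idiom behind CFRAME_UNWIND_FF above: C frames are at
** least word-aligned, so the low bit of the returned frame pointer is
** free to tell the caller the unwind stopped at an FF pcall() frame.
*/
#include <stdint.h>
#include <stdio.h>

#define UNWIND_FF 1  /* stand-in for CFRAME_UNWIND_FF */

static void *tag_ff(void *cf)   { return (void *)((intptr_t)cf | UNWIND_FF); }
static int   is_ff(void *cf)    { return (int)((intptr_t)cf & UNWIND_FF); }
static void *strip_ff(void *cf) { return (void *)((intptr_t)cf & ~(intptr_t)UNWIND_FF); }

int main(void)
{
  int frame;  /* stands in for a word-aligned C frame */
  void *cf = tag_ff(&frame);
  printf("ff=%d same=%d\n", is_ff(cf), strip_ff(cf) == (void *)&frame);
  return 0;
}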
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;
	uint32 v;

	schedlock();
	if(gp != nil) {
		// Just finished running gp.
		gp->m = nil;
		runtime·sched.grunning--;

		// atomic { mcpu-- }
		v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime·throw("negative mcpu in scheduler");

		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime·throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			unwindstack(gp, nil);
			gfput(gp);
			if(--runtime·sched.gcount == 0)
				runtime·exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime·throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime·sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else {
		runtime·throw("invalid m state in scheduler");
	}

	// Find (or wait for) g to run.  Unlocks runtime·sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime·sched.profilehz;
	if(m->profilehz != hz)
		runtime·resetcpuprofiler(hz);

	if(gp->sched.pc == (byte*)runtime·goexit) {	// kickoff
		runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
	}
	runtime·gogo(&gp->sched, 0);
}
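// A hedged sketch of the packed scheduler word manipulated above (the
// exact layout here is assumed; the runtime defines the real constants):
// several counters share one atomic uint32 so that "mcpu--" can be a
// single xadd, e.g.
//
//	enum { mcpuShift = 0, mcpuWidth = 15, mcpuMask = (1<<mcpuWidth)-1 };
//	#define atomic_mcpu(v)	(((v)>>mcpuShift) & mcpuMask)
//
// The field is unsigned, so decrementing it below zero wraps to a huge
// value; that is why the sanity check reads
// "atomic_mcpu(v) > maxgomaxprocs" rather than testing for a negative
// count.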
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;

	schedlock();
	if(gp != nil) {
		if(runtime·sched.predawn)
			runtime·throw("init rescheduling");

		// Just finished running gp.
		gp->m = nil;
		runtime·sched.mcpu--;
		if(runtime·sched.mcpu < 0)
			runtime·throw("runtime·sched.mcpu < 0 in scheduler");
		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime·throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			unwindstack(gp, nil);
			gfput(gp);
			if(--runtime·sched.gcount == 0)
				runtime·exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	}

	// Find (or wait for) g to run.  Unlocks runtime·sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime·sched.profilehz;
	if(m->profilehz != hz)
		runtime·resetcpuprofiler(hz);

	if(gp->sched.pc == (byte*)runtime·goexit) {	// kickoff
		runtime·gogocall(&gp->sched, (void(*)(void))gp->entry);
	}
	runtime·gogo(&gp->sched, 0);
}
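// For context, a hedged sketch of the "kickoff" branch shared by both
// versions of schedule: a freshly created goroutine is parked with
// sched.pc = runtime·goexit, so its first dispatch goes through
// gogocall, which pushes goexit as the return address and jumps to the
// entry point; the effect is roughly
//
//	gp->entry();		// the go'd function body
//	runtime·goexit();	// runs when entry returns, retiring the g
//
// Goroutines that were merely descheduled resume through the plain
// gogo call instead.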