int task_main(struct task_args *args) { gogo(test_main, NULL); //gogo(test_msize, NULL); //gogo(test_sizeclass, NULL); //gogo(test_mem, NULL); gogo(test_gogo, NULL); return 0; }
// Unwind the stack after a deferred function calls recover // after a panic. Then arrange to continue running as though // the caller of the deferred function returned normally. static void recovery(G *gp) { Defer *d; // Rewind gp's stack; we're running on m->g0's stack. d = gp->defer; gp->defer = d->link; // Unwind to the stack frame with d's arguments in it. unwindstack(gp, d->argp); // Make the deferproc for this d return again, // this time returning 1. The calling function will // jump to the standard return epilogue. // The -2*sizeof(uintptr) makes up for the // two extra words that are on the stack at // each call to deferproc. // (The pc we're returning to does pop pop // before it tests the return value.) // On the arm there are 2 saved LRs mixed in too. if(thechar == '5') gp->sched.sp = (byte*)d->argp - 4*sizeof(uintptr); else gp->sched.sp = (byte*)d->argp - 2*sizeof(uintptr); gp->sched.pc = d->pc; if(!d->nofree) runtime·free(d); runtime·gogo(&gp->sched, 1); }
// Called from runtime·lessstack when returning from a function which // allocated a new stack segment. The function's return value is in // m->cret. void runtime·oldstack(void) { Stktop *top, old; uint32 argsize; uintptr cret; byte *sp; G *g1; int32 goid; //printf("oldstack m->cret=%p\n", m->cret); g1 = m->curg; top = (Stktop*)g1->stackbase; sp = (byte*)top; old = *top; argsize = old.argsize; if(argsize > 0) { sp -= argsize; runtime·memmove(top->argp, sp, argsize); } goid = old.gobuf.g->goid; // fault if g is bad, before gogo USED(goid); if(old.free != 0) runtime·stackfree(g1->stackguard - StackGuard, old.free); g1->stackbase = old.stackbase; g1->stackguard = old.stackguard; cret = m->cret; m->cret = 0; // drop reference runtime·gogo(&old.gobuf, cret); }
// Called when returning from a function which allocated a new stack
// segment: copy the arguments back, free the segment, restore the
// previous segment's bounds, and resume the caller with the value
// saved in m->cret.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	byte *sp;
	G *g1;
	// NOTE(review): `static` looks unnecessary here — presumably to keep
	// the otherwise-dead store from being warned about; confirm intent.
	static int32 goid;

	//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	// The Stktop sits at the base of the segment being abandoned;
	// copy it by value before the segment is freed below.
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy the callee's arguments back into the caller's frame.
		sp -= argsize;
		runtime·mcpy(top->argp, sp, argsize);
	}
	goid = old.gobuf.g->goid;	// fault if g is bad, before gogo
	// Free the abandoned segment unless it was not separately allocated.
	if(old.free != 0)
		runtime·stackfree(g1->stackguard - StackGuard, old.free);
	g1->stackbase = old.stackbase;
	g1->stackguard = old.stackguard;
	// Resume the caller, passing the saved return value.
	// NOTE(review): unlike other variants, m->cret is not cleared here.
	runtime·gogo(&old.gobuf, m->cret);
}
// Unwind the stack after a deferred function calls recover // after a panic. Then arrange to continue running as though // the caller of the deferred function returned normally. static void recovery(G *gp) { void *argp; uintptr pc; // Info about defer passed in G struct. argp = (void*)gp->sigcode0; pc = (uintptr)gp->sigcode1; // Unwind to the stack frame with d's arguments in it. runtime·unwindstack(gp, argp); // Make the deferproc for this d return again, // this time returning 1. The calling function will // jump to the standard return epilogue. // The -2*sizeof(uintptr) makes up for the // two extra words that are on the stack at // each call to deferproc. // (The pc we're returning to does pop pop // before it tests the return value.) // On the arm there are 2 saved LRs mixed in too. if(thechar == '5') gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr); else gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr); gp->sched.pc = pc; gp->sched.lr = 0; gp->sched.ret = 1; runtime·gogo(&gp->sched); }
// Called from runtime·lessstack when returning from a function which // allocated a new stack segment. The function's return value is in // m->cret. void runtime·oldstack(void) { Stktop *top; uint32 argsize; byte *sp, *old; uintptr *src, *dst, *dstend; G *gp; int64 goid; int32 oldstatus; gp = m->curg; top = (Stktop*)gp->stackbase; old = (byte*)gp->stackguard - StackGuard; sp = (byte*)top; argsize = top->argsize; if(StackDebug >= 1) { runtime·printf("runtime: oldstack gobuf={pc:%p sp:%p lr:%p} cret=%p argsize=%p\n", top->gobuf.pc, top->gobuf.sp, top->gobuf.lr, (uintptr)m->cret, (uintptr)argsize); } // gp->status is usually Grunning, but it could be Gsyscall if a stack overflow // happens during a function call inside entersyscall. oldstatus = gp->status; gp->sched = top->gobuf; gp->sched.ret = m->cret; m->cret = 0; // drop reference gp->status = Gwaiting; gp->waitreason = "stack unsplit"; if(argsize > 0) { sp -= argsize; dst = (uintptr*)top->argp; dstend = dst + argsize/sizeof(*dst); src = (uintptr*)sp; while(dst < dstend) *dst++ = *src++; } goid = top->gobuf.g->goid; // fault if g is bad, before gogo USED(goid); gp->stackbase = top->stackbase; gp->stackguard = top->stackguard; gp->stackguard0 = gp->stackguard; gp->panicwrap = top->panicwrap; runtime·stackfree(gp, old, top); gp->status = oldstatus; runtime·gogo(&gp->sched); }
// Micro-benchmark: spawn `limit` goroutines via gogo() and report the
// total wall-clock cost in nanoseconds on stdout.
//
// args is unused (present to match the task-function signature).
//
// Fix: the elapsed time was accumulated in a double but printed with
// %lld — a printf format/argument type mismatch, which is undefined
// behavior.  The elapsed time is now kept as long long, matching the
// format specifier exactly.
void test_gogo(void *args)
{
	int i, limit = 1000000;
	long long ct;			/* elapsed nanoseconds */
	struct timespec start, end;

	(void)args;			/* unused */

	clock_gettime(CLOCK_REALTIME, &start);
	for (i = 0; i < limit; i++) {
		gogo(test_gogo_foo, NULL);
	}
	clock_gettime(CLOCK_REALTIME, &end);

	// Seconds delta scaled to ns, plus the nanosecond delta (which may
	// be negative; the sum is still correct).
	ct = (end.tv_sec - start.tv_sec) * 1000000000LL
	   + (end.tv_nsec - start.tv_nsec);
	fprintf(stdout, "building %d thread cost: %lld\n", limit, ct);
}
// Trampoline run on the scheduler stack on behalf of goroutine gp:
// unpack the MHeap pointer and scalar arguments from the per-M scratch
// slots, perform the heap allocation, publish the resulting span back
// through ptrarg[0], and resume gp.
static void
mheap_alloc_m(G *gp)
{
	MHeap *heap;
	MSpan *span;

	heap = g->m->ptrarg[0];
	g->m->ptrarg[0] = nil;	// clear the slot before reusing it for the result

	span = mheap_alloc(heap, g->m->scalararg[0], g->m->scalararg[1], g->m->scalararg[2]);

	g->m->ptrarg[0] = span;
	runtime·gogo(&gp->sched);
}
// Called from runtime·lessstack when returning from a function which // allocated a new stack segment. The function's return value is in // m->cret. void runtime·oldstack(void) { Stktop *top; Gobuf label; uint32 argsize; uintptr cret; byte *sp, *old; uintptr *src, *dst, *dstend; G *gp; int64 goid; //printf("oldstack m->cret=%p\n", m->cret); gp = m->curg; top = (Stktop*)gp->stackbase; old = (byte*)gp->stackguard - StackGuard; sp = (byte*)top; argsize = top->argsize; if(argsize > 0) { sp -= argsize; dst = (uintptr*)top->argp; dstend = dst + argsize/sizeof(*dst); src = (uintptr*)sp; while(dst < dstend) *dst++ = *src++; } goid = top->gobuf.g->goid; // fault if g is bad, before gogo USED(goid); label = top->gobuf; gp->stackbase = (uintptr)top->stackbase; gp->stackguard = (uintptr)top->stackguard; if(top->free != 0) runtime·stackfree(old, top->free); cret = m->cret; m->cret = 0; // drop reference runtime·gogo(&label, cret); }
// Command dispatcher: read n values into arr[], build the segment tree,
// then process m queries.  Query format: an opcode followed by two or
// three integers; opcodes 1, 4 and 5 print one result line each.
int main()
{
#ifdef LOCAL
	freopen("in.txt", "r", stdin);
	freopen("out.txt", "w+", stdout);
#endif
	init();

	int i, n, m;
	int op, x, y, z;

	scanf("%d %d", &n, &m);
	for (i = 1; i <= n; i++)
		scanf("%d", &arr[i]);
	seg_built(1, 1, n);

	while (m--) {
		scanf("%d", &op);
		switch (op) {
		case 1:
			// seg_rank query; result accumulates in global `sum`,
			// which starts at 1.
			sum = 1;
			scanf("%d %d %d", &x, &y, &z);
			seg_rank(1, 1, n, x, y, z);
			printf("%d\n", sum);
			break;
		case 2:
			// Three-argument operation handled by gogo().
			scanf("%d %d %d", &x, &y, &z);
			gogo(n, x, y, z);
			break;
		case 3:
			// Point update: change position x to value y in both
			// the tree and the mirror array.
			scanf("%d %d", &x, &y);
			seg_modify(1, 1, n, x, y);
			arr[x] = y;
			break;
		case 4:
			scanf("%d %d %d", &x, &y, &z);
			printf("%d\n", seg_pred(1, 1, n, x, y, z));
			break;
		case 5:
			scanf("%d %d %d", &x, &y, &z);
			printf("%d\n", seg_succ(1, 1, n, x, y, z));
			break;
		}
	}
	return 0;
}
// One round of scheduler: find a goroutine and run it. // The argument is the goroutine that was running before // schedule was called, or nil if this is the first call. // Never returns. static void schedule(G *gp) { int32 hz; uint32 v; schedlock(); if(gp != nil) { // Just finished running gp. gp->m = nil; runtime·sched.grunning--; // atomic { mcpu-- } v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift); if(atomic_mcpu(v) > maxgomaxprocs) runtime·throw("negative mcpu in scheduler"); switch(gp->status){ case Grunnable: case Gdead: // Shouldn't have been running! runtime·throw("bad gp->status in sched"); case Grunning: gp->status = Grunnable; gput(gp); break; case Gmoribund: gp->status = Gdead; if(gp->lockedm) { gp->lockedm = nil; m->lockedg = nil; } gp->idlem = nil; unwindstack(gp, nil); gfput(gp); if(--runtime·sched.gcount == 0) runtime·exit(0); break; } if(gp->readyonstop){ gp->readyonstop = 0; readylocked(gp); } } else if(m->helpgc) { // Bootstrap m or new m started by starttheworld. // atomic { mcpu-- } v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift); if(atomic_mcpu(v) > maxgomaxprocs) runtime·throw("negative mcpu in scheduler"); // Compensate for increment in starttheworld(). runtime·sched.grunning--; m->helpgc = 0; } else if(m->nextg != nil) { // New m started by matchmg. } else { runtime·throw("invalid m state in scheduler"); } // Find (or wait for) g to run. Unlocks runtime·sched. gp = nextgandunlock(); gp->readyonstop = 0; gp->status = Grunning; m->curg = gp; gp->m = m; // Check whether the profiler needs to be turned on or off. hz = runtime·sched.profilehz; if(m->profilehz != hz) runtime·resetcpuprofiler(hz); if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); } runtime·gogo(&gp->sched, 0); }
// Hook used by runtime·malg to call runtime·stackalloc on the // scheduler stack. This exists because runtime·stackalloc insists // on being called on the scheduler stack, to avoid trying to grow // the stack while allocating a new stack segment. static void mstackalloc(G *gp) { gp->param = runtime·stackalloc((uintptr)gp->param); runtime·gogo(&gp->sched, 0); }
// Per test case: read a directed graph, label its strongly connected
// components with a two-pass (Kosaraju-style) DFS — go() on the forward
// graph for finish times, gogo() on the reversed graph for labeling —
// then print the number of components with no incoming cross-component
// edge in the condensation.
int main()
{
	int i,j,t;
	scanf("%d",&t);
	while(t--)
	{
		memset(used,0,sizeof(used));
		scanf("%d%d",&n,&m);
		// e[] holds the forward edges; eg[] the same edges reversed.
		for(i=0;i<m;i++)
		{
			scanf("%d%d",&e[i].a,&e[i].b);
			eg[i] = (node){e[i].b,e[i].a};
		}
		// st[v]/str[v]: index of v's first edge in e[]/eg[] once sorted;
		// -1 marks "no edge seen yet".
		for(i=1;i<=n;i++) st[i] = str[i] = -1;
		std::sort(e,e+m);
		std::sort(eg,eg+m);
		for(i=0;i<m;i++)
		{
			if(st[e[i].a] == -1) st[e[i].a] = i;
			if(str[eg[i].a] == -1) str[eg[i].a] = i;
		}
		// Sentinel + backward fill so st[v]..st[v+1] always brackets
		// v's edge range, even for vertices with no outgoing edges.
		st[n+1] = str[n+1] = m;
		for(i=n;i>0;i--)
		{
			if(st[i] == -1) st[i] = st[i+1];
			if(str[i] == -1) str[i] = str[i+1];
		}
		// Pass 1: DFS on the forward graph, recording finish order in fin[].
		for(i=1;i<=n;i++) dfn[i] = fin[i] = -1;
		Time = counter = 0;
		for(i=1;i<=n;i++)
			if(dfn[i] == -1) go(i);
		// Pass 2: visit vertices in decreasing finish time on the
		// reversed graph; each gogo() call labels one component in used[].
		for(i=1;i<=n;i++) pp[i] = (node){fin[i],i};
		std::sort(pp+1,pp+1+n);
		memset(dfn,0,sizeof(dfn));
		for(i=n;i>0;i--)
			if(used[pp[i].b] == 0)
			{
				counter++;
				gogo(pp[i].b);
			}
		// Count components with zero in-degree in the condensation.
		// NOTE(review): parallel cross edges increment deg[] more than
		// once, which is harmless for a zero/non-zero test.
		memset(deg,0,sizeof(deg));
		for(i=0;i<m;i++)
			if(used[e[i].a] != used[e[i].b])
				deg[used[e[i].b]]++;
		int ans = 0;
		for(i=1;i<=counter;i++)
			if(deg[i] == 0) ans++;
		printf("%d\n",ans);
	}
	return 0;
}
// DFS over the eg[]/str[] edge arrays starting at `now`, stamping every
// vertex reached through still-unlabeled neighbors with the current
// value of the global `counter` (its component label in used[]).
void gogo(int now)
{
	int idx;

	used[now] = counter;
	for (idx = str[now]; idx < str[now + 1]; idx++) {
		if (used[eg[idx].b] == 0) {
			gogo(eg[idx].b);
		}
	}
}
// One round of scheduler: find a goroutine and run it. // The argument is the goroutine that was running before // schedule was called, or nil if this is the first call. // Never returns. static void schedule(G *gp) { int32 hz; schedlock(); if(gp != nil) { if(runtime·sched.predawn) runtime·throw("init rescheduling"); // Just finished running gp. gp->m = nil; runtime·sched.mcpu--; if(runtime·sched.mcpu < 0) runtime·throw("runtime·sched.mcpu < 0 in scheduler"); switch(gp->status){ case Grunnable: case Gdead: // Shouldn't have been running! runtime·throw("bad gp->status in sched"); case Grunning: gp->status = Grunnable; gput(gp); break; case Gmoribund: gp->status = Gdead; if(gp->lockedm) { gp->lockedm = nil; m->lockedg = nil; } gp->idlem = nil; unwindstack(gp, nil); gfput(gp); if(--runtime·sched.gcount == 0) runtime·exit(0); break; } if(gp->readyonstop){ gp->readyonstop = 0; readylocked(gp); } } // Find (or wait for) g to run. Unlocks runtime·sched. gp = nextgandunlock(); gp->readyonstop = 0; gp->status = Grunning; m->curg = gp; gp->m = m; // Check whether the profiler needs to be turned on or off. hz = runtime·sched.profilehz; if(m->profilehz != hz) runtime·resetcpuprofiler(hz); if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); } runtime·gogo(&gp->sched, 0); }