Example #1
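// Collect a stack trace of the goroutine we are currently running on
// into the Traceback buffer it was handed, then switch back to the
// goroutine that requested the trace (traceback->gp).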
static void
gtraceback(G* gp)
{
	Traceback* traceback;

	traceback = gp->traceback;
	gp->traceback = nil;
	traceback->c = runtime_callers(1, traceback->pcbuf,
		sizeof traceback->pcbuf / sizeof traceback->pcbuf[0]);
	runtime_gogo(traceback->gp);
}
Example #2
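// Print a stack trace and trailer for gp directly, then return control
// to the goroutine recorded in gp->dotraceback.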
static void
gtraceback(G* gp)
{
    G* ret;

    runtime_traceback(nil);
    runtime_goroutinetrailer(gp);
    ret = gp->dotraceback;
    gp->dotraceback = nil;
    runtime_gogo(ret);
}
Example #3
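// Print a stack trace for every goroutine other than the caller (me),
// skipping dead goroutines.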
void
runtime_tracebackothers(G * volatile me)
{
	G * volatile g;
	Traceback traceback;

	traceback.gp = me;
	for(g = runtime_allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		runtime_printf("\n");
		runtime_goroutineheader(g);

		// Our only mechanism for doing a stack trace is
		// _Unwind_Backtrace.  And that only works for the
		// current thread, not for other random goroutines.
		// So we need to switch context to the goroutine, get
		// the backtrace, and then switch back.

		// This means that if g is running or in a syscall, we
		// can't reliably print a stack trace.  FIXME.
		if(g->status == Gsyscall || g->status == Grunning) {
			runtime_printf("no stack trace available\n");
			runtime_goroutinetrailer(g);
			continue;
		}

		g->traceback = &traceback;

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&me->stack_context[0]);
#endif
		getcontext(&me->context);

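		// Coming straight from getcontext, g->traceback is still set,
		// so switch to g to run gtraceback (see Example #1).  When
		// gtraceback switches back via runtime_gogo, execution resumes
		// here with g->traceback cleared, and we fall through to print
		// the collected PCs.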
		if(g->traceback != nil) {
			runtime_gogo(g);
		}

		runtime_printtrace(traceback.pcbuf, traceback.c);
		runtime_goroutinetrailer(g);
	}
}
Example #4
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;
	uint32 v;

	schedlock();
	if(gp != nil) {
		// Just finished running gp.
		gp->m = nil;
		runtime_sched.grunning--;

		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");

		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime_throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
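			// Goroutine is exiting: mark it dead, detach any locked M,
			// clear its saved context, and recycle the G structure.
			// Exit the process once the last goroutine is gone.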
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			runtime_memclr(&gp->context, sizeof gp->context);
			gfput(gp);
			if(--runtime_sched.gcount == 0)
				runtime_exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime_sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else {
		runtime_throw("invalid m state in scheduler");
	}

	// Find (or wait for) g to run.  Unlocks runtime_sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime_sched.profilehz;
	if(m->profilehz != hz)
		runtime_resetcpuprofiler(hz);

	runtime_gogo(gp);
}