Example 1
void
runtime_entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime_setprof(false);

	// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
	g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
				       &g->gcnext_segment, &g->gcnext_sp,
				       &g->gcinitial_sp);
#else
	g->gcnext_sp = (byte *) &v;
#endif

	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	// We could use getcontext here, but setjmp is more efficient
	// because it doesn't need to save the signal mask.
	setjmp(g->gcregs);

	g->status = Gsyscall;

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime_atomicload(&runtime_sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}

	schedunlock();
}
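
The fast path above hinges on runtime_sched.atomic: the scheduler packs mcpu, mcpumax, and the gwaiting/waitstop flags into one word, so a single runtime_xadd both decrements mcpu and returns a consistent snapshot of everything else. Below is a minimal, self-contained sketch of that packing; the shift values and helper names mirror the ones used above but are assumptions here, since the real constants are defined elsewhere in proc.c.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, mirroring proc.c: bits 0-14 mcpu, bits 15-29 mcpumax,
   bit 30 waitstop, bit 31 gwaiting.  The real constants may differ. */
enum {
	mcpuWidth     = 15,
	mcpuMask      = (1 << mcpuWidth) - 1,
	mcpuShift     = 0,
	mcpumaxShift  = mcpuShift + mcpuWidth,
	waitstopShift = mcpumaxShift + mcpuWidth,
	gwaitingShift = waitstopShift + 1
};

#define atomic_mcpu(v)     (((v) >> mcpuShift) & mcpuMask)
#define atomic_mcpumax(v)  (((v) >> mcpumaxShift) & mcpuMask)
#define atomic_waitstop(v) (((v) >> waitstopShift) & 1)
#define atomic_gwaiting(v) (((v) >> gwaitingShift) & 1)

/* Analog of runtime_xadd: atomically add delta, return the new value. */
static uint32_t
xadd(uint32_t *p, int32_t delta)
{
	return __sync_add_and_fetch(p, (uint32_t)delta);
}

int
main(void)
{
	uint32_t atomic = 0;
	uint32_t v;

	xadd(&atomic, 4 << mcpumaxShift);  /* mcpumax = 4 */
	xadd(&atomic, 2 << mcpuShift);     /* two m's running Go code */

	/* entersyscall: one atomic add releases a cpu slot and
	   snapshots the flags the fast path needs to inspect. */
	v = xadd(&atomic, -1 << mcpuShift);
	printf("mcpu=%u mcpumax=%u gwaiting=%u waitstop=%u\n",
	       atomic_mcpu(v), atomic_mcpumax(v),
	       atomic_gwaiting(v), atomic_waitstop(v));
	return 0;
}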
Example 2
// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil && canaddmcpu()) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime_printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id,
				g->idlem->idleg->goid, g->goid);
			runtime_throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	g->schedlink = nil;
	if(runtime_sched.ghead == nil)
		runtime_sched.ghead = g;
	else
		runtime_sched.gtail->schedlink = g;
	runtime_sched.gtail = g;

	// increment gwait.
	// if it transitions to nonzero, set atomic gwaiting bit.
	if(runtime_sched.gwait++ == 0)
		runtime_xadd(&runtime_sched.atomic, 1<<gwaitingShift);
}
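
gput's queue is an intrusive singly-linked FIFO: each G carries its own schedlink field, so enqueueing never allocates, and the ghead/gtail pair gives O(1) append and pop. A stripped-down sketch of the same shape (Node stands in for G; all names here are illustrative):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for G: the link field lives in the node itself. */
typedef struct Node Node;
struct Node {
	int   id;
	Node *schedlink;
};

static Node *ghead, *gtail;

/* Append at the tail, as gput does once the wired/idle cases are ruled out. */
static void
put(Node *n)
{
	n->schedlink = NULL;
	if(ghead == NULL)
		ghead = n;
	else
		gtail->schedlink = n;
	gtail = n;
}

/* Pop from the head, as gget does. */
static Node*
get(void)
{
	Node *n = ghead;
	if(n) {
		ghead = n->schedlink;
		if(ghead == NULL)
			gtail = NULL;
	}
	return n;
}

int
main(void)
{
	Node a = {1, NULL}, b = {2, NULL}, c = {3, NULL};
	put(&a); put(&b); put(&c);
	for(Node *n; (n = get()) != NULL; )
		printf("g%d\n", n->id);  /* prints 1, 2, 3: FIFO order */
	return 0;
}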
Example 3
// Allocate a new g, with a stack big enough for stacksize bytes.
G*
runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
{
	G *newg;

	newg = runtime_malloc(sizeof(G));
	if(stacksize >= 0) {
#if USING_SPLIT_STACK
		int dont_block_signals = 0;

		*ret_stack = __splitstack_makecontext(stacksize,
						      &newg->stack_context[0],
						      ret_stacksize);
		__splitstack_block_signals_context(&newg->stack_context[0],
						   &dont_block_signals, nil);
#else
		*ret_stack = runtime_mallocgc(stacksize, FlagNoProfiling|FlagNoGC, 0, 0);
		*ret_stacksize = stacksize;
		newg->gcinitial_sp = *ret_stack;
		newg->gcstack_size = stacksize;
		runtime_xadd(&runtime_stacks_sys, stacksize);
#endif
	}
	return newg;
}
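
When split stacks are unavailable, the #else branch above allocates one contiguous block that the collector must not free or profile (FlagNoProfiling|FlagNoGC) and records its base and size so the stack can later be scanned conservatively. A sketch of just that bookkeeping, with plain malloc standing in for runtime_mallocgc:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative subset of G's stack bookkeeping fields. */
typedef struct G G;
struct G {
	unsigned char *gcinitial_sp;  /* base of the stack block */
	size_t         gcstack_size;  /* its size in bytes */
};

/* Allocate a goroutine plus a fixed-size stack, mirroring the
   non-split-stack branch of runtime_malg. */
static G*
malg(size_t stacksize, unsigned char **ret_stack, size_t *ret_stacksize)
{
	G *newg = calloc(1, sizeof(G));
	if(newg == NULL)
		return NULL;
	*ret_stack = malloc(stacksize);   /* stand-in for runtime_mallocgc */
	*ret_stacksize = stacksize;
	newg->gcinitial_sp = *ret_stack;  /* remembered for the collector */
	newg->gcstack_size = stacksize;
	return newg;
}

int
main(void)
{
	unsigned char *sp;
	size_t size;
	G *g = malg(8192, &sp, &size);
	printf("stack at %p, %zu bytes\n", (void*)g->gcinitial_sp, size);
	free(sp);
	free(g);
	return 0;
}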
Example 4
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime_exitsyscall(void)
{
	G *gp;
	uint32 v;

	// Fast path.
	// If we can do the mcpu++ bookkeeping and
	// find that we still have mcpu <= mcpumax, then we can
	// start executing Go code immediately, without having to
	// schedlock/schedunlock.
	// Also do fast return if any locks are held, so that
	// panic code can use syscalls to open a file.
	gp = g;
	v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift));
	if((m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) || m->locks > 0) {
		// There's a cpu for us, so we can run.
		gp->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
#ifdef USING_SPLIT_STACK
		gp->gcstack = nil;
#endif
		gp->gcnext_sp = nil;
		runtime_memclr(&gp->gcregs, sizeof gp->gcregs);

		if(m->profilehz > 0)
			runtime_setprof(true);
		return;
	}

	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	gp->readyonstop = 1;

	// All the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime_sched.mcpu++ above.
	runtime_gosched();

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
#ifdef USING_SPLIT_STACK
	gp->gcstack = nil;
#endif
	gp->gcnext_sp = nil;
	runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
}
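
entersyscall and exitsyscall form an optimistic-reservation pair: bump the shared counter first, inspect the snapshot the atomic add returns, and only fall back to the lock-protected slow path when the reservation fails. A minimal sketch of that shape with GCC atomics (all names here are invented for illustration; the real slow path parks the m in the scheduler rather than just undoing the claim):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cpus_in_use;                 /* analog of mcpu */
static const uint32_t cpus_max = 4;          /* analog of mcpumax */
static pthread_mutex_t sched = PTHREAD_MUTEX_INITIALIZER;

/* Try to claim a cpu slot.  Returns 1 on the lock-free fast path. */
static int
claim_fast(void)
{
	/* Optimistically reserve, then validate the returned snapshot. */
	uint32_t v = __sync_add_and_fetch(&cpus_in_use, 1);
	if(v <= cpus_max)
		return 1;                     /* reservation stands */

	/* Over the limit: take the slow path under the lock, where the
	   real scheduler would queue this m and put it to sleep. */
	pthread_mutex_lock(&sched);
	__sync_sub_and_fetch(&cpus_in_use, 1);
	pthread_mutex_unlock(&sched);
	return 0;
}

int
main(void)
{
	for(int i = 0; i < 6; i++)
		printf("claim %d -> %s\n", i, claim_fast() ? "fast" : "slow");
	return 0;
}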
Example 5
File: panic.c Project: 0day-ci/gcc
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
	G *g;
	static bool didothers;
	bool crash;
	int32 t;

	g = runtime_g();
	if(g->sig != 0)
		runtime_printf("[signal %x code=%p addr=%p]\n",
			       g->sig, (void*)g->sigcode0, (void*)g->sigcode1);

	if((t = runtime_gotraceback(&crash)) > 0){
		if(g != runtime_m()->g0) {
			runtime_printf("\n");
			runtime_goroutineheader(g);
			runtime_traceback();
			runtime_printcreatedby(g);
		} else if(t >= 2 || runtime_m()->throwing > 0) {
			runtime_printf("\nruntime stack:\n");
			runtime_traceback();
		}
		if(!didothers) {
			didothers = true;
			runtime_tracebackothers(g);
		}
	}
	runtime_unlock(&paniclk);
	if(runtime_xadd(&runtime_panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime_lock(&deadlock);
		runtime_lock(&deadlock);
	}
	
	if(crash)
		runtime_crash();

	runtime_exit(2);
}
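
The back-to-back runtime_lock(&deadlock) calls near the end are deliberate: relocking a non-recursive lock blocks the thread forever without chewing cpu, which is exactly what a panicking m wants once another m owns the output. The runtime's Lock is non-recursive; to get the same guaranteed behavior from POSIX mutexes you must request PTHREAD_MUTEX_NORMAL explicitly, as in this sketch (build with -pthread):

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutex_t deadlock;
	pthread_mutexattr_t attr;

	/* PTHREAD_MUTEX_NORMAL guarantees self-deadlock on relock,
	   rather than the undefined behavior of the default type. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
	pthread_mutex_init(&deadlock, &attr);

	printf("parking this thread forever\n");
	pthread_mutex_lock(&deadlock);
	pthread_mutex_lock(&deadlock);  /* never returns */
	return 0;
}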
Example 6
// Get from `g' queue.  Sched must be locked.
static G*
gget(void)
{
	G *g;

	g = runtime_sched.ghead;
	if(g){
		runtime_sched.ghead = g->schedlink;
		if(runtime_sched.ghead == nil)
			runtime_sched.gtail = nil;
		// decrement gwait.
		// if it transitions to zero, clear atomic gwaiting bit.
		if(--runtime_sched.gwait == 0)
			runtime_xadd(&runtime_sched.atomic, -1<<gwaitingShift);
	} else if(m->idleg != nil) {
		g = m->idleg;
		m->idleg = nil;
	}
	return g;
}
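
gput and gget keep two views of queue occupancy in sync: gwait is the exact count, valid only under the sched lock, while the gwaiting bit in runtime_sched.atomic mirrors just its zero/nonzero state for lock-free readers such as entersyscall's fast path. A sketch of that mirroring; the bit position is an assumption:

#include <stdint.h>
#include <stdio.h>

enum { gwaitingShift = 31 };  /* assumed; the real constant is in proc.c */

static uint32_t atomic_word;  /* lock-free view, read without the lock */
static uint32_t gwait;        /* exact count, valid under the sched lock */

/* Call with the sched lock held, as gput does. */
static void
queue_grew(void)
{
	if(gwait++ == 0)  /* 0 -> 1 transition: publish the bit */
		__sync_add_and_fetch(&atomic_word, 1u << gwaitingShift);
}

/* Call with the sched lock held, as gget does. */
static void
queue_shrank(void)
{
	if(--gwait == 0)  /* 1 -> 0 transition: retract the bit */
		__sync_sub_and_fetch(&atomic_word, 1u << gwaitingShift);
}

int
main(void)
{
	queue_grew();
	queue_grew();
	queue_shrank();
	printf("gwait=%u gwaiting=%u\n", gwait,
	       (atomic_word >> gwaitingShift) & 1);  /* 1 1 */
	queue_shrank();
	printf("gwait=%u gwaiting=%u\n", gwait,
	       (atomic_word >> gwaitingShift) & 1);  /* 0 0 */
	return 0;
}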
Example 7
File: panic.c Project: 0day-ci/gcc
void
runtime_startpanic(void)
{
	M *m;

	m = runtime_m();
	if(runtime_mheap.cachealloc.size == 0) { // very early
		runtime_printf("runtime: panic before malloc heap initialized\n");
		m->mallocing = 1; // tell rest of panic not to try to malloc
	} else if(m->mcache == nil) // can happen if called from signal handler or throw
		m->mcache = runtime_allocmcache();
	switch(m->dying) {
	case 0:
		m->dying = 1;
		if(runtime_g() != nil)
			runtime_g()->writebuf = nil;
		runtime_xadd(&runtime_panicking, 1);
		runtime_lock(&paniclk);
		if(runtime_debug.schedtrace > 0 || runtime_debug.scheddetail > 0)
			runtime_schedtrace(true);
		runtime_freezetheworld();
		return;
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		m->dying = 2;
		runtime_printf("panic during panic\n");
		runtime_dopanic(0);
		runtime_exit(3);
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		m->dying = 3;
		runtime_printf("stack trace unavailable\n");
		runtime_exit(4);
	default:
		// Can't even print!  Just exit.
		runtime_exit(5);
	}
}
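
m->dying is a small state machine guarding against recursive failure: each nested panic does strictly less work than the last, so a fault inside the panic handler cannot recurse forever. The pattern transfers to any fatal-error path that might itself fault; a compact sketch (the exit codes mirror the ones above):

#include <stdio.h>
#include <stdlib.h>

static int dying;

/* Escalating fatal-error entry point: each re-entry does strictly
   less work, so a fault in the handler cannot recurse forever. */
static void
start_fatal(void)
{
	switch(dying) {
	case 0:
		dying = 1;        /* first failure: full report is allowed */
		return;
	case 1:
		dying = 2;        /* the handler itself failed */
		printf("panic during panic\n");
		exit(3);
	case 2:
		dying = 3;        /* even the report failed */
		printf("stack trace unavailable\n");
		exit(4);
	default:
		exit(5);          /* cannot even print; just exit */
	}
}

int
main(void)
{
	start_fatal();            /* first call returns; a report would follow */
	start_fatal();            /* a nested failure exits with code 3 here */
	return 0;
}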
Example 8
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
	// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime_main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime_sched.grunning == 0) ||
	   (scvg != nil && runtime_sched.grunning == 1 && runtime_sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime_throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
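
The parking at the end relies on the runtime's Note primitive: noteclear resets it, notesleep blocks, notewakeup releases the sleeper, and the handoff is safe even if the wakeup arrives before the sleep. A Note can be approximated with a mutex, a condition variable, and a flag, as in this sketch (the real implementation uses futexes or semaphores; build with -pthread):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t mu;
	pthread_cond_t  cond;
	int             woken;
} Note;

static void noteclear(Note *n) { n->woken = 0; }

static void
notesleep(Note *n)
{
	pthread_mutex_lock(&n->mu);
	while(!n->woken)                     /* guards against spurious wakeups */
		pthread_cond_wait(&n->cond, &n->mu);
	pthread_mutex_unlock(&n->mu);
}

static void
notewakeup(Note *n)
{
	pthread_mutex_lock(&n->mu);
	n->woken = 1;                        /* flag survives an early wakeup */
	pthread_cond_signal(&n->cond);
	pthread_mutex_unlock(&n->mu);
}

static Note havenextg = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void*
waker(void *arg)
{
	(void)arg;
	notewakeup(&havenextg);              /* analog of mnextg handing over a g */
	return NULL;
}

int
main(void)
{
	pthread_t t;
	noteclear(&havenextg);
	pthread_create(&t, NULL, waker, NULL);
	notesleep(&havenextg);               /* analog of waiting on m->havenextg */
	pthread_join(t, NULL);
	printf("woke with next g\n");
	return 0;
}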
Example 9
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;
	uint32 v;

	schedlock();
	if(gp != nil) {
		// Just finished running gp.
		gp->m = nil;
		runtime_sched.grunning--;

		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");

		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime_throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			runtime_memclr(&gp->context, sizeof gp->context);
			gfput(gp);
			if(--runtime_sched.gcount == 0)
				runtime_exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime_sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else {
		runtime_throw("invalid m state in scheduler");
	}

	// Find (or wait for) g to run.  Unlocks runtime_sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime_sched.profilehz;
	if(m->profilehz != hz)
		runtime_resetcpuprofiler(hz);

	runtime_gogo(gp);
}
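
schedule's switch is driven by the goroutine status field, and the legal post-run transitions are few: Grunning goes back to Grunnable and is requeued, Gmoribund becomes Gdead and is recycled, and anything else is a fatal inconsistency. A sketch of just that transition table (the enum lists the G states as used by this scheduler generation):

#include <stdio.h>

/* Goroutine states as used by this scheduler generation. */
typedef enum {
	Gidle, Grunnable, Grunning, Gsyscall, Gwaiting, Gmoribund, Gdead
} Gstatus;

/* What schedule() does to a g that just stopped running:
   requeue it, retire it, or reject an impossible state. */
static const char*
after_run(Gstatus *s)
{
	switch(*s) {
	case Grunning:
		*s = Grunnable;
		return "requeued";
	case Gmoribund:
		*s = Gdead;
		return "retired";
	default:
		return "bad gp->status in sched";  /* schedule() would throw */
	}
}

int
main(void)
{
	Gstatus s = Grunning;
	printf("%s\n", after_run(&s));  /* requeued */
	s = Gmoribund;
	printf("%s\n", after_run(&s));  /* retired */
	return 0;
}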
Example 10
File: proc.c Project: Sunmonds/gcc
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
	// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	v = runtime_atomicload(&runtime_sched.atomic);
	if(runtime_sched.grunning == 0)
		runtime_throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}