Example #1: runtime_notetsleep, a timed wait on a futex-backed Note that pauses CPU profiling while blocked
void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	if(runtime_atomicload((uint32*)&n->key) != 0)
		return;

	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
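A hedged usage sketch to show how the timed variant is typically driven: clear the note, hand it to another thread that will eventually call runtime_notewakeup on it, then wait with a deadline. The helper spawn_worker and the 100ms figure are hypothetical, not from this project.

static Note worker_done;

static void
wait_for_worker(void)
{
	runtime_noteclear(&worker_done);                  // key = 0: not signalled yet
	spawn_worker(&worker_done);                       // hypothetical helper; it should
	                                                  // eventually call runtime_notewakeup
	runtime_notetsleep(&worker_done, 100*1000*1000);  // wait at most ~100ms
	if(runtime_atomicload((uint32*)&worker_done.key) == 0) {
		// timed out: the note was never signalled
	}
}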
Example #2: runtime_entersyscall, releasing the goroutine's cpu slot when it enters a system call
void
runtime_entersyscall(void)
{
    uint32 v;

    if(m->profilehz > 0)
        runtime_setprof(false);

    // Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
    g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
                                   &g->gcnext_segment, &g->gcnext_sp,
                                   &g->gcinitial_sp);
#else
    g->gcnext_sp = (byte *) &v;
#endif

    // Save the registers in the g structure so that any pointers
    // held in registers will be seen by the garbage collector.
    // We could use getcontext here, but setjmp is more efficient
    // because it doesn't need to save the signal mask.
    setjmp(g->gcregs);

    g->status = Gsyscall;

    // Fast path.
    // The slow path inside the schedlock/schedunlock will get
    // through without stopping if it does:
    //	mcpu--
    //	gwait not true
    //	waitstop && mcpu <= mcpumax not true
    // If we can do the same with a single atomic add,
    // then we can skip the locks.
    v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
    if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
        return;

    schedlock();
    v = runtime_atomicload(&runtime_sched.atomic);
    if(atomic_gwaiting(v)) {
        matchmg();
        v = runtime_atomicload(&runtime_sched.atomic);
    }
    if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
        runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
        runtime_notewakeup(&runtime_sched.stopped);
    }

    schedunlock();
}
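The fast path above works because several scheduler counters and flags share the single word runtime_sched.atomic, so one atomic add both decrements mcpu and returns the current gwaiting/waitstop bits. The field widths and shift values below are illustrative assumptions; only the accessor names appear in the code above.

enum {
	mcpuWidth     = 15,
	mcpuMask      = (1<<mcpuWidth) - 1,
	mcpuShift     = 0,                            // m's currently running Go code
	mcpumaxShift  = mcpuShift + mcpuWidth,        // limit derived from GOMAXPROCS
	gwaitingShift = mcpumaxShift + mcpuWidth,     // 1 bit: g's are waiting to run
	waitstopShift = gwaitingShift + 1,            // 1 bit: stoptheworld is waiting
};

#define atomic_mcpu(v)      (((v)>>mcpuShift) & mcpuMask)
#define atomic_mcpumax(v)   (((v)>>mcpumaxShift) & mcpuMask)
#define atomic_gwaiting(v)  (((v)>>gwaitingShift) & 1)
#define atomic_waitstop(v)  (((v)>>waitstopShift) & 1)

With a layout like this, runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift) is exactly the "mcpu--" from the comment, applied to the packed word.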
Example #3: runtime_gotraceback, parsing and caching the GOTRACEBACK environment setting
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
//	GOTRACEBACK=0   suppress all tracebacks
//	GOTRACEBACK=1   default behavior - show tracebacks but exclude runtime frames
//	GOTRACEBACK=2   show tracebacks including runtime frames
//	GOTRACEBACK=crash   show tracebacks including runtime frames, then crash (core dump etc)
int32
runtime_gotraceback(bool *crash)
{
	const byte *p;
	uint32 x;

	if(crash != nil)
		*crash = false;
	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
	x = runtime_atomicload(&traceback_cache);
	if(x == ~(uint32)0) {
		p = runtime_getenv("GOTRACEBACK");
		if(p == nil)
			p = (const byte*)"";
		if(p[0] == '\0')
			x = 1<<1;
		else if(runtime_strcmp((const char *)p, "crash") == 0)
			x = (2<<1) | 1;
		else
			x = runtime_atoi(p)<<1;
		runtime_atomicstore(&traceback_cache, x);
	}
	if(crash != nil)
		*crash = x&1;
	return x>>1;
}
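The cache packs the crash flag into bit 0 and the traceback level into the higher bits, which is why "crash" stores (2<<1)|1, the default stores 1<<1, and the function returns x>>1. A hedged sketch of a caller on the panic path; runtime_crash and runtime_exit are assumed helpers here, and the actual printing is elided.

static void
dopanic_sketch(void)
{
	bool crash;
	int32 level;

	level = runtime_gotraceback(&crash);
	if(level > 0) {
		// print goroutine tracebacks; a level of 2 or more would
		// also include runtime frames
	}
	if(crash)
		runtime_crash();   // assumed helper: abort so the OS can write a core dump
	runtime_exit(2);
}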
Example #4: runtime_notesleep, a blocking wait on a futex-backed Note that pauses CPU profiling while blocked
void
runtime_notesleep(Note *n)
{
	if(runtime_m()->profilehz > 0)
		runtime_setprof(false);
	while(runtime_atomicload((uint32*)&n->key) == 0)
		runtime_futexsleep((uint32*)&n->key, 0, -1);
	if(runtime_m()->profilehz > 0)
		runtime_setprof(true);
}
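The wakeup side of this futex-based Note is the mirror image: publish a nonzero key, then wake a sleeper parked on it. A minimal sketch, assuming runtime_futexwakeup wraps FUTEX_WAKE; the real implementation in this runtime may differ in detail.

void
notewakeup_sketch(Note *n)
{
	runtime_atomicstore((uint32*)&n->key, 1);   // notesleep's load now sees nonzero
	runtime_futexwakeup((uint32*)&n->key, 1);   // wake one thread blocked in futexsleep
}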
Example #5: runtime_notetsleep, a timed wait on a Note (variant without the profiling pause)
void
runtime_notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	if(runtime_atomicload(&n->key) != 0)
		return;

	deadline = runtime_nanotime() + ns;
	for(;;) {
		runtime_futexsleep(&n->key, 0, ns);
		if(runtime_atomicload(&n->key) != 0)
			return;
		now = runtime_nanotime();
		if(now >= deadline)
			return;
		ns = deadline - now;
	}
}
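The deadline arithmetic above only needs a monotonic nanosecond clock. A standalone sketch of such a clock in plain C, for illustration; the actual runtime_nanotime in this runtime is implemented differently.

#include <stdint.h>
#include <time.h>

static int64_t
nanotime_sketch(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);        // monotonic, unaffected by wall-clock jumps
	return (int64_t)ts.tv_sec*1000000000LL + ts.tv_nsec;
}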
Example #6: runtime_gomaxprocsfunc, the implementation behind runtime.GOMAXPROCS
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
int32
runtime_gomaxprocsfunc(int32 n)
{
	int32 ret;
	uint32 v;

	schedlock();
	ret = runtime_gomaxprocs;
	if(n <= 0)
		n = ret;
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime_gomaxprocs = n;
	if(runtime_gomaxprocs > 1)
		runtime_singleproc = false;
	if(runtime_gcwaiting != 0) {
		if(atomic_mcpumax(runtime_sched.atomic) != 1)
			runtime_throw("invalid mcpumax during gc");
		schedunlock();
		return ret;
	}

	setmcpumax(n);

	// If there are now fewer allowed procs
	// than procs running, stop.
	v = runtime_atomicload(&runtime_sched.atomic);
	if((int32)atomic_mcpu(v) > n) {
		schedunlock();
		runtime_gosched();
		return ret;
	}
	// handle more procs
	matchmg();
	schedunlock();
	return ret;
}
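setmcpumax has to rewrite only the mcpumax field of the packed runtime_sched.atomic word while other threads may be updating the neighbouring fields, so a compare-and-swap loop is the natural shape. A sketch under that assumption, reusing the illustrative mcpuMask and mcpumaxShift from the packed-word sketch after Example #2.

static void
setmcpumax_sketch(uint32 n)
{
	uint32 v, w;

	for(;;) {
		v = runtime_sched.atomic;
		w = (v & ~(mcpuMask<<mcpumaxShift)) | (n<<mcpumaxShift);
		if(runtime_cas(&runtime_sched.atomic, v, w))   // retry if another thread raced
			break;
	}
}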
Example #7: runtime_notesleep, a blocking wait on a Note (minimal variant)
void
runtime_notesleep(Note *n)
{
	while(runtime_atomicload(&n->key) == 0)
		runtime_futexsleep(&n->key, 0, -1);
}
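runtime_futexsleep is a thin wrapper around the Linux futex(2) system call: FUTEX_WAIT atomically rechecks that *addr still equals val and, only if it does, sleeps until a FUTEX_WAKE on the same address, a signal, or the timeout. A standalone plain-C sketch of the idea; the real wrapper in this runtime differs in details such as error handling.

#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static void
futexsleep_sketch(uint32_t *addr, uint32_t val, int64_t ns)
{
	struct timespec ts, *tsp;

	if(ns < 0)
		tsp = NULL;                         // no timeout: sleep until woken
	else {
		ts.tv_sec = ns / 1000000000LL;
		ts.tv_nsec = ns % 1000000000LL;
		tsp = &ts;
	}
	// Returns immediately if *addr != val; otherwise blocks until a
	// FUTEX_WAKE on addr, a signal, or expiry of the timeout.
	syscall(SYS_futex, addr, FUTEX_WAIT, val, tsp, NULL, 0);
}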
Example #8: nextgandunlock, choosing the next goroutine for an m, with scavenger-aware deadlock detection
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime_main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime_sched.grunning == 0) ||
	   (scvg != nil && runtime_sched.grunning == 1 && runtime_sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime_throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime_atomicload(&runtime_sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
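The note this function parks on, m->havenextg, is signalled by whichever scheduler thread picks a goroutine for this m. A hedged sketch of that handoff, simplified from what the real mnextg in this scheduler does.

static void
mnextg_sketch(M *mp, G *gp)
{
	runtime_sched.grunning++;
	mp->nextg = gp;                         // publish the chosen g
	if(mp->waitnextg) {
		mp->waitnextg = 0;
		runtime_notewakeup(&mp->havenextg); // unblock the notesleep above
	}
}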
Example #9: nextgandunlock, a variant with a simpler deadlock check (no scavenger special case)
File: proc.c Project: Sunmonds/gcc
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	v = runtime_atomicload(&runtime_sched.atomic);
	if(runtime_sched.grunning == 0)
		runtime_throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
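For context on the mput(m) call above: the idle-m queue is just a singly linked list threaded through the m's, protected by the sched lock. A hedged sketch; the field names schedlink, mhead and mwait are assumptions for illustration.

static void
mput_sketch(M *mp)
{
	mp->schedlink = runtime_sched.mhead;    // push onto the idle list
	runtime_sched.mhead = mp;
	runtime_sched.mwait++;
}

static M*
mget_sketch(void)
{
	M *mp;

	if((mp = runtime_sched.mhead) != nil) { // pop, if any m is waiting
		runtime_sched.mhead = mp->schedlink;
		runtime_sched.mwait--;
	}
	return mp;
}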