Example #1
int32
runtime·helpgc(bool *extra)
{
	M *mp;
	int32 n, max;

	// Figure out how many CPUs to use.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	max = runtime·gomaxprocs;
	if(max > runtime·ncpu)
		max = runtime·ncpu;
	if(max > MaxGcproc)
		max = MaxGcproc;

	// We're going to use one CPU no matter what.
	// Figure out the max number of additional CPUs.
	max--;

	runtime·lock(&runtime·sched);
	n = 0;
	while(n < max && (mp = mget(nil)) != nil) {
		n++;
		mp->helpgc = 1;
		mp->waitnextg = 0;
		runtime·notewakeup(&mp->havenextg);
	}
	runtime·unlock(&runtime·sched);
	if(extra)
		*extra = n != max;
	return n;
}
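
Every example on this page pivots on the runtime's note primitive: one thread parks in runtime·notesleep, another releases it with runtime·notewakeup, and runtime·noteclear re-arms the note. As a mental model only (the real runtime builds notes on futexes or semaphores, not on a mutex/condvar pair), a note behaves roughly like the portable C sketch below; the note_* names are invented for illustration.

#include <pthread.h>
#include <stdbool.h>

typedef struct Note {
	pthread_mutex_t mu;
	pthread_cond_t  cond;
	bool            woken;	/* one-shot flag, set by note_wakeup */
} Note;

/* Arm (or re-arm) the note so a later note_sleep will block. */
static void
note_clear(Note *n)
{
	pthread_mutex_init(&n->mu, NULL);
	pthread_cond_init(&n->cond, NULL);
	n->woken = false;
}

/* Wake the (single) sleeper, if any; the wakeup is remembered
 * if it arrives before the corresponding note_sleep. */
static void
note_wakeup(Note *n)
{
	pthread_mutex_lock(&n->mu);
	n->woken = true;
	pthread_cond_signal(&n->cond);
	pthread_mutex_unlock(&n->mu);
}

/* Block until the note is signalled. */
static void
note_sleep(Note *n)
{
	pthread_mutex_lock(&n->mu);
	while(!n->woken)
		pthread_cond_wait(&n->cond, &n->mu);
	pthread_mutex_unlock(&n->mu);
}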
Example #2
File: proc.c Project: machinaut/go
void
runtime·entersyscall(void)
{
	if(runtime·sched.predawn)
		return;
	schedlock();
	g->status = Gsyscall;
	runtime·sched.mcpu--;
	runtime·sched.msyscall++;
	if(runtime·sched.gwait != 0)
		matchmg();

	if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		runtime·sched.waitstop = 0;
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Leave SP around for gc and traceback.
	// Do before schedunlock so that gc
	// never sees Gsyscall with wrong stack.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		runtime·printf("entersyscall inconsistent %p [%p,%p]\n", g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}
	schedunlock();
}
Example #3
// Unlock the scheduler.
static void
schedunlock(void)
{
	M *m;

	m = mwakeup;
	mwakeup = nil;
	runtime·unlock(&runtime·sched);
	if(m != nil)
		runtime·notewakeup(&m->havenextg);
}
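
Example #3 shows a deliberate idiom: the M to wake is recorded in mwakeup while the sched lock is held (mnextg, Example #5, is what sets it), and the notewakeup itself runs only after the lock is dropped, so the woken thread does not immediately block on the very lock it was woken under. A minimal sketch of the same idiom, reusing the Note analogue above (pending_wakeup and sched_unlock_and_wake are invented names):

static Note *pending_wakeup;	/* analogue of mwakeup; guarded by the lock */

static void
sched_unlock_and_wake(pthread_mutex_t *lk)
{
	Note *n;

	n = pending_wakeup;	/* read while the lock is still held */
	pending_wakeup = NULL;
	pthread_mutex_unlock(lk);
	if(n != NULL)
		note_wakeup(n);	/* signal outside the critical section */
}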
Example #4
File: proc.c Project: machinaut/go
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS gs are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;

	if(runtime·sched.mcpu < 0)
		runtime·throw("negative runtime·sched.mcpu");

	// If there is a g waiting as m->nextg,
	// mnextg took care of the runtime·sched.mcpu++.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0)
			matchmg();
	} else {
		// Look for work on global queue.
		while(runtime·sched.mcpu < runtime·sched.mcpumax && (gp=gget()) != nil) {
			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.mcpu++;		// this m will run gp
			schedunlock();
			return gp;
		}
		// Otherwise, wait on global m queue.
		mput(m);
	}
	if(runtime·sched.mcpu == 0 && runtime·sched.msyscall == 0)
		runtime·throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);
	if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		runtime·sched.waitstop = 0;
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
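
The tail of nextgandunlock is the parking half of a handoff protocol; the waking half is mnextg (Example #5). The parked m sets waitnextg, sleeps on its private havenextg note, and the waking side writes m->nextg before signalling, so the g is guaranteed to be in place when notesleep returns. Reduced to its core with the Note analogue above (park_m and hand_g are invented names; the real code does all of this under the sched lock):

struct G;	/* opaque goroutine handle for the sketch */

typedef struct M {
	struct G *nextg;	/* work handed to this m; written before wakeup */
	int       waitnextg;	/* nonzero while parked */
	Note      havenextg;	/* private note this m sleeps on */
} M;

/* Parking side: publish intent, sleep, consume the handed-off g. */
static struct G*
park_m(M *m)
{
	m->nextg = NULL;
	m->waitnextg = 1;
	note_clear(&m->havenextg);
	/* ...drop the scheduler lock here, as nextgandunlock does... */
	note_sleep(&m->havenextg);
	return m->nextg;	/* stored by hand_g before the wakeup */
}

/* Waking side (cf. mnextg): store the g first, then signal. */
static void
hand_g(M *m, struct G *gp)
{
	m->nextg = gp;
	if(m->waitnextg) {
		m->waitnextg = 0;
		note_wakeup(&m->havenextg);
	}
}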
Example #5
// Pass g to m for running.
// Caller has already incremented mcpu.
static void
mnextg(M *m, G *g)
{
	runtime·sched.grunning++;
	m->nextg = g;
	if(m->waitnextg) {
		m->waitnextg = 0;
		if(mwakeup != nil)
			runtime·notewakeup(&mwakeup->havenextg);
		mwakeup = m;
	}
}
Example #6
// Pass g to m for running.
// Caller has already incremented mcpu.
static void
mnextg(M *mp, G *gp)
{
    runtime·sched.grunning++;
    mp->nextg = gp;
    if(mp->waitnextg) {
        mp->waitnextg = 0;
        if(mwakeup != nil)
            runtime·notewakeup(&mwakeup->havenextg);
        mwakeup = mp;
    }
}
Example #7
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
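
The fast path above works because everything it must consult — mcpu, mcpumax, and the gwaiting/waitstop flags — is packed into the single word runtime·sched.atomic, so one xadd both decrements mcpu and returns a consistent snapshot of the rest. A rough C11 model of that packing (the shifts and field widths here are assumptions for the sketch, not verified against proc.c):

#include <stdatomic.h>
#include <stdint.h>

enum {
	mcpuShift     = 0,	/* bits 0-14: mcpu */
	mcpumaxShift  = 15,	/* bits 15-29: mcpumax */
	gwaitingShift = 30,	/* bit 30: a g is waiting to run */
	waitstopShift = 31,	/* bit 31: stoptheworld is waiting */
};

#define atomic_mcpu(v)		(((v)>>mcpuShift) & 0x7fff)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift) & 0x7fff)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift) & 1)
#define atomic_waitstop(v)	(((v)>>waitstopShift) & 1)

static _Atomic uint32_t sched_atomic;

/* Fast path of entersyscall: one atomic add releases a cpu slot and
 * yields the new packed state in the same step.  fetch_add returns the
 * old value, so add the delta again to get what runtime·xadd returns. */
static int
release_cpu_fast(void)
{
	uint32_t v;

	v = atomic_fetch_add(&sched_atomic, -(1u<<mcpuShift)) - (1u<<mcpuShift);
	return !atomic_gwaiting(v) &&
	       (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v));
}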
Example #8
void
runtime·helpgc(int32 nproc)
{
    M *mp;
    int32 n;

    runtime·lock(&runtime·sched);
    for(n = 1; n < nproc; n++) { // one M is currently running
        mp = mget(nil);
        if(mp == nil)
            runtime·throw("runtime·gcprocs inconsistency");
        mp->helpgc = 1;
        mp->waitnextg = 0;
        runtime·notewakeup(&mp->havenextg);
    }
    runtime·unlock(&runtime·sched);
}
Example #9
// Source: src/pkg/runtime/time.goc
static void
addtimer(Timer *t)
{
	int32 n;
	Timer **nt;

	if(t->when < 0)
		t->when = (1LL<<63)-1;	// clamp negative deadlines to "never"

	if(timers.len >= timers.cap) {
		// Grow the heap's backing array by 1.5x (at least 16).
		n = 16;
		if(n <= timers.cap)
			n = timers.cap*3 / 2;
		nt = runtime·malloc(n*sizeof nt[0]);
		runtime·memmove(nt, timers.t, timers.len*sizeof nt[0]);
		runtime·free(timers.t);
		timers.t = nt;
		timers.cap = n;
	}
	t->i = timers.len++;
	timers.t[t->i] = t;
	siftup(t->i);
	if(t->i == 0) {
		// siftup moved the new timer to the top: new earliest deadline.
		if(timers.sleeping) {
			timers.sleeping = false;
			runtime·notewakeup(&timers.waitnote);
		}
		if(timers.rescheduling) {
			timers.rescheduling = false;
			runtime·ready(timers.timerproc);
		}
	}
	if(timers.timerproc == nil) {
		timers.timerproc = runtime·newproc1(&timerprocv, nil, 0, 0, addtimer);
		timers.timerproc->issystem = true;
	}
	if(debug)
		dumptimers("addtimer");
}
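
addtimer maintains timers.t as a binary min-heap ordered by when, so timers.t[0] is always the next deadline; that is why t->i == 0 after siftup means the timer goroutine's current sleep is too long and must be interrupted. siftup itself is not shown on this page; a plausible implementation consistent with the snippet (a sketch, not copied from time.goc) would be:

static void
siftup(int32 i)
{
	int32 p;
	Timer **t, *tmp;

	t = timers.t;
	while(i > 0) {
		p = (i-1)/2;	// parent index in the binary heap
		if(t[i]->when >= t[p]->when)
			break;	// parent fires no later: heap order holds
		tmp = t[i];	// swap the new timer up past its parent
		t[i] = t[p];
		t[p] = tmp;
		t[i]->i = i;	// keep each Timer's cached index in sync
		t[p]->i = p;
		i = p;
	}
}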
Example #10
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	// mcpu lives in an unsigned bit field of sched.atomic, so a
	// decrement below zero wraps around and shows up here as a
	// huge value >= maxgomaxprocs.
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime·main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime·sched.grunning == 0) ||
	   (scvg != nil && runtime·sched.grunning == 1 && runtime·sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
Example #11
File: proc.c Project: rlcook0/go
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	// mcpu lives in an unsigned bit field of sched.atomic, so a
	// decrement below zero wraps around and shows up here as a
	// huge value >= maxgomaxprocs.
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation: one single active g which happens to be scvg.
	if(runtime·sched.grunning == 1 && runtime·sched.gwait == 0) {
		if(scvg->status == Grunning || scvg->status == Gsyscall)
			runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}