Code example #1
File: proc.c Project: robb-broome/ruby-lab-code
void
runtime_starttheworld(bool extra)
{
	M *m;

	schedlock();
	runtime_gcwaiting = 0;
	setmcpumax(runtime_gomaxprocs);
	matchmg();
	if(extra && canaddmcpu()) {
		// Start a new m that will (we hope) be idle
		// and so available to help when the next
		// garbage collection happens.
		// canaddmcpu above did mcpu++
		// (necessary, because m will be doing various
		// initialization work so is definitely running),
		// but m is not running a specific goroutine,
		// so set the helpgc flag as a signal to m's
		// first schedule(nil) to mcpu-- and grunning--.
		m = runtime_newm();
		m->helpgc = 1;
		runtime_sched.grunning++;
	}
	schedunlock();
}
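The extra flag matters at the end of a collection: when true, starttheworld spawns one more m in the hope that it will sit idle and help the next gc. A minimal sketch of how a collector might bracket a cycle with this pair, assuming the matching runtime_stoptheworld (the gccgo counterpart of example #8); gc_cycle is an illustrative name, not the runtime's actual gc entry point:

// Illustrative only: bracket a collection with the stop/start pair.
static void
gc_cycle(void)
{
	runtime_stoptheworld();		// park all other m's running go code
	// ... mark and sweep while the world is stopped ...
	runtime_starttheworld(true);	// extra=true: spawn a spare helper m
}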
Code example #2
File: proc.c Project: robb-broome/ruby-lab-code
// Mark g ready to run.
void
runtime_ready(G *g)
{
	schedlock();
	readylocked(g);
	schedunlock();
}
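readylocked itself is not shown on this page. In proc.c of this era it looked roughly like the sketch below (an approximate reconstruction, so treat the details as indicative): it marks the goroutine runnable, queues it, and lets matchmg start an m if one is allowed.

// Approximate shape of readylocked; the sched lock is held by the caller.
static void
readylocked(G *g)
{
	if(g->m) {
		// Running on another m; ready it when it stops.
		g->readyonstop = 1;
		return;
	}

	// Mark runnable and queue it; matchmg may start an m to run it.
	if(g->status == Grunnable || g->status == Grunning)
		runtime_throw("bad g->status in ready");
	g->status = Grunnable;
	gput(g);
	matchmg();
}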
Code example #3
G*
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G * volatile newg;	// volatile to avoid longjmp warning

	schedlock();

	if((newg = gfget()) != nil){
#ifdef USING_SPLIT_STACK
		int dont_block_signals = 0;

		sp = __splitstack_resetcontext(&newg->stack_context[0],
					       &spsize);
		__splitstack_block_signals_context(&newg->stack_context[0],
						   &dont_block_signals, nil);
#else
		sp = newg->gcinitial_sp;
		spsize = newg->gcstack_size;
		if(spsize == 0)
			runtime_throw("bad spsize in __go_go");
		newg->gcnext_sp = sp;
#endif
	} else {
		newg = runtime_malg(StackMin, &sp, &spsize);
		if(runtime_lastg == nil)
			runtime_allg = newg;
		else
			runtime_lastg->alllink = newg;
		runtime_lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	newg->entry = (byte*)fn;
	newg->param = arg;
	newg->gopc = (uintptr)__builtin_return_address(0);

	runtime_sched.gcount++;
	runtime_sched.goidgen++;
	newg->goid = runtime_sched.goidgen;

	if(sp == nil)
		runtime_throw("nil g->stack0");

	getcontext(&newg->context);
	newg->context.uc_stack.ss_sp = sp;
#ifdef MAKECONTEXT_STACK_TOP
	newg->context.uc_stack.ss_sp += spsize;
#endif
	newg->context.uc_stack.ss_size = spsize;
	makecontext(&newg->context, kickoff, 0);

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
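In gccgo the compiler lowers a go statement into exactly this call: a function taking a single void* plus an argument block. A hypothetical spawn, with hello_thunk and its argument invented for illustration:

// Illustrative only: what a lowered `go` statement amounts to.
static void
hello_thunk(void *arg)
{
	// arg carries the (packed) arguments of the go statement.
	runtime_printf("goroutine arg: %p\n", arg);
}

static void
spawn_example(void)
{
	__go_go(hello_thunk, (void*)42);	// returns the new G*
}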
Code example #4
File: proc.c Project: xorrbit/golang
// Mark g ready to run.
void
runtime·ready(G *gp)
{
    schedlock();
    readylocked(gp);
    schedunlock();
}
Code example #5
File: proc.c Project: machinaut/go
void
runtime·entersyscall(void)
{
	if(runtime·sched.predawn)
		return;
	schedlock();
	g->status = Gsyscall;
	runtime·sched.mcpu--;
	runtime·sched.msyscall++;
	if(runtime·sched.gwait != 0)
		matchmg();

	if(runtime·sched.waitstop && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		runtime·sched.waitstop = 0;
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Leave SP around for gc and traceback.
	// Do before schedunlock so that gc
	// never sees Gsyscall with wrong stack.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		runtime·printf("entersyscall inconsistent %p [%p,%p]\n", g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}
	schedunlock();
}
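entersyscall is always paired with exitsyscall (example #9) around a potentially blocking call, so the scheduler can hand this m's cpu slot to another goroutine while the call waits. A hedged sketch of the bracket, where blocking_read is an invented name and read() stands in for any system call that may block:

// Sketch of the enter/exit bracket around a blocking call.
static int32
blocking_read(int32 fd, void *buf, int32 n)
{
	int32 r;

	runtime·entersyscall();		// give up this m's cpu slot
	r = read(fd, buf, n);		// may block; gc can still scan g's stack
	runtime·exitsyscall();		// reacquire a slot, possibly rescheduling
	return r;
}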
Code example #6
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	int32 siz;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	schedlock();

	if((newg = gfget()) != nil){
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		if(runtime·lastg == nil)
			runtime·allg = newg;
		else
			runtime·lastg->alllink = newg;
		runtime·lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	sp = newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	newg->sched.sp = sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;

	runtime·sched.gcount++;
	runtime·sched.goidgen++;
	newg->goid = runtime·sched.goidgen;

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
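newproc1 is reached through a thin wrapper that the compiler targets for every go statement. In proc.c of this era the wrapper looked approximately like this (reconstructed, so the ARM ('5') detail in particular is indicative):

// Create a new g running fn with siz bytes of arguments.
// The compiler turns a go statement into a call to this.
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	if(thechar == '5')
		argp = (byte*)(&fn+2);	// skip caller's saved LR on ARM
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}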
Code example #7
File: proc.c Project: machinaut/go
// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
runtime·starttheworld(void)
{
	schedlock();
	runtime·gcwaiting = 0;
	runtime·sched.mcpumax = runtime·gomaxprocs;
	matchmg();
	schedunlock();
}
Code example #8
File: proc.c Project: machinaut/go
// TODO(rsc): Remove. This is only temporary,
// for the mark and sweep collector.
void
runtime·stoptheworld(void)
{
	schedlock();
	runtime·gcwaiting = 1;
	runtime·sched.mcpumax = 1;
	while(runtime·sched.mcpu > 1) {
		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection,
		// so this is okay.
		runtime·noteclear(&runtime·sched.stopped);
		runtime·sched.waitstop = 1;
		schedunlock();
		runtime·notesleep(&runtime·sched.stopped);
		schedlock();
	}
	schedunlock();
}
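The stopped handshake relies on the runtime's one-shot Note: noteclear arms it, notesleep blocks until notewakeup fires. One round of the protocol in isolation (waiter and waker are illustrative names):

// One round of the note protocol used by stoptheworld above.
static Note done;

static void
waiter(void)
{
	runtime·noteclear(&done);	// arm the note
	runtime·notesleep(&done);	// block until someone wakes it
}

static void
waker(void)
{
	runtime·notewakeup(&done);	// release the sleeping thread
}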
Code example #9
File: proc.c Project: machinaut/go
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime·exitsyscall(void)
{
	if(runtime·sched.predawn)
		return;

	schedlock();
	runtime·sched.msyscall--;
	runtime·sched.mcpu++;
	// Fast path - if there's room for this m, we're done.
	if(m->profilehz == runtime·sched.profilehz && runtime·sched.mcpu <= runtime·sched.mcpumax) {
		// There's a cpu for us, so we can run.
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = nil;
		schedunlock();
		return;
	}
	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	g->readyonstop = 1;
	schedunlock();

	// Slow path - all the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime·sched.mcpu++ above.
	runtime·gosched();
	
	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = nil;
}
Code example #10
File: proc.c Project: machinaut/go
// Called after main·init_function; main·main will be called on return.
void
runtime·initdone(void)
{
	// Let's go.
	runtime·sched.predawn = 0;
	mstats.enablegc = 1;

	// If main·init_function started other goroutines,
	// kick off new ms to handle them, like ready
	// would have, had it not been pre-dawn.
	schedlock();
	matchmg();
	schedunlock();
}
Code example #11
File: proc.c Project: machinaut/go
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	int32 siz;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;
	if(siz > 1024)
		runtime·throw("runtime.newproc: too many args");

	schedlock();

	if((newg = gfget()) != nil){
		newg->status = Gwaiting;
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		newg->status = Gwaiting;
		newg->alllink = runtime·allg;
		runtime·allg = newg;
	}

	sp = newg->stackbase;
	sp -= siz;
	runtime·mcpy(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	newg->sched.sp = sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;

	runtime·sched.gcount++;
	runtime·goidgen++;
	newg->goid = runtime·goidgen;

	newprocreadylocked(newg);
	schedunlock();

	return newg;
//printf(" goid=%d\n", newg->goid);
}
Code example #12
void
runtime_entersyscall(void)
{
    uint32 v;

    if(m->profilehz > 0)
        runtime_setprof(false);

    // Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
    g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
                                   &g->gcnext_segment, &g->gcnext_sp,
                                   &g->gcinitial_sp);
#else
    g->gcnext_sp = (byte *) &v;
#endif

    // Save the registers in the g structure so that any pointers
    // held in registers will be seen by the garbage collector.
    // We could use getcontext here, but setjmp is more efficient
    // because it doesn't need to save the signal mask.
    setjmp(g->gcregs);

    g->status = Gsyscall;

    // Fast path.
    // The slow path inside the schedlock/schedunlock will get
    // through without stopping if it does:
    //	mcpu--
    //	gwait not true
    //	waitstop && mcpu <= mcpumax not true
    // If we can do the same with a single atomic add,
    // then we can skip the locks.
    v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
    if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
        return;

    schedlock();
    v = runtime_atomicload(&runtime_sched.atomic);
    if(atomic_gwaiting(v)) {
        matchmg();
        v = runtime_atomicload(&runtime_sched.atomic);
    }
    if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
        runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
        runtime_notewakeup(&runtime_sched.stopped);
    }

    schedunlock();
}
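Examples #12 through #17 pack several scheduler counters into one word so the fast path can update them with a single atomic add. The layout and accessor macros, approximately as defined in proc.c of this era:

// The scheduler's atomic word packs four fields:
//	[15 bits] mcpu     number of m's executing go code
//	[15 bits] mcpumax  max m's allowed to execute go code
//	[1 bit]   waitstop some g is waiting on stopped
//	[1 bit]   gwaiting nonzero g's waiting on the run queue
enum {
	mcpuWidth = 15,
	mcpuMask = (1<<mcpuWidth) - 1,
	mcpuShift = 0,
	mcpumaxShift = mcpuShift + mcpuWidth,
	waitstopShift = mcpumaxShift + mcpuWidth,
	gwaitingShift = waitstopShift + 1,
};

#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)

This is why entersyscall's fast path can do mcpu-- and then read gwaiting, waitstop, mcpu, and mcpumax out of the single value returned by the xadd.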
Code example #13
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;
	uint32 v;

	schedlock();
	ret = runtime·gomaxprocs;
	if(n <= 0)
		n = ret;
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·gomaxprocs = n;
	if(runtime·gomaxprocs > 1)
		runtime·singleproc = false;
	if(runtime·gcwaiting != 0) {
		if(atomic_mcpumax(runtime·sched.atomic) != 1)
			runtime·throw("invalid mcpumax during gc");
		schedunlock();
		return ret;
	}

	setmcpumax(n);

	// If there are now fewer allowed procs
	// than procs running, stop.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_mcpu(v) > n) {
		schedunlock();
		runtime·gosched();
		return ret;
	}
	// handle more procs
	matchmg();
	schedunlock();
	return ret;
}
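setmcpumax, used here and in examples #1 and #15, rewrites only the mcpumax field of that packed word with a compare-and-swap loop; approximately:

// Replace the mcpumax field of the atomic word (sched is locked).
static void
setmcpumax(uint32 n)
{
	uint32 v, w;

	for(;;) {
		v = runtime·sched.atomic;
		w = v;
		w &= ~(mcpuMask<<mcpumaxShift);
		w |= n<<mcpumaxShift;
		if(runtime·cas(&runtime·sched.atomic, v, w))
			break;
	}
}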
Code example #14
void
runtime·entersyscall(void)
{
	uint32 v;

	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
Code example #15
File: proc.c Project: xorrbit/golang
void
runtime·starttheworld(void)
{
    M *mp;
    int32 max;

    // Figure out how many CPUs GC could possibly use.
    max = runtime·gomaxprocs;
    if(max > runtime·ncpu)
        max = runtime·ncpu;
    if(max > MaxGcproc)
        max = MaxGcproc;

    schedlock();
    runtime·gcwaiting = 0;
    setmcpumax(runtime·gomaxprocs);
    matchmg();
    if(runtime·gcprocs() < max && canaddmcpu()) {
        // If GC could have used another helper proc, start one now,
        // in the hope that it will be available next time.
        // It would have been even better to start it before the collection,
        // but doing so requires allocating memory, so it's tricky to
        // coordinate.  This lazy approach works out in practice:
        // we don't mind if the first couple gc rounds don't have quite
        // the maximum number of procs.
        // canaddmcpu above did mcpu++
        // (necessary, because m will be doing various
        // initialization work so is definitely running),
        // but m is not running a specific goroutine,
        // so set the helpgc flag as a signal to m's
        // first schedule(nil) to mcpu-- and grunning--.
        mp = runtime·newm();
        mp->helpgc = 1;
        runtime·sched.grunning++;
    }
    schedunlock();
}
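canaddmcpu is the gate both starttheworld variants test before spawning the helper m: it bumps mcpu only if the result stays within mcpumax, again via a compare-and-swap loop (approximate reconstruction):

// If mcpu < mcpumax, do mcpu++ atomically and return true.
static bool
canaddmcpu(void)
{
	uint32 v;

	for(;;) {
		v = runtime·sched.atomic;
		if(atomic_mcpu(v) >= atomic_mcpumax(v))
			return 0;
		if(runtime·cas(&runtime·sched.atomic, v, v+(1<<mcpuShift)))
			return 1;
	}
}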
Code example #16
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime·sched.atomic) >= maxgomaxprocs)
		runtime·throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime·sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime·throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime·sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime·main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime·sched.grunning == 0) ||
	   (scvg != nil && runtime·sched.grunning == 1 && runtime·sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime·throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime·noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}
	schedunlock();

	runtime·notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime·gchelper();
		m->helpgc = 0;
		runtime·lock(&runtime·sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime·throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
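mnextg is the hand-off used twice above: it pins gp as the next goroutine for a specific m and wakes that m if it is parked in nextgandunlock's notesleep. Roughly:

// Pass gp to mp to run next; the caller already did the mcpu++.
static void
mnextg(M *mp, G *gp)
{
	runtime·sched.grunning++;
	mp->nextg = gp;
	if(mp->waitnextg) {
		mp->waitnextg = 0;
		runtime·notewakeup(&mp->havenextg);
	}
}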
Code example #17
File: proc.c Project: Sunmonds/gcc
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
		runtime_throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime_sched.gwait != 0) {
			matchmg();
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime_throw("gget inconsistency");

			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime_sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if it does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		mput(m);
	}

	v = runtime_atomicload(&runtime_sched.atomic);
	if(runtime_sched.grunning == 0)
		runtime_throw("all goroutines are asleep - deadlock!");
	m->nextg = nil;
	m->waitnextg = 1;
	runtime_noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
		runtime_notewakeup(&runtime_sched.stopped);
	}
	schedunlock();

	runtime_notesleep(&m->havenextg);
	if(m->helpgc) {
		runtime_gchelper();
		m->helpgc = 0;
		runtime_lock(&runtime_sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime_throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
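Finally, matchmg, called throughout these examples, is the loop that pairs queued g's with m's for as long as canaddmcpu permits; an approximate reconstruction in the gccgo spelling used by this last example:

// Start as many m's as the mcpu/mcpumax budget allows,
// handing each one a queued g.  Sched is locked.
static void
matchmg(void)
{
	G *gp;
	M *mp;

	if(m->mallocing || m->gcing)
		return;

	while(haveg() && canaddmcpu()) {
		gp = gget();
		if(gp == nil)
			runtime_throw("gget inconsistency");

		// Find or create an m to run gp.
		if((mp = mget(gp)) == nil)
			mp = runtime_newm();
		mnextg(mp, gp);
	}
}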