Example #1
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime_exitsyscall(void)
{
	G *gp;
	uint32 v;

	// Fast path.
	// If we can do the mcpu++ bookkeeping and
	// find that we still have mcpu <= mcpumax, then we can
	// start executing Go code immediately, without having to
	// schedlock/schedunlock.
	// Also do fast return if any locks are held, so that
	// panic code can use syscalls to open a file.
	gp = g;
	v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift));
	if((m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) || m->locks > 0) {
		// There's a cpu for us, so we can run.
		gp->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
#ifdef USING_SPLIT_STACK
		gp->gcstack = nil;
#endif
		gp->gcnext_sp = nil;
		runtime_memclr(&gp->gcregs, sizeof gp->gcregs);

		if(m->profilehz > 0)
			runtime_setprof(true);
		return;
	}

	// Tell scheduler to put g back on the run queue:
	// mostly equivalent to g->status = Grunning,
	// but keeps the garbage collector from thinking
	// that g is running right now, which it's not.
	gp->readyonstop = 1;

	// All the cpus are taken.
	// The scheduler will ready g and put this m to sleep.
	// When the scheduler takes g away from m,
	// it will undo the runtime_sched.mcpu++ above.
	runtime_gosched();

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
#ifdef USING_SPLIT_STACK
	gp->gcstack = nil;
#endif
	gp->gcnext_sp = nil;
	runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
}
Example #2
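// Reallocate the finalizer table, rehashing live keys into a freshly
// cleared Fintab; the capacity is tripled only when few of the existing
// entries are dead (void*)-1 tombstones.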
static void
resizefintab(Fintab *tab)
{
	Fintab newtab;
	void *k;
	int32 i;

	runtime_memclr((byte*)&newtab, sizeof newtab);
	newtab.max = tab->max;
	if(newtab.max == 0)
		newtab.max = 3*3*3;
	else if(tab->ndead < tab->nkey/2) {
		// grow table if not many dead values.
		// otherwise just rehash into table of same size.
		newtab.max *= 3;
	}
	
	newtab.fkey = runtime_mallocgc(newtab.max*sizeof newtab.fkey[0], FlagNoPointers, 0, 1);
	newtab.val = runtime_mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);
	
	for(i=0; i<tab->max; i++) {
		k = tab->fkey[i];
		if(k != nil && k != (void*)-1)
			addfintab(&newtab, k, tab->val[i].fn, tab->val[i].ft);
	}
	
	runtime_free(tab->fkey);
	runtime_free(tab->val);
	
	tab->fkey = newtab.fkey;
	tab->val = newtab.val;
	tab->nkey = newtab.nkey;
	tab->ndead = newtab.ndead;
	tab->max = newtab.max;
}
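Note: resizefintab is the factored-out form of the inline resize in Example #11's runtime_addfinalizer, and its caller follows the same policy: resize when nkey >= max/2 + max/4, i.e. when the table passes roughly 3/4 full. Starting from the initial capacity of 27 (3*3*3), successive grows give 81, 243, and so on; when at least half the keys are dead tombstones, the table is instead rehashed at the same size to reclaim them.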
Example #3
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}
Example #4
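// Arm (hz > 0) or disarm (hz == 0) the profiling interval timer for this M.
// The itimerval zeroed by runtime_memclr doubles as the "disable" setting.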
void
runtime_resetcpuprofiler(int32 hz)
{
	struct itimerval it;

	runtime_memclr((byte*)&it, sizeof it);
	if(hz == 0) {
		runtime_setitimer(ITIMER_PROF, &it, nil);
	} else {
		it.it_interval.tv_sec = 0;
		it.it_interval.tv_usec = 1000000 / hz;
		it.it_value = it.it_interval;
		runtime_setitimer(ITIMER_PROF, &it, nil);
	}
	runtime_m()->profilehz = hz;
}
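For reference, a minimal user-space analog of this pattern using the standard setitimer(2) API (the wrapper name set_prof_timer is hypothetical):

#include <sys/time.h>
#include <string.h>

static void
set_prof_timer(int hz)
{
	struct itimerval it;

	memset(&it, 0, sizeof it);	/* plays the role of runtime_memclr */
	if(hz > 0) {
		it.it_interval.tv_sec = 0;
		it.it_interval.tv_usec = 1000000 / hz;	/* e.g. hz=100 -> 10000us period */
		it.it_value = it.it_interval;
	}
	/* hz == 0 leaves the struct zeroed, which disarms the timer */
	setitimer(ITIMER_PROF, &it, NULL);
}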
Example #5
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
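Examples #3 and #5 appear to be two revisions of the same allocator entry point. The older form (#5) decides whether to clear the span by peeking at its first word and re-zeroes the whole span when it looks dirty and the caller asked for zeroed memory; the newer form (#3) tracks dirtiness explicitly in s->needzero, clears only when both the caller and the span require it, and additionally threads large spans onto the busy lists for the sweeper.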
Example #6
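// Allocate and zero a per-M MCache, pointing every size class at the
// shared empty span and seeding the heap-profile sampling counter.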
MCache*
runtime_allocmcache(void)
{
	intgo rate;
	MCache *c;
	int32 i;

	runtime_lock(&runtime_mheap);
	c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
	runtime_unlock(&runtime_mheap);
	runtime_memclr((byte*)c, sizeof(*c));
	for(i = 0; i < _NumSizeClasses; i++)
		c->alloc[i] = &emptymspan;

	// Set first allocation sample size.
	rate = runtime_MemProfileRate;
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	if(rate != 0)
		c->next_sample = runtime_fastrand1() % (2*rate);

	return c;
}
Example #7
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;
	uint32 v;

	schedlock();
	if(gp != nil) {
		// Just finished running gp.
		gp->m = nil;
		runtime_sched.grunning--;

		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");

		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime_throw("bad gp->status in sched");
		case Grunning:
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			runtime_memclr(&gp->context, sizeof gp->context);
			gfput(gp);
			if(--runtime_sched.gcount == 0)
				runtime_exit(0);
			break;
		}
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime_throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime_sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else {
		runtime_throw("invalid m state in scheduler");
	}

	// Find (or wait for) g to run.  Unlocks runtime_sched.
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime_sched.profilehz;
	if(m->profilehz != hz)
		runtime_resetcpuprofiler(hz);

	runtime_gogo(gp);
}
Example #8
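// memclrBytes zeroes the byte contents of a Go slice; the gccgo Slice
// header exposes the data pointer and length as __values and __count.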
void
memclrBytes(Slice s)
{
	runtime_memclr(s.__values, s.__count);
}
Example #9
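// chanrecv receives from channel c into ep (which may be nil to discard
// the value). If block is false it returns immediately rather than wait;
// *received reports whether a real value was obtained, as opposed to the
// zero value produced for a closed channel.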
static bool
chanrecv(ChanType *t, Hchan* c, byte *ep, bool block, bool *received)
{
	SudoG *sg;
	SudoG mysg;
	G *gp;
	int64 t0;
	G *g;

	if(runtime_gcwaiting())
		runtime_gosched();

	// raceenabled: don't need to check ep, as it is always on the stack.

	if(debug)
		runtime_printf("chanrecv: chan=%p\n", c);

	g = runtime_g();

	if(c == nil) {
		USED(t);
		if(!block)
			return false;
		runtime_park(nil, nil, "chan receive (nil chan)");
		return false;  // not reached
	}

	t0 = 0;
	mysg.releasetime = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		mysg.releasetime = -1;
	}

	runtime_lock(c);
	if(c->dataqsiz > 0)
		goto asynch;

	if(c->closed)
		goto closed;

	sg = dequeue(&c->sendq);
	if(sg != nil) {
		if(raceenabled)
			racesync(c, sg);
		runtime_unlock(c);

		if(ep != nil)
			runtime_memmove(ep, sg->elem, c->elemsize);
		gp = sg->g;
		gp->param = sg;
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);

		if(received != nil)
			*received = true;
		return true;
	}

	if(!block) {
		runtime_unlock(c);
		return false;
	}

	mysg.elem = ep;
	mysg.g = g;
	mysg.selectdone = nil;
	g->param = nil;
	enqueue(&c->recvq, &mysg);
	runtime_parkunlock(c, "chan receive");

	if(g->param == nil) {
		runtime_lock(c);
		if(!c->closed)
			runtime_throw("chanrecv: spurious wakeup");
		goto closed;
	}

	if(received != nil)
		*received = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

asynch:
	if(c->qcount <= 0) {
		if(c->closed)
			goto closed;

		if(!block) {
			runtime_unlock(c);
			if(received != nil)
				*received = false;
			return false;
		}
		mysg.g = g;
		mysg.elem = nil;
		mysg.selectdone = nil;
		enqueue(&c->recvq, &mysg);
		runtime_parkunlock(c, "chan receive");

		runtime_lock(c);
		goto asynch;
	}

	if(raceenabled)
		runtime_raceacquire(chanbuf(c, c->recvx));

	if(ep != nil)
		runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize);
	runtime_memclr(chanbuf(c, c->recvx), c->elemsize);
	if(++c->recvx == c->dataqsiz)
		c->recvx = 0;
	c->qcount--;

	sg = dequeue(&c->sendq);
	if(sg != nil) {
		gp = sg->g;
		runtime_unlock(c);
		if(sg->releasetime)
			sg->releasetime = runtime_cputicks();
		runtime_ready(gp);
	} else
		runtime_unlock(c);

	if(received != nil)
		*received = true;
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;

closed:
	if(ep != nil)
		runtime_memclr(ep, c->elemsize);
	if(received != nil)
		*received = false;
	if(raceenabled)
		runtime_raceacquire(c);
	runtime_unlock(c);
	if(mysg.releasetime > 0)
		runtime_blockevent(mysg.releasetime - t0, 2);
	return true;
}
Example #10
// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
static void
sweepspan(MSpan *s)
{
	int32 n, npages, size;
	byte *p;
	uint32 ref, *gcrefp, *gcrefep;
	MCache *c;
	Finalizer *f;

	p = (byte*)(s->start << PageShift);
	if(s->sizeclass == 0) {
		// Large block.
		ref = s->gcref0;
		switch(ref & ~(RefFlags^RefHasFinalizer)) {
		case RefNone:
			// Free large object.
			mstats.alloc -= s->npages<<PageShift;
			runtime_memclr(p, s->npages<<PageShift);
			if(ref & RefProfiled)
				runtime_MProf_Free(p, s->npages<<PageShift);
			s->gcref0 = RefFree;
			runtime_MHeap_Free(&runtime_mheap, s, 1);
			break;
		case RefNone|RefHasFinalizer:
			f = runtime_getfinalizer(p, 1);
			if(f == nil)
				runtime_throw("finalizer inconsistency");
			f->arg = p;
			f->next = finq;
			finq = f;
			ref &= ~RefHasFinalizer;
			// fall through
		case RefSome:
		case RefSome|RefHasFinalizer:
			s->gcref0 = RefNone | (ref&RefFlags);
			break;
		}
		return;
	}

	// Chunk full of small blocks.
	runtime_MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
	gcrefp = s->gcref;
	gcrefep = s->gcref + n;
	for(; gcrefp < gcrefep; gcrefp++, p += size) {
		ref = *gcrefp;
		if(ref < RefNone)	// RefFree or RefStack
			continue;
		switch(ref & ~(RefFlags^RefHasFinalizer)) {
		case RefNone:
			// Free small object.
			if(ref & RefProfiled)
				runtime_MProf_Free(p, size);
			*gcrefp = RefFree;
			c = m->mcache;
			if(size > (int32)sizeof(uintptr))
				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
			mstats.alloc -= size;
			mstats.by_size[s->sizeclass].nfree++;
			runtime_MCache_Free(c, p, s->sizeclass, size);
			break;
		case RefNone|RefHasFinalizer:
			f = runtime_getfinalizer(p, 1);
			if(f == nil)
				runtime_throw("finalizer inconsistency");
			f->arg = p;
			f->next = finq;
			finq = f;
			ref &= ~RefHasFinalizer;
			// fall through
		case RefSome:
		case RefSome|RefHasFinalizer:
			*gcrefp = RefNone | (ref&RefFlags);
			break;
		}
	}
}
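Note the ((uintptr*)p)[1] = 1 store in the small-object path: rather than paying for a runtime_memclr on every freed block at sweep time, the block's second word is marked "needs to be zeroed" (the first word being used for the free-list link), leaving it to the allocator to clear lazily. The *(uintptr*)(s->start<<PageShift) != 0 test in Example #5 is the corresponding dirtiness check for whole spans.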
Example #11
// add finalizer; caller is responsible for making sure not already in table
void
runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
{
	Fintab newtab;
	int32 i;
	uint32 *ref;
	byte *base;
	Finalizer *e;
	
	e = nil;
	if(f != nil) {
		e = runtime_mal(sizeof *e);
		e->fn = f;
		e->ft = ft;
	}

	runtime_lock(&finlock);
	if(!runtime_mlookup(p, &base, nil, nil, &ref) || p != base) {
		runtime_unlock(&finlock);
		runtime_throw("addfinalizer on invalid pointer");
	}
	if(f == nil) {
		if(*ref & RefHasFinalizer) {
			lookfintab(&fintab, p, 1);
			*ref &= ~RefHasFinalizer;
		}
		runtime_unlock(&finlock);
		return;
	}

	if(*ref & RefHasFinalizer) {
		runtime_unlock(&finlock);
		runtime_throw("double finalizer");
	}
	*ref |= RefHasFinalizer;

	if(fintab.nkey >= fintab.max/2+fintab.max/4) {
		// keep table at most 3/4 full:
		// allocate new table and rehash.

		runtime_memclr((byte*)&newtab, sizeof newtab);
		newtab.max = fintab.max;
		if(newtab.max == 0)
			newtab.max = 3*3*3;
		else if(fintab.ndead < fintab.nkey/2) {
			// grow table if not many dead values.
			// otherwise just rehash into table of same size.
			newtab.max *= 3;
		}

		newtab.key = runtime_mallocgc(newtab.max*sizeof newtab.key[0], RefNoPointers, 0, 1);
		newtab.val = runtime_mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);

		for(i=0; i<fintab.max; i++) {
			void *k;

			k = fintab.key[i];
			if(k != nil && k != (void*)-1)
				addfintab(&newtab, k, fintab.val[i]);
		}
		runtime_free(fintab.key);
		runtime_free(fintab.val);
		fintab = newtab;
	}

	addfintab(&fintab, p, e);
	runtime_unlock(&finlock);
}
Example #12
// add finalizer; caller is responsible for making sure not already in table
void
runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
{
	Fintab newtab;
	int32 i;
	byte *base;
	Finalizer *e;
	
	e = nil;
	if(f != nil) {
		e = runtime_mal(sizeof *e);
		e->fn = f;
		e->ft = ft;
	}

	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	runtime_lock(&finlock);
	if(!runtime_mlookup(p, &base, nil, nil) || p != base) {
		runtime_unlock(&finlock);
		__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
		runtime_throw("addfinalizer on invalid pointer");
	}
	if(f == nil) {
		lookfintab(&fintab, p, 1);
		goto unlock;
	}

	if(lookfintab(&fintab, p, 0)) {
		runtime_unlock(&finlock);
		__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
		runtime_throw("double finalizer");
	}
	runtime_setblockspecial(p);

	if(fintab.nkey >= fintab.max/2+fintab.max/4) {
		// keep table at most 3/4 full:
		// allocate new table and rehash.

		runtime_memclr((byte*)&newtab, sizeof newtab);
		newtab.max = fintab.max;
		if(newtab.max == 0)
			newtab.max = 3*3*3;
		else if(fintab.ndead < fintab.nkey/2) {
			// grow table if not many dead values.
			// otherwise just rehash into table of same size.
			newtab.max *= 3;
		}

		newtab.key = runtime_mallocgc(newtab.max*sizeof newtab.key[0], FlagNoPointers, 0, 1);
		newtab.val = runtime_mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1);

		for(i=0; i<fintab.max; i++) {
			void *k;

			k = fintab.key[i];
			if(k != nil && k != (void*)-1)
				addfintab(&newtab, k, fintab.val[i]);
		}
		runtime_free(fintab.key);
		runtime_free(fintab.val);
		fintab = newtab;
	}

	addfintab(&fintab, p, e);
 unlock:
	runtime_unlock(&finlock);

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		__go_run_goroutine_gc(200);
	}
}
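Taken together, the recurring idiom in these examples is zeroing memory in place rather than trusting its previous contents: a stack-allocated struct before first use (the newtab and itimerval clears in #2, #4, #11, and #12), a freshly allocated object (#6), saved register or context state in a G (#1, #7), a freed or recycled heap region (#3, #5, #10), a channel buffer slot or receive target (#9), and a caller-supplied Go slice (#8). A minimal sketch of the struct-clearing form as it would look inside the runtime (Foo and f are hypothetical names):

	Foo f;

	runtime_memclr((byte*)&f, sizeof f);	// every field is now zero, like Foo f = {0};
	// ... set only the fields that need non-zero values ...

The cast to byte* and the sizeof of the object itself keep the call correct even if Foo's layout changes, which is why the (byte*)&x, sizeof x pairing recurs almost verbatim across the examples.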