Example #1
File: mcache.c Project: kraj/gcc
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass.  Returns this span.
MSpan*
runtime_MCache_Refill(MCache *c, int32 sizeclass)
{
	MCacheList *l;
	MSpan *s;

	runtime_m()->locks++;
	// Return the current cached span to the central lists.
	s = c->alloc[sizeclass];
	if(s->freelist != nil)
		runtime_throw("refill on a nonempty span");
	if(s != &emptymspan)
		runtime_MCentral_UncacheSpan(&runtime_mheap.central[sizeclass], s);

	// Push any explicitly freed objects to the central lists.
	// Not required, but it seems like a good time to do it.
	l = &c->free[sizeclass];
	if(l->nlist > 0) {
		runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], l->list);
		l->list = nil;
		l->nlist = 0;
	}

	// Get a new cached span from the central lists.
	s = runtime_MCentral_CacheSpan(&runtime_mheap.central[sizeclass]);
	if(s == nil)
		runtime_throw("out of memory");
	if(s->freelist == nil) {
		runtime_printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
		runtime_throw("empty span");
	}
	c->alloc[sizeclass] = s;
	runtime_m()->locks--;
	return s;
}
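For orientation, here is a minimal sketch of the small-object allocation path that would call runtime_MCache_Refill on a cache miss; the MCache, MSpan, and MLink declarations are assumed from the surrounding runtime, so read it as an illustration of the refill contract, not the runtime's actual malloc code.
static void*
alloc_small_sketch(MCache *c, int32 sizeclass)
{
	MSpan *s;
	MLink *v;

	s = c->alloc[sizeclass];
	if(s->freelist == nil)
		s = runtime_MCache_Refill(c, sizeclass);	// swap in a span with free objects
	v = s->freelist;	// pop the first free object
	s->freelist = v->next;
	s->ref++;
	return v;
}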
Example #2
void
runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64))
{
	void **key;
	void **ekey;
	int32 i;

	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	for(i=0; i<TABSZ; i++) {
		runtime_lock(&fintab[i]);
		key = fintab[i].fkey;
		ekey = key + fintab[i].max;
		for(; key < ekey; key++)
			if(*key != nil && *key != ((void*)-1))
				fn(*key);
		scan((byte*)&fintab[i].fkey, sizeof(void*));
		scan((byte*)&fintab[i].val, sizeof(void*));
		runtime_unlock(&fintab[i]);
	}

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		runtime_throw("walkfintab not called from gc");
	}
}
Example #3
File: proc.c Project: Sunmonds/gcc
static void
runtime_mcall(void (*pfn)(G*))
{
#ifndef USING_SPLIT_STACK
	int i;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	if(g == m->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(g != nil) {

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&g->stack_context[0]);
#else
		g->gcnext_sp = &i;
#endif
		g->fromgogo = false;
		getcontext(&g->context);
	}
	if (g == nil || !g->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&m->g0->stack_context[0]);
#endif
		m->g0->entry = (byte*)pfn;
		m->g0->param = g;
		g = m->g0;
		setcontext(&m->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
Example #4
File: proc.c Project: Sunmonds/gcc
static M*
startm(void)
{
	M *m;
	pthread_attr_t attr;
	pthread_t tid;

	m = runtime_malloc(sizeof(M));
	mcommoninit(m);
	m->g0 = runtime_malg(-1, nil, nil);

	if(pthread_attr_init(&attr) != 0)
		runtime_throw("pthread_attr_init");
	if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		runtime_throw("pthread_attr_setdetachstate");

#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 8192
#endif
	if(pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
		runtime_throw("pthread_attr_setstacksize");

	if(pthread_create(&tid, &attr, runtime_mstart, m) != 0)
		runtime_throw("pthread_create");

	return m;
}
Example #5
static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);
	if(runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 0)
		runtime_throw("cas64 failed");
	x64 = 42;
	if(!runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime_throw("cas64 failed");
	if(runtime_atomicload64(&z64) != 1)
		runtime_throw("load64 failed");
	runtime_atomicstore64(&z64, (1ull<<40)+1);
	if(runtime_atomicload64(&z64) != (1ull<<40)+1)
		runtime_throw("store64 failed");
	if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_atomicload64(&z64) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime_throw("xchg64 failed");
	if(runtime_atomicload64(&z64) != (3ull<<40)+3)
		runtime_throw("xchg64 failed");
}
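A note on the calling convention being tested: runtime_cas64 takes the expected old value by value (so x64 is never written) and returns whether the swap happened. A minimal sketch of those semantics using GCC's __sync builtin, which is an assumption here since each target supplies its own implementation:
// Sketch only: if *addr == old, store newv and report success.
static bool
cas64_sketch(uint64 *addr, uint64 old, uint64 newv)
{
	return __sync_bool_compare_and_swap(addr, old, newv);
}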
Example #6
void
runtime_netpollinit(void)
{
	int p[2];
	int fl;

	FD_ZERO(&fds);
	allocated = 128;
	data = runtime_mallocgc(allocated * sizeof(PollDesc *), 0,
				FlagNoScan|FlagNoProfiling|FlagNoInvokeGC);

	if(pipe(p) < 0)
		runtime_throw("netpollinit: failed to create pipe");
	rdwake = p[0];
	wrwake = p[1];

	fl = fcntl(rdwake, F_GETFL);
	if(fl < 0)
		runtime_throw("netpollinit: fcntl failed");
	fl |= O_NONBLOCK;
	if(fcntl(rdwake, F_SETFL, fl))
		runtime_throw("netpollinit: fcntl failed");
	fcntl(rdwake, F_SETFD, FD_CLOEXEC);

	fl = fcntl(wrwake, F_GETFL);
	if(fl < 0)
		runtime_throw("netpollinit: fcntl failed");
	fl |= O_NONBLOCK;
	if(fcntl(wrwake, F_SETFL, fl))
		runtime_throw("netpollinit: fcntl failed");
	fcntl(wrwake, F_SETFD, FD_CLOEXEC);

	FD_SET(rdwake, &fds);
}
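The pipe set up above is the classic self-pipe wakeup: another thread writes one byte to wrwake to force the select() over fds to return even when no network fd is ready. A hypothetical helper (not part of the listing) showing that use:
// Hypothetical wakeup: one byte on the write end unblocks select().
static void
netpoll_wake(void)
{
	byte b;

	b = 0;
	write(wrwake, &b, 1);
}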
Example #7
G*
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G * volatile newg;	// volatile to avoid longjmp warning

	schedlock();

	if((newg = gfget()) != nil){
#ifdef USING_SPLIT_STACK
		int dont_block_signals = 0;

		sp = __splitstack_resetcontext(&newg->stack_context[0],
					       &spsize);
		__splitstack_block_signals_context(&newg->stack_context[0],
						   &dont_block_signals, nil);
#else
		sp = newg->gcinitial_sp;
		spsize = newg->gcstack_size;
		if(spsize == 0)
			runtime_throw("bad spsize in __go_go");
		newg->gcnext_sp = sp;
#endif
	} else {
		newg = runtime_malg(StackMin, &sp, &spsize);
		if(runtime_lastg == nil)
			runtime_allg = newg;
		else
			runtime_lastg->alllink = newg;
		runtime_lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	newg->entry = (byte*)fn;
	newg->param = arg;
	newg->gopc = (uintptr)__builtin_return_address(0);

	runtime_sched.gcount++;
	runtime_sched.goidgen++;
	newg->goid = runtime_sched.goidgen;

	if(sp == nil)
		runtime_throw("nil g->stack0");

	getcontext(&newg->context);
	newg->context.uc_stack.ss_sp = sp;
#ifdef MAKECONTEXT_STACK_TOP
	newg->context.uc_stack.ss_sp += spsize;
#endif
	newg->context.uc_stack.ss_size = spsize;
	makecontext(&newg->context, kickoff, 0);

	newprocreadylocked(newg);
	schedunlock();

	return newg;
}
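makecontext points the new context at kickoff, which consumes the entry and param fields set above. Its likely shape, sketched here as an assumption about a routine the listing does not include:
// Sketch of the kickoff trampoline: it runs on the new goroutine's
// stack, calls the queued entry function, then exits the goroutine.
static void
kickoff(void)
{
	void (*fn)(void*);

	fn = (void (*)(void*))(g->entry);
	fn(g->param);
	runtime_goexit();
}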
Example #8
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be able to map an interior
	// pointer to its containing span.  (The listing was truncated here;
	// this closing is a reconstruction.)
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}
Example #9
// Create a new m.  It will start off with a call to runtime_mstart.
M*
runtime_newm(void)
{
	M *m;
	pthread_attr_t attr;
	pthread_t tid;
	size_t stacksize;

	m = runtime_malloc(sizeof(M));
	mcommoninit(m);
	m->g0 = runtime_malg(-1, nil, nil);

	if(pthread_attr_init(&attr) != 0)
		runtime_throw("pthread_attr_init");
	if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		runtime_throw("pthread_attr_setdetachstate");

	stacksize = PTHREAD_STACK_MIN;

	// With glibc before version 2.16 the static TLS size is taken
	// out of the stack size, and we get an error or a crash if
	// there is not enough stack space left.  Add it back in if we
	// can, in case the program uses a lot of TLS space.  FIXME:
	// This can be disabled in glibc 2.16 and later, if the bug is
	// indeed fixed then.
	stacksize += tlssize;

	if(pthread_attr_setstacksize(&attr, stacksize) != 0)
		runtime_throw("pthread_attr_setstacksize");

	if(pthread_create(&tid, &attr, runtime_mstart, m) != 0)
		runtime_throw("pthread_create");

	return m;
}
Example #10
void*
runtime_FixAlloc_Alloc(FixAlloc *f)
{
	void *v;
	
	if(f->size == 0) {
		runtime_printf("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n");
		runtime_throw("runtime: internal error");
	}

	if(f->list) {
		v = f->list;
		f->list = *(void**)f->list;
		f->inuse += f->size;
		return v;
	}
	if(f->nchunk < f->size) {
		f->sys += FixAllocChunk;
		f->chunk = f->alloc(FixAllocChunk);
		if(f->chunk == nil)
			runtime_throw("out of memory (FixAlloc)");
		f->nchunk = FixAllocChunk;
	}
	v = f->chunk;
	if(f->first)
		f->first(f->arg, v);
	f->chunk += f->size;
	f->nchunk -= f->size;
	f->inuse += f->size;
	return v;
}
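The f->list checked first by the allocator is fed by the matching free routine; a sketch consistent with the bookkeeping above (the real routine may differ in detail):
// Sketch: push the object back onto the allocator's free list.
static void
fixalloc_free_sketch(FixAlloc *f, void *p)
{
	f->inuse -= f->size;
	*(void**)p = f->list;	// link p in front of the current list
	f->list = p;
}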
Example #11
static void
runtime_mcall(void (*pfn)(G*))
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	int i;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();

	mp = m;
	gp = g;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext(&gp->stack_context[0]);
#else
		gp->gcnext_sp = &i;
#endif
		gp->fromgogo = false;
		getcontext(&gp->context);

		// When we return from getcontext, we may be running
		// in a new thread.  That means that m and g may have
		// changed.  They are global variables so we will
		// reload them, but the addresses of m and g may be
		// cached in our local stack frame, and those
		// addresses may be wrong.  Call functions to reload
		// the values for this thread.
		mp = runtime_m();
		gp = runtime_g();

		if(gp->traceback != nil)
			gtraceback(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext(&mp->g0->stack_context[0]);
#endif
		mp->g0->entry = (byte*)pfn;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(&mp->g0->context);
		setcontext(&mp->g0->context);
		runtime_throw("runtime: mcall function returned");
	}
}
Example #12
// Enter scheduler.  If g->status is Grunning,
// re-queues g and runs everyone else who is waiting
// before running g again.  If g->status is Gmoribund,
// kills off g.
void
runtime_gosched(void)
{
	if(m->locks != 0)
		runtime_throw("gosched holding locks");
	if(g == m->g0)
		runtime_throw("gosched of g0");
	runtime_mcall(schedule);
}
Example #13
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

	// Record span info, because gc needs to be able to map an interior
	// pointer to its containing span.  (The listing was truncated here;
	// this closing is a reconstruction.)
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}
Example #14
bool
runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
{
	Fintab *tab;
	byte *base;
	bool ret = false;
	
	if(debug) {
		if(!runtime_mlookup(p, &base, nil, nil) || p != base)
			runtime_throw("addfinalizer on invalid pointer");
	}
	
	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	tab = TAB(p);
	runtime_lock(tab);
	if(f == nil) {
		if(lookfintab(tab, p, true, nil))
			runtime_setblockspecial(p, false);
		ret = true;
		goto unlock;
	}

	if(lookfintab(tab, p, false, nil)) {
		ret = false;
		goto unlock;
	}

	if(tab->nkey >= tab->max/2+tab->max/4) {
		// keep table at most 3/4 full:
		// allocate new table and rehash.
		resizefintab(tab);
	}

	addfintab(tab, p, f, ft);
	runtime_setblockspecial(p, true);
	ret = true;

 unlock:
	runtime_unlock(tab);

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		__go_run_goroutine_gc(200);
	}

	return ret;
}
Example #15
void
runtime_unlock(Lock *l)
{
	uint32 v;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	v = runtime_xchg(&l->key, MUTEX_UNLOCKED);
	if(v == MUTEX_UNLOCKED)
		runtime_throw("unlock of unlocked lock");
	if(v == MUTEX_SLEEPING)
		runtime_futexwakeup(&l->key, 1);
}
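The MUTEX_SLEEPING case only makes sense next to the lock side: a locker that parks leaves MUTEX_SLEEPING in the key, which is what tells the unlock above to issue a futex wakeup. A simplified sketch of that slow path, an assumption standing in for the real spinning implementation:
static void
lock_sketch(Lock *l)
{
	if(runtime_cas(&l->key, MUTEX_UNLOCKED, MUTEX_LOCKED))
		return;	// uncontended fast path
	// Park until the key reads MUTEX_UNLOCKED.  The key may be left
	// at MUTEX_SLEEPING even with no waiters; the spurious wakeup
	// that runtime_unlock then issues is harmless.
	while(runtime_xchg(&l->key, MUTEX_SLEEPING) != MUTEX_UNLOCKED)
		runtime_futexsleep(&l->key, MUTEX_SLEEPING, -1);
}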
Example #16
// Mark this g as m's idle goroutine.
// This functionality might be used in environments where programs
// are limited to a single thread, to simulate a select-driven
// network server.  It is not exposed via the standard runtime API.
void
runtime_idlegoroutine(void)
{
	if(g->idlem != nil)
		runtime_throw("g is already an idle goroutine");
	g->idlem = m;
}
Example #17
// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil && canaddmcpu()) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime_printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id,
				g->idlem->idleg->goid, g->goid);
			runtime_throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	g->schedlink = nil;
	if(runtime_sched.ghead == nil)
		runtime_sched.ghead = g;
	else
		runtime_sched.gtail->schedlink = g;
	runtime_sched.gtail = g;

	// increment gwait.
	// if it transitions to nonzero, set atomic gwaiting bit.
	if(runtime_sched.gwait++ == 0)
		runtime_xadd(&runtime_sched.atomic, 1<<gwaitingShift);
}
Example #18
static Hchan*
makechan(ChanType *t, int64 hint)
{
	Hchan *c;
	uintptr n;
	const Type *elem;

	elem = t->__element_type;

	// compiler checks this but be safe.
	if(elem->__size >= (1<<16))
		runtime_throw("makechan: invalid channel element type");

	if(hint < 0 || (intgo)hint != hint || (elem->__size > 0 && (uintptr)hint > (MaxMem - sizeof(*c)) / elem->__size))
		runtime_panicstring("makechan: size out of range");

	n = sizeof(*c);
	n = ROUND(n, elem->__align);

	// Allocate the header and buffer in one call.  Use n (sizeof(*c)
	// rounded up to the element alignment) rather than raw sizeof(*c),
	// so the element buffer that follows the header stays aligned.
	c = (Hchan*)runtime_mallocgc(n + hint*elem->__size, (uintptr)t | TypeInfo_Chan, 0);
	c->elemsize = elem->__size;
	c->elemtype = elem;
	c->dataqsiz = hint;

	if(debug)
		runtime_printf("makechan: chan=%p; elemsize=%D; dataqsiz=%D\n",
			c, (int64)elem->__size, (int64)c->dataqsiz);

	return c;
}
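The hint check above uses the standard trick for overflow-safe sizing: bound the element count by budget/size instead of testing the product. The same guard in isolation, as a hypothetical helper:
// Hypothetical helper: true if count*size would exceed budget,
// without ever computing a product that could wrap.
static bool
alloc_too_big(uintptr budget, uintptr count, uintptr size)
{
	return size > 0 && count > budget / size;
}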
Example #19
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime_mheap.sweepspans)
				runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
Example #20
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer (special) bit.
bool
runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft)
{
	Fintab *tab;
	bool res;
	Fin f;
	
	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	tab = TAB(p);
	runtime_lock(tab);
	res = lookfintab(tab, p, del, &f);
	runtime_unlock(tab);

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		__go_run_goroutine_gc(201);
	}

	if(!res)
		return false;
	*fn = f.fn;
	*ft = f.ft;
	return true;
}
Example #21
void
runtime_stoptheworld(void)
{
	uint32 v;

	schedlock();
	runtime_gcwaiting = 1;

	setmcpumax(1);

	// while mcpu > 1
	for(;;) {
		v = runtime_sched.atomic;
		if(atomic_mcpu(v) <= 1)
			break;

		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection.
		runtime_noteclear(&runtime_sched.stopped);
		if(atomic_waitstop(v))
			runtime_throw("invalid waitstop");

		// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
		// still being true.
		if(!runtime_cas(&runtime_sched.atomic, v, v+(1<<waitstopShift)))
			continue;

		schedunlock();
		runtime_notesleep(&runtime_sched.stopped);
		schedlock();
	}
	runtime_singleproc = runtime_gomaxprocs == 1;
	schedunlock();
}
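atomic_mcpu and atomic_waitstop unpack bit fields from the single runtime_sched.atomic word that the CAS updates; their likely definitions, assumed here from how the word is used in this function:
// Assumed accessors: mcpu sits in the low bits of the packed word,
// waitstop is a one-bit flag at waitstopShift.
#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)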
Example #22
void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}
Example #23
void
runtime_panicstring(const char *s)
{
	Eface err;

	if(runtime_m()->mallocing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during malloc");
	}
	if(runtime_m()->gcing) {
		runtime_printf("panic: %s\n", s);
		runtime_throw("panic during gc");
	}
	runtime_newErrorCString(s, &err);
	runtime_panic(err);
}
Example #24
_Bool
__go_type_equal_float (const void *vk1, const void *vk2, uintptr_t key_size)
{
  if (key_size == 4)
    {
      const float *fp1;
      const float *fp2;

      fp1 = (const float *) vk1;
      fp2 = (const float *) vk2;

      return *fp1 == *fp2;
    }
  else if (key_size == 8)
    {
      const double *dp1;
      const double *dp2;

      dp1 = (const double *) vk1;
      dp2 = (const double *) vk2;

      return *dp1 == *dp2;
    }
  else
    runtime_throw ("__go_type_equal_float: invalid float size");
}
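Because the comparison is plain ==, a NaN key never equals itself, matching Go's equality semantics for float map keys. A tiny hypothetical check, not part of the runtime:
/* Hypothetical: a NaN float64 key can be stored but never found,
   since this comparison always yields 0 for NaN.  */
static _Bool
nan_key_found (void)
{
  double k = 0.0 / 0.0;	/* quiet NaN */
  return __go_type_equal_float (&k, &k, sizeof k);
}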
Example #25
static void
addfintab(Fintab *t, void *k, FuncVal *fn, const struct __go_func_type *ft)
{
	int32 i, j;

	i = (uintptr)k % (uintptr)t->max;
	for(j=0; j<t->max; j++) {
		if(t->fkey[i] == nil) {
			t->nkey++;
			goto ret;
		}
		if(t->fkey[i] == (void*)-1) {
			t->ndead--;
			goto ret;
		}
		if(++i == t->max)
			i = 0;
	}

	// cannot happen - table is known to be non-full
	runtime_throw("finalizer table inconsistent");

ret:
	t->fkey[i] = k;
	t->val[i].fn = fn;
	t->val[i].ft = ft;
}
Example #26
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]));
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]));
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
Example #27
bool
runtime_addfinalizer(void *p, FuncVal *f, const struct __go_func_type *ft)
{
	Fintab *tab;
	byte *base;
	
	if(debug) {
		if(!runtime_mlookup(p, &base, nil, nil) || p != base)
			runtime_throw("addfinalizer on invalid pointer");
	}
	
	tab = TAB(p);
	runtime_lock(tab);
	if(f == nil) {
		lookfintab(tab, p, true, nil);
		runtime_unlock(tab);
		return true;
	}

	if(lookfintab(tab, p, false, nil)) {
		runtime_unlock(tab);
		return false;
	}

	if(tab->nkey >= tab->max/2+tab->max/4) {
		// keep table at most 3/4 full:
		// allocate new table and rehash.
		resizefintab(tab);
	}

	addfintab(tab, p, f, ft);
	runtime_setblockspecial(p, true);
	runtime_unlock(tab);
	return true;
}
Example #28
void
runtime_notewakeup(Note *n)
{
	if(runtime_xchg((uint32*)&n->key, 1))
		runtime_throw("notewakeup - double wakeup");
	runtime_futexwakeup((uint32*)&n->key, 1);
}
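The wakeup pairs with a sleep loop on the same word; a sketch of the futex-based runtime_notesleep it signals (the shape is assumed, matching the xchg-to-1 protocol above):
// Sketch: block until notewakeup has set the key to 1.
static void
notesleep_sketch(Note *n)
{
	while(runtime_atomicload((uint32*)&n->key) == 0)
		runtime_futexsleep((uint32*)&n->key, 0, -1);
}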
Example #29
static bool
lookfintab(Fintab *t, void *k, bool del, Fin *f)
{
	int32 i, j;

	if(t->max == 0)
		return false;
	i = (uintptr)k % (uintptr)t->max;
	for(j=0; j<t->max; j++) {
		if(t->fkey[i] == nil)
			return false;
		if(t->fkey[i] == k) {
			if(f)
				*f = t->val[i];
			if(del) {
				t->fkey[i] = (void*)-1;
				t->val[i].fn = nil;
				t->val[i].ft = nil;
				t->ndead++;
			}
			return true;
		}
		if(++i == t->max)
			i = 0;
	}

	// cannot happen - table is known to be non-full
	runtime_throw("finalizer table inconsistent");
	return false;
}
Example #30
static Finalizer*
lookfintab(Fintab *t, void *k, bool del)
{
	int32 i, j;
	Finalizer *v;

	if(t->max == 0)
		return nil;
	i = (uintptr)k % (uintptr)t->max;
	for(j=0; j<t->max; j++) {
		if(t->key[i] == nil)
			return nil;
		if(t->key[i] == k) {
			v = t->val[i];
			if(del) {
				t->key[i] = (void*)-1;
				t->val[i] = nil;
				t->ndead++;
			}
			return v;
		}
		if(++i == t->max)
			i = 0;
	}

	// cannot happen - table is known to be non-full
	runtime_throw("finalizer table inconsistent");
	return nil;
}
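The (void*)-1 written on delete is a tombstone: a nil slot ends the probe chain, so deleted slots must stay distinguishable to keep later colliding keys reachable, and addfintab (Example #25) reuses them. A hypothetical caller showing the fetch-and-remove pattern the del flag supports:
// Hypothetical: remove and return p's finalizer in one probe, as a
// collector would when queuing finalizers to run.
static Finalizer*
take_finalizer(Fintab *t, void *p)
{
	return lookfintab(t, p, true);
}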