Example #1
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
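	// Mark the span as swept for the current sweep generation.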
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
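	// The span is no longer idle; if any of its pages had been
	// released to the OS, mark them as in use again.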
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
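		// Record both spans in the page-to-span map so interior
		// pointers still resolve to the owning span.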
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime_class_to_size[sizeclass]);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p+npage-1] = s;
	return s;
}
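The allocation path above is a general technique: first fit over size-segregated free lists, best fit among large spans as a fallback, then trimming surplus pages back onto the heap. Below is a minimal, self-contained sketch of the list-scan-and-trim part; every name in it (Span, push, alloc, NLISTS) is hypothetical and not part of the Go runtime.

#include <stdio.h>
#include <stdlib.h>

enum { NLISTS = 8 };	/* hypothetical: lists for runs of up to NLISTS-1 pages */

typedef struct Span Span;
struct Span
{
	size_t	start;	/* first page number */
	size_t	npages;	/* length in pages */
	Span	*next;
};

static Span *freelist[NLISTS];	/* freelist[n] holds runs of n pages */

static void
push(Span *s)
{
	size_t n;

	/* Runs too long for a fixed list go in the last one; the real
	   heap keeps a separate best-fit list of large spans instead. */
	n = s->npages < NLISTS ? s->npages : NLISTS-1;
	s->next = freelist[n];
	freelist[n] = s;
}

static Span*
alloc(size_t npage)
{
	size_t n;
	Span *s, *t;

	/* Scan every list that can satisfy the request, smallest first. */
	for(n=npage; n<NLISTS; n++) {
		if(freelist[n] == NULL)
			continue;
		s = freelist[n];
		freelist[n] = s->next;
		if(s->npages > npage) {
			/* Trim: split off the tail and give it back. */
			t = malloc(sizeof *t);
			t->start = s->start + npage;
			t->npages = s->npages - npage;
			s->npages = npage;
			push(t);
		}
		return s;
	}
	return NULL;	/* a real heap would grow here */
}

int
main(void)
{
	Span init = { 0, 7, NULL };
	Span *s, *u;

	push(&init);
	s = alloc(3);
	printf("got pages [%zu,%zu)\n", s->start, s->start + s->npages);
	u = alloc(4);
	printf("got pages [%zu,%zu)\n", u->start, u->start + u->npages);
	return 0;
}

Keeping one list per exact run length makes common allocations O(1); only an oversized run pays for a split, and the remainder immediately becomes allocatable again, just as MHeap_FreeLocked re-files the trimmed span t above.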
Example #2
// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
//	GOTRACEBACK=0   suppress all tracebacks
//	GOTRACEBACK=1   default behavior - show tracebacks but exclude runtime frames
//	GOTRACEBACK=2   show tracebacks including runtime frames
//	GOTRACEBACK=crash   show tracebacks including runtime frames, then crash (core dump etc)
int32
runtime_gotraceback(bool *crash)
{
	const byte *p;
	uint32 x;

	if(crash != nil)
		*crash = false;
	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
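	// traceback_cache packs the level in bits 1.. and the crash flag
	// in bit 0; ~(uint32)0 means the cache is not yet initialized.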
	x = runtime_atomicload(&traceback_cache);
	if(x == ~(uint32)0) {
		p = runtime_getenv("GOTRACEBACK");
		if(p == nil)
			p = (const byte*)"";
		if(p[0] == '\0')
			x = 1<<1;
		else if(runtime_strcmp((const char *)p, "crash") == 0)
			x = (2<<1) | 1;
		else
		x = runtime_atoi(p)<<1;
		runtime_atomicstore(&traceback_cache, x);
	}
	if(crash != nil)
		*crash = x&1;
	return x>>1;
}
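The cached word packs both results into one value: bit 0 holds the crash flag and the remaining bits hold the traceback level, which is why "crash" is stored as (2<<1)|1, the level is recovered with x>>1, and the flag with x&1. Here is a standalone sketch of that encoding; pack is a hypothetical helper, not runtime API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a traceback level and a crash flag into one word, mirroring
   runtime_gotraceback's cache: bit 0 = crash, bits 1.. = level. */
static uint32_t
pack(uint32_t level, bool crash)
{
	return level<<1 | (crash ? 1 : 0);
}

int
main(void)
{
	uint32_t x;

	x = pack(2, true);	/* what GOTRACEBACK=crash would cache: (2<<1)|1 */
	printf("level=%u crash=%u\n", (unsigned)(x>>1), (unsigned)(x&1));
	return 0;
}

Caching the packed word means the environment lookup and string parse happen once; every later call is a single atomic load plus two bit operations.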
Example #3
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.  Sets sweepgen to
// the latest generation.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime_throw("freespan into cached span");
	runtime_lock(&c->lock);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
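		// s had no free objects, so it was on the empty list;
		// it is about to gain some, so move it to nonempty.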
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// Delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);

	if(s->ref != 0) {
		runtime_unlock(&c->lock);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s); // unlocks c
	return true;
}
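The free-list update above is a constant-time splice: the caller passes an already-linked chain from start to end, so only end->next and the list head change, no matter how many objects are returned. Below is a minimal standalone version of that splice, with hypothetical types rather than the runtime's.

#include <stdio.h>

typedef struct Link Link;
struct Link
{
	Link *next;
};

/* Splice the pre-linked chain start..end onto the front of *head in
   O(1), like MCentral_FreeSpan returning objects to s->freelist. */
static void
splice(Link **head, Link *start, Link *end)
{
	end->next = *head;
	*head = start;
}

int
main(void)
{
	Link a, b, c, d, *l, *head;
	int n;

	d.next = NULL;
	head = &d;		/* existing list: d */
	a.next = &b;
	b.next = &c;		/* chain to return: a -> b -> c */
	splice(&head, &a, &c);
	n = 0;
	for(l = head; l != NULL; l = l->next)
		n++;
	printf("list length after splice: %d\n", n);	/* prints 4 */
	return 0;
}

Because the sweep already linked the objects into a chain, the cost of returning n objects to the central list does not depend on n.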