Example #1
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
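
The trim step above maps page numbers to spans: it converts t->start (a PageID) into an index into h->spans by subtracting the arena's base page, then records the new span at its first and last page (and points the page just before it back at the trimmed front span) so the GC can map any interior pointer to its containing span. Below is a minimal standalone sketch of that index arithmetic only; PAGE_SHIFT, arena_start, and the spans[] array are illustrative stand-ins for the runtime's PageShift, h->arena_start, and h->spans, not the project's actual values.

/* Standalone sketch of the h->spans page-map indexing used in the trim code. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13                 /* assumed 8 KB pages, as in the old runtime */

int main(void)
{
	uintptr_t arena_start = 0x40000000UL;   /* hypothetical, page-aligned arena base */
	void *spans[16] = {0};                  /* one entry per arena page */

	/* A span starting 5 pages into the arena, 3 pages long. */
	uintptr_t start_page = (arena_start >> PAGE_SHIFT) + 5;
	uintptr_t npages = 3;

	/* Same arithmetic as the excerpt: index = PageID - arena base page. */
	uintptr_t p = start_page - (arena_start >> PAGE_SHIFT);
	spans[p] = (void*)1;                    /* first page -> span */
	spans[p + npages - 1] = (void*)1;       /* last page  -> span */

	printf("span occupies spans[%lu..%lu]\n",
	       (unsigned long)p, (unsigned long)(p + npages - 1));
	return 0;
}
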
Example #2
File: mheap.c Project: tempbottle/golang
// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
static MSpan*
mheap_alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large)
{
	MSpan *s;

	if(g != g->m->g0)
		runtime·throw("mheap_alloc not on M stack");
	runtime·lock(&h->lock);

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// transfer stats from cache to global
	mstats.heap_alloc += g->m->mcache->local_cachealloc;
	g->m->mcache->local_cachealloc = 0;

	s = MHeap_AllocSpanLocked(h, npage);
	if(s != nil) {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		runtime·atomicstore(&s->sweepgen, h->sweepgen);
		s->state = MSpanInUse;
		s->freelist = nil;
		s->ref = 0;
		s->sizeclass = sizeclass;
		s->elemsize = (sizeclass==0 ? s->npages<<PageShift : runtime·class_to_size[sizeclass]);

		// update stats, sweep lists
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime·MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime·unlock(&h->lock);
	return s;
}
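
In the second excerpt, s->elemsize is chosen from the size class: class 0 marks a large allocation whose single object fills the whole span, while any other class looks the object size up in a table. The standalone sketch below illustrates that selection; the class_to_size values and PAGE_SHIFT constant are illustrative stand-ins for the runtime's runtime·class_to_size table, not its real contents.

/* Sketch of the elemsize selection made in mheap_alloc above. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13

/* Hypothetical, truncated size-class table; class 0 is reserved for large objects. */
static uintptr_t class_to_size[] = { 0, 8, 16, 32, 48, 64 };

static uintptr_t
elem_size(int32_t sizeclass, uintptr_t npages)
{
	if(sizeclass == 0)
		return npages << PAGE_SHIFT;    /* large object: one object per span */
	return class_to_size[sizeclass];        /* small object: table lookup */
}

int main(void)
{
	printf("large 4-page span: %lu bytes per object\n", (unsigned long)elem_size(0, 4));
	printf("sizeclass 3 span:  %lu bytes per object\n", (unsigned long)elem_size(3, 1));
	return 0;
}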