Example #1
// Free n objects from a span s back into the central free list c.
// Called from GC.
void
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	int32 size;

	runtime·lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// If s is completely freed, return it to the heap.
	if(s->ref == 0) {
		size = runtime·class_to_size[c->sizeclass];
		runtime·MSpanList_Remove(s);
		*(uintptr*)(s->start<<PageShift) = 1;  // needs zeroing
		s->freelist = nil;
		c->nfree -= (s->npages << PageShift) / size;
		runtime·unlock(c);
		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
		runtime·MHeap_Free(runtime·mheap, s, 0);
	} else {
		runtime·unlock(c);
	}
}
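The heart of this free path is the O(1) splice: a pre-linked chain of objects from start to end is pushed onto the front of the span's singly linked free list. A minimal standalone sketch of that splice (Link, Span, and freechain are illustrative names, not the runtime's):

#include <stdio.h>
#include <stddef.h>

typedef struct Link Link;
struct Link { Link *next; };

typedef struct { Link *freelist; } Span;

// Splice the chain start..end onto the front of the span's free list.
// Mirrors "end->next = s->freelist; s->freelist = start;" above.
static void
freechain(Span *s, Link *start, Link *end)
{
	end->next = s->freelist;
	s->freelist = start;
}

int
main(void)
{
	Link a = {NULL}, b = {NULL}, c = {NULL};
	Span s = {&c};          // span already holds one free object
	a.next = &b;            // chain a -> b being returned
	freechain(&s, &a, &b);  // list is now a -> b -> c
	for(Link *p = s.freelist; p != NULL; p = p->next)
		printf("link %p\n", (void*)p);
	return 0;
}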
Example #2
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to the heap.  Sets sweepgen to
// the latest generation.
bool
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime·throw("freespan into cached span");
	runtime·lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;
	
	// delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);

	if(s->ref != 0) {
		runtime·unlock(c);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s); // unlocks c
	return true;
}
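The sweepgen field encodes a small state machine relative to the heap's current sweep generation sg: sg-2 means the span needs sweeping, sg-1 means it is being swept, and sg means it has been swept and may be handed to an MCache. A hedged C11 sketch of the claim and the delayed publish (Span, claim_for_sweep, and publish_swept are illustrative stand-ins):

#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic uint32_t sweepgen; } Span;

// Claim a span for sweeping, as runtime·cas(&s->sweepgen, sg-2, sg-1)
// does above: exactly one sweeper wins the transition sg-2 -> sg-1.
static int
claim_for_sweep(Span *s, uint32_t sg)
{
	uint32_t want = sg - 2;
	return atomic_compare_exchange_strong(&s->sweepgen, &want, sg - 1);
}

// Publish the span as swept, as the delayed atomicstore above does;
// only after this may another thread hand the span to an MCache, so
// it must follow all of the sweeper's list manipulation.
static void
publish_swept(Span *s, uint32_t sg)
{
	atomic_store(&s->sweepgen, sg);
}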
Example #3
// Return span from an MCache.
void
runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
	MLink *v;
	int32 cap, n;

	runtime·lock(c);

	s->incache = false;

	// Move any explicitly freed items from the freebuf to the freelist.
	while((v = s->freebuf) != nil) {
		s->freebuf = v->next;
		runtime·markfreed(v);
		v->next = s->freelist;
		s->freelist = v;
		s->ref--;
	}

	if(s->ref == 0) {
		// Free back to heap.  Unlikely, but possible.
		MCentral_ReturnToHeap(c, s); // unlocks c
		return;
	}
	
	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n > 0) {
		c->nfree += n;
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}
	runtime·unlock(c);
}
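The capacity arithmetic above is bytes-per-span over bytes-per-object: with the runtime's 8 KiB pages (PageShift == 13), a one-page span of 64-byte objects has cap = 8192/64 = 128 slots, and n = cap - ref counts the free slots. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

enum { PageShift = 13 };  // the runtime's 8 KiB pages

int
main(void)
{
	uintptr_t npages = 1, elemsize = 64, ref = 100;
	int32_t cap = (int32_t)((npages << PageShift) / elemsize);  // 8192/64
	int32_t n = cap - (int32_t)ref;  // free slots in the span
	assert(cap == 128);
	assert(n == 28);
	return 0;
}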
Example #4
// Sweeps spans in list until it has reclaimed at least npages pages
// into the heap.  Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime·mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
			runtime·MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime·MSpanList_InsertBack(list, s);
			runtime·unlock(&h->lock);
			n += runtime·MSpan_Sweep(s);
			runtime·lock(&h->lock);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by the background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}
	return n;
}
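MSpan_Sweep may block or take other locks, so the loop drops h->lock around it; any list pointers held across the unlock are then stale, which is why the code reinserts the span at the back and restarts the scan. A pthread-based sketch of that lock-drop-and-retry shape (Node, drain, and the claimed flag are illustrative, not the runtime's):

#include <pthread.h>
#include <stdatomic.h>

typedef struct Node Node;
struct Node { Node *next, *prev; _Atomic int claimed; };

// Run work() once on every node of a sentinel-headed list, dropping
// the lock around the call because work may block; the iterator is
// stale after relocking, so restart the scan, as MHeap_ReclaimList does.
static void
drain(pthread_mutex_t *lk, Node *list, void (*work)(Node*))
{
	pthread_mutex_lock(lk);
retry:
	for(Node *n = list->next; n != list; n = n->next) {
		int want = 0;
		if(atomic_compare_exchange_strong(&n->claimed, &want, 1)) {
			pthread_mutex_unlock(lk);
			work(n);            // list may be mutated while unlocked
			pthread_mutex_lock(lk);
			goto retry;         // pointers held across unlock are stale
		}
	}
	pthread_mutex_unlock(lk);
}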
Example #5
// Allocates a span of the given size.  h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
static MSpan*
MHeap_AllocSpanLocked(MHeap *h, uintptr npage)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	if(s->next != nil || s->prev != nil)
		runtime·throw("still in list");
	if(s->npreleased > 0) {
		runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
		mstats.heap_released -= s->npreleased<<PageShift;
		s->npreleased = 0;
	}

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		s->state = MSpanStack; // prevent coalescing with s
		t->state = MSpanStack;
		MHeap_FreeSpanLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age (TODO: wrong: t is possibly merged and/or deallocated at this point)
		s->state = MSpanFree;
	}
Example #6
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;

	if(s->state != MSpanInUse || s->ref != 0) {
		printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		throw("MHeap_FreeLocked - invalid free");
	}
	s->state = MSpanFree;
	MSpanList_Remove(s);

	// Coalesce with earlier, later spans.
	if((t = MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		MHeapMap_Set(&h->map, s->start, s);
		MSpanList_Remove(t);
		t->state = MSpanDead;
		FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if((t = MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
		MSpanList_Remove(t);
		t->state = MSpanDead;
		FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		MSpanList_Insert(&h->free[s->npages], s);
	else
		MSpanList_Insert(&h->large, s);

	// TODO(rsc): IncrementalScavenge() to return memory to OS.
}
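Coalescing is O(1) because the heap map records, for every page, which span owns it: looking up s->start - 1 and s->start + s->npages finds the two neighbors directly. A toy version of the left-hand merge with a flat array standing in for MHeapMap (all names illustrative):

#include <stddef.h>

enum { NPAGES = 64 };

typedef struct Span Span;
struct Span { size_t start, npages; int inuse; };

static Span *pagemap[NPAGES];  // page number -> owning span

// Absorb a free left-hand neighbor, mirroring the first coalescing
// branch of MHeap_FreeLocked: look up the page just before s, and if
// its owner is free, grow s to cover it.  Only boundary pages of the
// merged span need their map entries updated.
static void
coalesce_left(Span *s)
{
	Span *t;

	if(s->start == 0)
		return;
	t = pagemap[s->start - 1];
	if(t == NULL || t->inuse)
		return;
	s->start = t->start;
	s->npages += t->npages;
	pagemap[s->start] = s;  // new first page; last page already maps to s
	// the real code also frees t's MSpan descriptor back to spanalloc here
}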
Example #7
File: mheap.c Project: rlcook0/go
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}
Example #8
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		throw("MHeap_AllocLocked - bad npages");
	MSpanList_Remove(s);
	s->state = MSpanInUse;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		MHeapMap_Set(&h->map, t->start - 1, s);
		MHeapMap_Set(&h->map, t->start, t);
		MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	for(n=0; n<npage; n++)
		MHeapMap_Set(&h->map, s->start+n, s);
	return s;
}
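This is a segregated-fit allocator: h->free[n] holds free spans of exactly n pages, so small requests hit an exact-size list in O(1), and only larger requests pay for a best-fit scan of the large list. A compact sketch of that lookup order (Heap, find_span, and MaxSmall are illustrative stand-ins for the runtime's structures):

#include <stddef.h>

enum { MaxSmall = 128 };  // stand-in for nelem(h->free)

typedef struct Span Span;
struct Span { size_t npages; Span *next; };

typedef struct {
	Span *free[MaxSmall];  // free[n] holds spans of exactly n pages
	Span *large;           // spans of MaxSmall pages or more
} Heap;

// Exact-size lists first, then best fit among the large spans,
// as in MHeap_AllocLocked above.
static Span*
find_span(Heap *h, size_t npage)
{
	size_t n;
	Span *s, *best;

	for(n = npage; n < MaxSmall; n++)
		if(h->free[n] != NULL)
			return h->free[n];
	best = NULL;
	for(s = h->large; s != NULL; s = s->next)
		if(s->npages >= npage && (best == NULL || s->npages < best->npages))
			best = s;
	return best;
}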
Example #9
// Helper: free one object back into the central free list.
static void
MCentral_Free(MCentral *c, void *v)
{
	MSpan *s;
	MLink *p;
	int32 size;

	// Find span for v.
	s = runtime·MHeap_Lookup(runtime·mheap, v);
	if(s == nil || s->ref == 0)
		runtime·throw("invalid free");

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add v back to s's free list.
	p = v;
	p->next = s->freelist;
	s->freelist = p;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(--s->ref == 0) {
		size = runtime·class_to_size[c->sizeclass];
		runtime·MSpanList_Remove(s);
		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
		*(uintptr*)(s->start<<PageShift) = 1;  // needs zeroing
		s->freelist = nil;
		c->nfree -= (s->npages << PageShift) / size;
		runtime·unlock(c);
		runtime·MHeap_Free(runtime·mheap, s, 0);
		runtime·lock(c);
	}
}
Example #10
// Return s to the heap.  s must be unused (s->ref == 0).  Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime·class_to_size[c->sizeclass];
	runtime·MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime·throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime·unlock(c);
	runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime·MHeap_Free(&runtime·mheap, s, 0);
}
Example #11
// Allocate up to n objects from the central free list.
// Return the number of objects allocated.
// The objects are linked together by their first words.
// On return, *pstart points at the first object.
int32
runtime·MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
{
	MSpan *s;
	MLink *first, *last;
	int32 cap, avail, i;

	runtime·lock(c);
	// Replenish central list if empty.
	if(runtime·MSpanList_IsEmpty(&c->nonempty)) {
		if(!MCentral_Grow(c)) {
			runtime·unlock(c);
			*pfirst = nil;
			return 0;
		}
	}
	s = c->nonempty.next;
	cap = (s->npages << PageShift) / s->elemsize;
	avail = cap - s->ref;
	if(avail < n)
		n = avail;

	// First one is guaranteed to work, because we just grew the list.
	first = s->freelist;
	last = first;
	for(i=1; i<n; i++) {
		last = last->next;
	}
	s->freelist = last->next;
	last->next = nil;
	s->ref += n;
	c->nfree -= n;

	if(n == avail) {
		if(s->freelist != nil || s->ref != cap) {
			runtime·throw("invalid freelist");
		}
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->empty, s);
	}

	runtime·unlock(c);
	*pfirst = first;
	return n;
}
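The point of AllocList is batching: one lock acquisition is amortized over n objects by walking n-1 links and cutting the chain in a single operation. A hedged sketch of that detach, with a length guard the original omits because its caller guarantees availability (take_n is an illustrative name):

#include <stddef.h>

typedef struct Link Link;
struct Link { Link *next; };

// Detach up to n objects from the front of a free list and return how
// many were taken; *first receives the detached chain.
static int
take_n(Link **list, int n, Link **first)
{
	Link *last;
	int i;

	*first = *list;
	if(*first == NULL)
		return 0;
	last = *first;
	for(i = 1; i < n && last->next != NULL; i++)
		last = last->next;
	*list = last->next;  // list now starts after the detached chain
	last->next = NULL;   // terminate the chain handed to the caller
	return i;
}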
Example #12
// Helper: allocate one object from the central free list.
static void*
MCentral_Alloc(MCentral *c)
{
	MSpan *s;
	MLink *v;

	if(runtime·MSpanList_IsEmpty(&c->nonempty))
		return nil;
	s = c->nonempty.next;
	s->ref++;
	v = s->freelist;
	s->freelist = v->next;
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->empty, s);
	}
	return v;
}
Example #13
// Return span from an MCache.
void
runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
	int32 cap, n;

	runtime·lock(&c->lock);

	s->incache = false;

	if(s->ref == 0)
		runtime·throw("uncaching full span");

	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n > 0) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}
	runtime·unlock(&c->lock);
}
Example #14
// Helper: free one object back into the central free list.
// Caller must hold lock on c on entry.  Holds lock on exit.
static void
MCentral_Free(MCentral *c, MLink *v)
{
	MSpan *s;

	// Find span for v.
	s = runtime·MHeap_Lookup(&runtime·mheap, v);
	if(s == nil || s->ref == 0)
		runtime·throw("invalid free");
	if(s->sweepgen != runtime·mheap.sweepgen)
		runtime·throw("free into unswept span");
	
	// If the span is currently being used unsynchronized by an MCache,
	// we can't modify the freelist.  Add to the freebuf instead.  The
	// items will get moved to the freelist when the span is returned
	// by the MCache.
	if(s->incache) {
		v->next = s->freebuf;
		s->freebuf = v;
		return;
	}

	// Move span to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add the object to span's free list.
	runtime·markfreed(v);
	v->next = s->freelist;
	s->freelist = v;
	s->ref--;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(s->ref == 0) {
		MCentral_ReturnToHeap(c, s); // unlocks c
		runtime·lock(c);
	}
}
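The freebuf indirection exists because an MCache uses its cached span without taking c's lock, so a concurrent free must not touch freelist; buffered objects are drained back by MCentral_UncacheSpan (Example #3). A minimal sketch of that diversion (Span reduced to the three fields that matter; names illustrative):

#include <stddef.h>

typedef struct Link Link;
struct Link { Link *next; };

typedef struct {
	int   incache;    // span is owned by an MCache right now
	Link *freelist;   // touched only under the central lock
	Link *freebuf;    // frees buffered while the span is cached
} Span;

// Free one object, respecting cache ownership: while an MCache uses
// the span unsynchronized, frees are diverted to freebuf and drained
// later, mirroring the incache branch of MCentral_Free above.
static void
free_obj(Span *s, Link *v)
{
	if(s->incache) {
		v->next = s->freebuf;
		s->freebuf = v;
	} else {
		v->next = s->freelist;
		s->freelist = v;
	}
}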
Example #15
// Allocate a span to use in an MCache.
MSpan*
runtime·MCentral_CacheSpan(MCentral *c)
{
	MSpan *s;
	int32 cap, n;
	uint32 sg;

	runtime·lock(&c->lock);
	sg = runtime·mheap.sweepgen;
retry:
	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
			runtime·unlock(&c->lock);
			runtime·MSpan_Sweep(s);
			runtime·lock(&c->lock);
			// the span could have been moved to heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		goto havespan;
	}

	for(s = c->empty.next; s != &c->empty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			runtime·MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime·MSpanList_InsertBack(&c->empty, s);
			runtime·unlock(&c->lock);
			runtime·MSpan_Sweep(s);
			runtime·lock(&c->lock);
			// the span could be moved to nonempty or heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}

	// Replenish central list if empty.
	if(!MCentral_Grow(c)) {
		runtime·unlock(&c->lock);
		return nil;
	}
	goto retry;

havespan:
	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n == 0)
		runtime·throw("empty span");
	if(s->freelist == nil)
		runtime·throw("freelist empty");
	runtime·MSpanList_Remove(s);
	runtime·MSpanList_InsertBack(&c->empty, s);
	s->incache = true;
	runtime·unlock(&c->lock);
	return s;
}
Example #16
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime·SysUnused with these pages, and on
		// Unix systems it called madvise.  At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data.  For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime·MCentral_FreeSpan).  If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros.  Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case.  The beadbead constant we use here means nothing, it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}