Example #1
// Free n objects from a span s back into the central free list c.
// Called from GC.
void
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	int32 size;

	runtime·lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// If s is completely freed, return it to the heap.
	if(s->ref == 0) {
		size = runtime·class_to_size[c->sizeclass];
		runtime·MSpanList_Remove(s);
		*(uintptr*)(s->start<<PageShift) = 1;  // needs zeroing
		s->freelist = nil;
		c->nfree -= (s->npages << PageShift) / size;
		runtime·unlock(c);
		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
		runtime·MHeap_Free(runtime·mheap, s, 0);
	} else {
		runtime·unlock(c);
	}
}
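
The batch free above relies on the freed objects already being linked into a chain from start to end, so the whole batch is spliced onto the span's free list in O(1). A minimal sketch of that splice, using hypothetical Link/Span stand-ins for the runtime's MLink and MSpan:

#include <stdio.h>
#include <stddef.h>

// Hypothetical stand-ins for MLink/MSpan, reduced to the fields the
// splice touches.
typedef struct Link Link;
struct Link {
	Link *next;
};

typedef struct {
	Link *freelist;
	int ref;	// objects from this span still in use
} Span;

// Prepend the chain [start, end] of n freed objects in O(1),
// exactly as the example does with end->next and s->freelist.
static void
freechain(Span *s, Link *start, Link *end, int n)
{
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
}

int
main(void)
{
	Link a, b, c = {NULL};
	Span s = {&c, 3};
	int len = 0;

	a.next = &b;
	b.next = NULL;
	freechain(&s, &a, &b, 2);
	for(Link *p = s.freelist; p != NULL; p = p->next)
		len++;
	printf("free list length = %d, ref = %d\n", len, s.ref);	// 3, 1
	return 0;
}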
Example #2
// Return s to the heap.  s must be unused (s->ref == 0).  Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime·class_to_size[c->sizeclass];
	runtime·MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime·throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime·unlock(c);
	runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime·MHeap_Free(&runtime·mheap, s, 0);
}
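
This helper is the same tail as Example #1 factored out: an explicit needzero flag replaces the word poked into the span's first page, and the precondition on s->ref becomes a throw. A sketch of the needzero protocol itself, with a hypothetical Region type in place of MSpan, assuming the allocator zeroes lazily on reuse:

#include <stdio.h>
#include <string.h>

// Hypothetical miniature of the "needzero" protocol: memory returned
// to the heap is flagged dirty, and the next allocation from it must
// clear it before handing it out.
typedef struct {
	unsigned char *base;
	size_t len;
	int needzero;
} Region;

static void *
region_alloc(Region *r)
{
	if(r->needzero) {
		memset(r->base, 0, r->len);	// zero lazily, on reuse
		r->needzero = 0;
	}
	return r->base;
}

static void
region_free(Region *r)
{
	r->needzero = 1;	// mark dirty instead of zeroing now
}

int
main(void)
{
	unsigned char buf[16];
	Region r = {buf, sizeof buf, 1};
	unsigned char *p;

	p = region_alloc(&r);
	p[0] = 42;
	region_free(&r);
	p = region_alloc(&r);
	printf("p[0] after reuse = %d\n", p[0]);	// 0: zeroed on reuse
	return 0;
}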
Example #3
void
runtime·free(void *v)
{
	int32 sizeclass;
	MSpan *s;
	MCache *c;
	uint32 prof;
	uintptr size;

	if(v == nil)
		return;

	// Freeing while mallocing would deadlock on the heap locks.
	if(m->mallocing)
		runtime·throw("malloc/free - deadlock");
	m->mallocing = 1;

	// Find the span containing v.
	if(!runtime·mlookup(v, nil, nil, &s)) {
		runtime·printf("free %p: not an allocated block\n", v);
		runtime·throw("free runtime·mlookup");
	}
	prof = runtime·blockspecial(v);

	sizeclass = s->sizeclass;
	c = m->mcache;
	if(sizeclass == 0) {
		// Large object: return the whole span to the heap.
		size = s->npages<<PageShift;
		*(uintptr*)(s->start<<PageShift) = 1;	// mark as "needs to be zeroed"
		runtime·markfreed(v, size);
		runtime·unmarkspan(v, 1<<PageShift);
		runtime·MHeap_Free(&runtime·mheap, s, 1);
	} else {
		// Small object: return it to the per-M cache.
		size = runtime·class_to_size[sizeclass];
		if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 1;	// mark as "needs to be zeroed"
		runtime·markfreed(v, size);
		c->local_by_size[sizeclass].nfree++;
		runtime·MCache_Free(c, v, sizeclass, size);
	}
	c->local_alloc -= size;
	if(prof)
		runtime·MProf_Free(v, size);
	m->mallocing = 0;
}
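
The small-object branch marks the dead block by writing a nonzero word into its second slot (the first slot holds the free-list link), deferring the memclr until the block is actually reused. A self-contained sketch of that marker, with sizes and names invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

// Illustration of the deferred-zeroing trick: instead of clearing a
// freed block eagerly, store a nonzero word *inside* the dead block
// (((uintptr*)v)[1] = 1) and zero it only if the marker is seen on reuse.
enum { BlockSize = 32 };

static void
mark_needs_zero(void *v)
{
	((uintptr_t*)v)[1] = 1;	// second word; the first is the free-list link
}

static void *
alloc_block(void *v)
{
	if(((uintptr_t*)v)[1] != 0)	// dirty marker set at free time
		memset(v, 0, BlockSize);
	return v;
}

int
main(void)
{
	uint64_t block[BlockSize / sizeof(uint64_t)];
	uint64_t *p;

	memset(block, 0xAB, sizeof block);	// leftover "used" garbage
	mark_needs_zero(block);
	p = alloc_block(block);
	printf("first word after reuse = %llu\n", (unsigned long long)p[0]);	// 0
	return 0;
}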
Example #4
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.  Sets sweepgen to
// the latest generation.
bool
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime·throw("freespan into cached span");
	runtime·lock(&c->lock);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	
	// Delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked-list operations above (actually, just after the
	// lock of c above).
	runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);

	if(s->ref != 0) {
		runtime·unlock(&c->lock);
		return false;
	}

	// s is completely freed, return it to the heap.
	runtime·MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	runtime·unlock(&c->lock);
	runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime·MHeap_Free(&runtime·mheap, s, 0);
	return true;
}
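
The comment about sweepgen is an ordering requirement: the free-list updates must be complete before the generation number is published, because that store is what tells other threads the span may be handed to an MCache. A C11-atomics sketch of the same publish step (the runtime uses its own runtime·atomicstore, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdio.h>
#include <stddef.h>

// Reduced span: just the fields involved in the publish.
typedef struct {
	void *freelist;
	atomic_uint sweepgen;
} Span;

static void
finish_sweep(Span *s, void *list, unsigned heap_sweepgen)
{
	s->freelist = list;	// 1. finish the linked-list updates
	// 2. only then publish: a release store keeps the write above
	// visible to any thread that loads sweepgen with acquire.
	atomic_store_explicit(&s->sweepgen, heap_sweepgen, memory_order_release);
}

int
main(void)
{
	Span s;
	int obj;

	s.freelist = NULL;
	atomic_init(&s.sweepgen, 0);
	finish_sweep(&s, &obj, 42);
	printf("sweepgen = %u\n", atomic_load(&s.sweepgen));	// 42
	return 0;
}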
Example #5
// Helper: free one object back into the central free list.
static void
MCentral_Free(MCentral *c, void *v)
{
	MSpan *s;
	MLink *p;
	int32 size;

	// Find span for v.
	s = runtime·MHeap_Lookup(runtime·mheap, v);
	if(s == nil || s->ref == 0)
		runtime·throw("invalid free");

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime·MSpanList_Remove(s);
		runtime·MSpanList_Insert(&c->nonempty, s);
	}

	// Add v back to s's free list.
	p = v;
	p->next = s->freelist;
	s->freelist = p;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(--s->ref == 0) {
		size = runtime·class_to_size[c->sizeclass];
		runtime·MSpanList_Remove(s);
		runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
		*(uintptr*)(s->start<<PageShift) = 1;  // needs zeroing
		s->freelist = nil;
		c->nfree -= (s->npages << PageShift) / size;
		runtime·unlock(c);
		runtime·MHeap_Free(runtime·mheap, s, 0);
		runtime·lock(c);
	}
}
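
Note the unlock/relock bracketing runtime·MHeap_Free: the central lock is dropped before calling into the heap, which takes its own coarser lock, and reacquired afterwards because the caller expects c to stay locked. A pthreads sketch of that discipline (lock names are illustrative):

#include <pthread.h>
#include <stdio.h>

// Two locks at different levels of the allocator; the point of the
// dance is that they are never held at the same time.
static pthread_mutex_t central_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

static void
heap_free(void)
{
	pthread_mutex_lock(&heap_lock);
	// ... return span pages to the heap ...
	pthread_mutex_unlock(&heap_lock);
}

// Called with central_lock held; returns with it held, like the
// unlock/relock around runtime·MHeap_Free above.
static void
return_span_to_heap(void)
{
	pthread_mutex_unlock(&central_lock);	// drop before touching the heap
	heap_free();
	pthread_mutex_lock(&central_lock);	// caller still expects it locked
}

int
main(void)
{
	pthread_mutex_lock(&central_lock);
	return_span_to_heap();
	pthread_mutex_unlock(&central_lock);
	printf("done\n");
	return 0;
}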
Example #6
// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
static void
sweepspan(MSpan *s)
{
	int32 n, npages, size;
	byte *p;
	uint32 ref, *gcrefp, *gcrefep;
	MCache *c;
	Finalizer *f;

	p = (byte*)(s->start << PageShift);
	if(s->sizeclass == 0) {
		// Large block.
		ref = s->gcref0;
		switch(ref & ~(RefFlags^RefHasFinalizer)) {
		case RefNone:
			// Free large object.
			mstats.alloc -= s->npages<<PageShift;
			runtime_memclr(p, s->npages<<PageShift);
			if(ref & RefProfiled)
				MProf_Free(p, s->npages<<PageShift);
			s->gcref0 = RefFree;
			MHeap_Free(&mheap, s, 1);
			break;
		case RefNone|RefHasFinalizer:
			f = getfinalizer(p, 1);
			if(f == nil)
				throw("finalizer inconsistency");
			f->arg = p;
			f->next = finq;
			finq = f;
			ref &= ~RefHasFinalizer;
			// fall through
		case RefSome:
		case RefSome|RefHasFinalizer:
			s->gcref0 = RefNone | (ref&RefFlags);
			break;
		}
		return;
	}

	// Chunk full of small blocks.
	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
	gcrefp = s->gcref;
	gcrefep = s->gcref + n;
	for(; gcrefp < gcrefep; gcrefp++, p += size) {
		ref = *gcrefp;
		if(ref < RefNone)	// RefFree or RefStack
			continue;
		switch(ref & ~(RefFlags^RefHasFinalizer)) {
		case RefNone:
			// Free small object.
			if(ref & RefProfiled)
				MProf_Free(p, size);
			*gcrefp = RefFree;
			c = m->mcache;
			if(size > sizeof(uintptr))
				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
			mstats.alloc -= size;
			mstats.by_size[s->sizeclass].nfree++;
			MCache_Free(c, p, s->sizeclass, size);
			break;
		case RefNone|RefHasFinalizer:
			f = getfinalizer(p, 1);
			if(f == nil)
				throw("finalizer inconsistency");
			f->arg = p;
			f->next = finq;
			finq = f;
			ref &= ~RefHasFinalizer;
			// fall through
		case RefSome:
		case RefSome|RefHasFinalizer:
			*gcrefp = RefNone | (ref&RefFlags);
			break;
		}
	}
}
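
The switch key ref & ~(RefFlags^RefHasFinalizer) strips every flag bit except RefHasFinalizer, so flags like RefProfiled never affect which case runs. A standalone check of that bit arithmetic, with constant values assumed for illustration (the shape, low state values plus high flag bits, is what matters, not the exact numbers):

#include <stdio.h>
#include <stdint.h>

// Assumed layout: small integer state codes in the low bits,
// independent flag bits up high. Not the runtime's actual values.
#define RefFree         0u
#define RefStack        1u
#define RefNone         2u
#define RefSome         3u
#define RefProfiled     (1u<<29)
#define RefHasFinalizer (1u<<30)
#define RefNoPointers   (1u<<31)
#define RefFlags        (RefProfiled|RefHasFinalizer|RefNoPointers)

int
main(void)
{
	uint32_t ref = RefNone | RefProfiled | RefHasFinalizer;

	// RefFlags^RefHasFinalizer is every flag bit except RefHasFinalizer,
	// so the complement keeps the state bits and the finalizer bit only.
	uint32_t key = ref & ~(RefFlags^RefHasFinalizer);
	printf("key == RefNone|RefHasFinalizer: %d\n",
		key == (RefNone|RefHasFinalizer));	// 1: RefProfiled is ignored
	return 0;
}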