// Forward declaration; defined below. Returns s to the heap and unlocks c.
static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	runtime_lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	if(s->ref != 0) {
		runtime_unlock(c);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s);	// unlocks c
	return true;
}
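// A sketch of the caller-side contract for runtime_MCentral_FreeSpan: the
// sweeper collects dead objects belonging to one span into a singly linked
// batch (start..end, n objects) and hands the whole batch back in a single
// locked operation. The helper below is illustrative only; its name and the
// objs[] array are assumptions, not part of the runtime API. Note that the
// batch's last link is left unterminated here, because
// runtime_MCentral_FreeSpan overwrites end->next when it splices the batch
// onto s->freelist.
static void
example_sweep_free_batch(MCentral *c, MSpan *s, MLink **objs, int32 n)
{
	int32 i;
	MLink *start, *end;

	if(n == 0)
		return;
	// Chain the dead objects: objs[0] -> objs[1] -> ... -> objs[n-1].
	start = objs[0];
	end = objs[n-1];
	for(i = 0; i < n-1; i++)
		objs[i]->next = objs[i+1];
	// One lock round-trip frees the whole batch; the return value reports
	// whether the span became completely free and went back to the heap.
	runtime_MCentral_FreeSpan(c, s, n, start, end);
}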
// Return s to the heap. s must be unused (s->ref == 0). Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime_class_to_size[c->sizeclass];
	runtime_MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime_throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime_unlock(c);
	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime_MHeap_Free(&runtime_mheap, s, 0);
}
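// The c->nfree adjustment above subtracts the span's full object capacity:
// a span of npages pages holds (npages << PageShift) / size objects of its
// size class. A minimal sketch of that arithmetic, assuming 4 KB pages
// (PageShift == 12) as in the historical runtime; the function name is made
// up for illustration.
static int32
example_span_capacity(MSpan *s, int32 size)
{
	// E.g. one 4 KB page of 1024-byte objects: (1 << 12) / 1024 == 4.
	return (s->npages << PageShift) / size;
}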
// Helper: free one object back into the central free list.
static void
MCentral_Free(MCentral *c, void *v)
{
	MSpan *s;
	MLink *p;
	int32 size;

	// Find span for v.
	s = runtime_MHeap_Lookup(&runtime_mheap, v);
	if(s == nil || s->ref == 0)
		runtime_throw("invalid free");

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add v back to s's free list.
	p = v;
	p->next = s->freelist;
	s->freelist = p;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(--s->ref == 0) {
		size = runtime_class_to_size[c->sizeclass];
		runtime_MSpanList_Remove(s);
		runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
		*(uintptr*)(s->start<<PageShift) = 1;	// needs zeroing
		s->freelist = nil;
		c->nfree -= (s->npages << PageShift) / size;
		runtime_unlock(c);
		runtime_MHeap_Free(&runtime_mheap, s, 0);
		runtime_lock(c);
	}
}
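// MCentral_Free expects the caller to hold c's lock (it drops and retakes
// the lock itself around runtime_MHeap_Free, which takes the heap's own
// lock). A sketch of how a caller might drain a list of dead objects into
// the central list one at a time; the function name is hypothetical. The
// one subtlety worth showing: the next pointer must be saved before each
// free, because MCentral_Free reuses the word v->next when it pushes v onto
// the span's free list.
static void
example_free_list(MCentral *c, MLink *first)
{
	MLink *v, *next;

	runtime_lock(c);
	for(v = first; v != nil; v = next) {
		next = v->next;	// save before the link word is overwritten
		MCentral_Free(c, v);
	}
	runtime_unlock(c);
}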
// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
static void
sweepspan(MSpan *s)
{
	int32 n, npages, size;
	byte *p;
	uint32 ref, *gcrefp, *gcrefep;
	MCache *c;
	Finalizer *f;

	p = (byte*)(s->start << PageShift);
	if(s->sizeclass == 0) {
		// Large block.
		ref = s->gcref0;
		switch(ref & ~(RefFlags^RefHasFinalizer)) {
		case RefNone:
			// Free large object.
			mstats.alloc -= s->npages<<PageShift;
			runtime_memclr(p, s->npages<<PageShift);
			if(ref & RefProfiled)
				runtime_MProf_Free(p, s->npages<<PageShift);
			s->gcref0 = RefFree;
			runtime_MHeap_Free(&runtime_mheap, s, 1);
			break;
		case RefNone|RefHasFinalizer:
			f = runtime_getfinalizer(p, 1);
			if(f == nil)
				runtime_throw("finalizer inconsistency");
			f->arg = p;
			f->next = finq;
			finq = f;
			ref &= ~RefHasFinalizer;
			// fall through
		case RefSome:
		case RefSome|RefHasFinalizer:
			s->gcref0 = RefNone | (ref&RefFlags);
			break;
		}
		return;
	}

	// Chunk full of small blocks.
	runtime_MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
	gcrefp = s->gcref;
	gcrefep = s->gcref + n;
	for(; gcrefp < gcrefep; gcrefp++, p += size) {
		ref = *gcrefp;
		if(ref < RefNone)	// RefFree or RefStack
			continue;
		switch(ref & ~(RefFlags^RefHasFinalizer)) {
		case RefNone:
			// Free small object.
			if(ref & RefProfiled)
				runtime_MProf_Free(p, size);
			*gcrefp = RefFree;
			c = m->mcache;
			if(size > (int32)sizeof(uintptr))
				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
			mstats.alloc -= size;
			mstats.by_size[s->sizeclass].nfree++;
			runtime_MCache_Free(c, p, s->sizeclass, size);
			break;
		case RefNone|RefHasFinalizer:
			f = runtime_getfinalizer(p, 1);
			if(f == nil)
				runtime_throw("finalizer inconsistency");
			f->arg = p;
			f->next = finq;
			finq = f;
			ref &= ~RefHasFinalizer;
			// fall through
		case RefSome:
		case RefSome|RefHasFinalizer:
			*gcrefp = RefNone | (ref&RefFlags);
			break;
		}
	}
}
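// Both switches above key on ref & ~(RefFlags^RefHasFinalizer). RefFlags is
// the mask of all flag bits, so RefFlags^RefHasFinalizer is "every flag
// except RefHasFinalizer"; clearing those bits leaves the base state
// (RefNone or RefSome) plus, alone among the flags, RefHasFinalizer --
// exactly the four cases the switch distinguishes. A minimal sketch of that
// classification, assuming the historical encoding (RefFree=0, RefStack=1,
// RefNone=2, RefSome=3, flag bits in the high half of the word); the
// function name is made up for illustration.
static uint32
example_sweep_key(uint32 ref)
{
	// E.g. ref == RefNone|RefProfiled|RefHasFinalizer yields
	// RefNone|RefHasFinalizer: the object is dead and its finalizer must
	// be queued, regardless of the unrelated profiling flag.
	return ref & ~(RefFlags^RefHasFinalizer);
}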