static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
// Free the span back into the heap.
void
MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	lock(h);
	mstats.heap_alloc += m->mcache->local_alloc;
	m->mcache->local_alloc = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct)
		mstats.heap_alloc -= s->npages<<PageShift;
	MHeap_FreeLocked(h, s);
	unlock(h);
}
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		throw("MHeap_AllocLocked - bad npages");
	MSpanList_Remove(s);
	s->state = MSpanInUse;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		MHeapMap_Set(&h->map, t->start - 1, s);
		MHeapMap_Set(&h->map, t->start, t);
		MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	for(n=0; n<npage; n++)
		MHeapMap_Set(&h->map, s->start+n, s);
	return s;
}
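The lookup order above (exact-size free lists first, then a best-fit pass over large spans) is the core of the allocation path. The following standalone C sketch mirrors that order with simplified, hypothetical types (Span, Heap, heap_alloc_span are not runtime names); span trimming and heap growth are omitted.

/* Sketch: free lists indexed by span size in pages, plus a "large"
 * list for anything bigger.  Allocation probes the exact-size lists
 * from npage upward, then best-fits against the large list. */
#include <stddef.h>
#include <stdio.h>

#define NFREELISTS 8   /* spans of 1..NFREELISTS-1 pages get exact lists */

typedef struct Span Span;
struct Span {
	size_t npages;
	Span *next;
};

typedef struct Heap Heap;
struct Heap {
	Span *free[NFREELISTS];  /* free[n] holds spans of exactly n pages */
	Span *large;             /* spans of >= NFREELISTS pages */
};

/* Pop a span of at least npage pages, or return NULL. */
static Span*
heap_alloc_span(Heap *h, size_t npage)
{
	size_t n;
	Span *s, **best, **pp;

	/* Exact-size lists first: any list from npage up will do. */
	for(n = npage; n < NFREELISTS; n++) {
		if(h->free[n] != NULL) {
			s = h->free[n];
			h->free[n] = s->next;
			return s;
		}
	}

	/* Best fit among the large spans: smallest span that still fits. */
	best = NULL;
	for(pp = &h->large; *pp != NULL; pp = &(*pp)->next) {
		if((*pp)->npages >= npage && (best == NULL || (*pp)->npages < (*best)->npages))
			best = pp;
	}
	if(best == NULL)
		return NULL;   /* caller would grow the heap and retry */
	s = *best;
	*best = s->next;
	return s;
}

int
main(void)
{
	static Span small = {3, NULL}, big = {40, NULL}, bigger = {64, NULL};
	Heap h = {{NULL}, NULL};

	h.free[3] = &small;
	big.next = &bigger;
	h.large = &big;

	printf("want 2 pages  -> got %zu-page span\n", heap_alloc_span(&h, 2)->npages);
	printf("want 20 pages -> got %zu-page span\n", heap_alloc_span(&h, 20)->npages);
	return 0;
}

In the real MHeap_AllocLocked the oversized span is then trimmed and the tail returned to the heap, which is what the "Trim extra" branch handles.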
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// For Native Client, allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = SysAlloc(ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = SysAlloc(ask);
		}
		if(v == nil)
			return false;
	}
	mstats.heap_sys += ask;

	if((byte*)v < h->min || h->min == nil)
		h->min = v;
	if((byte*)v+ask > h->max)
		h->max = (byte*)v+ask;

	// NOTE(rsc): In tcmalloc, if we've accumulated enough
	// system allocations, the heap map gets entirely allocated
	// in 32-bit mode.  (In 64-bit mode that's not practical.)
	if(!MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
		SysFree(v, ask);
		return false;
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	MHeapMap_Set(&h->map, s->start, s);
	MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
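The sizing logic at the top of MHeap_Grow is worth isolating: the request is rounded up to a multiple of 16 pages and never drops below HeapAllocChunk, with a fallback to the exact amount only if the large request fails. The sketch below reproduces just that arithmetic; PageShift = 12 and HeapAllocChunk = 1 MB are assumptions for illustration, not values taken from this listing.

#include <stdint.h>
#include <stdio.h>

enum {
	PageShift      = 12,        /* assume 4 kB pages */
	HeapAllocChunk = 1 << 20,   /* assume a 1 MB minimum growth request */
};

/* How many bytes a grow request for npage pages would ask the OS for:
 * round up to 16 pages (64 kB) and never ask for less than
 * HeapAllocChunk. */
static uintptr_t
grow_ask(uintptr_t npage)
{
	uintptr_t ask;

	npage = (npage + 15) & ~(uintptr_t)15;
	ask = npage << PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;
	return ask;
}

int
main(void)
{
	printf("1 page    -> ask %lu bytes\n", (unsigned long)grow_ask(1));    /* 1048576 */
	printf("300 pages -> ask %lu bytes\n", (unsigned long)grow_ask(300));  /* 1245184 */
	return 0;
}

Asking for a large chunk up front keeps the number of OS mappings small; the fallback path in MHeap_Grow only retries with the exact rounded size when the big request cannot be satisfied.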
// Free the span back into the heap.
void
runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime·lock(h);
	mstats.heap_alloc += m->mcache->local_alloc;
	m->mcache->local_alloc = 0;
	mstats.heap_objects += m->mcache->local_objects;
	m->mcache->local_objects = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime·unlock(h);
}
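Both versions of MHeap_Free follow the same accounting pattern: per-thread counters (m->mcache->local_alloc, local_objects) are flushed into the global mstats only while the heap lock is held, so the hot allocation path never touches shared counters. A minimal standalone sketch of that pattern, with hypothetical names (Cache, Stats, flush_and_release) rather than runtime API:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t local_alloc;   /* bytes allocated since the last flush */
} Cache;

typedef struct {
	pthread_mutex_t lock;
	uint64_t heap_alloc;    /* global allocated-bytes counter */
	uint64_t heap_inuse;    /* bytes in spans currently in use */
} Stats;

static void
flush_and_release(Stats *st, Cache *c, uint64_t span_bytes, int acct)
{
	pthread_mutex_lock(&st->lock);
	st->heap_alloc += c->local_alloc;   /* fold in the batched delta */
	c->local_alloc = 0;
	st->heap_inuse -= span_bytes;       /* span leaves the in-use set */
	if(acct)
		st->heap_alloc -= span_bytes;
	pthread_mutex_unlock(&st->lock);
}

int
main(void)
{
	Stats st = { PTHREAD_MUTEX_INITIALIZER, 1<<20, 1<<20 };
	Cache c = { 4096 };

	flush_and_release(&st, &c, 8192, 1);
	printf("heap_alloc=%llu heap_inuse=%llu\n",
		(unsigned long long)st.heap_alloc,
		(unsigned long long)st.heap_inuse);
	return 0;
}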
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime·SysUnused with these pages, and on
		// Unix systems it called madvise.  At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data.  For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime·MCentral_FreeSpan).  If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros.  Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case.  The beadbead constant we use here means nothing, it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
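The beadbead branch and the "copy 'needs zeroing' mark" line both rely on the same convention: in this era of the runtime, the first word of a free span's memory records whether the span still needs zeroing, and anything nonzero means "not zeroed". After pages have been released via runtime·SysUnused (madvise), their contents can no longer be trusted, so the allocator poisons that word to force re-zeroing. A small illustrative sketch of the convention, with hypothetical names (mark_span_dirty, span_needs_zeroing) that are not runtime API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NEEDS_ZERO_POISON ((uintptr_t)0xbeadbeadbeadbeadULL)

/* First word nonzero => callers must zero the span before use. */
static void
mark_span_dirty(void *spanbase)
{
	*(uintptr_t*)spanbase = NEEDS_ZERO_POISON;
}

static int
span_needs_zeroing(void *spanbase)
{
	return *(uintptr_t*)spanbase != 0;
}

int
main(void)
{
	/* Stand-in for a span's backing pages. */
	void *span = calloc(1, 4096);

	printf("fresh span needs zeroing: %d\n", span_needs_zeroing(span));
	mark_span_dirty(span);   /* what the npreleased > 0 branch above does */
	printf("released span needs zeroing: %d\n", span_needs_zeroing(span));
	free(span);
	return 0;
}

Later versions (see the listing with h->spans and t->needzero earlier in this section) replace the in-page word with an explicit needzero field on the MSpan itself.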