static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
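This version and the older variants below all share the same two-stage lookup: scan the exact-size free lists starting at npage, then fall back to a best-fit pass over the large-span list, growing the heap only if that fails. The following is a minimal, self-contained sketch of that strategy; the Span type, MAX_SMALL, the freelists/large globals, and span_lookup are simplified stand-ins for illustration, not the runtime's real structures.

#include <stddef.h>

#define MAX_SMALL 128  // spans of 1..MAX_SMALL-1 pages get exact-size lists

typedef struct Span Span;
struct Span {
	Span   *next;
	size_t  npages;
};

static Span *freelists[MAX_SMALL]; // freelists[n]: spans of exactly n pages
static Span *large;                // spans of MAX_SMALL pages or more

// Pop a span of at least npage pages, or return NULL if none fits.
static Span*
span_lookup(size_t npage)
{
	size_t n;
	Span *s, **best, **pp;

	// First choice: the smallest exact-size list that can satisfy the
	// request.  Every span in freelists[n] has exactly n pages, so the
	// first nonempty list is also the best fit among these lists.
	for(n = npage; n < MAX_SMALL; n++) {
		if(freelists[n] != NULL) {
			s = freelists[n];
			freelists[n] = s->next;
			return s;
		}
	}

	// Fallback: best fit over the unsorted large list.
	best = NULL;
	for(pp = &large; *pp != NULL; pp = &(*pp)->next) {
		if((*pp)->npages >= npage &&
		   (best == NULL || (*pp)->npages < (*best)->npages))
			best = pp;
	}
	if(best == NULL)
		return NULL; // caller would grow the heap and retry
	s = *best;
	*best = s->next;
	return s;
}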
// Allocates a span of the given size.  h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
static MSpan*
MHeap_AllocSpanLocked(MHeap *h, uintptr npage)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	if(s->next != nil || s->prev != nil)
		runtime·throw("still in list");
	if(s->npreleased > 0) {
		runtime·SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
		mstats.heap_released -= s->npreleased<<PageShift;
		s->npreleased = 0;
	}

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		s->state = MSpanStack; // prevent coalescing with s
		t->state = MSpanStack;
		MHeap_FreeSpanLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age (TODO: wrong: t is possibly merged and/or deallocated at this point)
		s->state = MSpanFree;
	}
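The MSpanStack dance in the trimming branch above exists because the free path coalesces a span with free neighbors found through the page map: if s were left in MSpanFree while t is freed, t could be merged straight back into s, undoing the split. Below is a hedged sketch of that hazard under simplified assumptions; SpanState, pagemap, and span_free are illustrative stand-ins for the runtime's structures, and only left-neighbor merging is shown.

#include <stddef.h>

typedef enum { SPAN_FREE, SPAN_IN_USE, SPAN_STACK } SpanState;

typedef struct Span {
	size_t    start;   // first page number
	size_t    npages;  // length in pages
	SpanState state;
} Span;

static Span *pagemap[1 << 16]; // hypothetical flat page -> span map

// Free t, merging it into a free left neighbor when one exists.
static void
span_free(Span *t)
{
	Span *left;

	left = t->start > 0 ? pagemap[t->start - 1] : NULL;
	if(left != NULL && left->state == SPAN_FREE) {
		// If the neighbor were the span t was just trimmed from,
		// this merge would undo the split -- which is why the code
		// above parks s in MSpanStack before freeing t.
		left->npages += t->npages;
		pagemap[left->start + left->npages - 1] = left;
		return; // t's record would be recycled here
	}
	t->state = SPAN_FREE;
	pagemap[t->start] = t;
	pagemap[t->start + t->npages - 1] = t;
}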
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}
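The trimming branch is easier to see in isolation. The sketch below splits a free span into an npage prefix and a remainder, then patches a flat page-to-span map the same way the code above patches h->map: only the boundary pages of each span need to stay accurate for neighbor coalescing to work. Span, pagemap, and span_trim are simplified stand-ins for illustration, not the runtime's real types.

#include <stddef.h>

typedef struct Span {
	size_t start;   // first page number
	size_t npages;  // length in pages
} Span;

// Flat page -> span map; for free spans only the boundary pages are
// kept accurate, which is all that neighbor coalescing needs.
static Span *pagemap[1 << 16];

// Give s exactly npage pages and turn the surplus into span t,
// mirroring the h->map updates in the trimming branch above.
static void
span_trim(Span *s, size_t npage, Span *t)
{
	t->start = s->start + npage;
	t->npages = s->npages - npage;
	s->npages = npage;

	pagemap[t->start - 1] = s;             // s's new last page
	pagemap[t->start] = t;                 // t's first page
	pagemap[t->start + t->npages - 1] = t; // t's last page
}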
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		throw("MHeap_AllocLocked - bad npages");
	MSpanList_Remove(s);
	s->state = MSpanInUse;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		MHeapMap_Set(&h->map, t->start - 1, s);
		MHeapMap_Set(&h->map, t->start, t);
		MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	for(n=0; n<npage; n++)
		MHeapMap_Set(&h->map, s->start+n, s);
	return s;
}
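This earliest version also records every page of the allocated span in the heap map, which is what lets the garbage collector resolve an interior pointer to its containing span in constant time. A minimal sketch of that lookup, assuming 4 KB pages and a flat array in place of MHeapMap; PAGE_SHIFT, pagemap, and span_of are illustrative names only.

#include <stdint.h>

enum { PAGE_SHIFT = 12 }; // assumed 4 KB pages for this sketch

typedef struct Span {
	uintptr_t start;   // first page number
	uintptr_t npages;  // length in pages
} Span;

// Hypothetical flat page -> span map covering a small toy address
// range; every page of an in-use span points back at its span.
static Span *pagemap[1 << 16];

// Resolve any pointer into the heap, interior or not, to its span:
// one shift and one array load.
static Span*
span_of(void *v)
{
	return pagemap[(uintptr_t)v >> PAGE_SHIFT];
}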
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime·MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime·throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime·throw("MHeap_AllocLocked - bad npages");
	runtime·MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime·SysUnused with these pages, and on
		// Unix systems it called madvise.  At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data.  For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime·MCentral_FreeSpan).  If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros.  Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case.  The beadbead constant we use here means nothing, it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime·FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime·MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
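The beadbead branch above leans on the convention its comment describes: the first word of a span's memory records whether the span still contains zeros, and any nonzero value forces a re-zero before reuse. A small sketch of that convention, operating on caller-supplied memory rather than raw page addresses; mark_dirty and prepare_block are hypothetical names, not runtime functions.

#include <stdint.h>
#include <string.h>

// Mark a block as possibly dirty by setting its first word.  The
// distinctive 0xbeadbead... pattern carries no meaning; it just makes
// the mark recognizable if it turns up in a debugger or crash dump.
static void
mark_dirty(void *v)
{
	*(uintptr_t*)v = (uintptr_t)0xbeadbeadbeadbeadULL;
}

// At allocation time the caller checks the mark: a zero first word
// means the block is already all zeros, anything else means zero it.
static void*
prepare_block(void *v, size_t nbytes)
{
	if(*(uintptr_t*)v != 0)
		memset(v, 0, nbytes);
	return v;
}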