// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	lock(h);
	mstats.heap_alloc += m->mcache->local_alloc;
	m->mcache->local_alloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct)
			mstats.heap_alloc += npage<<PageShift;
	}
	unlock(h);
	return s;
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime·lock(h);
	runtime·purgecachedstats(m);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime·unlock(h);
	return s;
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime·lock(h);
	mstats.heap_alloc += m->mcache->local_alloc;
	m->mcache->local_alloc = 0;
	mstats.heap_objects += m->mcache->local_objects;
	m->mcache->local_objects = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime·unlock(h);
	return s;
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime·lock(h);
	mstats.heap_alloc += m->mcache->local_cachealloc;
	m->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime·unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
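// A minimal caller sketch for the last variant above. The wrapper name
// largealloc_sketch and its placement are hypothetical, not part of the
// runtime source; it only illustrates the usual pattern: round the request
// up to whole pages, ask for an accounted (acct=1), zero-filled (zeroed=1)
// span with sizeclass 0 (large object), and turn the span's starting page
// number back into an address.
static void*
largealloc_sketch(MHeap *h, uintptr size)
{
	uintptr npages;
	MSpan *s;

	// Round the byte count up to whole pages.
	npages = (size + PageSize - 1) >> PageShift;
	// sizeclass 0 marks a large object; acct=1 charges heap_objects and
	// heap_alloc; zeroed=1 asks for cleared memory if the span is dirty.
	s = runtime_MHeap_Alloc(h, npages, 0, 1, 1);
	if(s == nil)
		return nil;
	// s->start is a page number; shift by PageShift to recover the address.
	return (void*)(s->start<<PageShift);
}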