SlabItem* SlabAllocator::allocateItemFromSlab(SlabRow* slr) {
    if (slr == nullptr) {
        return nullptr;
    }
    SlabItem* item = nullptr;
    SlabHeader* sh = slr->firstSlab; // first slab guaranteed pre-allocated
    // Search the slab chain, including the first slab, for one that can
    // satisfy the request.
    SlabHeader* shPrev = nullptr;
    while (sh != nullptr) {
        if (sh->itemsRemaining > 0) {
            break;
        }
        shPrev = sh;
        sh = sh->next;
    }
    // Every slab in the row is full: append a fresh one to the end of the
    // chain, sized for this row's items.
    if (sh == nullptr) {
        sh = shPrev->next = createSlab(slr->itemSize);
    }
    // If the request still fails (createSlab ran out of heap), return nullptr.
    if (sh != nullptr) {
        item = sh->nextFreeItem;
        sh->nextFreeItem = item->next;
        --(sh->itemsRemaining);
    }
    return item;
}
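For reference, here is a minimal sketch of the types these routines appear to operate on. The field names (next, itemsRemaining, nextFreeItem, firstSlab, itemSize) are taken from the code itself; the concrete field types, and any extra bookkeeping the real allocator carries, are assumptions.

struct SlabItem {
    SlabItem* next;              // intrusive free-list link, valid while the item is free
};

struct SlabHeader {
    SlabHeader* next;            // next slab in this row's chain
    SlabItem* nextFreeItem;      // head of this slab's free list
    unsigned int itemsRemaining; // free items left in this slab
};

struct SlabRow {
    SlabRow* next;               // next row in the allocator
    SlabHeader* firstSlab;       // pre-allocated when the row is created
    unsigned int itemSize;       // fixed size of every item in this row
};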
template<size_t kChunkBytes>
void* ArenaImpl<kChunkBytes>::allocSlow(size_t nbytes) {
  // Large allocations go directly to malloc without discarding our
  // current chunk.
  if (UNLIKELY(nbytes >= kChunkBytes)) {
#if defined(VALGRIND) || !defined(USE_JEMALLOC)
    // We want all our pointers aligned to kMinBytes. Without jemalloc
    // we have to do that by hand.
    auto extra = kMinBytes - 1;
#else
    auto extra = 0;
#endif
    char* ptr = static_cast<char*>(malloc(nbytes + extra));
#ifdef DEBUG
    m_externalAllocSize += nbytes + extra;
#endif
    m_externalPtrs.push(ptr); // save the raw pointer before aligning it
    // Align up to an (extra + 1)-byte boundary, i.e. to kMinBytes when
    // aligning by hand; a no-op when extra is 0.
    ptr = (char*)((uintptr_t(ptr) + extra) & ~extra);
    assert((intptr_t(ptr) & (kMinBytes - 1)) == 0);
    return ptr;
  }
  createSlab();
  return alloc(nbytes);
}
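This slow path pairs with an inline fast path that simply bumps an offset into the current chunk and punts to allocSlow() when the chunk is exhausted. The sketch below shows the shape such a fast path plausibly takes; the members m_current and m_offset, and the detail that createSlab() resets them for a fresh chunk, are assumptions rather than the real ArenaImpl layout.

template<size_t kChunkBytes>
void* ArenaImpl<kChunkBytes>::alloc(size_t nbytes) {
  // Round up so every pointer handed out stays kMinBytes-aligned.
  nbytes = (nbytes + kMinBytes - 1) & ~(kMinBytes - 1);
  size_t newOffset = m_offset + nbytes; // m_offset: bytes used in m_current (assumed)
  if (newOffset <= kChunkBytes) {
    char* ptr = m_current + m_offset;
    m_offset = newOffset;
    return ptr;
  }
  // Chunk exhausted, or nbytes >= kChunkBytes: the slow path either mallocs
  // a one-off block or starts a fresh chunk and retries.
  return allocSlow(nbytes);
}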
SlabRow* SlabAllocator::createSlabRow(unsigned int itemSize) {
    // Refuse if the row header plus its first slab won't fit in the
    // remaining unmapped region of the reserved heap.
    if ((mUnmappedHeap + SlabAllocator::slabRowAndSlabSize) > (mHeap + mHeapSize)) {
        return nullptr;
    }
    SlabRow* slr = reinterpret_cast<SlabRow*>(mUnmappedHeap);
    mUnmappedHeap += sizeof(SlabRow);
    slr->itemSize = itemSize;
    slr->firstSlab = createSlab(itemSize);
    slr->next = nullptr;
    return slr;
}
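Both routines above lean on a createSlab() helper that is not shown. A minimal sketch under the same bump-the-unmapped-heap scheme follows: it carves a fixed-size slab off the reserved region and threads the items into the slab's intrusive free list. The constant kSlabBytes and the assumption that mUnmappedHeap is a raw char* bump pointer are mine, not from the source.

SlabHeader* SlabAllocator::createSlab(unsigned int itemSize) {
    // Items per fixed-size slab; assumes itemSize >= sizeof(SlabItem) so the
    // intrusive link fits, and that itemSize keeps items suitably aligned.
    const unsigned int count =
        static_cast<unsigned int>((kSlabBytes - sizeof(SlabHeader)) / itemSize);
    if (count == 0 || (mUnmappedHeap + kSlabBytes) > (mHeap + mHeapSize)) {
        return nullptr; // item too large, or reserved heap exhausted
    }
    SlabHeader* sh = reinterpret_cast<SlabHeader*>(mUnmappedHeap);
    mUnmappedHeap += kSlabBytes;
    sh->next = nullptr;
    sh->itemsRemaining = count;
    // Thread every item onto the slab's singly linked free list.
    char* raw = reinterpret_cast<char*>(sh) + sizeof(SlabHeader);
    sh->nextFreeItem = reinterpret_cast<SlabItem*>(raw);
    for (unsigned int i = 0; i + 1 < count; ++i) {
        reinterpret_cast<SlabItem*>(raw + i * itemSize)->next =
            reinterpret_cast<SlabItem*>(raw + (i + 1) * itemSize);
    }
    reinterpret_cast<SlabItem*>(raw + (count - 1) * itemSize)->next = nullptr;
    return sh;
}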