// Primes 'allocator' with the first free object range found in a medium page
// for 'sizeClass', and stashes any additional free ranges in 'rangeCache' for
// later refills. Called with the heap lock held.
void Heap::allocateMediumBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size()); // Caller must have drained the cache first.
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may
    // not be able to fit any objects; if so, exclude it from the scan.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        // Push the page back so its remaining free lines can be found later.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_mediumPagesWithFreeLines[sizeClass].push(page);
            return;
        }

        // Start a new range at this free line. The range begins at the first
        // object boundary within the line (startOffset accounts for an object
        // spilling over from the previous line).
        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        // Pre-reference the line once per object that will be carved from it,
        // and the page once per line consumed.
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines to form one contiguous bump range.
        // Note: this advances the outer loop's 'lineNumber' as well.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        // The first range found refills the allocator directly; subsequent
        // ranges are cached for future refills.
        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }
}
void* Allocator::reallocate(void* object, size_t newSize) { if (!m_isBmallocEnabled) return realloc(object, newSize); void* result = allocate(newSize); if (!object) return result; size_t oldSize = 0; switch (objectType(object)) { case Small: { SmallPage* page = SmallPage::get(SmallLine::get(object)); oldSize = objectSize(page->sizeClass()); break; } case Medium: { MediumPage* page = MediumPage::get(MediumLine::get(object)); oldSize = objectSize(page->sizeClass()); break; } case Large: { std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); LargeObject largeObject(object); oldSize = largeObject.size(); break; } case XLarge: { std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); Range range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object); RELEASE_BASSERT(range); oldSize = range.size(); break; } } size_t copySize = std::min(oldSize, newSize); memcpy(result, object, copySize); m_deallocator.deallocate(object); return result; }
void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line) { BASSERT(!line->refCount(lock)); MediumPage* page = MediumPage::get(line); size_t refCount = page->refCount(lock); page->deref(lock); switch (refCount) { case MediumPage::lineCount: { // First free line in the page. m_mediumPagesWithFreeLines[page->sizeClass()].push(page); break; } case 1: { // Last free line in the page. m_mediumPages.push(page); m_scavenger.run(); break; } } }
// Returns a medium page configured for 'sizeClass', preferring a
// partially-used page of the same class, then a fully free page, and only
// then growing the heap through the VM heap.
MediumPage* Heap::allocateMediumPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<MediumPage*>& freeLinePages = m_mediumPagesWithFreeLines[sizeClass];
    while (freeLinePages.size()) {
        MediumPage* candidate = freeLinePages.pop();
        // Skip stale entries: the page may have become fully free (and been
        // promoted to the pages list, possibly for another size class) since
        // it was pushed here.
        if (candidate->refCount(lock) && candidate->sizeClass() == sizeClass)
            return candidate;
    }

    // No reusable page with free lines: recycle a fully free page, or map a
    // new one from the VM heap.
    MediumPage* page;
    if (m_mediumPages.size())
        page = m_mediumPages.pop();
    else {
        m_isAllocatingPages = true;
        page = m_vmHeap.allocateMediumPage();
    }

    page->setSizeClass(sizeClass);
    return page;
}
// Slow path for medium line allocation: acquires a page (recycling a free
// one, or mapping and physically backing a new one), hands out its first
// line, and stashes the remaining lines for future fast-path allocations.
MediumLine* Heap::allocateMediumLineSlowCase(std::lock_guard<StaticMutex>& lock)
{
    m_isAllocatingPages = true;

    MediumPage* page;
    if (m_mediumPages.size())
        page = m_mediumPages.pop();
    else {
        page = m_vmHeap.allocateMediumPage();
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
    }

    MediumLine* firstLine = page->begin();
    for (MediumLine* line = firstLine + 1; line != page->end(); ++line)
        m_mediumLines.push(line);

    page->ref(lock);
    return firstLine;
}