void* Allocator::reallocate(void* object, size_t newSize) { if (!m_isBmallocEnabled) return realloc(object, newSize); void* result = allocate(newSize); if (!object) return result; size_t oldSize = 0; switch (objectType(object)) { case Small: { SmallPage* page = SmallPage::get(SmallLine::get(object)); oldSize = objectSize(page->sizeClass()); break; } case Medium: { MediumPage* page = MediumPage::get(MediumLine::get(object)); oldSize = objectSize(page->sizeClass()); break; } case Large: { std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); LargeObject largeObject(object); oldSize = largeObject.size(); break; } case XLarge: { std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); Range range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object); RELEASE_BASSERT(range); oldSize = range.size(); break; } } size_t copySize = std::min(oldSize, newSize); memcpy(result, object, copySize); m_deallocator.deallocate(object); return result; }
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line) { BASSERT(!line->refCount(lock)); SmallPage* page = SmallPage::get(line); size_t refCount = page->refCount(lock); page->deref(lock); switch (refCount) { case SmallPage::lineCount: { // First free line in the page. m_smallPagesWithFreeLines[page->sizeClass()].push(page); break; } case 1: { // Last free line in the page. m_smallPages.push(page); m_scavenger.run(); break; } } }
// Obtains a small page for the given size class: prefer a partially-used
// page with free lines, then a fully-free cached page, and finally grow the
// heap from the VM. Caller must hold the heap lock.
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    // First, try to reuse a page that still has free lines for this
    // size class.
    Vector<SmallPage*>& freeLinePages = m_smallPagesWithFreeLines[sizeClass];
    while (freeLinePages.size()) {
        SmallPage* candidate = freeLinePages.pop();

        // Skip stale entries: the page may have been fully freed (promoted
        // to m_smallPages) or re-purposed for a different size class since
        // it was recorded here.
        bool isStale = !candidate->refCount(lock) || candidate->sizeClass() != sizeClass;
        if (isStale)
            continue;

        return candidate;
    }

    // No partially-used page available: take a fully-free cached page, or
    // fetch a fresh one from the VM heap (marking that we're growing).
    SmallPage* page;
    if (m_smallPages.size())
        page = m_smallPages.pop();
    else {
        m_isAllocatingPages = true;
        page = m_vmHeap.allocateSmallPage();
    }

    page->setSizeClass(sizeClass);
    return page;
}