// Slow path for medium-line allocation: acquires a whole MediumPage — reusing
// a cached page when one is available, otherwise taking a fresh page from the
// VM heap and committing its physical memory — then hands every line except
// the first to the free-line list and returns that first line to the caller.
// Called with the heap lock held (passed as 'lock' and forwarded to ref()).
MediumLine* Heap::allocateMediumLineSlowCase(std::lock_guard<StaticMutex>& lock)
{
    m_isAllocatingPages = true;

    MediumPage* page;
    if (m_mediumPages.size())
        page = m_mediumPages.pop();
    else {
        page = m_vmHeap.allocateMediumPage();
        // Freshly reserved pages need their physical memory committed before use.
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
    }

    MediumLine* firstLine = page->begin();
    // Every line after the first becomes available for future allocations.
    for (MediumLine* extra = firstLine + 1; extra != page->end(); ++extra)
        m_mediumLines.push(extra);

    // Account for the one line we are handing out.
    page->ref(lock);
    return firstLine;
}
// Carves bump ranges out of the free lines of a medium page for 'sizeClass',
// refilling 'allocator' first and caching any further ranges in 'rangeCache'.
// Adjacent free lines are merged into a single range. If the range cache fills
// before the page is exhausted, the page is parked on
// m_mediumPagesWithFreeLines for a later pass. Called with the heap lock held.
void Heap::allocateMediumBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size()); // Caller is expected to hand us a drained cache.
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not
    // be able to fit any objects; if so, exclude it from the scan.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line (refCount == 0 means no live objects on the line).
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        // Park the page so the remaining free lines can be harvested later.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_mediumPagesWithFreeLines[sizeClass].push(page);
            return;
        }

        // Start a new range at this line's first object boundary, and pre-ref
        // the line and the page for the objects we are about to hand out.
        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock); // One page ref per line consumed.

        // Merge with subsequent free lines. Note this advances the outer loop
        // variable; the for-loop's ++lineNumber then skips past the merged run.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            // Intentionally shadows the outer lineMetadata for the merged line.
            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        // First range goes straight into the empty allocator; later ranges are
        // stashed in the cache for subsequent refills.
        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }
}