void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject) {
        m_isAllocatingPages = true;
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
    }

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        // The object is misaligned: split off a prefix so the remainder starts
        // on an alignment boundary. Rounding up from begin() + largeMin keeps
        // the prefix at least largeMin bytes, so it is a valid free object.
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        m_largeObjects.insert(pair.first);
        largeObject = pair.second;
    }

    return allocateLarge(lock, largeObject, size);
}
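// The prefix split above hinges on one arithmetic fact: rounding
// begin() + largeMin up to the requested alignment yields an aligned payload
// whose prefix is at least largeMin bytes, i.e. a valid free object. Below is
// a minimal standalone sketch of that arithmetic; this roundUpToMultipleOf is
// an illustrative stand-in for bmalloc's helper, and the constant values are
// assumptions chosen for the example, not the library's.
#include <cassert>
#include <cstddef>
#include <cstdint>

static inline std::uintptr_t roundUpToMultipleOf(std::size_t divisor, std::uintptr_t x)
{
    assert(divisor && !(divisor & (divisor - 1))); // divisor must be a power of two
    return (x + divisor - 1) & ~static_cast<std::uintptr_t>(divisor - 1);
}

int main()
{
    const std::size_t alignment = 4096; // requested alignment (assumed value)
    const std::size_t largeMin = 1024;  // minimum large-object size (assumed value)
    std::uintptr_t begin = 0x10200;     // a misaligned object start

    // Mirrors the prefixSize computation in Heap::allocateLarge.
    std::uintptr_t alignedBegin = roundUpToMultipleOf(alignment, begin + largeMin);
    std::size_t prefixSize = alignedBegin - begin;

    assert(prefixSize >= largeMin);            // prefix can be reinserted as a free object
    assert(!(alignedBegin & (alignment - 1))); // remainder starts on an alignment boundary
    return 0;
}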
void* allocate(size_t size)
{
    CALL("allocate(size_t size)");

    if (size > RecommendedPageSize)
        return allocateLarge(size);

    size_t normalisedSize = normaliseSize(size);
    if (normalisedSize > _normalisedSizeOfFreeInCurrentPage) {
        // The current page is exhausted: move to the next page, allocating a
        // fresh one at the end of the chain if necessary. A fresh page always
        // has room, since size <= RecommendedPageSize.
        if (!_currentPage->next) {
            size_t normalisedRecommendedPageSize = normaliseSize(RecommendedPageSize);
            _currentPage->next = new (normalisedRecommendedPageSize) Page(normalisedRecommendedPageSize);
            _currentPage->next->next = 0;
        }
        _currentPage = _currentPage->next;
        _freeInCurrentPage = _currentPage->memory();
        _normalisedSizeOfFreeInCurrentPage = _currentPage->normalisedSize;
    }

    // Bump-allocate from the current page.
    ASSERT(normalisedSize <= _normalisedSizeOfFreeInCurrentPage);
    void* res = _freeInCurrentPage;
    _freeInCurrentPage += normalisedSize;
    _normalisedSizeOfFreeInCurrentPage -= normalisedSize;
    return res;
} // void* allocate(size_t size)
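// allocate() above leans on a Page record whose payload sits directly after
// its header in one heap block, reached via memory(). What follows is a
// hypothetical sketch of that layout, reconstructed only from the calls made
// in allocate(); the real class may well differ.
#include <cstddef>
#include <cstdlib>
#include <new>

struct Page {
    Page* next;
    size_t normalisedSize; // usable payload bytes in this page

    explicit Page(size_t size) : next(0), normalisedSize(size) {}

    // Payload begins immediately after the header.
    char* memory() { return reinterpret_cast<char*>(this + 1); }

    // Supports `new (payloadSize) Page(payloadSize)`: one malloc'd block
    // holding the header followed by payloadSize bytes of payload.
    static void* operator new(size_t headerSize, size_t payloadSize)
    {
        void* p = std::malloc(headerSize + payloadSize);
        if (!p)
            throw std::bad_alloc();
        return p;
    }
    static void operator delete(void* p) { std::free(p); }
};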
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject) {
        m_isAllocatingPages = true;
        largeObject = m_vmHeap.allocateLargeObject(size);
    }

    return allocateLarge(lock, largeObject, size);
}
void* Allocator::allocateSlowCase(size_t size)
{
    if (!m_isBmallocEnabled)
        return malloc(size);

    if (size <= mediumMax) {
        size_t sizeClass = bmalloc::sizeClass(size);
        BumpAllocator& allocator = m_bumpAllocators[sizeClass];
        allocator.refill(allocateBumpRange(sizeClass));
        return allocator.allocate();
    }

    if (size <= largeMax)
        return allocateLarge(size);

    return allocateXLarge(size);
}
void* Allocator::allocateSlowCase(size_t size)
{
    if (!m_isBmallocEnabled)
        return malloc(size);

    if (size <= maskSizeClassMax) {
        size_t sizeClass = bmalloc::maskSizeClass(size);
        BumpAllocator& allocator = m_bumpAllocators[sizeClass];
        refillAllocator(allocator, sizeClass);
        return allocator.allocate();
    }

    if (size <= smallMax)
        return allocateLogSizeClass(size);

    return allocateLarge(size);
}
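// Both versions of allocateSlowCase end the same way: refill a per-size-class
// BumpAllocator, then retry its fast path. Below is a minimal sketch of that
// fast path; the simplified refill signature and the field names here are
// assumptions, not bmalloc's actual interface (which refills from a bump
// range and tracks more state).
#include <cstddef>

class BumpAllocator {
public:
    bool canAllocate() const { return m_remaining != 0; }

    // Fast path: hand out the next fixed-size object by bumping a pointer.
    // Callers guarantee a refill happened first, as allocateSlowCase does.
    void* allocate()
    {
        char* result = m_ptr;
        m_ptr += m_objectSize;
        --m_remaining;
        return result;
    }

    void refill(char* begin, size_t objectSize, unsigned objectCount)
    {
        m_ptr = begin;
        m_objectSize = objectSize;
        m_remaining = objectCount;
    }

private:
    char* m_ptr = nullptr;
    size_t m_objectSize = 0;
    unsigned m_remaining = 0;
};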