Example #1
LargeRange LargeMap::remove(size_t alignment, size_t size)
{
    size_t alignmentMask = alignment - 1;

    LargeRange* candidate = m_free.end();
    for (LargeRange* it = m_free.begin(); it != m_free.end(); ++it) {
        if (it->size() < size)
            continue;

        if (candidate != m_free.end() && candidate->begin() < it->begin())
            continue;

        if (test(it->begin(), alignmentMask)) {
            char* aligned = roundUpToMultipleOf(alignment, it->begin());
            if (aligned < it->begin()) // Check for overflow.
                continue;

            char* alignedEnd = aligned + size;
            if (alignedEnd < aligned) // Check for overflow.
                continue;

            if (alignedEnd > it->end())
                continue;
        }

        candidate = it;
    }
    
    if (candidate == m_free.end())
        return LargeRange();

    return m_free.pop(candidate);
}
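
Both test() and roundUpToMultipleOf() come from bmalloc's Algorithm.h. A minimal sketch of the usual power-of-two formulations, with illustrative bodies rather than bmalloc's exact definitions:

#include <cassert>
#include <cstdint>
#include <cstdio>

bool isPowerOfTwo(size_t n) { return n && !(n & (n - 1)); }

// True if any mask bit is set, i.e. the pointer is not (mask + 1)-aligned.
bool test(char* ptr, size_t mask)
{
    return reinterpret_cast<uintptr_t>(ptr) & mask;
}

// Round a pointer up to the next multiple of a power-of-two divisor.
char* roundUpToMultipleOf(size_t divisor, char* x)
{
    assert(isPowerOfTwo(divisor));
    uintptr_t p = reinterpret_cast<uintptr_t>(x);
    return reinterpret_cast<char*>((p + divisor - 1) & ~(divisor - 1));
}

int main()
{
    char* p = reinterpret_cast<char*>(0x1003);
    char* aligned = roundUpToMultipleOf(0x1000, p);
    printf("%p -> %p\n", static_cast<void*>(p), static_cast<void*>(aligned)); // 0x1003 -> 0x2000
    assert(!test(aligned, 0x1000 - 1));
    return 0;
}

The sum in roundUpToMultipleOf() can wrap past the top of the address space, which is why the loop above re-checks aligned < it->begin() after rounding.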
Example #2
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject) {
        m_isAllocatingPages = true;
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
    }

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        m_largeObjects.insert(pair.first);
        largeObject = pair.second;
    }

    return allocateLarge(lock, largeObject, size);
}
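
The prefix split above rounds up from begin + largeMin rather than begin, so the leftover prefix is never smaller than the minimum large-object size. A standalone sketch of that arithmetic, using a hypothetical largeMin value:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical constant standing in for bmalloc's largeMin; the arithmetic,
// not the value, is the point of this sketch.
constexpr uintptr_t largeMin = 0x40;

uintptr_t roundUpToMultipleOf(uintptr_t divisor, uintptr_t x)
{
    return (x + divisor - 1) & ~(divisor - 1); // divisor must be a power of two
}

int main()
{
    uintptr_t begin = 0x10020; // misaligned start of the large object
    uintptr_t alignment = 0x1000;

    // Rounding up from begin + largeMin (rather than begin) guarantees the
    // leftover prefix is at least largeMin bytes, so it remains a valid
    // large object when reinserted into the free structure.
    uintptr_t prefixSize = roundUpToMultipleOf(alignment, begin + largeMin) - begin;

    assert(prefixSize >= largeMin);
    assert(((begin + prefixSize) & (alignment - 1)) == 0);
    printf("prefixSize = %#lx, aligned begin = %#lx\n",
        static_cast<unsigned long>(prefixSize), static_cast<unsigned long>(begin + prefixSize));
    return 0;
}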
Example #3
FixedVMPoolExecutableAllocator()
    : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
{
    size_t reservationSize;
    if (Options::jitMemoryReservationSize())
        reservationSize = Options::jitMemoryReservationSize();
    else
        reservationSize = fixedExecutableMemoryPoolSize;
    reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
    m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
    if (m_reservation) {
        ASSERT(m_reservation.size() == reservationSize);
        addFreshFreeSpace(m_reservation.base(), m_reservation.size());

        startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
    }
}
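
Here the divisor is the runtime page size rather than a compile-time constant. A sketch of the same rounding applied to a reservation size, with POSIX sysconf standing in for WTF's pageSize():

#include <cstddef>
#include <cstdio>
#include <unistd.h> // sysconf

// Stand-in for WTF's pageSize(); the POSIX query is an assumption here.
size_t pageSize() { return static_cast<size_t>(sysconf(_SC_PAGESIZE)); }

// Same power-of-two rounding as before; page sizes are powers of two.
size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
    return (x + divisor - 1) & ~(divisor - 1);
}

int main()
{
    size_t requested = 100000; // arbitrary reservation request
    size_t reservation = roundUpToMultipleOf(pageSize(), requested);
    printf("%zu -> %zu (page size %zu)\n", requested, reservation, pageSize());
    return 0;
}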
Example #4
void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure)
{
    BASSERT(isPowerOfTwo(alignment));

    if (!m_isBmallocEnabled) {
        void* result = nullptr;
        if (posix_memalign(&result, alignment, size))
            return nullptr;
        return result;
    }

    if (!size)
        size = alignment;

    if (size <= smallMax && alignment <= smallMax)
        return allocate(roundUpToMultipleOf(alignment, size));

    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
    Heap* heap = PerProcess<Heap>::getFastCase();
    if (crashOnFailure)
        return heap->allocateLarge(lock, alignment, size);
    return heap->tryAllocateLarge(lock, alignment, size);
}
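
On the small path, the allocator satisfies the alignment by rounding the size instead of the pointer: assuming small objects of a given size class are laid out at size-multiple offsets within a slab aligned to at least that alignment, every object of the rounded size is automatically aligned. A toy bump allocator illustrating that invariant (the slab and class sizes are illustrative, not bmalloc's):

#include <cassert>
#include <cstdint>
#include <cstdlib>

size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
    return (x + divisor - 1) & ~(divisor - 1); // power-of-two divisor
}

int main()
{
    // Toy bump allocator over a slab aligned to its own size (4 KB here).
    constexpr size_t slabSize = 4096;
    void* slab = std::aligned_alloc(slabSize, slabSize);
    if (!slab)
        return 1;

    size_t alignment = 64;
    size_t size = 48;
    size_t rounded = roundUpToMultipleOf(alignment, size); // 64

    // Every object starts at a multiple of its rounded size, which is in
    // turn a multiple of the requested alignment, so alignment holds
    // without any per-object pointer adjustment.
    for (size_t offset = 0; offset + rounded <= slabSize; offset += rounded) {
        void* object = static_cast<char*>(slab) + offset;
        assert((reinterpret_cast<uintptr_t>(object) & (alignment - 1)) == 0);
    }
    std::free(slab);
    return 0;
}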