/*
 * In a low memory or high fragmentation situation, alignable chunks of the
 * desired size may still be available, even if there are no more contiguous
 * free chunks that meet the |size + alignment - pageSize| requirement of
 * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
 * by temporarily holding onto the unaligned parts of each chunk until the
 * allocator gives us a chunk that either is, or can be aligned.
 */
static void*
MapAlignedPagesLastDitch(size_t size, size_t alignment)
{
    void* tempMaps[MaxLastDitchAttempts];
    int attempt = 0;
    void* p = MapMemory(size);
    if (OffsetFromAligned(p, alignment) == 0)
        return p;
    for (; attempt < MaxLastDitchAttempts; ++attempt) {
        GetNewChunk(&p, tempMaps + attempt, size, alignment);
        if (OffsetFromAligned(p, alignment) == 0) {
            if (tempMaps[attempt])
                UnmapPages(tempMaps[attempt], size);
            break;
        }
        if (!tempMaps[attempt])
            break; /* Bail if GetNewChunk failed. */
    }
    if (OffsetFromAligned(p, alignment)) {
        UnmapPages(p, size);
        p = nullptr;
    }
    while (--attempt >= 0)
        UnmapPages(tempMaps[attempt], size);
    return p;
}
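OffsetFromAligned is used throughout but never shown in this excerpt. A minimal sketch of the helper these calls assume (a one-line modulus, so a return value of 0 means "already aligned"):

#include <cstdint>

static inline size_t
OffsetFromAligned(void* p, size_t alignment)
{
    /* Distance of p past the previous alignment boundary; 0 if aligned. */
    return uintptr_t(p) % alignment;
}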
void*
MapAlignedPages(size_t size, size_t alignment)
{
    MOZ_ASSERT(size >= alignment);
    MOZ_ASSERT(size % alignment == 0);
    MOZ_ASSERT(size % pageSize == 0);
    MOZ_ASSERT(alignment % allocGranularity == 0);

    void* p = MapMemory(size);

    /* Special case: If we want allocation alignment, no further work is needed. */
    if (alignment == allocGranularity)
        return p;

    if (OffsetFromAligned(p, alignment) == 0)
        return p;

    void* retainedAddr;
    GetNewChunk(&p, &retainedAddr, size, alignment);
    if (retainedAddr)
        UnmapPages(retainedAddr, size);
    if (p) {
        if (OffsetFromAligned(p, alignment) == 0)
            return p;
        UnmapPages(p, size);
    }

    p = MapAlignedPagesSlow(size, alignment);
    if (!p)
        return MapAlignedPagesLastDitch(size, alignment);

    MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
    return p;
}
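A hypothetical call site, to show the contract the asserts encode; the chunk size and the helper name here are illustrative assumptions, not taken from the excerpt:

/* Allocate a 1 MiB chunk on a 1 MiB boundary: size >= alignment, size is a
 * multiple of both alignment and pageSize, and alignment is a multiple of
 * allocGranularity (all satisfied by typical 4 KiB/64 KiB granularities). */
static const size_t ChunkSize = 1024 * 1024;

void* ExampleAllocateChunk()
{
    void* chunk = MapAlignedPages(ChunkSize, ChunkSize);
    if (chunk)
        MOZ_ASSERT(OffsetFromAligned(chunk, ChunkSize) == 0);
    return chunk; /* Release later with UnmapPages(chunk, ChunkSize). */
}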
static void*
MapAlignedPagesSlow(size_t size, size_t alignment)
{
    /* Overallocate and unmap the region's edges. */
    size_t reqSize = size + alignment - pageSize;
    void* region = MapMemory(reqSize);
    if (!region)
        return nullptr;

    void* regionEnd = (void*)(uintptr_t(region) + reqSize);
    void* front;
    void* end;
    if (growthDirection <= 0) {
        size_t offset = OffsetFromAligned(regionEnd, alignment);
        end = (void*)(uintptr_t(regionEnd) - offset);
        front = (void*)(uintptr_t(end) - size);
    } else {
        size_t offset = OffsetFromAligned(region, alignment);
        front = (void*)(uintptr_t(region) + (offset ? alignment - offset : 0));
        end = (void*)(uintptr_t(front) + size);
    }

    if (front != region)
        UnmapPages(region, uintptr_t(front) - uintptr_t(region));
    if (end != regionEnd)
        UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));

    return front;
}
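This is the POSIX variant of the slow path; the Windows variant of the same function follows. To make the trimming arithmetic concrete, here is an illustrative check with made-up numbers (4 KiB pages, 64 KiB size and alignment, addresses growing upward):

#include <cassert>
#include <cstdint>

void CheckSlowPathMath()
{
    const uint64_t pageSize  = 0x1000;          /* 4 KiB */
    const uint64_t size      = 0x10000;         /* 64 KiB */
    const uint64_t alignment = 0x10000;
    const uint64_t region    = 0x7f0000012000;  /* pretend MapMemory result */
    const uint64_t reqSize   = size + alignment - pageSize; /* 124 KiB */

    /* Same math as the growthDirection > 0 branch above. */
    uint64_t offset = region % alignment;                    /* 0x2000 */
    uint64_t front  = region + (offset ? alignment - offset : 0);
    uint64_t end    = front + size;

    assert(front % alignment == 0);   /* 0x7f0000020000 is aligned */
    assert(end <= region + reqSize);  /* trimmed tail stays inside the region */
}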
static void*
MapAlignedPagesSlow(size_t size, size_t alignment)
{
    /*
     * Windows requires that there be a 1:1 mapping between VM allocation
     * and deallocation operations.  Therefore, take care here to acquire the
     * final result via one mapping operation.  This means unmapping any
     * preliminary result that is not correctly aligned.
     */
    void* p;
    do {
        /*
         * Over-allocate in order to map a memory region that is definitely
         * large enough, then deallocate and allocate again the correct size,
         * within the over-sized mapping.
         *
         * Since we're going to unmap the whole thing anyway, the first
         * mapping doesn't have to commit pages.
         */
        size_t reserveSize = size + alignment - pageSize;
        p = MapMemory(reserveSize, MEM_RESERVE);
        if (!p)
            return nullptr;
        void* chunkStart = (void*)AlignBytes(uintptr_t(p), alignment);
        UnmapPages(p, reserveSize);
        p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);

        /* Failure here indicates a race with another thread, so try again. */
    } while (!p);

    return p;
}
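AlignBytes is also outside this excerpt; the Windows path above only needs it to round an address up to the next alignment boundary. A sketch of that assumed behavior, for a power-of-two alignment:

static inline uintptr_t
AlignBytes(uintptr_t aBytes, uintptr_t aAlignment)
{
    /* Round aBytes up to the next multiple of aAlignment (a power of two). */
    return (aBytes + aAlignment - 1) & ~(aAlignment - 1);
}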
/*
 * mmap calls don't have to be matched with calls to munmap, so we can unmap
 * just the pages we don't need. However, as we don't know a priori if addresses
 * are handed out in increasing or decreasing order, we have to try both
 * directions (depending on the environment, one will always fail).
 */
static void
GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment)
{
    void* address = *aAddress;
    void* retainedAddr = nullptr;
    bool addrsGrowDown = growthDirection <= 0;
    int i = 0;
    for (; i < 2; ++i) {
        /* Try the direction indicated by growthDirection. */
        if (addrsGrowDown) {
            size_t offset = OffsetFromAligned(address, alignment);
            void* head = (void*)((uintptr_t)address - offset);
            void* tail = (void*)((uintptr_t)head + size);
            if (MapMemoryAt(head, offset)) {
                UnmapPages(tail, offset);
                if (growthDirection >= -8)
                    --growthDirection;
                address = head;
                break;
            }
        } else {
            size_t offset = alignment - OffsetFromAligned(address, alignment);
            void* head = (void*)((uintptr_t)address + offset);
            void* tail = (void*)((uintptr_t)address + size);
            if (MapMemoryAt(tail, offset)) {
                UnmapPages(address, offset);
                if (growthDirection <= 8)
                    ++growthDirection;
                address = head;
                break;
            }
        }
        /* If we're confident in the growth direction, don't try the other. */
        if (growthDirection < -8 || growthDirection > 8)
            break;
        /* If that failed, try the opposite direction. */
        addrsGrowDown = !addrsGrowDown;
    }
    /* If our current chunk cannot be aligned, see if the next one is aligned. */
    if (OffsetFromAligned(address, alignment)) {
        retainedAddr = address;
        address = MapMemory(size);
    }
    *aAddress = address;
    *aRetainedAddr = retainedAddr;
}
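The MapMemory/MapMemoryAt/UnmapPages wrappers this POSIX code relies on are not part of the excerpt; presumably they are thin wrappers over mmap/munmap, roughly like this simplified sketch with fixed protection flags:

#include <cstddef>
#include <sys/mman.h>

static void*
MapMemoryAt(void* desired, size_t length)
{
    void* p = mmap(desired, length, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return nullptr;
    /* mmap treats the address only as a hint; if the kernel placed the
     * mapping elsewhere, undo it and report failure to the caller. */
    if (desired && p != desired) {
        munmap(p, length);
        return nullptr;
    }
    return p;
}

static void*
MapMemory(size_t length)
{
    return MapMemoryAt(nullptr, length);
}

static void
UnmapPages(void* p, size_t size)
{
    munmap(p, size);
}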
Example #6
static void *
AllocGCChunk()
{
    void *p;

#ifdef JS_GC_HAS_MAP_ALIGN
    p = MapAlignedPages(GC_CHUNK_SIZE, GC_CHUNK_SIZE);
    if (!p)
        return NULL;
#else
    /*
     * Windows requires that there be a 1:1 mapping between VM allocation
     * and deallocation operations.  Therefore, take care here to acquire the
     * final result via one mapping operation.  This means unmapping any
     * preliminary result that is not correctly aligned.
     */
    p = MapPages(NULL, GC_CHUNK_SIZE);
    if (!p)
        return NULL;

    if (reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK) {
        UnmapPages(p, GC_CHUNK_SIZE);
        p = MapPages(FindChunkStart(p), GC_CHUNK_SIZE);
        while (!p) {
            /*
             * Over-allocate in order to map a memory region that is
             * definitely large enough then deallocate and allocate again the
             * correct size, within the over-sized mapping.
             */
            p = MapPages(NULL, GC_CHUNK_SIZE * 2);
            if (!p)
                return NULL;
            UnmapPages(p, GC_CHUNK_SIZE * 2);
            p = MapPages(FindChunkStart(p), GC_CHUNK_SIZE);

            /*
             * Failure here indicates a race with another thread, so
             * try again.
             */
        }
    }
#endif /* !JS_GC_HAS_MAP_ALIGN */

    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK));
    return p;
}
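FindChunkStart is used above but not defined in the excerpt; given how it is called, it presumably rounds the unaligned address up to the next chunk boundary. A sketch consistent with that use:

static inline void *
FindChunkStart(void *p)
{
    /* Round p up to the next GC_CHUNK_SIZE boundary;
     * GC_CHUNK_MASK is assumed to be GC_CHUNK_SIZE - 1. */
    jsuword addr = reinterpret_cast<jsuword>(p);
    addr = (addr + GC_CHUNK_MASK) & ~GC_CHUNK_MASK;
    return reinterpret_cast<void *>(addr);
}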
Example #7
static void *
MapAlignedPagesRecursively(size_t size, size_t alignment, int& recursions)
{
    if (++recursions >= OS2_MAX_RECURSIONS)
        return NULL;

    void *tmp;
    if (DosAllocMem(&tmp, size,
                    OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
        JS_ALWAYS_TRUE(DosAllocMem(&tmp, size,
                                   PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
    }
    size_t offset = reinterpret_cast<jsuword>(tmp) & (alignment - 1);
    if (!offset)
        return tmp;

    /* if there are 'filler' bytes of free space above 'tmp', free 'tmp',
     * then reallocate it as a 'filler'-sized block;  assuming we're not
     * in a race with another thread, the next recursion should succeed
     */
    size_t filler = size + alignment - offset;
    unsigned long cb = filler;
    unsigned long flags = 0;
    unsigned long rc = DosQueryMem(&(static_cast<char*>(tmp))[size],
                                   &cb, &flags);
    if (!rc && (flags & PAG_FREE) && cb >= filler) {
        UnmapPages(tmp, 0);
        if (DosAllocMem(&tmp, filler,
                        OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE)) {
            JS_ALWAYS_TRUE(DosAllocMem(&tmp, filler,
                                       PAG_COMMIT | PAG_READ | PAG_WRITE) == 0);
        }
    }

    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
    UnmapPages(tmp, 0);

    return p;
}
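The recursive helper needs a driver that seeds the recursion counter. A hedged sketch of such a caller; the real fallback on exhaustion is omitted here:

static void *
MapAlignedPages(size_t size, size_t alignment)
{
    int recursions = -1;

    /* Make up to OS2_MAX_RECURSIONS attempts to get an aligned block by
     * recursively reserving the unaligned remainders as 'filler' blocks. */
    void *p = MapAlignedPagesRecursively(size, alignment, recursions);
    if (p)
        return p;

    /* Out of attempts; a real implementation would fall back to an
     * unaligned allocation strategy here. */
    return NULL;
}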
/*
 * On Windows, map and unmap calls must be matched, so we deallocate the
 * unaligned chunk, then reallocate the unaligned part to block off the
 * old address and force the allocator to give us a new one.
 */
static void
GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment)
{
    void* address = *aAddress;
    void* retainedAddr = nullptr;
    do {
        size_t retainedSize;
        size_t offset = OffsetFromAligned(address, alignment);
        if (!offset)
            break;
        UnmapPages(address, size);
        retainedSize = alignment - offset;
        retainedAddr = MapMemoryAt(address, retainedSize, MEM_RESERVE);
        address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
        /* If retainedAddr is null here, we raced with another thread. */
    } while (!retainedAddr);
    *aAddress = address;
    *aRetainedAddr = retainedAddr;
}
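On the Windows side, the wrappers presumably sit on VirtualAlloc/VirtualFree, which is what forces the matched map/unmap discipline described above. A minimal sketch under that assumption:

#include <windows.h>

static void*
MapMemoryAt(void* desired, size_t length, DWORD flags = MEM_COMMIT | MEM_RESERVE)
{
    return VirtualAlloc(desired, length, flags, PAGE_READWRITE);
}

static void*
MapMemory(size_t length, DWORD flags = MEM_COMMIT | MEM_RESERVE)
{
    return VirtualAlloc(nullptr, length, flags, PAGE_READWRITE);
}

static void
UnmapPages(void* p, size_t /* size */)
{
    /* MEM_RELEASE requires the exact base address and a size of zero,
     * hence the 1:1 map/unmap pairing noted in the comments above. */
    VirtualFree(p, 0, MEM_RELEASE);
}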
Example #9
static void
FreeGCChunk(void *p)
{
    JS_ASSERT(p);
    JS_ASSERT(!(reinterpret_cast<jsuword>(p) & GC_CHUNK_MASK));
    UnmapPages(p, GC_CHUNK_SIZE);
}
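A hypothetical round trip through the pair above (illustrative only):

static void
ExampleChunkRoundTrip()
{
    void *chunk = AllocGCChunk(); /* GC_CHUNK_SIZE-aligned on success */
    if (chunk) {
        /* ... carve the chunk into GC arenas ... */
        FreeGCChunk(chunk);
    }
}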