// Reserves |len| bytes of address space whose base is aligned to |align|.
// |addr| is a placement hint and may be null, in which case a randomized,
// align-rounded base is chosen. |len| and |align| must be multiples of
// kPageAllocationGranularity; the mask arithmetic below additionally assumes
// |align| is a power of two — TODO(review): confirm all callers guarantee this.
// On persistent failure (e.g. address-space exhaustion or a pathological
// mapping race), crashes the process via IMMEDIATE_CRASH().
void* allocPages(void* addr, size_t len, size_t align)
{
    // Guards the |len + (align - ...)| computation of tryLen below
    // against overflow.
    RELEASE_ASSERT(len < INT_MAX - align);
    ASSERT(len >= kPageAllocationGranularity);
    ASSERT(!(len & kPageAllocationGranularityOffsetMask));
    ASSERT(align >= kPageAllocationGranularity);
    ASSERT(!(align & kPageAllocationGranularityOffsetMask));
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
    // Masks for rounding addresses down to (alignBaseMask) or testing
    // misalignment against (alignOffsetMask) the requested alignment.
    size_t alignOffsetMask = align - 1;
    size_t alignBaseMask = ~alignOffsetMask;
    ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask));
    // If the client passed null as the address, choose a good one.
    if (!addr) {
        addr = getRandomPageBase();
        addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask);
    }

    // The common case, which is also the least work we can do, is that the
    // address and length are suitable. Just try it.
    void* ret = systemAllocPages(addr, len);

    // If the alignment is to our liking, we're done. (Note: a null |ret|
    // also passes this test and is returned to the caller as-is.)
    if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask))
        return ret;

    // Annoying. Unmap and map a larger range to be sure to succeed on the
    // second, slower attempt.
    freePages(ret, len);

    // Over-allocate just enough that some align-aligned base of |len| bytes
    // is guaranteed to lie inside the mapping.
    size_t tryLen = len + (align - kPageAllocationGranularity);

    // We loop to cater for the unlikely case where another thread maps on top
    // of the aligned location we choose.
    int count = 0;
    while (count++ < 100) {
        ret = systemAllocPages(addr, tryLen);

        // We can now try and trim out a subset of the mapping: round the
        // returned base up to the first aligned address within it.
        addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask);

        // On POSIX systems, we can trim the oversized mapping to fit exactly.
        // This will always work on POSIX systems.
        if (trimMapping(ret, tryLen, addr, len))
            return addr;

        // On Windows, you can't trim an existing mapping so we unmap and remap
        // a subset. We used to do for all platforms, but OSX 10.8 has a
        // broken mmap() that ignores address hints for valid, unused addresses.
        freePages(ret, tryLen);
        ret = systemAllocPages(addr, len);
        if (ret == addr)
            return ret;

        // Unlikely race / collision. Do the simple thing and just start again
        // from a fresh randomized, align-rounded hint.
        freePages(ret, len);
        addr = getRandomPageBase();
        addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask);
    }
    // 100 consecutive collisions: something is badly wrong; give up loudly.
    IMMEDIATE_CRASH();
    return 0;
}
// Deliberate crash for a partition OOM in the ~32MB usage bucket.
// A distinctive value is parked in a stack slot (kept alive via alias())
// so crash-dump tooling can bucket this failure by allocation size.
// NEVER_INLINE keeps this frame visible in the crash stack.
static NEVER_INLINE void partitionsOutOfMemoryUsing32M()
{
    size_t oomSignature = static_cast<size_t>(32) << 20; // == 32 * 1024 * 1024
    alias(&oomSignature);
    IMMEDIATE_CRASH();
}
// Deliberate crash for a partition OOM in the sub-16MB usage bucket.
// The distinctive stack value (kept alive via alias()) lets crash-dump
// tooling bucket this failure by allocation size; NEVER_INLINE keeps
// this frame visible in the crash stack.
static NEVER_INLINE void partitionsOutOfMemoryUsingLessThan16M()
{
    size_t oomSignature = (static_cast<size_t>(16) << 20) - 1; // == 16 * 1024 * 1024 - 1
    alias(&oomSignature);
    IMMEDIATE_CRASH();
}
// TODO(haraken): Like partitionOutOfMemoryWithLotsOfUncommitedPages(), // we should probably have a way to distinguish physical memory OOM from // virtual address space OOM. static NEVER_INLINE void blinkGCOutOfMemory() { IMMEDIATE_CRASH(); }