// Allocates |size| bytes of page-aligned VM whose base address is aligned to
// |alignment|. Per the ASSERTs: both size and alignment are page-aligned,
// alignment is a power of two, and size >= alignment.
PageAllocationAligned PageAllocationAligned::allocate(size_t size, size_t alignment, OSAllocator::Usage usage, bool writable)
{
    ASSERT(isPageAligned(size));
    ASSERT(isPageAligned(alignment));
    ASSERT(isPowerOfTwo(alignment));
    ASSERT(size >= alignment);
    size_t alignmentMask = alignment - 1;
#if OS(DARWIN)
    int flags = VM_FLAGS_ANYWHERE;
    if (usage != OSAllocator::UnknownUsage)
        flags |= usage;
    int protection = PROT_READ;
    if (writable)
        protection |= PROT_WRITE;
    vm_address_t address = 0;
    // Mach's vm_map accepts the alignment mask directly, so the kernel hands
    // back a suitably aligned region and no over-reservation is needed.
    // NOTE(review): the vm_map return code is not checked here — confirm
    // callers tolerate a zero base address on failure.
    vm_map(current_task(), &address, size, alignmentMask, flags, MEMORY_OBJECT_NULL, 0, FALSE, protection, PROT_READ | PROT_WRITE, VM_INHERIT_DEFAULT);
    return PageAllocationAligned(reinterpret_cast<void*>(address), size);
#else
    size_t alignmentDelta = alignment - pageSize();

    // Reserve with sufficient additional VM to correctly align.
    size_t reservationSize = size + alignmentDelta;
    void* reservationBase = OSAllocator::reserveUncommitted(reservationSize, usage, writable, false);

    // Select an aligned region within the reservation and commit.
    // If the reservation base is already aligned, use it as-is; otherwise
    // round up to the next multiple of |alignment| — which still fits within
    // the reservation thanks to the extra alignmentDelta bytes above.
    void* alignedBase = reinterpret_cast<uintptr_t>(reservationBase) & alignmentMask
        ? reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(reservationBase) & ~alignmentMask) + alignment)
        : reservationBase;
    OSAllocator::commit(alignedBase, size, writable, false);

    return PageAllocationAligned(alignedBase, size, reservationBase, reservationSize);
#endif
}
// Returns the backing storage for numPages pages starting at
// baseVirtualAddress to the OS and strips all access permissions, so any
// later touch of the decommitted range faults.
void decommitVirtualPages(uint8* baseVirtualAddress, size_t numPages)
{
	assert(isPageAligned(baseVirtualAddress));
	const size_t rangeNumBytes = numPages << getPreferredVirtualPageSizeLog2();

	// Let the kernel discard the physical pages backing the range.
	const int madviseResult = madvise(baseVirtualAddress, rangeNumBytes, MADV_DONTNEED);
	// A bare throw with no in-flight exception calls std::terminate, so
	// either failure below is fatal.
	if(madviseResult != 0) { throw; }

	// Remove read/write/execute access from the range.
	const int mprotectResult = mprotect(baseVirtualAddress, rangeNumBytes, PROT_NONE);
	if(mprotectResult != 0) { throw; }
}
// Constructs a JSStack backed by a lazily-committed page reservation.
// |capacity| is a register count and must be non-zero and page-aligned.
JSStack::JSStack(VM& vm, size_t capacity)
    : m_vm(vm)
    , m_end(0)
    , m_topCallFrame(vm.topCallFrame)
{
    ASSERT(capacity && isPageAligned(capacity));

    // Reserve (but do not yet commit) VM for the whole stack, rounded up to a
    // whole number of commit-sized chunks; pages are committed on demand.
    m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
    updateStackLimit(highAddress());
    // Nothing committed yet: the commit boundary starts at the high end.
    m_commitEnd = highAddress();
    m_lastStackTop = getBaseOfStack();
    disableErrorStackReserve();

    m_topCallFrame = 0;
}
// Constructs a JSStack sized from the per-thread stack usage option.
// The reservation and limit bookkeeping only exists in the C-loop LLInt
// build; in other builds the machine stack is used and this constructor
// only resets m_topCallFrame.
JSStack::JSStack(VM& vm)
    : m_vm(vm)
    , m_topCallFrame(vm.topCallFrame)
#if ENABLE(LLINT_C_LOOP)
    , m_end(0)
    , m_reservedZoneSizeInRegisters(0)
#endif
{
#if ENABLE(LLINT_C_LOOP)
    size_t capacity = Options::maxPerThreadStackUsage();
    ASSERT(capacity && isPageAligned(capacity));

    // Reserve (not commit) the whole stack, rounded up to a multiple of the
    // commit granularity; pages are committed as the stack grows.
    m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize, capacity), OSAllocator::JSVMStackPages);
    setStackLimit(highAddress());
    // Nothing committed yet: the commit boundary starts at the high end.
    m_commitTop = highAddress();
    m_lastStackTop = baseOfStack();
#endif // ENABLE(LLINT_C_LOOP)
    m_topCallFrame = 0;
}
// Unmaps numPages pages of virtual address space starting at the
// page-aligned baseVirtualAddress.
void freeVirtualPages(uint8* baseVirtualAddress, size_t numPages)
{
	assert(isPageAligned(baseVirtualAddress));
	const size_t rangeNumBytes = numPages << getPreferredVirtualPageSizeLog2();
	const int unmapResult = munmap(baseVirtualAddress, rangeNumBytes);
	// A bare throw with no in-flight exception calls std::terminate:
	// failure to unmap is treated as fatal.
	if(unmapResult != 0) { throw; }
}
// Makes numPages pages starting at the page-aligned baseVirtualAddress
// readable and writable. Returns true on success, false if mprotect failed.
bool commitVirtualPages(uint8* baseVirtualAddress, size_t numPages)
{
	assert(isPageAligned(baseVirtualAddress));
	const size_t rangeNumBytes = numPages << getPreferredVirtualPageSizeLog2();
	const int protectResult = mprotect(baseVirtualAddress, rangeNumBytes, PROT_READ | PROT_WRITE);
	return protectResult == 0;
}
// Releases an entire reserved virtual address range back to the OS.
// With MEM_RELEASE, VirtualFree requires dwSize to be 0 and the address to be
// the exact base returned by VirtualAlloc — it always frees the whole
// reservation — which is why the byte count derived from numPages is
// deliberately commented out below.
void freeVirtualPages(uint8* baseVirtualAddress,size_t numPages)
{
	assert(isPageAligned(baseVirtualAddress));
	auto result = VirtualFree(baseVirtualAddress,0/*numPages << getPageSizeLog2()*/,MEM_RELEASE);
	// A bare throw with no in-flight exception calls std::terminate:
	// failure to release is treated as fatal.
	if(!result) { throw; }
}
// Decommits the storage backing numPages pages starting at the page-aligned
// baseVirtualAddress, leaving the address range itself reserved.
void decommitVirtualPages(uint8* baseVirtualAddress, size_t numPages)
{
	assert(isPageAligned(baseVirtualAddress));
	const size_t rangeNumBytes = numPages << getPageSizeLog2();
	const auto decommitSucceeded = VirtualFree(baseVirtualAddress, rangeNumBytes, MEM_DECOMMIT);
	// A bare throw with no in-flight exception calls std::terminate:
	// failure to decommit is treated as fatal.
	if(!decommitSucceeded) { throw; }
}
// Changes the protection of numPages pages starting at the page-aligned
// baseVirtualAddress to the given MemoryAccess level. Returns true if
// VirtualProtect succeeded.
bool setVirtualPageAccess(uint8* baseVirtualAddress, size_t numPages, MemoryAccess access)
{
	assert(isPageAligned(baseVirtualAddress));
	const size_t rangeNumBytes = numPages << getPageSizeLog2();
	// VirtualProtect demands an out-parameter for the previous protection,
	// even though it is unused here.
	DWORD oldProtection = 0;
	const auto protectSucceeded = VirtualProtect(baseVirtualAddress, rangeNumBytes, memoryAccessAsWin32Flag(access), &oldProtection);
	return protectSucceeded != 0;
}
// Commits storage for numPages pages starting at the page-aligned
// baseVirtualAddress with the requested access. Returns true on success.
bool commitVirtualPages(uint8* baseVirtualAddress, size_t numPages, MemoryAccess access)
{
	assert(isPageAligned(baseVirtualAddress));
	const size_t rangeNumBytes = numPages << getPageSizeLog2();
	// On success VirtualAlloc(MEM_COMMIT) returns the base of the committed
	// region, which must equal the address we asked for.
	void* const committedBase = VirtualAlloc(baseVirtualAddress, rangeNumBytes, MEM_COMMIT, memoryAccessAsWin32Flag(access));
	return committedBase == baseVirtualAddress;
}