static bool do_protect(void *address, int size, int protect)
{
#ifdef TRACK_ALLOCATIONS
    uae_u32 allocated_size = find_allocation(address);
    assert(allocated_size == size);
#endif
#ifdef _WIN32
    DWORD old;
    if (VirtualProtect(address, size, protect_to_native(protect), &old) == 0) {
        uae_log("VM: uae_vm_protect(%p, %d, %d) VirtualProtect failed (%d)\n",
                address, size, protect, GetLastError());
        return false;
    }
#else
    if (mprotect(address, size, protect_to_native(protect)) != 0) {
        uae_log("VM: uae_vm_protect(%p, %d, %d) mprotect failed (%d)\n",
                address, size, protect, errno);
        return false;
    }
#endif
    return true;
}
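/*
 * Sketch only: the log messages inside do_protect() refer to a public
 * uae_vm_protect() entry point. It is presumably a thin wrapper along these
 * lines, but the actual definition is not part of this excerpt.
 */
bool uae_vm_protect(void *address, int size, int protect)
{
    return do_protect(address, size, protect);
}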
void *uae_vm_commit(void *address, uae_u32 size, int protect)
{
    uae_log("VM: Commit 0x%-8x bytes at %p (%s)\n",
            size, address, protect_description(protect));
#ifdef _WIN32
    int va_type = MEM_COMMIT;
    int va_protect = protect_to_native(protect);
    address = VirtualAlloc(address, size, va_type, va_protect);
#else
#ifdef CLEAR_MEMORY_ON_COMMIT
    do_protect(address, size, UAE_VM_READ_WRITE);
    memset(address, 0, size);
#endif
    do_protect(address, size, protect);
#endif
    return address;
}
static void *try_reserve(uintptr_t try_addr, uae_u32 size, int flags)
{
    void *address = NULL;
    if (try_addr) {
        uae_log("VM: Reserve 0x%-8x bytes, try address 0x%llx\n",
                size, (uae_u64) try_addr);
    } else {
        uae_log("VM: Reserve 0x%-8x bytes\n", size);
    }
#ifdef _WIN32
    int va_type = MEM_RESERVE;
    if (flags & UAE_VM_WRITE_WATCH) {
        va_type |= MEM_WRITE_WATCH;
    }
    int va_protect = protect_to_native(UAE_VM_NO_ACCESS);
    address = VirtualAlloc((void *) try_addr, size, va_type, va_protect);
    if (address == NULL) {
        return NULL;
    }
#else
    int mmap_flags = MAP_PRIVATE | MAP_ANON;
    address = mmap((void *) try_addr, size, PROT_NONE, mmap_flags, -1, 0);
    if (address == MAP_FAILED) {
        return NULL;
    }
#endif
#ifdef CPU_64_BIT
    if (flags & UAE_VM_32BIT) {
        uintptr_t end = (uintptr_t) address + size;
        if (address && end > (uintptr_t) 0x100000000ULL) {
            uae_log("VM: Reserve 0x%-8x bytes, got address 0x%llx (> 32-bit)\n",
                    size, (uae_u64) (uintptr_t) address);
#ifdef _WIN32
            VirtualFree(address, 0, MEM_RELEASE);
#else
            munmap(address, size);
#endif
            return NULL;
        }
    }
#endif
    return address;
}
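/*
 * Usage sketch (not from the original source): reserve an address-space
 * region with try_reserve(), then commit and enable access for part of it
 * with uae_vm_commit(). The sizes and the try address below are illustrative
 * values only.
 */
static void *example_reserve_then_commit(void)
{
    uae_u32 reserve_size = 16 * 1024 * 1024;
    void *base = try_reserve(0x40000000, reserve_size, UAE_VM_32BIT);
    if (base == NULL) {
        /* Fall back to letting the OS choose the address. */
        base = try_reserve(0, reserve_size, UAE_VM_32BIT);
    }
    if (base != NULL) {
        /* Make the first megabyte usable; the rest stays reserved / no-access. */
        uae_vm_commit(base, 1024 * 1024, UAE_VM_READ_WRITE);
    }
    return base;
}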
static void *uae_vm_alloc_with_flags(uae_u32 size, int flags, int protect)
{
    void *address = NULL;
    static bool first_allocation = true;
    if (first_allocation) {
        /* FIXME: log contents of /proc/self/maps on Linux */
        /* FIXME: use VirtualQuery function on Windows? */
        first_allocation = false;
    }
#ifdef LOG_ALLOCATIONS
    uae_log("VM: Allocate 0x%-8x bytes [%d] (%s)\n",
            size, flags, protect_description(protect));
#endif
#ifdef _WIN32
    int va_type = MEM_COMMIT | MEM_RESERVE;
    if (flags & UAE_VM_WRITE_WATCH) {
        va_type |= MEM_WRITE_WATCH;
    }
    int va_protect = protect_to_native(protect);
#else
    int mmap_flags = MAP_PRIVATE | MAP_ANON;
    int mmap_prot = protect_to_native(protect);
#endif
#ifndef CPU_64_BIT
    flags &= ~UAE_VM_32BIT;
#endif
    if (flags & UAE_VM_32BIT) {
        /* Stupid algorithm to find available space, but should
         * work well enough when there is not a lot of allocations. */
        int step = uae_vm_page_size();
        uae_u8 *p = (uae_u8 *) 0x40000000;
        uae_u8 *p_end = natmem_reserved - size;
        if (size > 1024 * 1024) {
            /* Reserve some space for smaller allocations */
            p += 32 * 1024 * 1024;
            step = 1024 * 1024;
        }
#ifdef HAVE_MAP_32BIT
        address = mmap(0, size, mmap_prot, mmap_flags | MAP_32BIT, -1, 0);
        if (address == MAP_FAILED) {
            address = NULL;
        }
#endif
        while (address == NULL) {
            if (p > p_end) {
                break;
            }
#ifdef _WIN32
            address = VirtualAlloc(p, size, va_type, va_protect);
#else
            address = mmap(p, size, mmap_prot, mmap_flags, -1, 0);
            // write_log("VM: trying %p step is 0x%x = %p\n", p, step, address);
            if (address == MAP_FAILED) {
                address = NULL;
            } else if (((uintptr_t) address) + size > (uintptr_t) 0xffffffff) {
                munmap(address, size);
                address = NULL;
            }
#endif
            p += step;
        }
    } else {
#ifdef _WIN32
        address = VirtualAlloc(NULL, size, va_type, va_protect);
#else
        address = mmap(0, size, mmap_prot, mmap_flags, -1, 0);
        if (address == MAP_FAILED) {
            address = NULL;
        }
#endif
    }
    if (address == NULL) {
        uae_log("VM: uae_vm_alloc(%u, %d, %d) mmap failed (%d)\n",
                size, flags, protect, errno);
        return NULL;
    }
#ifdef TRACK_ALLOCATIONS
    add_allocation(address, size);
#endif
#ifdef LOG_ALLOCATIONS
    uae_log("VM: %p\n", address);
#endif
    return address;
}
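/*
 * Illustrative aside (not part of the original file): on Linux/x86-64 the
 * MAP_32BIT flag asks the kernel for a mapping in the low 2 GiB, which is
 * why the variant above tries it before falling back to manual address
 * probing. A minimal standalone use, assuming the build defines
 * HAVE_MAP_32BIT as in the code above:
 */
#ifdef HAVE_MAP_32BIT
static void *example_map_low_2gb(size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON | MAP_32BIT, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}
#endif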
static void *uae_vm_alloc_with_flags(uae_u32 size, int flags, int protect)
{
    void *address = NULL;
    uae_log("VM: Allocate 0x%-8x bytes [%d] (%s)\n",
            size, flags, protect_description(protect));
#ifdef _WIN32
    int va_type = MEM_COMMIT | MEM_RESERVE;
    if (flags & UAE_VM_WRITE_WATCH) {
        va_type |= MEM_WRITE_WATCH;
    }
    int va_protect = protect_to_native(protect);
#ifdef CPU_64_BIT
    if (flags & UAE_VM_32BIT) {
        /* Stupid algorithm to find available space, but should
         * work well enough when there is not a lot of allocations. */
        uae_u8 *p = (uae_u8 *) 0x50000000;
        while (address == NULL) {
            if (p >= (void *) 0x60000000) {
                break;
            }
            address = VirtualAlloc(p, size, va_type, va_protect);
            p += uae_vm_page_size();
        }
    }
#endif
    if (!address) {
        address = VirtualAlloc(NULL, size, va_type, va_protect);
    }
#else
    //size = size < uae_vm_page_size() ? uae_vm_page_size() : size;
    int mmap_flags = MAP_PRIVATE | MAP_ANON;
    int mmap_prot = protect_to_native(protect);
#ifdef CPU_64_BIT
    if (flags & UAE_VM_32BIT) {
#ifdef HAVE_MAP_32BIT
        mmap_flags |= MAP_32BIT;
#else
        /* Stupid algorithm to find available space, but should
         * work well enough when there is not a lot of allocations. */
        uae_u8 *p = natmem_offset - 0x10000000;
        uae_u8 *p_end = p + 0x10000000;
        while (address == NULL) {
            if (p >= p_end) {
                break;
            }
            address = mmap(p, size, mmap_prot, mmap_flags, -1, 0);
            /* FIXME: check 32-bit result */
            if (address == MAP_FAILED) {
                address = NULL;
            }
            p += uae_vm_page_size();
        }
#endif
    }
#endif
    if (address == NULL) {
        address = mmap(0, size, mmap_prot, mmap_flags, -1, 0);
        if (address == MAP_FAILED) {
            address = NULL;
        }
    }
#endif
    if (address == NULL) {
        uae_log("VM: uae_vm_alloc(%u, %d, %d) mmap failed (%d)\n",
                size, flags, protect, errno);
        return NULL;
    }
#ifdef TRACK_ALLOCATIONS
    add_allocation(address, size);
#endif
    uae_log("VM: %p\n", address);
    return address;
}
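/*
 * try_alloc_32bit() is called by the allocator variant below, but its
 * definition is not part of this excerpt. The following is only a sketch of
 * what such a helper could look like, modeled on the probing loop in the
 * first variant above; the step size and the exact failure handling are
 * assumptions, not the original code.
 */
static void *try_alloc_32bit_sketch(uae_u32 size, int native_flags,
                                    int native_protect, uae_u8 *p, uae_u8 *p_end)
{
    void *address = NULL;
    int step = uae_vm_page_size();
    while (address == NULL && p <= p_end) {
#ifdef _WIN32
        address = VirtualAlloc(p, size, native_flags, native_protect);
#else
        address = mmap(p, size, native_protect, native_flags, -1, 0);
        if (address == MAP_FAILED) {
            address = NULL;
        } else if ((uintptr_t) address + size > (uintptr_t) 0xffffffff) {
            /* Mapped, but outside the 32-bit range; release and keep probing. */
            munmap(address, size);
            address = NULL;
        }
#endif
        p += step;
    }
    return address;
}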
static void *uae_vm_alloc_with_flags(uae_u32 size, int flags, int protect)
{
    void *address = NULL;
    static bool first_allocation = true;
    if (first_allocation) {
        /* FIXME: log contents of /proc/self/maps on Linux */
        /* FIXME: use VirtualQuery function on Windows? */
        first_allocation = false;
    }
#ifdef LOG_ALLOCATIONS
    uae_log("VM: Allocate 0x%-8x bytes [%d] (%s)\n",
            size, flags, protect_description(protect));
#endif
#ifdef _WIN32
    int native_flags = MEM_COMMIT | MEM_RESERVE;
    if (flags & UAE_VM_WRITE_WATCH) {
        native_flags |= MEM_WRITE_WATCH;
    }
    int native_protect = protect_to_native(protect);
#else
    int native_flags = MAP_PRIVATE | MAP_ANON;
    int native_protect = protect_to_native(protect);
#endif
#ifndef CPU_64_BIT
    flags &= ~UAE_VM_32BIT;
#endif
    if (flags & UAE_VM_32BIT) {
        /* Stupid algorithm to find available space, but should
         * work well enough when there is not a lot of allocations. */
        /* FIXME: Consider allocating a bigger chunk of memory, and manually
         * keep track of allocations. */
#if 1
        if (!address) {
            address = try_alloc_32bit(
                size, native_flags, native_protect,
                (uae_u8 *) 0x40000000, natmem_reserved - size);
        }
#endif
        if (!address && natmem_reserved < (uae_u8 *) 0x60000000) {
            address = try_alloc_32bit(
                size, native_flags, native_protect,
                (uae_u8 *) natmem_reserved + natmem_reserved_size,
                (uae_u8 *) 0xffffffff - size + 1);
        }
        if (!address) {
            address = try_alloc_32bit(
                size, native_flags, native_protect,
                (uae_u8 *) 0x20000000,
                min((uae_u8 *) 0x40000000, natmem_reserved - size));
        }
    } else {
#ifdef _WIN32
        address = VirtualAlloc(NULL, size, native_flags, native_protect);
#else
        address = mmap(0, size, native_protect, native_flags, -1, 0);
        if (address == MAP_FAILED) {
            address = NULL;
        }
#endif
    }
    if (address == NULL) {
        uae_log("VM: uae_vm_alloc(%u, %d, %d) mmap failed (%d)\n",
                size, flags, protect, errno);
        return NULL;
    }
#ifdef TRACK_ALLOCATIONS
    add_allocation(address, size);
#endif
#ifdef LOG_ALLOCATIONS
    uae_log("VM: %p\n", address);
#endif
    return address;
}
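/*
 * Sketch only: the failure log in the allocator prints "uae_vm_alloc(...)",
 * which suggests a public entry point along these lines wrapping
 * uae_vm_alloc_with_flags(); the real signature and any default flags may
 * differ from this guess.
 */
void *uae_vm_alloc(uae_u32 size, int flags, int protect)
{
    return uae_vm_alloc_with_flags(size, flags, protect);
}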