//Initialize the temporary (scratch) memory region used by the atomic linear
//allocator: allocate one persistent backing block, record its bounds and the
//maximum single-chunk size, and point the atomic head at the start.
//NOTE(review): an equivalent newer definition of this function (using
//_foundation_config.temporary_memory) also appears in this file - confirm
//which definition should remain.
static void _atomic_allocate_initialize( uint64_t storagesize ) {
	//Enforce a minimum region size; anything smaller falls back to the build default
	if( storagesize < 1024 )
		storagesize = BUILD_SIZE_TEMPORARY_MEMORY;
	if( !storagesize ) {
		//Temporary memory disabled by build config - clear the region so the
		//rest of the allocator sees no storage instead of a zero-sized block
		memset( &_memory_temporary, 0, sizeof( _memory_temporary ) );
		return;
	}
	_memory_temporary.storage  = memory_allocate( 0, storagesize, 16, MEMORY_PERSISTENT );
	_memory_temporary.end      = pointer_offset( _memory_temporary.storage, storagesize );
	_memory_temporary.size     = storagesize;
	//Requests larger than 1/8 of the region bypass the temporary allocator
	_memory_temporary.maxchunk = ( storagesize / 8 );
	atomic_storeptr( &_memory_temporary.head, _memory_temporary.storage );
}
//Set up the temporary (scratch) memory region backing the atomic linear
//allocator. A zero configured size disables temporary memory entirely.
static void
_atomic_allocate_initialize(size_t storagesize) {
	void* storage;
	//Sizes below the minimum fall back to the configured default
	if (storagesize < 1024)
		storagesize = _foundation_config.temporary_memory;
	if (!storagesize) {
		//Disabled - clear all region state and bail out
		memset(&_memory_temporary, 0, sizeof(_memory_temporary));
		return;
	}
	storage = memory_allocate(0, storagesize, 16, MEMORY_PERSISTENT);
	_memory_temporary.storage = storage;
	_memory_temporary.end = pointer_offset(storage, storagesize);
	_memory_temporary.size = storagesize;
	//Cap on a single temporary allocation: one eighth of the region
	_memory_temporary.maxchunk = storagesize / 8;
	atomic_storeptr(&_memory_temporary.head, storage);
}
//Finalize the thread subsystem for the calling thread. On Apple platforms,
//release any thread-local storage blocks still registered in the global
//slot table before performing the common thread exit.
void
_thread_finalize(void) {
#if FOUNDATION_PLATFORM_APPLE
	int islot;
	for (islot = 0; islot < 1024; ++islot) {
		void* block = atomic_loadptr(&_thread_local_blocks[islot].block);
		if (block) {
			//Clear the slot first, then free the block it referenced
			_thread_local_blocks[islot].thread = 0;
			atomic_storeptr(&_thread_local_blocks[islot].block, 0);
			memory_deallocate(block);
		}
	}
#endif
	thread_exit();
}
//Raw aligned allocation backing the malloc-based memory system.
//When alignment is handled manually, the original (unaligned) pointer is
//stored in the pointer-sized slot immediately before the returned block so
//the deallocation path can recover it. Allocations satisfied through the
//virtual-memory/mmap low-address paths tag that stored pointer by setting
//its low bit (see the `| 1` stores below).
//The MEMORY_32BIT_ADDRESS hint requests memory in the low 4GiB address
//space on 64-bit targets.
static void* _memory_allocate_malloc_raw(size_t size, unsigned int align, unsigned int hint) {
	FOUNDATION_UNUSED(hint); //hint is only read on some platform/pointer-size paths
	//If we align manually, we must be able to retrieve the original pointer for passing to free()
	//Thus all allocations need to go through that path
#if FOUNDATION_PLATFORM_WINDOWS
# if FOUNDATION_SIZE_POINTER == 4
	//32-bit Windows: everything is in low address space, use _aligned_malloc directly
#  if BUILD_ENABLE_MEMORY_GUARD
	char* memory = _aligned_malloc((size_t)size + FOUNDATION_MAX_ALIGN * 3, align);
	if (memory)
		memory = _memory_guard_initialize(memory, (size_t)size);
	return memory;
#  else
	return _aligned_malloc((size_t)size, align);
#  endif
# else
	//64-bit Windows
	unsigned int padding, extra_padding = 0;
	size_t allocate_size;
	char* raw_memory;
	void* memory;
	long vmres;

	if (!(hint & MEMORY_32BIT_ADDRESS)) {
		//Normal path: over-allocate by one padding slot to hold the raw pointer
		padding = (align > FOUNDATION_SIZE_POINTER ? align : FOUNDATION_SIZE_POINTER);
#if BUILD_ENABLE_MEMORY_GUARD
		extra_padding = FOUNDATION_MAX_ALIGN * 3;
#endif
		raw_memory = _aligned_malloc((size_t)size + padding + extra_padding, align);
		if (raw_memory) {
			memory = raw_memory + padding; //Will be aligned since padding is multiple of alignment (minimum align/pad is pointer size)
			*((void**)memory - 1) = raw_memory;
			//Low bit clear marks a heap (_aligned_malloc) allocation
			FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#endif
			return memory;
		}
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY, STRING_CONST("Unable to allocate %" PRIsize " bytes of memory"), size);
		return 0;
	}

	//Low 32-bit address space requested: allocate through the kernel VM API
# if BUILD_ENABLE_MEMORY_GUARD
	extra_padding = FOUNDATION_MAX_ALIGN * 3;
# endif

	allocate_size = size + FOUNDATION_SIZE_POINTER + extra_padding + align;
	raw_memory = 0;
	//ZeroBits=1 constrains the allocation below the 4GiB boundary
	vmres = NtAllocateVirtualMemory(INVALID_HANDLE_VALUE, &raw_memory, 1, &allocate_size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
	if (vmres != 0) {
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY, STRING_CONST("Unable to allocate %" PRIsize " bytes of memory in low 32bit address space"), size);
		return 0;
	}

	memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER, align);
	//Tag low bit to mark a VM allocation for the deallocation path
	*((void**)memory - 1) = (void*)((uintptr_t)raw_memory | 1);
# if BUILD_ENABLE_MEMORY_GUARD
	memory = _memory_guard_initialize(memory, size);
# endif
	FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));

	return memory;
# endif

#else
	//POSIX platforms

# if FOUNDATION_SIZE_POINTER > 4
	if (!(hint & MEMORY_32BIT_ADDRESS))
# endif
	{
		//Normal path: malloc with room for alignment plus the stored raw pointer
#if BUILD_ENABLE_MEMORY_GUARD
		size_t extra_padding = FOUNDATION_MAX_ALIGN * 3;
#else
		size_t extra_padding = 0;
#endif
		size_t allocate_size = size + align + FOUNDATION_SIZE_POINTER + extra_padding;
		char* raw_memory = malloc(allocate_size);
		if (raw_memory) {
			void* memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER, align);
			*((void**)memory - 1) = raw_memory;
			FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#if BUILD_ENABLE_MEMORY_GUARD
			memory = _memory_guard_initialize(memory, size);
			FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
#endif
			return memory;
		}
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY, STRING_CONST("Unable to allocate %" PRIsize " bytes of memory (%" PRIsize " requested)"), size, allocate_size);
		return 0;
	}

# if FOUNDATION_SIZE_POINTER > 4
	//Low 32-bit address space requested on a 64-bit POSIX target: use mmap.
	//Two pointer-sized slots are reserved before the block: raw pointer
	//(tagged) and the total mapped size, needed for the eventual munmap.
	size_t allocate_size;
	char* raw_memory;
	void* memory;
# if BUILD_ENABLE_MEMORY_GUARD
	unsigned int extra_padding = FOUNDATION_MAX_ALIGN * 3;
#else
	unsigned int extra_padding = 0;
# endif

	allocate_size = size + align + FOUNDATION_SIZE_POINTER * 2 + extra_padding;

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

# ifndef MAP_32BIT
	//No MAP_32BIT flag available: scan a fixed low-address window by hinting
	//mmap with a monotonically advancing base address, restarting once from
	//the window start before giving up.
	//On MacOSX app needs to be linked with -pagezero_size 10000 -image_base 100000000 to
	// 1) Free up low 4Gb address range by reducing page zero size
	// 2) Move executable base address above 4Gb to free up more memory address space
#define MMAP_REGION_START ((uintptr_t)0x10000)
#define MMAP_REGION_END ((uintptr_t)0x80000000)
	static atomicptr_t baseaddr = { (void*)MMAP_REGION_START };
	bool retried = false;
	do {
		raw_memory = mmap(atomic_loadptr(&baseaddr), allocate_size, PROT_READ | PROT_WRITE,
		                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		if (((uintptr_t)raw_memory >= MMAP_REGION_START) &&
		        (uintptr_t)(raw_memory + allocate_size) < MMAP_REGION_END) {
			//Inside the window: advance the shared hint past this mapping
			atomic_storeptr(&baseaddr, pointer_offset(raw_memory, allocate_size));
			break;
		}
		//Mapped outside the window (mmap hint is only advisory) - undo it
		if (raw_memory && (raw_memory != MAP_FAILED)) {
			if (munmap(raw_memory, allocate_size) < 0)
				log_warn(HASH_MEMORY, WARNING_SYSTEM_CALL_FAIL,
				         STRING_CONST("Failed to munmap pages outside 32-bit range"));
		}
		raw_memory = 0;
		if (retried)
			break;
		retried = true;
		//Restart the scan from the beginning of the window once
		atomic_storeptr(&baseaddr, (void*)MMAP_REGION_START);
	} while (true);
# else
	raw_memory = mmap(0, allocate_size, PROT_READ | PROT_WRITE,
	                  MAP_32BIT | MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
	if (raw_memory == MAP_FAILED) {
		//MAP_32BIT failed - retry unconstrained, then verify the result is
		//actually below the 4GiB boundary
		raw_memory = mmap(0, allocate_size, PROT_READ | PROT_WRITE,
		                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);
		if (raw_memory == MAP_FAILED)
			raw_memory = 0;
		if ((uintptr_t)raw_memory > 0xFFFFFFFFULL) {
			if (munmap(raw_memory, allocate_size) < 0)
				log_warn(HASH_MEMORY, WARNING_SYSTEM_CALL_FAIL,
				         STRING_CONST("Failed to munmap pages outside 32-bit range"));
			raw_memory = 0;
		}
	}
# endif
	if (!raw_memory) {
		string_const_t errmsg = system_error_message(0);
		log_errorf(HASH_MEMORY, ERROR_OUT_OF_MEMORY,
		           STRING_CONST("Unable to allocate %" PRIsize " bytes of memory in low 32bit address space: %.*s"),
		           size, STRING_FORMAT(errmsg));
		return 0;
	}
	memory = _memory_align_pointer(raw_memory + FOUNDATION_SIZE_POINTER * 2, align);
	//Slot -1: raw pointer with low bit set to mark an mmap allocation
	//Slot -2: mapped size (presumably consumed by the deallocation path for munmap - confirm)
	*((uintptr_t*)memory - 1) = ((uintptr_t)raw_memory | 1);
	*((uintptr_t*)memory - 2) = (uintptr_t)allocate_size;
	FOUNDATION_ASSERT(!((uintptr_t)raw_memory & 1));
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
# if BUILD_ENABLE_MEMORY_GUARD
	memory = _memory_guard_initialize(memory, size);
	FOUNDATION_ASSERT(!((uintptr_t)memory & 1));
# endif
	return memory;
# endif

#endif
}