/* Try to reserve `len` bytes of MBLOCK_SIZE-aligned address space.
 *
 * We over-reserve by one megablock (len + MBLOCK_SIZE) so that an
 * aligned region of exactly `len` bytes is guaranteed to exist inside
 * the mapping, then unmap the slop on either side.
 *
 * hint: preferred start address passed through to my_mmap (may be
 *       ignored by the kernel).
 * Returns the aligned base address, or NULL if the reservation failed
 * (caller is expected to retry with a different size/hint).
 */
static void *
osTryReserveHeapMemory (W_ len, void *hint)
{
    void *base, *top;
    void *start, *end;

    /* We try to allocate len + MBLOCK_SIZE, because we need memory
       which is MBLOCK_SIZE aligned, and then we discard what we
       don't need */
    base = my_mmap(hint, len + MBLOCK_SIZE, MEM_RESERVE);
    if (base == NULL) {
        /* Reservation failed; let the caller decide how to proceed. */
        return NULL;
    }

    top = (void*)((W_)base + len + MBLOCK_SIZE);

    if (((W_)base & MBLOCK_MASK) != 0) {
        /* Misaligned base: carve out the aligned middle section and
           return the slop on both sides to the OS. */
        start = MBLOCK_ROUND_UP(base);
        end = MBLOCK_ROUND_DOWN(top);
        ASSERT(((W_)end - (W_)start) == len);

        if (munmap(base, (W_)start-(W_)base) < 0) {
            sysErrorBelch("unable to release slop before heap");
        }
        if (munmap(end, (W_)top-(W_)end) < 0) {
            sysErrorBelch("unable to release slop after heap");
        }
    } else {
        /* base is already aligned: the first `len` bytes are what we
           want, but we still over-reserved by MBLOCK_SIZE.  Release
           the trailing slop too, otherwise we leak one megablock of
           address space per aligned reservation. */
        start = base;
        if (munmap((void*)((W_)start + len), MBLOCK_SIZE) < 0) {
            sysErrorBelch("unable to release slop after heap");
        }
    }

    return start;
}
/* Reserve (but do not commit) *len + MBLOCK_SIZE bytes of address
 * space for the heap, starting near startAddress, and return the
 * MBLOCK_SIZE-aligned base within the reservation.
 *
 * startAddress: preferred base address (may be NULL to let the OS
 *               choose).
 * len:          in: requested usable heap size in bytes.
 *               NOTE(review): *len is read but never written back
 *               here — confirm callers do not expect it updated.
 * Exits the process on reservation failure.
 */
void *
osReserveHeapMemory (void *startAddress, W_ *len)
{
    void *start;

    heap_base = VirtualAlloc(startAddress, *len + MBLOCK_SIZE,
                             MEM_RESERVE, PAGE_READWRITE);
    if (heap_base == NULL) {
        if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY) {
            errorBelch("out of memory");
        } else {
            /* Note: must print *len (the requested size), not the
               pointer `len` — pointer arithmetic here would pass a
               pointer where %llu expects an integer (UB). */
            sysErrorBelch(
                "osReserveHeapMemory: VirtualAlloc MEM_RESERVE "
                "%llu bytes at address %p failed",
                *len + MBLOCK_SIZE, startAddress);
        }
        stg_exit(EXIT_FAILURE);
    }

    // VirtualFree MEM_RELEASE must always match a
    // previous MEM_RESERVE call, in address and size
    // so we necessarily leak some address space here,
    // before and after the aligned area
    // It is not a huge problem because we never commit
    // that memory
    start = MBLOCK_ROUND_UP(heap_base);

    return start;
}