void *system_mmap_huge_pages(void *UNUSED, void *addr, size_t n, size_t align)
{
#ifndef HAVE_HUGETLBFS
  return system_mmap(UNUSED, addr, n, align);
#else
  static const int prot = PROT_READ | PROT_WRITE;
  static const int flags = MAP_PRIVATE;

  /* Huge pages can only be mapped at hugepage-aligned addresses, so raise
     any smaller alignment request to one huge page.  */
  if (align & _hugepage_mask)
    {
      log_mem("increasing alignment from %zu to %ld in huge page allocation\n",
              align, _hugepage_size);
      align = _hugepage_size;
    }

  /* Pad the request up to a whole number of huge pages.  */
  if (n & _hugepage_mask)
    {
      long r = n & _hugepage_mask;
      long padding = _hugepage_size - r;
      log_mem("adding %ld bytes to huge page allocation request\n", padding);
      n += padding;
    }

  void *p = NULL;
  if (_hugepage_fd >= 0)
    p = _mmap_lucky(addr, n, prot, flags, _hugepage_fd, 0, align);
  else
    /* No hugetlbfs file descriptor available: fall back to regular pages.  */
    p = system_mmap(UNUSED, addr, n, align);

  log_mem("mmap %zu bytes at %p from huge pages for a total of %zu\n",
          n, p, _update_total(n));
  return p;
#endif
}
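/* Illustration (standalone sketch, not part of this file): the rounding that
   system_mmap_huge_pages applies to the caller's size and alignment, assuming
   a 2 MiB huge page; the stand-in globals below mirror _hugepage_size and
   _hugepage_mask, whose real values come from the hugetlbfs configuration.  */

#include <stddef.h>
#include <stdio.h>

static const long hugepage_size = 2 * 1024 * 1024;      /* assumed 2 MiB */
static const long hugepage_mask = 2 * 1024 * 1024 - 1;

int main(void)
{
  size_t align = 4096;             /* caller-requested alignment */
  size_t n = 3 * 1024 * 1024;      /* caller-requested size */

  /* Alignment below one huge page is raised to one huge page.  */
  if (align & hugepage_mask)
    align = hugepage_size;

  /* The size is padded up to the next huge page boundary.  */
  if (n & hugepage_mask)
    n += hugepage_size - (n & hugepage_mask);

  /* Prints: align = 2097152, n = 4194304  */
  printf("align = %zu, n = %zu\n", align, n);
  return 0;
}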
static uint8_t *
alloc_chunk (struct Alloc *alloc, uint32_t size)
{
  /* Map a new page-aligned chunk and thread it onto the allocator's chunk
     list; the chunk header lives at the start of the mapping and the bytes
     after it are handed out by bumping 'brk'.  */
  size = round_to (size, 4096);
  uint8_t *map = system_mmap (0, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct AllocMmapChunk *chunk = (struct AllocMmapChunk *) map;
  chunk->buffer = map;
  chunk->size = size;
  chunk->brk = chunk_overhead ();
  chunk->next = alloc->chunks;
  alloc->chunks = chunk;
  MARK_UNDEFINED (chunk->buffer + chunk->brk, size - chunk->brk);
  return map;
}
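/* For context, a sketch of the pieces alloc_chunk relies on but which are not
   shown above: the AllocMmapChunk header layout (field names inferred from the
   assignments in alloc_chunk; exact types and order are assumptions), a
   plausible round_to, and the meaning of chunk_overhead.  MARK_UNDEFINED
   presumably tells a memory checker such as Valgrind that the payload bytes
   after the header are uninitialized.  The real definitions may differ.  */

#include <stdint.h>

struct AllocMmapChunk
{
  uint8_t *buffer;               /* start of the mmap'ed region (the header itself) */
  uint32_t size;                 /* total size of the mapping, in bytes */
  uint32_t brk;                  /* bump offset of the first free byte */
  struct AllocMmapChunk *next;   /* next chunk owned by this Alloc */
};

/* Plausible round_to: round 'size' up to the next multiple of 'mult',
   assuming 'mult' is a power of two (4096 above).  */
static uint32_t
round_to (uint32_t size, uint32_t mult)
{
  return (size + mult - 1) & ~(mult - 1);
}

/* chunk_overhead() is assumed to return the number of bytes reserved for the
   header at the start of each chunk, e.g. sizeof (struct AllocMmapChunk)
   rounded up to the allocator's minimum alignment.  */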