void * thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; void *p; unsigned arena_ind; size_t size; int err; p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); free(p); size = sizeof(arena_ind); if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } size = sizeof(arena_ind); if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); test_fail("Error in mallctl(): %s", buf); } assert_u_eq(arena_ind, main_arena_ind, "Arena index should be same as for main thread"); return (NULL); }
static void * pages_map(void *addr, size_t size) { void *ret; /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); assert(ret != NULL); if (ret == MAP_FAILED) ret = NULL; else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. */ if (munmap(ret, size) == -1) { char buf[BUFERROR_BUF]; buferror(errno, buf, sizeof(buf)); malloc_printf("<jemalloc: Error in munmap(): %s\n", buf); if (opt_abort) abort(); } ret = NULL; } assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); return (ret); }
/*
 * Unmap the page-aligned region [addr, addr+size).  On Windows the region is
 * released with VirtualFree (size must be 0 for MEM_RELEASE); elsewhere with
 * munmap().  A failure is reported via malloc_printf(), and aborts the
 * process when opt_abort is set.
 */
static void
os_pages_unmap(void *addr, size_t size)
{
	/* Both the address and the size must be os_page-aligned. */
	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == (vaddr_t)addr);
	assert(ALIGNMENT_CEILING(size, os_page) == size);

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		/* The function name is spliced into the message at compile time. */
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort) {
			abort();
		}
	}
}
/*
 * Map size bytes of fresh memory, optionally hinting (not forcing) placement
 * at addr.  Returns NULL on failure, or when a non-NULL addr hint could not
 * be honored.  With JEMALLOC_ENABLE_MEMKIND, a nonzero partition routes the
 * request through the memkind partition mmap hook when one is installed.
 */
static void *
pages_map(void *addr, size_t size
#ifdef JEMALLOC_ENABLE_MEMKIND
, unsigned partition
#endif
)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
#ifdef JEMALLOC_ENABLE_MEMKIND
	if (partition && memkind_partition_mmap) {
		ret = memkind_partition_mmap(partition, addr, size);
	} else {
#endif /* JEMALLOC_ENABLE_MEMKIND */
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
#ifdef JEMALLOC_ENABLE_MEMKIND
	}
#endif /* JEMALLOC_ENABLE_MEMKIND */
	/* mmap() reports failure as MAP_FAILED, never NULL. */
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
	/* Postcondition: NULL, or exactly where the caller asked. */
	assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
	    ret == addr));
	return (ret);
}
static void * pages_map(void *addr, size_t size) { void *ret; assert(size != 0); #ifdef _WIN32 /* * If VirtualAlloc can't allocate at the given address when one is * given, it fails and returns NULL. */ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); #else /* * We don't use MAP_FIXED here, because it can cause the *replacement* * of existing mappings, and we only want to create new mappings. */ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); assert(ret != NULL); if (ret == MAP_FAILED) ret = NULL; else if (addr != NULL && ret != addr) { /* * We succeeded in mapping memory, but not in the right place. */ if (munmap(ret, size) == -1) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); malloc_printf("<jemalloc: Error in munmap(): %s\n", buf); if (opt_abort) abort(); } ret = NULL; } #endif #if defined(__ANDROID__) if (ret != NULL) { /* Name this memory as being used by libc */ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, ret, size, "libc_malloc"); } #endif assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && ret == addr)); return (ret); }
static void pages_unmap(void *addr, size_t size) { if (munmap(addr, size) == -1) { char buf[BUFERROR_BUF]; buferror(errno, buf, sizeof(buf)); malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf); if (opt_abort) abort(); } }
TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { err = posix_memalign(&ps[i], alignment, size); if (err) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER }
/*
 * Unmap [addr, addr+size).  On Windows the region is released with
 * VirtualFree (size must be 0 for MEM_RELEASE); elsewhere with munmap().
 * A failure is reported via malloc_printf(), and aborts the process when
 * opt_abort is set.
 */
static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		/* The function name is spliced into the message at compile time. */
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}
/*
 * Reallocate the huge allocation at ptr (currently oldsize bytes) to at
 * least size bytes (ideally size+extra).  First attempts an in-place resize
 * via huge_ralloc_no_move(); otherwise allocates new space, copies at most
 * size bytes, and frees the old allocation.  alignment > chunksize forces
 * huge_palloc(); zero requests zeroed memory; try_tcache_dalloc is forwarded
 * to the fallback deallocation path.  Returns the (possibly moved) pointer,
 * or NULL when allocation fails.
 */
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

#ifdef JEMALLOC_MREMAP
	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in dss.
	 */
	if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
	    == false && chunk_in_dss(ret) == false))) {
		size_t newsize = huge_salloc(ret);

		/*
		 * Remove ptr from the tree of huge allocations before
		 * performing the remap operation, in order to avoid the
		 * possibility of another thread acquiring that mapping before
		 * this one removes it from the tree.
		 */
		huge_dalloc(ptr, false);
		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			/*
			 * NOTE(review): this call passes only (buf, buflen);
			 * it assumes a buferror() variant that reads the
			 * error number internally (e.g. from errno).  Other
			 * call sites in this tree use a three-argument form
			 * — verify against the buferror() prototype.
			 */
			buferror(buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
			/* Fall back to copying into the new mapping. */
			memcpy(ret, ptr, copysize);
			chunk_dealloc_mmap(ptr, oldsize);
		}
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		iqallocx(ptr, try_tcache_dalloc);
	}
	return (ret);
}
/*
 * Enable swap-file-backed chunk allocation.  Measures the nfds files,
 * truncates each to a page-size multiple, reserves one contiguous
 * chunk-aligned anonymous region covering their cumulative (chunk-rounded)
 * size, and overlays each file onto that region with MAP_FIXED.  prezeroed
 * tells the allocator whether the file contents are already zeroed.
 * Serialized on swap_mtx.  Returns false on success, true on error.
 */
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
	bool ret;
	unsigned i;
	off_t off;
	void *vaddr;
	size_t cumsize, voff;
	size_t sizes[nfds];	/* VLA: per-file truncated sizes. */

	malloc_mutex_lock(&swap_mtx);

	/* Get file sizes. */
	for (i = 0, cumsize = 0; i < nfds; i++) {
		off = lseek(fds[i], 0, SEEK_END);
		if (off == ((off_t)-1)) {
			ret = true;
			goto RETURN;
		}
		if (PAGE_CEILING(off) != off) {
			/* Truncate to a multiple of the page size. */
			off &= ~PAGE_MASK;
			if (ftruncate(fds[i], off) != 0) {
				ret = true;
				goto RETURN;
			}
		}
		sizes[i] = off;
		/* Detect size_t overflow of the running total. */
		if (cumsize + off < cumsize) {
			/*
			 * Cumulative file size is greater than the total
			 * address space.  Bail out while it's still obvious
			 * what the problem is.
			 */
			ret = true;
			goto RETURN;
		}
		cumsize += off;
	}

	/* Round down to a multiple of the chunk size. */
	cumsize &= ~chunksize_mask;
	if (cumsize == 0) {
		ret = true;
		goto RETURN;
	}

	/*
	 * Allocate a chunk-aligned region of anonymous memory, which will
	 * be the final location for the memory-mapped files.
	 */
	vaddr = chunk_alloc_mmap_noreserve(cumsize);
	if (vaddr == NULL) {
		ret = true;
		goto RETURN;
	}

	/* Overlay the files onto the anonymous mapping. */
	for (i = 0, voff = 0; i < nfds; i++) {
		void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
		if (addr == MAP_FAILED) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write(
			    "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			/* Tear down the partially-built overlay. */
			if (munmap(vaddr, voff) == -1) {
				buferror(errno, buf, sizeof(buf));
				malloc_write("<jemalloc>: Error in munmap(): ");
				malloc_write(buf);
				malloc_write("\n");
			}
			ret = true;
			goto RETURN;
		}
		assert(addr == (void *)((uintptr_t)vaddr + voff));

		/*
		 * Tell the kernel that the mapping will be accessed randomly,
		 * and that it should not gratuitously sync pages to the
		 * filesystem.
		 */
#ifdef MADV_RANDOM
		madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
		madvise(addr, sizes[i], MADV_NOSYNC);
#endif

		voff += sizes[i];
	}

	/* Publish the swap region bounds. */
	swap_prezeroed = prezeroed;
	swap_base = vaddr;
	swap_end = swap_base;
	swap_max = (void *)((uintptr_t)vaddr + cumsize);

	/* Copy the fds array for mallctl purposes. */
	swap_fds = (int *)base_alloc(nfds * sizeof(int));
	if (swap_fds == NULL) {
		ret = true;
		goto RETURN;
	}
	memcpy(swap_fds, fds, nfds * sizeof(int));
	swap_nfds = nfds;

#ifdef JEMALLOC_STATS
	swap_avail = cumsize;
#endif

	swap_enabled = true;

	ret = false;
RETURN:
	malloc_mutex_unlock(&swap_mtx);
	return (ret);
}
/*
 * Reallocate the huge allocation at ptr (currently oldsize bytes) to at
 * least size bytes (ideally size+extra).  First attempts an in-place resize
 * via huge_ralloc_no_move(); otherwise allocates new space, copies at most
 * size bytes, and frees the old allocation.  A nonzero alignment forces
 * huge_palloc(); zero requests zeroed memory.  Returns the (possibly moved)
 * pointer, or NULL when allocation fails.
 */
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment != 0)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
#  ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
#  endif
#  ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
#  endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			/* Fall back to copying into the new mapping. */
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			/* mremap() moved the pages; drop only the metadata. */
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}