Example #1
static void weak_clear_entry_no_lock(azone_t *azone, weak_entry_t *entry, uintptr_t *weak_refs_count, auto_weak_callback_block_t **head)
{
    // clear referrers, update counters, update lists
    unsigned count = entry->referrers.num_allocated;
    unsigned index = 0;
    for (; index < count; ++index) {
        weak_referrer_t *ref = &entry->referrers.refs[index];
        if (ref->referrer) {
            if (azone->control.log & AUTO_LOG_WEAK) malloc_printf("%s: WEAK: clearing ref to %p at %p (value was %p)\n", auto_prelude(), entry->referent, ref->referrer, *ref->referrer);
            if (*ref->referrer != entry->referent) {
                malloc_printf("__weak value %p at location %p not equal to %p and so will not be cleared\n", *ref->referrer, ref->referrer, entry->referent);
                void **base = (void **)auto_zone_base_pointer((auto_zone_t*)azone, ref->referrer);
                if (base) {
                    auto_memory_type_t type = auto_zone_get_layout_type((auto_zone_t*)azone, base);
                    malloc_printf("...location is %s starting at %p with first slot value %p\n",
                                  (type & AUTO_OBJECT) ? "an object" : "a data block",
                                  base,
                                  *base);
                }
                continue;
            }
            *ref->referrer = NULL;
            ++*weak_refs_count;
            if (ref->block && ref->block->callback_function && !ref->block->next) {
                // chain it if isn't already chained & there is a callout to call
                ref->block->next = *head;
                *head = ref->block;
            }
        }
    }
    
    weak_entry_remove_no_lock(azone, entry);
}
Example #2
static test_status_t
p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
{
	test_status_t ret;

	if (do_malloc_init) {
		/*
		 * Make sure initialization occurs prior to running tests.
		 * Tests are special because they may use internal facilities
		 * prior to triggering initialization as a side effect of
		 * calling into the public API.
		 */
		if (nallocx(1, 0) == 0) {
			malloc_printf("Initialization error");
			return (test_status_fail);
		}
	}

	ret = test_status_pass;
	for (; t != NULL; t = va_arg(ap, test_t *)) {
		t();
		if (test_status > ret)
			ret = test_status;
	}

	malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
	    test_status_string(test_status_pass),
	    test_counts[test_status_pass], test_count,
	    test_status_string(test_status_skip),
	    test_counts[test_status_skip], test_count,
	    test_status_string(test_status_fail),
	    test_counts[test_status_fail], test_count);

	return (ret);
}
Example #3
void *
je_thread_start(void *arg)
{
	unsigned main_arena_ind = *(unsigned *)arg;
	void *p;
	unsigned arena_ind;
	size_t size;
	int err;

	p = malloc(1);
	if (p == NULL) {
		malloc_printf("%s(): Error in malloc()\n", __func__);
		return (void *)1;
	}
	free(p);

	size = sizeof(arena_ind);
	if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind,
	    sizeof(main_arena_ind)))) {
		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
		    strerror(err));
		return (void *)1;
	}

	size = sizeof(arena_ind);
	if ((err = mallctl("thread.arena", &arena_ind, &size, NULL,
	    0))) {
		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
		    strerror(err));
		return (void *)1;
	}
	assert(arena_ind == main_arena_ind);

	return (NULL);
}
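The two mallctl() calls above show jemalloc's read/write convention: oldp/oldlenp receive the current value and newp/newlen supply a replacement, with NULL/0 on whichever side is unused. A minimal read-only sketch against the public API (a sketch only, assuming an unprefixed jemalloc build so the symbol is plain mallctl):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);

	/* Read-only query: no new value is supplied. */
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0) {
		fprintf(stderr, "mallctl() failed\n");
		return (1);
	}
	printf("narenas: %u\n", narenas);
	return (0);
}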
Example #4
void* realloc(void* oldmem, size_t bytes)
{
    void *retval;
    int oldsize;
    jmalloc_header_t *jhead;
    void *ra = __builtin_return_address(0);

    if (oldmem)
    {
	jhead = oldmem;
	jhead--;
	if (jhead->magic == JMALLOC_MAGIC)
	    oldsize = jhead->len;
	else
	{
	    if (jhead->magic == JFREE_MAGIC)
	    {
		malloc_printf("MEMORY: %d about to realloc from %p memory %p "
		    "(%d->%d bytes), allocated from %p and already released "
		    "from %p\n", getpid(), ra, oldmem, jhead->len, bytes,
		    jhead->alloc_ra, jhead->free_ra);
		__display_chunk(mem2chunk(jhead));
	    }
	    else
	    {
		malloc_printf("MEMORY: %d about to realloc garbage %p from %p: "
		    "alloc_ra %p, free_ra %p, len %d, magic %#x\n",
		    getpid(), oldmem, ra, jhead->alloc_ra, jhead->free_ra,
		    jhead->len, jhead->magic);
	    }
#ifdef __CONFIG_RG_DBG_ULIBC_MALLOC_CRASH__
	    /* Give the print time to complete before crashing */
	    sleep(2);
	    *((int *)0) = 0xfaceface;
#endif
	    return NULL;
	}
    }
    else
	oldsize = 0;

    retval = malloc(bytes);
    if (retval && oldsize && bytes)
	memcpy(retval, oldmem, bytes < oldsize ? bytes : oldsize);

    if (retval)
    {
	jhead = retval;
	jhead--;
	jhead->alloc_ra = ra;
    }

    free(oldmem);
    return retval;
}
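The wrapper above relies on a jmalloc_header_t stored immediately before the user block, which is why "jhead = oldmem; jhead--;" rebases the pointer onto the metadata. The header definition is not part of this excerpt; a hypothetical layout consistent with the fields it touches (magic, len, alloc_ra, free_ra) might look like:

/* Hypothetical layout; the real definition is not shown in this excerpt. */
typedef struct jmalloc_header {
    void *alloc_ra; /* return address of the allocating call site */
    void *free_ra;  /* return address of the freeing call site */
    int len;        /* usable size of the user region */
    unsigned magic; /* JMALLOC_MAGIC while live, JFREE_MAGIC once freed */
} jmalloc_header_t;

/* The user pointer sits right after the header, so the metadata for a
 * block p is ((jmalloc_header_t *)p) - 1. */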
Example #5
int
main(void)
{
    malloc_printf("Test begin\n");

    test_bitmap_size();
    test_bitmap_init();
    test_bitmap_set();
    test_bitmap_unset();
    test_bitmap_sfu();

    malloc_printf("Test end\n");
    return (0);
}
Example #6
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_printf("<jemalloc: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}
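Because MAP_FIXED is avoided, addr is only a hint: the kernel may place the mapping elsewhere, in which case the function unmaps the result and returns NULL. Callers therefore need a fallback path; a minimal sketch, assuming it lives in the same file as pages_map (the hint address and chunk_size parameter are made up):

static void *
map_with_hint(size_t chunk_size)
{
	void *hint = (void *)0x40000000;	/* hypothetical preferred base */
	void *p = pages_map(hint, chunk_size);

	if (p == NULL) {
		/* The hint could not be honored; retry at any address. */
		p = pages_map(NULL, chunk_size);
	}
	return (p);
}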
Example #7
// Register a new weak reference.
// referent is the object being weakly pointed to
// referrer is the memory location that will be zeroed when referent dies
// Does not check whether referent is currently live (fixme probably should)
// Does not check whether referrer is already a weak reference location for 
//   the given referent or any other referent (fixme maybe should)
// Does not change the scannability of referrer; referrer should be non-scanned
void weak_register(azone_t *azone, const void *referent, void **referrer,  auto_weak_callback_block_t *block)
{
    weak_entry_t *entry;

    if (azone->control.log & AUTO_LOG_WEAK) malloc_printf("%s: WEAK: registering weak reference to %p at %p\n", auto_prelude(), referent, referrer);

    spin_lock(&azone->weak_refs_table_lock);
    if (*referrer) weak_unregister_no_lock(azone, *referrer, referrer);
    if (referent) {
        if ((entry = weak_entry_for_referent(azone, referent))) {
            append_referrer_no_lock(&entry->referrers, referrer, block);
        } 
        else {
            weak_entry_t new_entry;
            new_entry.referent = referent;
            new_entry.referrers.refs = NULL;
            new_entry.referrers.num_refs = 0;
            new_entry.referrers.num_allocated = 0;
            append_referrer_no_lock(&new_entry.referrers, referrer, block);
            weak_grow_maybe_no_lock(azone);
            azone->num_weak_refs++;
            weak_entry_insert_no_lock(azone, &new_entry);
        }
    }
    // make sure that anyone accessing this via a read gets the value
    *referrer = (void *)referent;
    spin_unlock(&azone->weak_refs_table_lock);
}
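Note that weak_register() first unregisters whatever *referrer currently points at, so re-registering the same slot with a NULL referent is enough to drop a registration. A minimal usage sketch, with a hypothetical non-scanned slot:

static void
weak_usage_sketch(azone_t *azone, void *target, void **weak_slot)
{
    /* weak_slot is zeroed automatically when target dies. */
    weak_register(azone, target, weak_slot, NULL);

    /* Registering a NULL referent later drops the registration,
     * because weak_register first unregisters the current *referrer. */
    weak_register(azone, NULL, weak_slot, NULL);
}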
Example #8
void
p_test_fini(void)
{

	test_counts[test_status]++;
	malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
}
Example #9
usword_t SubzoneBlockRef::dec_refcount_no_lock() const {
    Admin *admin = subzone()->admin();
    if (has_refcount()) {
        // non-zero reference count, check the overflow table.
        PtrIntHashMap &retains = admin->retains();
        PtrIntHashMap::iterator retain_iter = retains.find(address());
        if (retain_iter != retains.end() && retain_iter->first == address()) {
            if (--retain_iter->second == 1) {
                // transition from 2 -> 1
                retains.erase(retain_iter);
                return 1;
            } else {
                return retain_iter->second;
            }
        } else {
            // transition from 1 -> 0
            subzone()->clear_has_refcount(q());
            return 0;
        }
    }
    // underflow.
    malloc_printf("reference count underflow for %p, break on auto_refcount_underflow_error to debug.\n", address());
    auto_refcount_underflow_error(address());
    return -1;
}
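The function implies a two-level reference count: a has_refcount bit on the subzone covers the common count of exactly 1, and only counts of 2 or more live in the admin's overflow map, which is why the 2 -> 1 transition erases the map entry while 1 -> 0 merely clears the bit. A standalone sketch of the same idea, with a linear-scan side table standing in for the hash map (all names hypothetical):

#include <stddef.h>

#define SIDE_SLOTS 64
/* Side table holding only counts >= 2. */
static struct { void *key; unsigned count; } side[SIDE_SLOTS];

static unsigned
dec_ref(void *block, int *has_rc_bit)
{
    size_t i;

    if (!*has_rc_bit)
        return (unsigned)-1;            /* underflow */
    for (i = 0; i < SIDE_SLOTS; i++) {
        if (side[i].key == block) {
            if (--side[i].count == 1) {
                side[i].key = NULL;     /* 2 -> 1: entry no longer needed */
                return 1;
            }
            return side[i].count;       /* still >= 2 */
        }
    }
    *has_rc_bit = 0;                    /* 1 -> 0 */
    return 0;
}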
Example #10
static void
os_pages_unmap(void *addr, size_t size) {
	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == (vaddr_t)addr);
	assert(ALIGNMENT_CEILING(size, os_page) == size);

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		    "VirtualFree"
#else
		    "munmap"
#endif
		    "(): %s\n", buf);
		if (opt_abort) {
			abort();
		}
	}
}
Example #11
static void *
pages_map(void *addr, size_t size
#ifdef JEMALLOC_ENABLE_MEMKIND
, unsigned partition
#endif
)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
#ifdef JEMALLOC_ENABLE_MEMKIND
	if (partition && memkind_partition_mmap) {
		ret = memkind_partition_mmap(partition, addr, size);
	} else {
#endif /* JEMALLOC_ENABLE_MEMKIND */
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
#ifdef JEMALLOC_ENABLE_MEMKIND
	}
#endif /* JEMALLOC_ENABLE_MEMKIND */
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}
Example #12
usword_t LargeBlockRef::dec_refcount_no_lock() const {
    usword_t rc = refcount();
    if (rc == 0) {
        // underflow: usword_t is unsigned, so test for zero explicitly.
        malloc_printf("reference count underflow for %p, break on auto_refcount_underflow_error to debug\n", address());
        auto_refcount_underflow_error(address());
    } else {
        rc = rc - 1;
        _large->set_refcount(rc);
    }
    return rc;
}
Example #13
// Add new_entry to the zone's table of weak references.
// Does not check whether the referent is already in the table.
// Does not update num_weak_refs.
static void weak_entry_insert_no_lock(azone_t *azone, weak_entry_t *new_entry)
{
    weak_entry_t *table = azone->weak_refs_table;

    if (!table) { malloc_printf("no auto weak ref table!\n"); return; }

    unsigned table_size = azone->max_weak_refs;
    unsigned hash_index = hash(new_entry->referent) % table_size;
    unsigned index = hash_index;

    do {
        weak_entry_t *entry = table + index;
        if (entry->referent == NULL) {
            *entry = *new_entry;
            return;
        }
        index++; if (index == table_size) index = 0;
    } while (index != hash_index);
    malloc_printf("no room for new entry in auto weak ref table!\n");
}
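The table is an open-addressing hash with linear probing: start at hash(referent) % table_size, walk forward with wraparound, and stop at the first empty slot or when the scan returns to its start. A minimal standalone version of the probe loop:

#include <stddef.h>

/* Minimal linear-probing insert, mirroring the loop above. */
static int
probe_insert(void **table, unsigned size, void *key, unsigned h)
{
    unsigned start = h % size, i = start;

    do {
        if (table[i] == NULL) {
            table[i] = key;
            return 0;
        }
        if (++i == size) i = 0;     /* wrap around */
    } while (i != start);
    return -1;                      /* table full */
}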
Example #14
void
test_skip(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	malloc_vcprintf(NULL, NULL, format, ap);
	va_end(ap);
	malloc_printf("\n");
	test_status = test_status_skip;
}
Example #15
TEST_END

static void
print_filled_extents(const void *p, uint8_t c, size_t len)
{
	const uint8_t *pc = (const uint8_t *)p;
	size_t i, range0;
	uint8_t c0;

	malloc_printf("  p=%p, c=%#x, len=%zu:", p, c, len);
	range0 = 0;
	c0 = pc[0];
	for (i = 0; i < len; i++) {
		if (pc[i] != c0) {
			malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
			range0 = i;
			c0 = pc[i];
		}
	}
	malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
}
Example #16
int
main(void)
{
	int ret = 0;
	void *p;
	unsigned arena_ind;
	size_t size;
	int err;
	je_thread_t threads[NTHREADS];
	unsigned i;

	malloc_printf("Test begin\n");

	p = malloc(1);
	if (p == NULL) {
		malloc_printf("%s(): Error in malloc()\n", __func__);
		ret = 1;
		goto label_return;
	}

	size = sizeof(arena_ind);
	if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) {
		malloc_printf("%s(): Error in mallctl(): %s\n", __func__,
		    strerror(err));
		ret = 1;
		goto label_return;
	}

	for (i = 0; i < NTHREADS; i++) {
		je_thread_create(&threads[i], je_thread_start,
		    (void *)&arena_ind);
	}

	for (i = 0; i < NTHREADS; i++)
		je_thread_join(threads[i], (void *)&ret);

label_return:
	malloc_printf("Test end\n");
	return (ret);
}
Example #17
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
#if defined(__ANDROID__)
	if (ret != NULL) {
		/* Name this memory as being used by libc */
		prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, ret,
		    size, "libc_malloc");
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}
Example #18
void
p_test_fail(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	malloc_vcprintf(NULL, NULL, format, ap);
	format = va_arg(ap, const char *);
	malloc_vcprintf(NULL, NULL, format, ap);
	va_end(ap);
	malloc_printf("\n");
	test_status = test_status_fail;
}
Example #19
static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in munmap(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}
Example #20
int
main(void)
{
	int ret = 0;
	je_thread_t thread;

	malloc_printf("Test begin\n");

	je_thread_start(NULL);

	je_thread_create(&thread, je_thread_start, NULL);
	je_thread_join(thread, (void *)&ret);

	je_thread_start(NULL);

	je_thread_create(&thread, je_thread_start, NULL);
	je_thread_join(thread, (void *)&ret);

	je_thread_start(NULL);

	malloc_printf("Test end\n");
	return (ret);
}
Example #21
// Unregister an already-registered weak reference. 
// This is used when referrer's storage is about to go away, but referent 
//   isn't dead yet. (Otherwise, zeroing referrer later would be a 
//   bad memory access.)
// Does nothing if referent/referrer is not a currently active weak reference.
// fixme currently requires old referent value to be passed in (lame)
// fixme unregistration should be automatic if referrer is collected
static void weak_unregister_no_lock(azone_t *azone, const void *referent, void **referrer)
{
    weak_entry_t *entry;

    if (azone->control.log & AUTO_LOG_WEAK) malloc_printf("%s: WEAK: unregistering weak reference to %p at %p\n", auto_prelude(), referent, referrer);

    if ((entry = weak_entry_for_referent(azone, referent))) {
        remove_referrer_no_lock(&entry->referrers, referrer);
        if (entry->referrers.num_refs == 0) {
            weak_entry_remove_no_lock(azone, entry);
            azone->num_weak_refs--;
        }
    } 
}
Example #22
test_status_t
p_test(test_t *t, ...)
{
	test_status_t ret;
	va_list ap;

	/*
	 * Make sure initialization occurs prior to running tests.  Tests are
	 * special because they may use internal facilities prior to triggering
	 * initialization as a side effect of calling into the public API.  This
	 * is a final safety that works even if jemalloc_constructor() doesn't
	 * run, as for MSVC builds.
	 */
	if (nallocx(1, 0) == 0) {
		malloc_printf("Initialization error");
		return (test_status_fail);
	}

	ret = test_status_pass;
	va_start(ap, t);
	for (; t != NULL; t = va_arg(ap, test_t *)) {
		t();
		if (test_status > ret)
			ret = test_status;
	}
	va_end(ap);

	malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
	    test_status_string(test_status_pass),
	    test_counts[test_status_pass], test_count,
	    test_status_string(test_status_skip),
	    test_counts[test_status_skip], test_count,
	    test_status_string(test_status_fail),
	    test_counts[test_status_fail], test_count);

	return (ret);
}
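p_test() walks a NULL-terminated vararg list of test_t pointers, so callers append NULL as a sentinel. A minimal sketch with hypothetical test functions (real jemalloc tests are declared through the TEST_BEGIN/TEST_END harness macros instead):

static void test_foo(void) { /* exercise one case */ }
static void test_bar(void) { /* exercise another */ }

static test_status_t
run_all(void)
{
	/* The argument list must end with NULL. */
	return (p_test(test_foo, test_bar, NULL));
}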
Example #23
TEST_END

int
main(void) {
	/* Ensure tsd bootstrapped. */
	if (nallocx(1, 0) == 0) {
		malloc_printf("Initialization error");
		return test_status_fail;
	}

	return test_no_reentrancy(
	    test_tsd_main_thread,
	    test_tsd_sub_thread,
	    test_tsd_reincarnation);
}
Example #24
void
compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
    void (*func_a)(void), const char *name_b, void (*func_b)(void))
{
	timedelta_t timer_a, timer_b;
	char ratio_buf[6];

	time_func(&timer_a, nwarmup, niter, func_a);
	time_func(&timer_b, nwarmup, niter, func_b);

	timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
	malloc_printf("%"PRIu64" iterations, %s=%"PRIu64"us, "
	    "%s=%"PRIu64"us, ratio=1:%s\n",
	    niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b),
	    ratio_buf);
}
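Each candidate is a zero-argument function, run for nwarmup untimed rounds and niter measured iterations. A usage sketch with hypothetical benchmark bodies (mallocx/dallocx are jemalloc's extended allocation API):

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

static void bench_malloc(void) { free(malloc(64)); }
static void bench_mallocx(void) { dallocx(mallocx(64, 0), 0); }

static void
run_bench(void)
{
	/* 1000 warmup rounds, 10M timed iterations per candidate. */
	compare_funcs(1000, 10000000, "malloc", bench_malloc,
	    "mallocx", bench_mallocx);
}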
Example #25
File: tsd.c Project: DawidvC/chapel
TEST_END

int
main(void)
{

	/* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
	if (nallocx(1, 0) == 0) {
		malloc_printf("Initialization error");
		return (test_status_fail);
	}
	data_tsd_boot();

	return (test(
	    test_tsd_main_thread,
	    test_tsd_sub_thread));
}
Example #26
// Remove old_referrer from list, if it's present.
// Does not remove duplicates.
// fixme this is slow if old_referrer is not present.
static void remove_referrer_no_lock(weak_referrer_array_t *list, void **old_referrer)
{

    unsigned index = hash(old_referrer) % list->num_allocated;
    unsigned start_index = index, hash_displacement = 0;
    while (list->refs[index].referrer != old_referrer) {
        index++;
        hash_displacement++;
        if (index == list->num_allocated)
            index = 0;
        if (index == start_index || hash_displacement > list->max_hash_displacement) {
            malloc_printf("%s: attempted to remove unregistered weak referrer %p\n", auto_prelude(), old_referrer);
            return;
        }
    }
    list->refs[index].referrer = NULL;
    list->num_refs--;
}
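The early exit relies on max_hash_displacement, the longest distance any entry was ever bumped from its home slot during insertion; once a probe exceeds that bound, the referrer cannot be in the table. A sketch of the insert-side bookkeeping that would maintain the bound (names hypothetical; assumes a free slot exists):

static void
probe_insert_tracked(void **table, unsigned size, void *key, unsigned h,
    unsigned *max_disp)
{
    unsigned i = h % size, d = 0;

    while (table[i] != NULL) {          /* assumes the table is not full */
        if (++i == size) i = 0;
        d++;
    }
    table[i] = key;
    if (d > *max_disp)
        *max_disp = d;                  /* lookups may stop once past this */
}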
Example #27
void
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
	    " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
	    " nrelocs: %"FMTu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows,
	    (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
	if (config_debug) {
		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
	}
}
Example #28
void
ckh_delete(ckh_t *ckh)
{

	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows,
	    (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloc(ckh->tab);
	if (config_debug)
		memset(ckh, 0x5a, sizeof(ckh_t));
}
Example #29
void
ckh_delete(ckh_t *ckh)
{

	assert(ckh != NULL);
	dassert(ckh->magic == CKH_MAGIC);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows,
	    (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloc(ckh->tab);
#ifdef JEMALLOC_DEBUG
	memset(ckh, 0x5a, sizeof(ckh_t));
#endif
}
Example #30
static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}