Example #1
File: chunk_mmap.c  Project: Abioy/windmill
static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1) {
		char buf[BUFERROR_BUF];

		buferror(errno, buf, sizeof(buf));
		malloc_write("<jemalloc>: Error in munmap(): ");
		malloc_write(buf);
		malloc_write("\n");
		if (opt_abort)
			abort();
	}
}
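The helpers used above (buferror, malloc_write, opt_abort) are jemalloc internals. A minimal standalone sketch of the same munmap error-reporting pattern, with strerror(3)/fprintf standing in for buferror/malloc_write and a hypothetical local flag standing in for opt_abort:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/* Hypothetical stand-in for jemalloc's opt_abort option. */
static int opt_abort_sketch = 0;

static void
pages_unmap_sketch(void *addr, size_t size)
{
    if (munmap(addr, size) == -1) {
        /* strerror() replaces buferror(); fprintf replaces malloc_write(),
         * which in the real allocator avoids re-entering the heap. */
        fprintf(stderr, "pages_unmap_sketch: munmap(): %s\n",
            strerror(errno));
        if (opt_abort_sketch)
            abort();
    }
}

int
main(void)
{
    size_t sz = 4096;
    void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p != MAP_FAILED)
        pages_unmap_sketch(p, sz);
    return 0;
}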
Example #2
void je_malloc_disable_init() {
  if (pthread_atfork(je_malloc_disable_prefork,
      je_malloc_disable_postfork_parent, je_malloc_disable_postfork_child) != 0) {
    malloc_write("<jemalloc>: Error in pthread_atfork()\n");
    if (opt_abort)
      abort();
  }
}
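pthread_atfork(3) is the standard POSIX API here; the three jemalloc handlers acquire the allocator's mutexes before fork() and release them afterwards so the child never inherits a locked allocator. A minimal sketch of the same registration with hypothetical no-op handlers:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical handlers: a real allocator would take all of its mutexes in
 * prepare() and release them in the parent/child callbacks. */
static void prepare(void) { /* lock allocator state */ }
static void parent(void)  { /* unlock in the parent */ }
static void child(void)   { /* unlock/reinitialize in the child */ }

int
main(void)
{
    if (pthread_atfork(prepare, parent, child) != 0) {
        fprintf(stderr, "pthread_atfork() failed\n");
        abort();
    }
    return 0;
}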
Example #3
bool
pages_boot(void) {
	os_page = os_page_detect();
	if (os_page > PAGE) {
		malloc_write("<jemalloc>: Unsupported system page size\n");
		if (opt_abort) {
			abort();
		}
		return true;
	}

#ifndef _WIN32
	mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
#ifdef __CHERI_PURE_CAPABILITY__
	mmap_flags |= MAP_ALIGNED(LG_PAGE);
#endif

#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
	os_overcommits = os_overcommits_sysctl();
#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
	os_overcommits = os_overcommits_proc();
#  ifdef MAP_NORESERVE
	if (os_overcommits) {
		mmap_flags |= MAP_NORESERVE;
	}
#  endif
#else
	os_overcommits = false;
#endif

	init_thp_state();

#ifdef __FreeBSD__
	/*
	 * FreeBSD doesn't need the check; madvise(2) is known to work.
	 */
#else
	/* Detect lazy purge runtime support. */
	if (pages_can_purge_lazy) {
		bool committed = false;
		void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
		if (madv_free_page == NULL) {
			return true;
		}
		assert(pages_can_purge_lazy_runtime);
		if (pages_purge_lazy(madv_free_page, PAGE)) {
			pages_can_purge_lazy_runtime = false;
		}
		os_pages_unmap(madv_free_page, PAGE);
	}
#endif

	return false;
}
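pages_boot() compares the page size detected at run time against the PAGE constant the allocator was built with. A small sketch of that kind of check using sysconf(3), with ASSUMED_PAGE as a hypothetical stand-in for the compile-time value:

#include <stdio.h>
#include <unistd.h>

#define ASSUMED_PAGE 4096   /* hypothetical compile-time page size */

int
main(void)
{
    long os_page = sysconf(_SC_PAGESIZE);   /* run-time page size */
    if (os_page > ASSUMED_PAGE) {
        fprintf(stderr, "unsupported system page size: %ld\n", os_page);
        return 1;
    }
    printf("page size %ld is compatible with the build (%d)\n",
        os_page, ASSUMED_PAGE);
    return 0;
}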
Example #4
static void
pthread_create_once(void)
{
    pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
    if (pthread_create_fptr == NULL) {
        malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
            "\"pthread_create\")\n");
        abort();
    }

    isthreaded = true;
}
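The dlsym(RTLD_NEXT, ...) lookup lets a wrapper forward to the real pthread_create while noting that the process is about to become multi-threaded. A hedged interposition sketch, assuming it is compiled as a preloaded shared object on a glibc system; the lazy lookup is left unguarded here, whereas the original (named for pthread_once) presumably runs under pthread_once():

#define _GNU_SOURCE     /* RTLD_NEXT */
#include <dlfcn.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef int (*pthread_create_fn)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static pthread_create_fn real_pthread_create;
static int isthreaded_sketch;   /* stand-in for jemalloc's isthreaded flag */

int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
    void *(*start)(void *), void *arg)
{
    if (real_pthread_create == NULL) {
        real_pthread_create =
            (pthread_create_fn)dlsym(RTLD_NEXT, "pthread_create");
        if (real_pthread_create == NULL) {
            fprintf(stderr,
                "dlsym(RTLD_NEXT, \"pthread_create\") failed\n");
            abort();
        }
    }
    isthreaded_sketch = 1;  /* the process is about to go multi-threaded */
    return real_pthread_create(thread, attr, start, arg);
}

Build with cc -shared -fPIC -o interpose.so interpose.c (plus -ldl on older glibc) and run a program under LD_PRELOAD=./interpose.so.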
Example #5
File: chunk_mmap.c  Project: Abioy/windmill
static void *
pages_map(void *addr, size_t size, bool noreserve)
{
	void *ret;

	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	int flags = MAP_PRIVATE | MAP_ANON;
#ifdef MAP_NORESERVE
	if (noreserve)
		flags |= MAP_NORESERVE;
#endif
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in munmap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}

	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}
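The comment above is the key point: without MAP_FIXED the address is only a hint, so an existing mapping can never be silently replaced; if the kernel places the region elsewhere, pages_map() unmaps it and reports failure. A short sketch of that hint-then-verify idiom with plain mmap(2):

#include <stdio.h>
#include <sys/mman.h>

int
main(void)
{
    size_t sz = 1 << 20;

    /* Obtain a plausible address hint by mapping and unmapping a region. */
    void *hint = mmap(NULL, sz, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (hint == MAP_FAILED)
        return 1;
    munmap(hint, sz);

    /* No MAP_FIXED: the hint is advisory, so nothing already mapped at that
     * address can be silently replaced. */
    void *p = mmap(hint, sz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    if (p != hint) {
        /* Mapped, but not where we asked: give it back, as pages_map does. */
        printf("kernel chose %p instead of %p\n", p, hint);
    } else {
        printf("mapped at the hinted address %p\n", p);
    }
    munmap(p, sz);
    return 0;
}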
Example #6
File: chunk_mmap.c  Project: Abioy/windmill
bool
chunk_mmap_boot(void)
{

#ifdef NO_TLS
	if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
		malloc_write("<jemalloc>: Error in pthread_key_create()\n");
		return (true);
	}
#endif

	return (false);
}
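On builds without TLS, the per-thread mmap_unaligned flag lives in POSIX thread-specific data. A minimal sketch of the pthread_key_create / setspecific / getspecific round trip, using a hypothetical key of the same name:

#include <pthread.h>
#include <stdio.h>

static pthread_key_t mmap_unaligned_tsd_sketch;

int
main(void)
{
    /* No destructor: the stored value is just a flag, not a pointer that
     * needs freeing. */
    if (pthread_key_create(&mmap_unaligned_tsd_sketch, NULL) != 0) {
        fprintf(stderr, "pthread_key_create() failed\n");
        return 1;
    }
    pthread_setspecific(mmap_unaligned_tsd_sketch, (void *)1);
    printf("flag = %d\n",
        (int)(pthread_getspecific(mmap_unaligned_tsd_sketch) != NULL));
    return 0;
}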
Example #7
static void
init_thp_state(void) {
	if (!have_madvise_huge) {
		if (metadata_thp_enabled() && opt_abort) {
			malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
			abort();
		}
		goto label_error;
	}

	static const char sys_state_madvise[] = "always [madvise] never\n";
	static const char sys_state_always[] = "[always] madvise never\n";
	static const char sys_state_never[] = "always madvise [never]\n";
	char buf[sizeof(sys_state_madvise)];

#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	int fd = (int)syscall(SYS_open,
	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
	int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
	if (fd == -1) {
		goto label_error;
	}

	ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
	syscall(SYS_close, fd);
#else
	close(fd);
#endif

	if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_default;
	} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_always;
	} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
		init_system_thp_mode = thp_mode_never;
	} else {
		goto label_error;
	}
	return;
label_error:
	opt_thp = init_system_thp_mode = thp_mode_not_supported;
}
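The state file /sys/kernel/mm/transparent_hugepage/enabled marks the active THP mode with brackets, e.g. "always [madvise] never". A hedged sketch that reads the same file with plain stdio and reports the bracketed mode (Linux-only; prints a fallback message elsewhere):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char buf[64];
    FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/enabled", "r");
    if (f == NULL || fgets(buf, sizeof(buf), f) == NULL) {
        puts("THP state unavailable (not Linux, or THP not built in)");
        if (f != NULL)
            fclose(f);
        return 0;
    }
    fclose(f);

    /* The active mode is the bracketed token. */
    if (strstr(buf, "[always]") != NULL)
        puts("THP mode: always");
    else if (strstr(buf, "[madvise]") != NULL)
        puts("THP mode: madvise");
    else if (strstr(buf, "[never]") != NULL)
        puts("THP mode: never");
    else
        puts("THP mode: unrecognized");
    return 0;
}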
Example #8
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	if (base_pages != NULL) {
		/* TODO: remove this implementation restriction */
		malloc_write("<jemalloc>: Internal allocation limit reached\n");
		abort();
	}
	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
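base_pages_alloc() reserves one chunk up front, and later requests are carved out of it by advancing base_next_addr toward base_past_addr. A small sketch of that bump-allocation bookkeeping with hypothetical names, using malloc() in place of chunk_alloc() and omitting the alignment handling the real base allocator performs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical bump allocator over one pre-reserved region. */
static void *region_start, *region_next, *region_end;

static int
region_init(size_t size)
{
    region_start = malloc(size);    /* stands in for chunk_alloc() */
    if (region_start == NULL)
        return 1;                   /* true == failure, as in the example */
    region_next = region_start;
    region_end = (void *)((uintptr_t)region_start + size);
    return 0;
}

static void *
region_alloc(size_t size)
{
    if ((uintptr_t)region_end - (uintptr_t)region_next < size)
        return NULL;                /* region exhausted */
    void *ret = region_next;
    region_next = (void *)((uintptr_t)region_next + size);
    return ret;
}

int
main(void)
{
    if (region_init(4096))
        return 1;
    printf("first: %p, second: %p\n", region_alloc(64), region_alloc(64));
    free(region_start);
    return 0;
}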
Example #9
File: stats.c  Project: imace/kkS
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	int err;
	uint64_t epoch;
	size_t u64sz;
	bool general = true;
	bool merged = true;
	bool unmerged = true;
	bool bins = true;
	bool large = true;

	/*
	 * Refresh stats, in case mallctl() was called by the application.
	 *
	 * Check for OOM here, since refreshing the ctl cache can trigger
	 * allocation.  In practice, none of the subsequent mallctl()-related
	 * calls in this function will cause OOM if this one succeeds.
	 */
	epoch = 1;
	u64sz = sizeof(uint64_t);
	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
	if (err != 0) {
		if (err == EAGAIN) {
			malloc_write("<jemalloc>: Memory allocation failure in "
			    "mallctl(\"epoch\", ...)\n");
			return;
		}
		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
		    "...)\n");
		abort();
	}

	if (opts != NULL) {
		unsigned i;

		for (i = 0; opts[i] != '\0'; i++) {
			switch (opts[i]) {
			case 'g':
				general = false;
				break;
			case 'm':
				merged = false;
				break;
			case 'a':
				unmerged = false;
				break;
			case 'b':
				bins = false;
				break;
			case 'l':
				large = false;
				break;
			default:;
			}
		}
	}

	malloc_cprintf(write_cb, cbopaque,
	    "___ Begin jemalloc statistics ___\n");
	if (general) {
		int err;
		const char *cpv;
		bool bv;
		unsigned uv;
		ssize_t ssv;
		size_t sv, bsz, ssz, sssz, cpsz;

		bsz = sizeof(bool);
		ssz = sizeof(size_t);
		sssz = sizeof(ssize_t);
		cpsz = sizeof(const char *);

		CTL_GET("version", &cpv, const char *);
		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
		CTL_GET("config.debug", &bv, bool);
		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
		    bv ? "enabled" : "disabled");

#define	OPT_WRITE_BOOL(n)						\
		if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %s\n", bv ? "true" : "false");	\
		}
#define	OPT_WRITE_SIZE_T(n)						\
		if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			"  opt."#n": %zu\n", sv);			\
		}
#define	OPT_WRITE_SSIZE_T(n)						\
		if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": %zd\n", ssv);			\
		}
#define	OPT_WRITE_CHAR_P(n)						\
		if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0))	\
		    == 0) {						\
			malloc_cprintf(write_cb, cbopaque,		\
			    "  opt."#n": \"%s\"\n", cpv);		\
		}

		malloc_cprintf(write_cb, cbopaque,
		    "Run-time option settings:\n");
		OPT_WRITE_BOOL(abort)
		OPT_WRITE_SIZE_T(lg_chunk)
		OPT_WRITE_CHAR_P(dss)
		OPT_WRITE_SIZE_T(narenas)
		OPT_WRITE_SSIZE_T(lg_dirty_mult)
		OPT_WRITE_BOOL(stats_print)
		OPT_WRITE_BOOL(junk)
		OPT_WRITE_SIZE_T(quarantine)
		OPT_WRITE_BOOL(redzone)
		OPT_WRITE_BOOL(zero)
		OPT_WRITE_BOOL(utrace)
		OPT_WRITE_BOOL(valgrind)
		OPT_WRITE_BOOL(xmalloc)
		OPT_WRITE_BOOL(tcache)
		OPT_WRITE_SSIZE_T(lg_tcache_max)
		OPT_WRITE_BOOL(prof)
		OPT_WRITE_CHAR_P(prof_prefix)
		OPT_WRITE_BOOL(prof_active)
		OPT_WRITE_SSIZE_T(lg_prof_sample)
		OPT_WRITE_BOOL(prof_accum)
		OPT_WRITE_SSIZE_T(lg_prof_interval)
		OPT_WRITE_BOOL(prof_gdump)
		OPT_WRITE_BOOL(prof_final)
		OPT_WRITE_BOOL(prof_leak)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P

		malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);

		CTL_GET("arenas.narenas", &uv, unsigned);
		malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);

		malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
		    sizeof(void *));

		CTL_GET("arenas.quantum", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);

		CTL_GET("arenas.page", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);

		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
		if (ssv >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: %u:1\n",
			    (1U << ssv));
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: N/A\n");
		}
		if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
		    == 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Maximum thread-cached size class: %zu\n", sv);
		}
		if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
		    bv) {
			CTL_GET("opt.lg_prof_sample", &sv, size_t);
			malloc_cprintf(write_cb, cbopaque,
			    "Average profile sample interval: %"PRIu64
			    " (2^%zu)\n", (((uint64_t)1U) << sv), sv);

			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
			if (ssv >= 0) {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: %"PRIu64
				    " (2^%zd)\n",
				    (((uint64_t)1U) << ssv), ssv);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: N/A\n");
			}
		}
		CTL_GET("opt.lg_chunk", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
		    (ZU(1) << sv), sv);
	}

	if (config_stats) {
		size_t *cactive;
		size_t allocated, active, mapped;
		size_t chunks_current, chunks_high;
		uint64_t chunks_total;
		size_t huge_allocated;
		uint64_t huge_nmalloc, huge_ndalloc;

		CTL_GET("stats.cactive", &cactive, size_t *);
		CTL_GET("stats.allocated", &allocated, size_t);
		CTL_GET("stats.active", &active, size_t);
		CTL_GET("stats.mapped", &mapped, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "Allocated: %zu, active: %zu, mapped: %zu\n",
		    allocated, active, mapped);
		malloc_cprintf(write_cb, cbopaque,
		    "Current active ceiling: %zu\n", atomic_read_z(cactive));

		/* Print chunk stats. */
		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
		CTL_GET("stats.chunks.high", &chunks_high, size_t);
		CTL_GET("stats.chunks.current", &chunks_current, size_t);
		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks   "
		    "highchunks    curchunks\n");
		malloc_cprintf(write_cb, cbopaque,
		    "  %13"PRIu64" %12zu %12zu\n",
		    chunks_total, chunks_high, chunks_current);

		/* Print huge stats. */
		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "huge: nmalloc      ndalloc    allocated\n");
		malloc_cprintf(write_cb, cbopaque,
		    " %12"PRIu64" %12"PRIu64" %12zu\n",
		    huge_nmalloc, huge_ndalloc, huge_allocated);

		if (merged) {
			unsigned narenas;

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i, ninitialized;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized,
				    &isz, NULL, 0);
				for (i = ninitialized = 0; i < narenas; i++) {
					if (initialized[i])
						ninitialized++;
				}

				if (ninitialized > 1 || unmerged == false) {
					/* Print merged arena stats. */
					malloc_cprintf(write_cb, cbopaque,
					    "\nMerged arenas stats:\n");
					stats_arena_print(write_cb, cbopaque,
					    narenas, bins, large);
				}
			}
		}

		if (unmerged) {
			unsigned narenas;

			/* Print stats for each arena. */

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized,
				    &isz, NULL, 0);

				for (i = 0; i < narenas; i++) {
					if (initialized[i]) {
						malloc_cprintf(write_cb,
						    cbopaque,
						    "\narenas[%u]:\n", i);
						stats_arena_print(write_cb,
						    cbopaque, i, bins, large);
					}
				}
			}
		}
	}
	malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
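stats_print() drives everything through mallctl("epoch", ...) and the CTL_GET/OPT_WRITE macros. The same machinery is reachable from application code through jemalloc's public API; a sketch assuming a default (unprefixed) jemalloc install with its standard header (some distributions build with a je_ prefix instead):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>  /* assumes a standard jemalloc install */

int
main(void)
{
    void *p = malloc(1 << 16);

    /* Refresh the stats snapshot, as stats_print() itself does first. */
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    if (mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch)) != 0)
        fprintf(stderr, "mallctl(\"epoch\") failed\n");

    size_t allocated, len = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &len, NULL, 0) == 0)
        printf("stats.allocated: %zu\n", allocated);

    /* "g" suppresses the general section, as in the opts parsing above. */
    malloc_stats_print(NULL, NULL, "g");

    free(p);
    return 0;
}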
Example #10
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
    bool ret;
    unsigned i;
    off_t off;
    void *vaddr;
    size_t cumsize, voff;
    size_t sizes[nfds];

    malloc_mutex_lock(&swap_mtx);

    /* Get file sizes. */
    for (i = 0, cumsize = 0; i < nfds; i++) {
        off = lseek(fds[i], 0, SEEK_END);
        if (off == ((off_t)-1)) {
            ret = true;
            goto RETURN;
        }
        if (PAGE_CEILING(off) != off) {
            /* Truncate to a multiple of the page size. */
            off &= ~PAGE_MASK;
            if (ftruncate(fds[i], off) != 0) {
                ret = true;
                goto RETURN;
            }
        }
        sizes[i] = off;
        if (cumsize + off < cumsize) {
            /*
             * Cumulative file size is greater than the total
             * address space.  Bail out while it's still obvious
             * what the problem is.
             */
            ret = true;
            goto RETURN;
        }
        cumsize += off;
    }

    /* Round down to a multiple of the chunk size. */
    cumsize &= ~chunksize_mask;
    if (cumsize == 0) {
        ret = true;
        goto RETURN;
    }

    /*
     * Allocate a chunk-aligned region of anonymous memory, which will
     * be the final location for the memory-mapped files.
     */
    vaddr = chunk_alloc_mmap_noreserve(cumsize);
    if (vaddr == NULL) {
        ret = true;
        goto RETURN;
    }

    /* Overlay the files onto the anonymous mapping. */
    for (i = 0, voff = 0; i < nfds; i++) {
        void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
        if (addr == MAP_FAILED) {
            char buf[BUFERROR_BUF];

            buferror(errno, buf, sizeof(buf));
            malloc_write(
                "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
            malloc_write(buf);
            malloc_write("\n");
            if (opt_abort)
                abort();
            if (munmap(vaddr, voff) == -1) {
                buferror(errno, buf, sizeof(buf));
                malloc_write("<jemalloc>: Error in munmap(): ");
                malloc_write(buf);
                malloc_write("\n");
            }
            ret = true;
            goto RETURN;
        }
        assert(addr == (void *)((uintptr_t)vaddr + voff));

        /*
         * Tell the kernel that the mapping will be accessed randomly,
         * and that it should not gratuitously sync pages to the
         * filesystem.
         */
#ifdef MADV_RANDOM
        madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
        madvise(addr, sizes[i], MADV_NOSYNC);
#endif

        voff += sizes[i];
    }

    swap_prezeroed = prezeroed;
    swap_base = vaddr;
    swap_end = swap_base;
    swap_max = (void *)((uintptr_t)vaddr + cumsize);

    /* Copy the fds array for mallctl purposes. */
    swap_fds = (int *)base_alloc(nfds * sizeof(int));
    if (swap_fds == NULL) {
        ret = true;
        goto RETURN;
    }
    memcpy(swap_fds, fds, nfds * sizeof(int));
    swap_nfds = nfds;

#ifdef JEMALLOC_STATS
    swap_avail = cumsize;
#endif

    swap_enabled = true;

    ret = false;
RETURN:
    malloc_mutex_unlock(&swap_mtx);
    return (ret);
}
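The central trick is overlaying each file onto a slice of a pre-reserved anonymous region with MAP_SHARED | MAP_FIXED, which is safe only because the target range already belongs to the reservation. A hedged sketch of that overlay using a temporary file in place of the swap files:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
    size_t sz = 4096;

    /* A scratch file, extended to one page. */
    char path[] = "/tmp/overlayXXXXXX";
    int fd = mkstemp(path);
    if (fd == -1 || ftruncate(fd, (off_t)sz) != 0)
        return 1;

    /* Reserve an anonymous region, then overlay the file on top of it.
     * MAP_FIXED is safe here because the address lies inside our own
     * reservation, not someone else's mapping. */
    void *base = mmap(NULL, sz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED)
        return 1;
    void *addr = mmap(base, sz, PROT_READ | PROT_WRITE,
        MAP_SHARED | MAP_FIXED, fd, 0);
    if (addr == MAP_FAILED)
        return 1;

    strcpy(addr, "written through the overlay");
    printf("%s at %p\n", (char *)addr, addr);

    munmap(base, sz);
    close(fd);
    unlink(path);
    return 0;
}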
Example #11
File: huge.c  Project: 08keelr/TrinityCore
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
	if (ret != NULL)
		return (ret);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment != 0)
		ret = huge_palloc(size + extra, alignment, zero);
	else
		ret = huge_malloc(size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0)
			ret = huge_palloc(size, alignment, zero);
		else
			ret = huge_malloc(size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;

	/*
	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
	 * source nor the destination are in swap or dss.
	 */
#ifdef JEMALLOC_MREMAP_FIXED
	if (oldsize >= chunksize
#  ifdef JEMALLOC_SWAP
	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
	    chunk_in_swap(ret) == false))
#  endif
#  ifdef JEMALLOC_DSS
	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
#  endif
	    ) {
		size_t newsize = huge_salloc(ret);

		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
		    ret) == MAP_FAILED) {
			/*
			 * Assuming no chunk management bugs in the allocator,
			 * the only documented way an error can occur here is
			 * if the application changed the map type for a
			 * portion of the old allocation.  This is firmly in
			 * undefined behavior territory, so write a diagnostic
			 * message, and optionally abort.
			 */
			char buf[BUFERROR_BUF];

			buferror(errno, buf, sizeof(buf));
			malloc_write("<jemalloc>: Error in mremap(): ");
			malloc_write(buf);
			malloc_write("\n");
			if (opt_abort)
				abort();
			memcpy(ret, ptr, copysize);
			idalloc(ptr);
		} else
			huge_dalloc(ptr, false);
	} else
#endif
	{
		memcpy(ret, ptr, copysize);
		idalloc(ptr);
	}
	return (ret);
}
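huge_ralloc() first tries to resize in place, and then prefers mremap(2) over memcpy() when both mappings are plain mmap chunks. A hedged, Linux-only sketch of the simpler mremap path, growing an anonymous mapping and letting the kernel move it if necessary (the MREMAP_FIXED move into a pre-allocated destination used above is omitted):

#define _GNU_SOURCE     /* mremap(2) is Linux-specific */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
    size_t oldsz = 1 << 20, newsz = 4 << 20;

    void *p = mmap(NULL, oldsz, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    memset(p, 0xa5, oldsz);

    /* Grow the mapping; MREMAP_MAYMOVE lets the kernel relocate it, which
     * remaps the existing pages instead of copying them byte by byte. */
    void *q = mremap(p, oldsz, newsz, MREMAP_MAYMOVE);
    if (q == MAP_FAILED) {
        munmap(p, oldsz);
        return 1;
    }
    printf("resized %zu -> %zu bytes, %s\n", oldsz, newsz,
        q == p ? "in place" : "moved");
    munmap(q, newsz);
    return 0;
}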