Example 1
static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

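	/*
	 * Lazily allocate the per-bin and per-large-size-class stats
	 * arrays; returning true signals failure to the caller.
	 */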
	if (astats->bstats == NULL) {
		astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins *
		    sizeof(malloc_bin_stats_t));
		if (astats->bstats == NULL)
			return (true);
	}
	if (astats->lstats == NULL) {
		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (astats->lstats == NULL)
			return (true);
	}

	return (false);
}
Example 2
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

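	/* base_alloc() does not zero its result, so clear the region here. */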
	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}
Example 3
	inline base_alloc CondExpOp(
		enum CompareOp     cop          ,
		const base_alloc&       left         ,
		const base_alloc&       right        ,
		const base_alloc&       exp_if_true  ,
		const base_alloc&       exp_if_false )
	{	// not used
		assert(false);

		// to avoid compiler error
		return base_alloc();
	}
Example 4
static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_initialized == false) {
#ifdef JEMALLOC_STATS
		unsigned i;
#endif

		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
		    (narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto RETURN;
		}
		memset(ctl_stats.arenas, 0, (narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
#ifdef JEMALLOC_STATS
		for (i = 0; i <= narenas; i++) {
			if (ctl_arena_init(&ctl_stats.arenas[i])) {
				ret = true;
				goto RETURN;
			}
		}
#endif
		ctl_stats.arenas[narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
RETURN:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
Example 5
static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_initialized == false) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		assert(narenas_auto == narenas_total_get());
		ctl_stats.narenas = narenas_auto;
		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto label_return;
		}
		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= ctl_stats.narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					ret = true;
					goto label_return;
				}
			}
		}
		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
Example 6
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
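		/*
		 * Reuse a cached node: the first word of each node on the
		 * free list stores the pointer to the next free node.
		 */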
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}
Example 7
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
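		/* Mark the recycled node's contents as undefined for Valgrind. */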
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
		    sizeof(extent_node_t));
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}
Example 8
bool
chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
{
    bool ret;
    unsigned i;
    off_t off;
    void *vaddr;
    size_t cumsize, voff;
    size_t sizes[nfds];

    malloc_mutex_lock(&swap_mtx);

    /* Get file sizes. */
    for (i = 0, cumsize = 0; i < nfds; i++) {
        off = lseek(fds[i], 0, SEEK_END);
        if (off == ((off_t)-1)) {
            ret = true;
            goto RETURN;
        }
        if (PAGE_CEILING(off) != off) {
            /* Truncate to a multiple of the page size. */
            off &= ~PAGE_MASK;
            if (ftruncate(fds[i], off) != 0) {
                ret = true;
                goto RETURN;
            }
        }
        sizes[i] = off;
        if (cumsize + off < cumsize) {
            /*
             * Cumulative file size is greater than the total
             * address space.  Bail out while it's still obvious
             * what the problem is.
             */
            ret = true;
            goto RETURN;
        }
        cumsize += off;
    }

    /* Round down to a multiple of the chunk size. */
    cumsize &= ~chunksize_mask;
    if (cumsize == 0) {
        ret = true;
        goto RETURN;
    }

    /*
     * Allocate a chunk-aligned region of anonymous memory, which will
     * be the final location for the memory-mapped files.
     */
    vaddr = chunk_alloc_mmap_noreserve(cumsize);
    if (vaddr == NULL) {
        ret = true;
        goto RETURN;
    }

    /* Overlay the files onto the anonymous mapping. */
    for (i = 0, voff = 0; i < nfds; i++) {
        void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
        if (addr == MAP_FAILED) {
            char buf[BUFERROR_BUF];

            buferror(errno, buf, sizeof(buf));
            malloc_write(
                "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
            malloc_write(buf);
            malloc_write("\n");
            if (opt_abort)
                abort();
            if (munmap(vaddr, voff) == -1) {
                buferror(errno, buf, sizeof(buf));
                malloc_write("<jemalloc>: Error in munmap(): ");
                malloc_write(buf);
                malloc_write("\n");
            }
            ret = true;
            goto RETURN;
        }
        assert(addr == (void *)((uintptr_t)vaddr + voff));

        /*
         * Tell the kernel that the mapping will be accessed randomly,
         * and that it should not gratuitously sync pages to the
         * filesystem.
         */
#ifdef MADV_RANDOM
        madvise(addr, sizes[i], MADV_RANDOM);
#endif
#ifdef MADV_NOSYNC
        madvise(addr, sizes[i], MADV_NOSYNC);
#endif

        voff += sizes[i];
    }

    swap_prezeroed = prezeroed;
    swap_base = vaddr;
    swap_end = swap_base;
    swap_max = (void *)((uintptr_t)vaddr + cumsize);

    /* Copy the fds array for mallctl purposes. */
    swap_fds = (int *)base_alloc(nfds * sizeof(int));
    if (swap_fds == NULL) {
        ret = true;
        goto RETURN;
    }
    memcpy(swap_fds, fds, nfds * sizeof(int));
    swap_nfds = nfds;

#ifdef JEMALLOC_STATS
    swap_avail = cumsize;
#endif

    swap_enabled = true;

    ret = false;
RETURN:
    malloc_mutex_unlock(&swap_mtx);
    return (ret);
}
Example 9
static rtree_leaf_elm_t *
rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
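	/*
	 * Carve the leaf array out of the global base allocator (b0),
	 * aligned to the cache line size.
	 */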
	return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
	    sizeof(rtree_leaf_elm_t), CACHELINE);
}