Example #1
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;
	pool_t *pool;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(arena);
	pool = arena->pool;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc(pool);
	if (node == NULL)
		return (NULL);

	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(pool, node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&pool->huge_mtx);
	extent_tree_ad_insert(&pool->huge, node);
	malloc_mutex_unlock(&pool->huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
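A minimal, standalone sketch of the overflow check at the top of huge_palloc(). In jemalloc, CHUNK_CEILING() rounds a size up to a multiple of the chunk size (a power of two); the 4 MiB chunk size below is an assumed value for illustration only, as the real value is configuration-dependent. When size is within chunksize - 1 of SIZE_MAX, the addition wraps around and the macro yields 0, which huge_palloc() interprets as size_t wrap-around and rejects with NULL.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed 4 MiB chunk size, for illustration only. */
#define CHUNKSIZE        ((size_t)1 << 22)
#define CHUNKSIZE_MASK   (CHUNKSIZE - 1)
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE_MASK) & ~CHUNKSIZE_MASK)

int main(void)
{
	/* Normal case: rounds up to the next chunk boundary. */
	printf("%zu\n", CHUNK_CEILING((size_t)1));      /* 4194304 */

	/* Wrap-around case: the addition overflows size_t and yields 0. */
	printf("%zu\n", CHUNK_CEILING(SIZE_MAX - 10));  /* 0 */

	return (0);
}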
Example #2
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned newind, oldind;

	malloc_mutex_lock(&ctl_mtx);
	newind = oldind = choose_arena(NULL)->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *arena;

		if (newind >= ctl_stats.narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		malloc_mutex_lock(&arenas_lock);
		if ((arena = arenas[newind]) == NULL && (arena =
		    arenas_extend(newind)) == NULL) {
			malloc_mutex_unlock(&arenas_lock);
			ret = EAGAIN;
			goto label_return;
		}
		assert(arena == arenas[newind]);
		arenas[oldind]->nthreads--;
		arenas[newind]->nthreads++;
		malloc_mutex_unlock(&arenas_lock);

		/* Set new arena association. */
		if (config_tcache) {
			tcache_t *tcache;
			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
			    (uintptr_t)TCACHE_STATE_MAX) {
				tcache_arena_dissociate(tcache);
				tcache_arena_associate(tcache, arena);
			}
		}
		arenas_tsd_set(&arena);
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
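For context, a hedged caller-side sketch (not part of the example above): thread_arena_ctl() is the handler behind jemalloc's "thread.arena" mallctl node, so an application reaches it through mallctl(). Reading returns the calling thread's current arena index; writing a valid index re-binds the thread (and its tcache, if enabled) to that arena. Depending on how jemalloc was built, the public symbol may be prefixed (e.g. je_mallctl).

#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int main(void)
{
	unsigned oldind, newind = 0;
	size_t sz = sizeof(oldind);
	int err;

	/* Read the current arena index and ask to move this thread to arena 0. */
	err = mallctl("thread.arena", &oldind, &sz, &newind, sizeof(newind));
	if (err != 0) {
		fprintf(stderr, "mallctl(\"thread.arena\"): %s\n", strerror(err));
		return (1);
	}
	printf("thread moved from arena %u to arena %u\n", oldind, newind);

	return (0);
}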
Example #3
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned newind, oldind;

	newind = oldind = choose_arena()->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *arena;

		if (newind >= narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto RETURN;
		}

		/* Initialize arena if necessary. */
		malloc_mutex_lock(&arenas_lock);
		if ((arena = arenas[newind]) == NULL)
			arena = arenas_extend(newind);
		arenas[oldind]->nthreads--;
		arenas[newind]->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
		if (arena == NULL) {
			ret = EAGAIN;
			goto RETURN;
		}

		/* Set new arena association. */
		ARENA_SET(arena);
#ifdef JEMALLOC_TCACHE
		{
			tcache_t *tcache = TCACHE_GET();
			if (tcache != NULL)
				tcache->arena = arena;
		}
#endif
	}

	ret = 0;
RETURN:
	return (ret);
}