Example #1
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
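
The `csize == 0` check above works because `CHUNK_CEILING()` rounds the request up to a chunk boundary using wrap-around arithmetic, so any `size` within `chunksize - 1` of `SIZE_MAX` overflows to 0. Below is a minimal standalone sketch of that rounding idiom; the 4 MiB `chunksize` is only an illustrative assumption (real jemalloc derives it from its chunk configuration), and this snippet is not part of any of the examples.

#include <stdint.h>
#include <stdio.h>

/* Illustrative geometry only; jemalloc configures the real values. */
#define chunksize	((size_t)1 << 22)	/* assume 4 MiB chunks */
#define chunksize_mask	(chunksize - 1)

/* Round a request up to a chunk boundary; wraps to 0 near SIZE_MAX. */
#define CHUNK_CEILING(s)	(((s) + chunksize_mask) & ~chunksize_mask)

int
main(void)
{
	size_t ok = (size_t)5 << 20;		/* 5 MiB rounds up to 8 MiB */
	size_t too_big = SIZE_MAX - 100;	/* wraps around to 0 */

	printf("%zu -> %zu\n", ok, CHUNK_CEILING(ok));
	printf("%zu -> %zu\n", too_big, CHUNK_CEILING(too_big));	/* 0 */
	return (0);
}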
Example #2
File: huge.c Project: KaiZhang666/nvml
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;
	pool_t *pool;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(arena);
	pool = arena->pool;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc(pool);
	if (node == NULL)
		return (NULL);

	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(pool, node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&pool->huge_mtx);
	extent_tree_ad_insert(&pool->huge, node);
	malloc_mutex_unlock(&pool->huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
Example #3
void *
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		if (config_fill && opt_junk && size < oldsize) {
			memset((void *)((uintptr_t)ptr + size), 0x5a,
			    oldsize - size);
		}
		return (ptr);
	}

	/* Reallocation would require a move. */
	return (NULL);
}
Example #4
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
			return (false);
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
		    oldsize, usize_min, zero))
			return (false);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
		    zero);
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
	return (true);
}
Example #5
File: huge.c Project: Abioy/windmill
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}
Example #6
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	base_pages = chunk_alloc_base(csize);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
Example #7
/* je_iterate calls callback for each allocation found in the memory region
 * between [base, base+size).  base will be rounded down to a jemalloc chunk
 * boundary, and base+size will be rounded up to a chunk boundary.  If no memory
 * managed by jemalloc is found in the requested region, je_iterate returns -1
 * and sets errno to EINVAL.
 *
 * je_iterate must be called when no allocations are in progress, either
 * when single-threaded (for example just after a fork), or between
 * jemalloc_prefork() and jemalloc_postfork_parent().  The callback must
 * not attempt to allocate with jemalloc.
 */
int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {

  int error = EINVAL;
  uintptr_t ptr = (uintptr_t)CHUNK_ADDR2BASE(base);
  uintptr_t end = CHUNK_CEILING(base + size);

  while (ptr < end) {
    extent_node_t *node;

    node = chunk_lookup((void *)ptr, false);
    if (node == NULL) {
      ptr += chunksize;
      continue;
    }

    assert(extent_node_achunk_get(node) ||
        (uintptr_t)extent_node_addr_get(node) == ptr);

    error = 0;
    if (extent_node_achunk_get(node)) {
      /* Chunk */
      arena_chunk_t *chunk = (arena_chunk_t *)ptr;
      ptr += chunksize;

      if (&chunk->node != node) {
          /* Empty retained chunk */
          continue;
      }

      je_iterate_chunk(chunk, callback, arg);
    } else if ((uintptr_t)extent_node_addr_get(node) == ptr) {
      /* Huge allocation */
      callback(ptr, extent_node_size_get(node), arg);
      ptr += extent_node_size_get(node);
    }
  }

  if (error) {
    set_errno(error);
    return -1;
  }

  return 0;
}
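
As a rough usage sketch of the contract described in the comment above (a quiescent, single-threaded caller and a callback that never allocates through jemalloc): the `sum_cb` helper, the `live_bytes_in_region` wrapper, and the 1 GiB scan window below are made up for illustration and are not part of jemalloc.

#include <stddef.h>
#include <stdint.h>

/* Prototype of the iterator defined above. */
int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);

/* Accumulate the total size of live allocations; never allocates itself. */
static void
sum_cb(uintptr_t ptr, size_t size, void *arg)
{
  (void)ptr;
  *(size_t *)arg += size;
}

/* Returns 0 if no jemalloc-managed memory was found in the region. */
static size_t
live_bytes_in_region(uintptr_t region_base)
{
  size_t total = 0;

  /* Caller is assumed to be quiescent, e.g. just after fork(). */
  if (je_iterate(region_base, (size_t)1 << 30, sum_cb, &total) == -1)
    return 0;
  return total;
}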
Example #8
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
Example #9
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	if (base_pages != NULL) {
		/* TODO: remove this implementation restriction */
		malloc_write("<jemalloc>: Internal allocation limit reached\n");
		abort();
	}
	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
Example #10
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t usize;

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize)
		return (true);

	assert(s2u(oldsize) == oldsize);
	usize = s2u(size);
	if (usize == 0) {
		/* size_t overflow. */
		return (true);
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
		    zero);
		return (false);
	}

	/* Shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
		huge_ralloc_no_move_shrink(ptr, oldsize, usize);
		return (false);
	}

	/* Attempt to expand the allocation in-place. */
	if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
		if (extra == 0)
			return (true);

		/* Try again, this time without extra. */
		return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
	}
	return (false);
}
Example #11
void *huge_alloc(struct thread_cache *cache, size_t size, size_t alignment) {
    size_t real_size = CHUNK_CEILING(size);
    struct arena *arena;
    void *chunk = huge_chunk_alloc(cache, real_size, alignment, &arena);
    if (unlikely(!chunk)) {
        return NULL;
    }

    extent_tree *huge = acquire_huge(arena);

    struct extent_node *node = node_alloc(get_huge_nodes(arena));
    if (unlikely(!node)) {
        chunk_free(get_recycler(arena), chunk, real_size);
        chunk = NULL;
    } else {
        node->size = real_size;
        node->addr = chunk;
        extent_tree_ad_insert(huge, node);
    }

    release_huge(arena);
    maybe_unlock_arena(arena);
    return chunk;
}
Example #12
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment >= chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
			/* Trim trailing space. */
			assert(trailsize < alloc_size);
			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			    trailsize);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}
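
To make the over-allocate-and-trim comment at the top of this example concrete, here is a standalone walk-through of the arithmetic with made-up numbers (4 MiB chunks, a 6 MiB request with 16 MiB alignment, and an arbitrary chunk-aligned but misaligned return address); none of this is jemalloc code, it only replays the calculation the function performs.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative chunk geometry; real values come from jemalloc's config. */
#define chunksize	((size_t)1 << 22)	/* assume 4 MiB chunks */
#define chunksize_mask	(chunksize - 1)
#define CHUNK_CEILING(s)	(((s) + chunksize_mask) & ~chunksize_mask)

int
main(void)
{
	size_t size = (size_t)6 << 20;		/* 6 MiB request */
	size_t alignment = (size_t)16 << 20;	/* 16 MiB alignment */
	size_t chunk_size = CHUNK_CEILING(size);		/* 8 MiB */
	size_t alloc_size = (size >= alignment) ?
	    chunk_size + alignment - chunksize :
	    (alignment << 1) - chunksize;			/* 28 MiB */

	/*
	 * Pretend chunk_alloc() returned a chunk-aligned address that is
	 * not alignment-aligned.
	 */
	uintptr_t ret = (uintptr_t)52 << 20;			/* 52 MiB */
	size_t offset = ret & (alignment - 1);			/* 4 MiB */
	assert((offset & chunksize_mask) == 0);

	/*
	 * Trim the lead so the result lands on an alignment boundary, then
	 * whatever remains beyond chunk_size becomes the trailing trim.
	 */
	uintptr_t aligned = ret + (alignment - offset);		/* 64 MiB */
	size_t trailsize = alloc_size - (alignment - offset) - chunk_size;

	assert((aligned & (alignment - 1)) == 0);
	printf("alloc_size=%zu MiB lead_trim=%zu MiB trail_trim=%zu MiB\n",
	    alloc_size >> 20, (alignment - offset) >> 20, trailsize >> 20);
	return (0);
}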