Example #1
JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
{
	vaddr_t a_addr = (vaddr_t)extent_node_addr_get(a);
	vaddr_t b_addr = (vaddr_t)extent_node_addr_get(b);

	return ((a_addr > b_addr) - (a_addr < b_addr));
}
Example #2
JEMALLOC_INLINE_C int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

	return ((a_addr > b_addr) - (a_addr < b_addr));
}
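Both versions of the comparator rely on the branchless (a > b) - (a < b) idiom: it yields -1, 0, or 1 and, unlike subtracting the two addresses, cannot overflow or be truncated when the result is narrowed to int. A tiny standalone sketch of the idiom (the cmp_uintptr helper and main driver are illustrative, not jemalloc code):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: three-way compare without branches or
 * overflow, exactly as in extent_ad_comp() above. */
static int
cmp_uintptr(uintptr_t a, uintptr_t b)
{
	return ((a > b) - (a < b));
}

int
main(void)
{
	assert(cmp_uintptr(1, 2) == -1);
	assert(cmp_uintptr(2, 2) == 0);
	assert(cmp_uintptr(2, 1) == 1);
	return (0);
}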
Example #3
static bool
huge_node_set(const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
	return (chunk_register(ptr, node));
}
Example #4
/* je_iterate calls callback for each allocation found in the memory region
 * between [base, base+size).  base will be rounded down to the jemalloc
 * chunk size, and base+size will be rounded up to the chunk size.  If no memory
 * managed by jemalloc is found in the requested region, je_iterate returns -1
 * and sets errno to EINVAL.
 *
 * je_iterate must be called when no allocations are in progress, either
 * when single-threaded (for example just after a fork), or between
 * jemalloc_prefork() and jemalloc_postfork_parent().  The callback must
 * not attempt to allocate with jemalloc.
 */
int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {

  int error = EINVAL;
  uintptr_t ptr = (uintptr_t)CHUNK_ADDR2BASE(base);
  uintptr_t end = CHUNK_CEILING(base + size);

  while (ptr < end) {
    extent_node_t *node;

    node = chunk_lookup((void *)ptr, false);
    if (node == NULL) {
      ptr += chunksize;
      continue;
    }

    assert(extent_node_achunk_get(node) ||
        (uintptr_t)extent_node_addr_get(node) == ptr);

    error = 0;
    if (extent_node_achunk_get(node)) {
      /* Chunk */
      arena_chunk_t *chunk = (arena_chunk_t *)ptr;
      ptr += chunksize;

      if (&chunk->node != node) {
          /* Empty retained chunk */
          continue;
      }

      je_iterate_chunk(chunk, callback, arg);
    } else if ((uintptr_t)extent_node_addr_get(node) == ptr) {
      /* Huge allocation */
      callback(ptr, extent_node_size_get(node), arg);
      ptr += extent_node_size_get(node);
    }
  }

  if (error) {
    set_errno(error);
    return -1;
  }

  return 0;
}
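A minimal usage sketch for the interface documented above: the callback only updates preallocated counters, so it never allocates with jemalloc, and the caller is assumed to run while no allocations are in progress (for example in a child right after fork()). The count_cb/tally_region names are illustrative assumptions, not part of jemalloc.

/* Hypothetical callback: tallies allocations without allocating. */
static size_t heap_bytes;
static size_t heap_allocs;

static void
count_cb(uintptr_t ptr, size_t size, void* arg) {
  (void)ptr;
  (void)arg;
  heap_bytes += size;
  heap_allocs++;
}

/* Assumed to be called single-threaded, e.g. just after fork(). */
static int
tally_region(uintptr_t base, size_t size) {
  heap_bytes = 0;
  heap_allocs = 0;
  /* Returns -1 and sets errno to EINVAL if the region contains no
   * jemalloc-managed memory. */
  return je_iterate(base, size, count_cb, NULL);
}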
Example #5
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);
}
Example #6
JEMALLOC_INLINE_C int
extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
{
	int ret;
	size_t a_qsize = extent_quantize(extent_node_size_get(a));
	size_t b_qsize = extent_quantize(extent_node_size_get(b));

	/*
	 * Compare based on quantized size rather than size, in order to sort
	 * equally useful extents only by address.
	 */
	ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
	if (ret == 0) {
		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

		ret = (a_addr > b_addr) - (a_addr < b_addr);
	}

	return (ret);
}
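The comment in extent_szad_comp() is the important part: sizes are compared after quantization, so extents whose sizes land in the same quantized bucket tie on size and are ordered purely by address. A rough illustration using a hypothetical page-granularity stand-in for extent_quantize() (the real quantization rules are internal to jemalloc):

#include <stddef.h>

#define QUANTUM_PAGE ((size_t)4096)

/* Hypothetical stand-in: round a size up to a page multiple. */
static size_t
fake_quantize(size_t size)
{
	return ((size + QUANTUM_PAGE - 1) & ~(QUANTUM_PAGE - 1));
}

/*
 * fake_quantize(4097) == fake_quantize(6000) == 8192, so two extents
 * of those sizes would compare equal on the size term and fall
 * through to the address comparison, just as in extent_szad_comp().
 */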
Example #7
/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szsnad_remove(&base_avail_szsnad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szsnad_insert(&base_avail_szsnad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that is
		 * crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((vaddr_t)ret + csize) -
		    PAGE_CEILING((vaddr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}
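The CACHELINE_CEILING() step at the top of base_alloc() is what prevents false cache line sharing between base allocations: every request is padded to a whole number of cache lines before being carved out of the tree. A self-contained sketch of that rounding, assuming a 64-byte line (the LINE/LINE_MASK names are illustrative; jemalloc derives the real values from its CACHELINE constant):

#include <stddef.h>

#define LINE      ((size_t)64)
#define LINE_MASK (LINE - 1)

/* Round size up to the next cache-line multiple, in the spirit of
 * jemalloc's CACHELINE_CEILING(). */
static size_t
cacheline_ceiling(size_t size)
{
	return ((size + LINE_MASK) & ~LINE_MASK);
}

/*
 * cacheline_ceiling(1) == 64, cacheline_ceiling(64) == 64,
 * cacheline_ceiling(65) == 128.
 */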