Example no. 1
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}
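As a worked instance of the shrink arithmetic above, the self-contained sketch below recomputes cdiff and sdiff for a sample shrink. The 2 MiB chunk size, the SKETCH_* macro names, and the sample sizes are assumptions for illustration only, not jemalloc's internal definitions.

/* Worked-example sketch (not jemalloc code): assumes a 2 MiB chunk size and
 * re-derives the CHUNK_CEILING arithmetic locally. */
#include <stdio.h>
#include <stddef.h>

#define SKETCH_CHUNKSIZE  ((size_t)2 << 20)             /* assumed 2 MiB chunks */
#define SKETCH_CHUNK_MASK (SKETCH_CHUNKSIZE - 1)
#define SKETCH_CHUNK_CEILING(s) (((s) + SKETCH_CHUNK_MASK) & ~SKETCH_CHUNK_MASK)

int
main(void)
{
	size_t oldsize = (size_t)10 << 20;  /* 10 MiB huge allocation */
	size_t usize = (size_t)5 << 20;     /* shrunk to 5 MiB usable size */

	/* Whole trailing chunks that get split off and later zapped. */
	size_t cdiff = SKETCH_CHUNK_CEILING(oldsize) - SKETCH_CHUNK_CEILING(usize);
	/* Raw byte tail that gets junk-filled or purged. */
	size_t sdiff = oldsize - usize;

	printf("cdiff = %zu MiB, sdiff = %zu MiB\n", cdiff >> 20, sdiff >> 20);
	/* Prints: cdiff = 4 MiB, sdiff = 5 MiB */
	return 0;
}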
Example no. 2
/* je_iterate calls callback for each allocation found in the memory region
 * between [base, base+size).  base will be rounded down to the jemalloc
 * chunk size, and base+size will be rounded up to the chunk size.  If no memory
 * managed by jemalloc is found in the requested region, je_iterate returns -1
 * and sets errno to EINVAL.
 *
 * je_iterate must be called when no allocations are in progress, either
 * when single-threaded (for example just after a fork), or between
 * jemalloc_prefork() and jemalloc_postfork_parent().  The callback must
 * not attempt to allocate with jemalloc.
 */
int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {

  int error = EINVAL;
  uintptr_t ptr = (uintptr_t)CHUNK_ADDR2BASE(base);
  uintptr_t end = CHUNK_CEILING(base + size);

  while (ptr < end) {
    extent_node_t *node;

    node = chunk_lookup((void *)ptr, false);
    if (node == NULL) {
      ptr += chunksize;
      continue;
    }

    assert(extent_node_achunk_get(node) ||
        (uintptr_t)extent_node_addr_get(node) == ptr);

    error = 0;
    if (extent_node_achunk_get(node)) {
      /* Chunk */
      arena_chunk_t *chunk = (arena_chunk_t *)ptr;
      ptr += chunksize;

      if (&chunk->node != node) {
          /* Empty retained chunk */
          continue;
      }

      je_iterate_chunk(chunk, callback, arg);
    } else if ((uintptr_t)extent_node_addr_get(node) == ptr) {
      /* Huge allocation */
      callback(ptr, extent_node_size_get(node), arg);
      ptr += extent_node_size_get(node);
    }
  }

  if (error) {
    set_errno(error);
    return -1;
  }

  return 0;
}
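A hypothetical caller of je_iterate, following the constraints stated in the comment above, might look like the sketch below. The tally structure, the callback and helper names, and the fork()-time usage are illustrative assumptions; only the je_iterate prototype and the no-allocation rule come from the source.

/* Hypothetical usage sketch for je_iterate (names other than je_iterate are
 * invented for illustration).  Assumes a jemalloc build that exports
 * je_iterate, and that no other thread is allocating, e.g. the child right
 * after fork(). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void *arg), void *arg);

struct tally {
	size_t count;
	size_t bytes;
};

static void
tally_cb(uintptr_t ptr, size_t size, void *arg)
{
	struct tally *t = (struct tally *)arg;

	(void)ptr;
	t->count++;
	t->bytes += size;
	/* Per the comment above: must not allocate with jemalloc here. */
}

static void
tally_region(uintptr_t base, size_t len)
{
	struct tally t = {0, 0};

	if (je_iterate(base, len, tally_cb, &t) != 0) {
		/* errno is EINVAL: no jemalloc-managed memory in the region. */
		perror("je_iterate");
		return;
	}
	fprintf(stderr, "%zu allocations, %zu bytes\n", t.count, t.bytes);
}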
Example no. 3
static void
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_purge_t *chunk_purge;
	bool zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);

	malloc_mutex_lock(&arena->lock);
	chunk_purge = arena->chunk_purge;
	malloc_mutex_unlock(&arena->lock);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		zeroed = !chunk_purge_wrapper(arena, chunk_purge,
		    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
		    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			zeroed = false;
		}
	} else
		zeroed = true;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Clear node's zeroed field if zeroing failed above. */
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
}
Example no. 4
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
    , tcache_t *tcache
#endif
    )
{
	void *flush, *deferred, *ptr;
	unsigned i, nflush, ndeferred;
	bool first_pass;

	assert(binind < nbins);
	assert(rem <= tbin->ncached);
	assert(tbin->ncached > 0 || tbin->avail == NULL);

	for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
	    true; flush != NULL; flush = deferred, nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
		arena_t *arena = chunk->arena;
		arena_bin_t *bin = &arena->bins[binind];

#ifdef JEMALLOC_PROF
		if (arena == tcache->arena) {
			malloc_mutex_lock(&arena->lock);
			arena_prof_accum(arena, tcache->prof_accumbytes);
			malloc_mutex_unlock(&arena->lock);
			tcache->prof_accumbytes = 0;
		}
#endif

		malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
		if (arena == tcache->arena) {
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
#endif
		deferred = NULL;
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = flush;
			assert(ptr != NULL);
			flush = *(void **)ptr;
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (chunk->arena == arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> PAGE_SHIFT;
				arena_chunk_map_t *mapelm =
				    &chunk->map[pageind-map_bias];
				arena_dalloc_bin(arena, chunk, ptr, mapelm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(void **)ptr = deferred;
				deferred = ptr;
				ndeferred++;
			}
		}