Example #1
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}
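All of these examples share the same lookup idiom: a stack-allocated extent_node_t is used as a search key with only its addr field filled in, extent_tree_ad_search() finds the registered node with that address in the address-ordered tree, and the node is then unlinked with extent_tree_ad_remove(). The stand-alone sketch below mimics that idiom with a sorted array and bsearch() standing in for jemalloc's red-black tree; the toy_node_t type and the data are invented purely for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for jemalloc's extent_node_t. */
typedef struct {
	void	*addr;
	size_t	size;
} toy_node_t;

/* Address-ordered comparison, the ordering the *_ad_* tree functions use. */
static int
toy_node_ad_cmp(const void *a, const void *b)
{
	uintptr_t aa = (uintptr_t)((const toy_node_t *)a)->addr;
	uintptr_t ba = (uintptr_t)((const toy_node_t *)b)->addr;

	return ((aa > ba) - (aa < ba));
}

int
main(void)
{
	static char blob[4096];
	/* Three registered "huge" allocations, kept sorted by address. */
	toy_node_t huge[] = {
		{ blob,        1024 },
		{ blob + 1024, 2048 },
		{ blob + 3072, 1024 },
	};
	toy_node_t key, *node;

	/* Same idiom as huge_dalloc(): the key carries only the address. */
	key.addr = blob + 1024;
	node = bsearch(&key, huge, 3, sizeof(huge[0]), toy_node_ad_cmp);
	assert(node != NULL);
	assert(node->addr == key.addr);
	printf("found node of size %zu\n", node->size);
	return (0);
}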
Example #2
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
		/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
		chunk_dealloc(node->addr, node->size);
	}

	base_node_dealloc(node);
}
Example #3
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
	bool ret;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_dss_record(chunk, size);
		if (node != NULL) {
			chunk = node->addr;
			size = node->size;
		}

		/* Get the current end of the DSS. */
		dss_max = sbrk(0);

		/*
		 * Try to shrink the DSS if this chunk is at the end of the
		 * DSS.  The sbrk() call here is subject to a race condition
		 * with threads that use brk(2) or sbrk(2) directly, but the
		 * alternative would be to leak memory for the sake of poorly
		 * designed multi-threaded programs.
		 */
		if ((void *)((uintptr_t)chunk + size) == dss_max
		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
			/* Success. */
			dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);

			if (node != NULL) {
				extent_tree_szad_remove(&dss_chunks_szad, node);
				extent_tree_ad_remove(&dss_chunks_ad, node);
				base_node_dealloc(node);
			}
		} else
			madvise(chunk, size, MADV_DONTNEED);

		ret = false;
		goto RETURN;
	}

	ret = true;
RETURN:
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}
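The pivotal check in chunk_dealloc_dss() is whether the freed chunk ends exactly at the current break: only then can sbrk() be called with a negative increment to hand address space back to the kernel; otherwise the pages are merely discarded with madvise() while the mapping is kept for reuse. A hedged, stand-alone sketch of that decision follows; the region bookkeeping is invented for illustration and is not jemalloc's data structure, and MADV_DONTNEED is the Linux-flavoured way of dropping the pages.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Hypothetical end-of-region pointer, playing the role of dss_max. */
static void *region_max;

/*
 * Mirror the decision in chunk_dealloc_dss(): shrink the region only if
 * the freed chunk is flush with its end; otherwise drop the physical
 * pages but keep the address range mapped.
 */
static bool
region_dealloc(void *chunk, size_t size)
{
	if ((void *)((uintptr_t)chunk + size) == region_max) {
		region_max = chunk;	/* region now ends where the chunk began */
		return (true);		/* address space actually released */
	}
	madvise(chunk, size, MADV_DONTNEED);
	return (false);			/* pages dropped, range still mapped */
}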
Example #4
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
    bool ret;

    assert(swap_enabled);

    malloc_mutex_lock(&swap_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)swap_base
            && (uintptr_t)chunk < (uintptr_t)swap_max) {
        extent_node_t *node;

        /* Try to coalesce with other unused chunks. */
        node = chunk_dealloc_swap_record(chunk, size);
        if (node != NULL) {
            chunk = node->addr;
            size = node->size;
        }

        /*
         * Try to shrink the in-use memory if this chunk is at the end
         * of the in-use memory.
         */
        if ((void *)((uintptr_t)chunk + size) == swap_end) {
            swap_end = (void *)((uintptr_t)swap_end - size);

            if (node != NULL) {
                extent_tree_szad_remove(&swap_chunks_szad, node);
                extent_tree_ad_remove(&swap_chunks_ad, node);
                base_node_dealloc(node);
            }
        } else
            madvise(chunk, size, MADV_DONTNEED);

#ifdef JEMALLOC_STATS
        swap_avail += size;
#endif
        ret = false;
        goto RETURN;
    }

    ret = true;
RETURN:
    malloc_mutex_unlock(&swap_mtx);
    return (ret);
}
Example #5
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
#ifdef JEMALLOC_ENABLE_MEMKIND
	key.partition = -1;
	do {
		key.partition++;
#endif
		node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
	} while ((node == NULL || node->partition != key.partition) &&
		 key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif

	assert(node != NULL);
	assert(node->addr == ptr);

	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap
#ifdef JEMALLOC_ENABLE_MEMKIND
	    , key.partition
#endif
	    );

	base_node_dealloc(node);
}
Example #6
void
huge_dalloc(pool_t *pool, void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&pool->huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&pool->huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&pool->huge, node);

	malloc_mutex_unlock(&pool->huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(pool, node);
}
Example #7
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
    extent_node_t *node, key;

    key.addr = NULL;
    key.size = size;
    malloc_mutex_lock(&swap_mtx);
    node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
    if (node != NULL) {
        void *ret = node->addr;

        /* Remove node from the tree. */
        extent_tree_szad_remove(&swap_chunks_szad, node);
        if (node->size == size) {
            extent_tree_ad_remove(&swap_chunks_ad, node);
            base_node_dealloc(node);
        } else {
            /*
             * Insert the remainder of node's address range as a
             * smaller chunk.  Its position within swap_chunks_ad
             * does not change.
             */
            assert(node->size > size);
            node->addr = (void *)((uintptr_t)node->addr + size);
            node->size -= size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
        }
#ifdef JEMALLOC_STATS
        swap_avail -= size;
#endif
        malloc_mutex_unlock(&swap_mtx);

        if (*zero)
            memset(ret, 0, size);
        return (ret);
    }
    malloc_mutex_unlock(&swap_mtx);

    return (NULL);
}
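chunk_recycle_swap() is the counterpart of the coalescing done on the deallocation path: extent_tree_szad_nsearch() finds the smallest free extent that is large enough, and if it is bigger than requested the leftover tail is kept as a smaller extent starting `size` bytes higher. A toy sketch of that split follows, with an invented range_t standing in for extent_node_t; a real caller would retire the range once its size reaches zero, as the example above does with base_node_dealloc().

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t	addr;
	size_t		size;
} range_t;

/*
 * Carve `size` bytes off the front of a free range and return the start
 * of the carved piece.  The remainder, if any, stays behind shifted up
 * by `size` and smaller by the same amount.
 */
static uintptr_t
range_carve(range_t *r, size_t size)
{
	uintptr_t ret = r->addr;

	assert(r->size >= size);
	r->addr += size;
	r->size -= size;
	return (ret);
}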
Example #8
void huge_free(void *ptr) {
    struct extent_node *node, key;
    key.addr = ptr;
    struct arena *arena = get_huge_arena(ptr);

    maybe_lock_arena(arena);
    extent_tree *huge = acquire_huge(arena);

    node = extent_tree_ad_search(huge, &key);
    assert(node);
    size_t size = node->size;
    extent_tree_ad_remove(huge, node);
    node_free(get_huge_nodes(arena), node);
    release_huge(arena);

    if (purge_ratio >= 0) {
        memory_decommit(ptr, size);
    }
    chunk_free(get_recycler(arena), ptr, size);
    maybe_unlock_arena(arena);
}
Example #9
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
    extent_node_t *xnode, *node, *prev, key;

    xnode = NULL;
    while (true) {
        key.addr = (void *)((uintptr_t)chunk + size);
        node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
        /* Try to coalesce forward. */
        if (node != NULL && node->addr == key.addr) {
            /*
             * Coalesce chunk with the following address range.
             * This does not change the position within
             * swap_chunks_ad, so only remove/insert from/into
             * swap_chunks_szad.
             */
            extent_tree_szad_remove(&swap_chunks_szad, node);
            node->addr = chunk;
            node->size += size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        } else if (xnode == NULL) {
            /*
             * It is possible that base_node_alloc() will cause a
             * new base chunk to be allocated, so take care not to
             * deadlock on swap_mtx, and recover if another thread
             * deallocates an adjacent chunk while this one is busy
             * allocating xnode.
             */
            malloc_mutex_unlock(&swap_mtx);
            xnode = base_node_alloc();
            malloc_mutex_lock(&swap_mtx);
            if (xnode == NULL)
                return (NULL);
        } else {
            /* Coalescing forward failed, so insert a new node. */
            node = xnode;
            xnode = NULL;
            node->addr = chunk;
            node->size = size;
            extent_tree_ad_insert(&swap_chunks_ad, node);
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        }
    }
    /* Discard xnode if it ended up unused due to a race. */
    if (xnode != NULL)
        base_node_dealloc(xnode);

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(&swap_chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
            chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within swap_chunks_ad, so only
         * remove/insert node from/into swap_chunks_szad.
         */
        extent_tree_szad_remove(&swap_chunks_szad, prev);
        extent_tree_ad_remove(&swap_chunks_ad, prev);

        extent_tree_szad_remove(&swap_chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        extent_tree_szad_insert(&swap_chunks_szad, node);

        base_node_dealloc(prev);
    }

    return (node);
}
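chunk_dealloc_swap_record() is textbook address-ordered coalescing: first look for a neighbour that begins at chunk + size and merge forward, then look at the predecessor and merge backward when its end touches chunk. The sketch below reproduces the same arithmetic over a plain array of free ranges kept sorted by address; the types and the driver are invented for illustration and sidestep the tree and locking concerns of the real code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uintptr_t	addr;
	size_t		size;
} span_t;

/*
 * Insert the freed range [addr, addr + size) into an address-sorted
 * array of free spans, coalescing with the next and/or previous span
 * when they are exactly adjacent.  Returns the new span count.
 */
static size_t
span_free(span_t *spans, size_t n, uintptr_t addr, size_t size)
{
	size_t i = 0;

	while (i < n && spans[i].addr < addr)
		i++;

	if (i < n && spans[i].addr == addr + size) {
		/* Coalesce forward: the next span starts right after us. */
		spans[i].addr = addr;
		spans[i].size += size;
	} else {
		/* No forward neighbour: insert a new span at position i. */
		for (size_t j = n; j > i; j--)
			spans[j] = spans[j - 1];
		spans[i].addr = addr;
		spans[i].size = size;
		n++;
	}

	if (i > 0 && spans[i - 1].addr + spans[i - 1].size == spans[i].addr) {
		/* Coalesce backward: the previous span ends where we begin. */
		spans[i - 1].size += spans[i].size;
		for (size_t j = i; j + 1 < n; j++)
			spans[j] = spans[j + 1];
		n--;
	}
	return (n);
}

int
main(void)
{
	span_t spans[8] = { { 0x1000, 0x1000 }, { 0x4000, 0x1000 } };
	size_t n = 2;

	/* Freeing the middle range bridges both gaps into one span. */
	n = span_free(spans, n, 0x2000, 0x2000);
	printf("%zu span(s), first covers 0x%zx bytes\n", n, spans[0].size);
	return (0);
}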
Example #10
static void *huge_move_expand(struct thread_cache *cache, void *old_addr, size_t old_size, size_t new_size) {
    struct arena *arena;
    void *new_addr = huge_chunk_alloc(cache, new_size, CHUNK_SIZE, &arena);
    if (unlikely(!new_addr)) {
        return NULL;
    }

    bool gap = true;
    if (unlikely(memory_remap_fixed(old_addr, old_size, new_addr, new_size))) {
        memcpy(new_addr, old_addr, old_size);
        if (purge_ratio >= 0) {
            memory_decommit(old_addr, old_size);
        }
        gap = false;
    } else {
        // Attempt to fill the virtual memory hole. The kernel should provide a flag for preserving
        // the old mapping to avoid the possibility of this failing and creating fragmentation.
        //
        // https://lkml.org/lkml/2014/10/2/624
        void *extra = memory_map(old_addr, old_size, false);
        if (likely(extra)) {
            if (unlikely(extra != old_addr)) {
                memory_unmap(extra, old_size);
            } else {
                gap = false;
            }
        }
    }

    struct extent_node key;
    key.addr = old_addr;

    struct arena *old_arena = get_huge_arena(old_addr);

    extent_tree *huge = acquire_huge(old_arena);
    struct extent_node *node = extent_tree_ad_search(huge, &key);
    assert(node);
    extent_tree_ad_remove(huge, node);
    node->addr = new_addr;
    node->size = new_size;

    if (arena != old_arena) {
        release_huge(old_arena);
        huge = acquire_huge(arena);
    }

    extent_tree_ad_insert(huge, node);
    release_huge(arena);

    if (!gap) {
        if (arena != old_arena && old_arena) {
            mutex_lock(&old_arena->mutex);
        }
        chunk_free(get_recycler(old_arena), old_addr, old_size);
        if (arena != old_arena && old_arena) {
            mutex_unlock(&old_arena->mutex);
        }
    }

    maybe_unlock_arena(arena);
    return new_addr;
}
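huge_move_expand() relies on memory_remap_fixed() to move the old pages onto the newly reserved address without copying; on Linux that maps naturally onto mremap(2) with MREMAP_MAYMOVE | MREMAP_FIXED, and the memcpy() branch is the portable fallback when that fails. A hedged sketch of such a wrapper is below; the name, argument order and true-on-failure convention are assumptions made to match the call site above, not the allocator's actual code.

#define _GNU_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Move the mapping at old_addr onto new_addr, resizing it to new_size.
 * Linux-specific: MREMAP_FIXED atomically replaces whatever is mapped
 * at new_addr, so the destination range must already be reserved.
 * Returns true on failure, matching the unlikely(...) check above.
 */
static bool
memory_remap_fixed_sketch(void *old_addr, size_t old_size, void *new_addr,
    size_t new_size)
{
	void *p = mremap(old_addr, old_size, new_size,
	    MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);

	return (p == MAP_FAILED);
}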