Example #1
0
/*
 * Return the chunk at [chunk, chunk+size) to the DSS (sbrk-backed) chunk
 * manager.  Returns false if the chunk lies within [dss_base, dss_max) and
 * was recorded/released, true otherwise (caller should fall back to another
 * deallocation path).  Acquires dss_mtx, so the caller must not hold it.
 */
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
	bool ret;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_dss_record(chunk, size);
		if (node != NULL) {
			/* Coalesced; operate on the merged extent instead. */
			chunk = node->addr;
			size = node->size;
		}

		/* Get the current end of the DSS. */
		dss_max = sbrk(0);

		/*
		 * Try to shrink the DSS if this chunk is at the end of the
		 * DSS.  The sbrk() call here is subject to a race condition
		 * with threads that use brk(2) or sbrk(2) directly, but the
		 * alternative would be to leak memory for the sake of poorly
		 * designed multi-threaded programs.
		 */
		if ((void *)((uintptr_t)chunk + size) == dss_max
		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
			/* Success. */
			dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);

			if (node != NULL) {
				/*
				 * The extent was returned to the OS, so drop
				 * its bookkeeping from both trees.
				 */
				extent_tree_szad_remove(&dss_chunks_szad, node);
				extent_tree_ad_remove(&dss_chunks_ad, node);
				base_node_dealloc(node);
			}
		} else
			/*
			 * Cannot shrink the DSS; keep the record and hint the
			 * kernel that the pages may be reclaimed.
			 */
			madvise(chunk, size, MADV_DONTNEED);

		ret = false;
		goto RETURN;
	}

	/* Not a DSS-managed chunk. */
	ret = true;
RETURN:
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}
Example #2
0
/*
 * Return the chunk at [chunk, chunk+size) to the swap-backed chunk manager.
 * Returns false if the chunk lies within [swap_base, swap_max) and was
 * recorded as available, true otherwise (caller should try another
 * deallocation path).  Acquires swap_mtx, so the caller must not hold it.
 */
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
    bool ret = true;

    assert(swap_enabled);

    malloc_mutex_lock(&swap_mtx);
    if ((uintptr_t)chunk >= (uintptr_t)swap_base
            && (uintptr_t)chunk < (uintptr_t)swap_max) {
        extent_node_t *node;

        /* Coalesce with any adjacent unused chunks. */
        node = chunk_dealloc_swap_record(chunk, size);
        if (node != NULL) {
            /* Operate on the merged extent from here on. */
            chunk = node->addr;
            size = node->size;
        }

        /*
         * When the (possibly coalesced) chunk abuts the end of the
         * in-use region, shrink that region rather than keeping the
         * chunk cached.
         */
        if ((void *)((uintptr_t)chunk + size) != swap_end) {
            /* Interior chunk; hint the kernel to reclaim the pages. */
            madvise(chunk, size, MADV_DONTNEED);
        } else {
            swap_end = (void *)((uintptr_t)swap_end - size);

            if (node != NULL) {
                /* Extent fully released; discard its bookkeeping. */
                extent_tree_szad_remove(&swap_chunks_szad, node);
                extent_tree_ad_remove(&swap_chunks_ad, node);
                base_node_dealloc(node);
            }
        }

#ifdef JEMALLOC_STATS
        swap_avail += size;
#endif
        ret = false;
    }
    malloc_mutex_unlock(&swap_mtx);

    return (ret);
}
Example #3
0
/*
 * Try to satisfy a chunk allocation of exactly |size| bytes from the cache
 * of previously deallocated swap chunks.  Returns the chunk address, zeroed
 * if *zero is true, or NULL if no cached extent is large enough.
 */
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
    extent_node_t *node, key;
    void *ret;

    key.addr = NULL;
    key.size = size;
    malloc_mutex_lock(&swap_mtx);
    node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
    if (node == NULL) {
        /* No cached extent is large enough. */
        malloc_mutex_unlock(&swap_mtx);
        return (NULL);
    }

    ret = node->addr;

    /* Detach the extent (or its leading |size| bytes) from the trees. */
    extent_tree_szad_remove(&swap_chunks_szad, node);
    if (node->size != size) {
        /*
         * Carve the request off the front and reinsert the remainder
         * as a smaller extent.  Its position within swap_chunks_ad
         * does not change, so only swap_chunks_szad needs updating.
         */
        assert(node->size > size);
        node->addr = (void *)((uintptr_t)node->addr + size);
        node->size -= size;
        extent_tree_szad_insert(&swap_chunks_szad, node);
    } else {
        /* Exact fit; the node is no longer needed. */
        extent_tree_ad_remove(&swap_chunks_ad, node);
        base_node_dealloc(node);
    }
#ifdef JEMALLOC_STATS
    swap_avail -= size;
#endif
    malloc_mutex_unlock(&swap_mtx);

    if (*zero)
        memset(ret, 0, size);
    return (ret);
}
Example #4
0
/*
 * Record the chunk at [chunk, chunk+size) as unused, coalescing it with
 * adjacent free extents tracked in swap_chunks_ad/swap_chunks_szad.
 * Returns the node describing the (possibly merged) free run, or NULL if
 * an extent_node_t could not be allocated.  Called with swap_mtx held;
 * NOTE: temporarily drops and reacquires swap_mtx around
 * base_node_alloc(), so tree state may change across that window.
 */
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
    extent_node_t *xnode, *node, *prev, key;

    /* xnode is a pre-allocated spare node, created lazily on demand. */
    xnode = NULL;
    while (true) {
        key.addr = (void *)((uintptr_t)chunk + size);
        node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
        /* Try to coalesce forward. */
        if (node != NULL && node->addr == key.addr) {
            /*
             * Coalesce chunk with the following address range.
             * This does not change the position within
             * swap_chunks_ad, so only remove/insert from/into
             * swap_chunks_szad.
             */
            extent_tree_szad_remove(&swap_chunks_szad, node);
            node->addr = chunk;
            node->size += size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        } else if (xnode == NULL) {
            /*
             * It is possible that base_node_alloc() will cause a
             * new base chunk to be allocated, so take care not to
             * deadlock on swap_mtx, and recover if another thread
             * deallocates an adjacent chunk while this one is busy
             * allocating xnode.  (The loop re-searches the tree
             * after the lock is reacquired.)
             */
            malloc_mutex_unlock(&swap_mtx);
            xnode = base_node_alloc();
            malloc_mutex_lock(&swap_mtx);
            if (xnode == NULL)
                return (NULL);
        } else {
            /* Coalescing forward failed, so insert a new node. */
            node = xnode;
            xnode = NULL;
            node->addr = chunk;
            node->size = size;
            extent_tree_ad_insert(&swap_chunks_ad, node);
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        }
    }
    /* Discard xnode if it ended up unused due to a race. */
    if (xnode != NULL)
        base_node_dealloc(xnode);

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(&swap_chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
            chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within swap_chunks_ad, so only
         * remove/insert node from/into swap_chunks_szad.
         */
        extent_tree_szad_remove(&swap_chunks_szad, prev);
        extent_tree_ad_remove(&swap_chunks_ad, prev);

        extent_tree_szad_remove(&swap_chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        extent_tree_szad_insert(&swap_chunks_szad, node);

        /* prev has been absorbed into node; release it. */
        base_node_dealloc(prev);
    }

    return (node);
}