Example #1
0
/*
 * Free the huge allocation that starts at ptr.  The backing chunk is
 * returned to the chunk layer only when unmap is true; the tracking
 * extent node is always released.
 */
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t key;
	extent_node_t *node;

	/* Look the allocation up by address and detach it from the tree. */
	key.addr = ptr;

	malloc_mutex_lock(&huge_mtx);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif
	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
		/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		/* Junk-fill so stale reads of freed memory are obvious. */
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
		chunk_dealloc(node->addr, node->size);
	}

	base_node_dealloc(node);
}
Example #2
0
/*
 * Release the huge allocation anchored at ptr: detach its extent node
 * from the global tree, update stats, optionally junk-fill, then hand
 * the chunk back to the chunk layer and free the node itself.
 */
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t lookup;
	extent_node_t *node;

	lookup.addr = ptr;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	node = extent_tree_ad_search(&huge, &lookup);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	/* Junk-fill only when the pages are about to be unmapped. */
	if (unmap && config_fill && config_dss) {
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
	}

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}
Example #3
0
/*
 * Allocate a chunk-aligned huge region of at least size bytes with the
 * requested alignment.  Returns NULL on size_t wrap-around or OOM.
 */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *chunk;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Round the request up to a whole number of chunks. */
	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Pass a copy of zero to chunk_alloc() so that the caller's original
	 * request is still known when making junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	chunk = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (chunk == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Record the chunk in the tree of huge allocations. */
	node->addr = chunk;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	/* Debug fill: junk (0xa5), or zero only if not already zeroed. */
	if (config_fill && !zero) {
		if (opt_junk)
			memset(chunk, 0xa5, csize);
		else if (opt_zero && !is_zeroed)
			memset(chunk, 0, csize);
	}

	return (chunk);
}
Example #4
0
File: huge.c  Project: Abioy/windmill
/*
 * Allocate one or more contiguous chunks to satisfy a huge request.
 * Returns NULL when the rounded size wraps around size_t, or when the
 * extent node or chunk allocation fails.
 */
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * NOTE(review): zero is passed by address, so chunk_alloc() can flip
	 * it to true for pre-zeroed memory; that also suppresses the junk
	 * fill below -- confirm this is the intended interaction (newer
	 * jemalloc versions pass a separate is_zeroed copy instead).
	 */
	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	/* Debug fill: junk (0xa5) or explicit zeroing, per opt_* flags. */
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}
Example #5
0
/*
 * Attempt to release a chunk that may have been allocated from the DSS.
 * Returns false when the chunk belonged to the DSS and was handled
 * (recorded for reuse and/or the break shrunk); true when the chunk
 * lies outside the DSS and the caller must dispose of it another way.
 */
bool
chunk_dealloc_dss(void *chunk, size_t size)
{
	bool outside_dss = true;

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base &&
	    (uintptr_t)chunk < (uintptr_t)dss_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_dss_record(chunk, size);
		if (node != NULL) {
			chunk = node->addr;
			size = node->size;
		}

		/* Get the current end of the DSS. */
		dss_max = sbrk(0);

		/*
		 * Try to shrink the DSS if this chunk is at the end of the
		 * DSS.  The sbrk() call here is subject to a race condition
		 * with threads that use brk(2) or sbrk(2) directly, but the
		 * alternative would be to leak memory for the sake of poorly
		 * designed multi-threaded programs.
		 */
		if ((void *)((uintptr_t)chunk + size) == dss_max &&
		    (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
			/* Success: the break moved back past this chunk. */
			dss_max = (void *)((intptr_t)dss_prev -
			    (intptr_t)size);

			if (node != NULL) {
				/* Coalesced range is gone; drop its node. */
				extent_tree_szad_remove(&dss_chunks_szad,
				    node);
				extent_tree_ad_remove(&dss_chunks_ad, node);
				base_node_dealloc(node);
			}
		} else {
			/* Keep the range but let the kernel reclaim pages. */
			madvise(chunk, size, MADV_DONTNEED);
		}

		outside_dss = false;
	}
	malloc_mutex_unlock(&dss_mtx);

	return (outside_dss);
}
Example #6
0
/*
 * Attempt to release a chunk that may have been allocated from the swap
 * region.  Returns false when the chunk belonged to the swap region and
 * was recorded/recycled; true when the chunk lies outside it.
 */
bool
chunk_dealloc_swap(void *chunk, size_t size)
{
	bool outside_swap = true;

	assert(swap_enabled);

	malloc_mutex_lock(&swap_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)swap_base &&
	    (uintptr_t)chunk < (uintptr_t)swap_max) {
		extent_node_t *node;

		/* Try to coalesce with other unused chunks. */
		node = chunk_dealloc_swap_record(chunk, size);
		if (node != NULL) {
			chunk = node->addr;
			size = node->size;
		}

		/*
		 * Try to shrink the in-use memory if this chunk is at the end
		 * of the in-use memory.
		 */
		if ((void *)((uintptr_t)chunk + size) == swap_end) {
			swap_end = (void *)((uintptr_t)swap_end - size);

			if (node != NULL) {
				/* Coalesced range is gone; drop its node. */
				extent_tree_szad_remove(&swap_chunks_szad,
				    node);
				extent_tree_ad_remove(&swap_chunks_ad, node);
				base_node_dealloc(node);
			}
		} else {
			/* Keep the pages mapped but reclaimable. */
			madvise(chunk, size, MADV_DONTNEED);
		}

#ifdef JEMALLOC_STATS
		swap_avail += size;
#endif
		outside_swap = false;
	}
	malloc_mutex_unlock(&swap_mtx);

	return (outside_swap);
}
Example #7
0
/*
 * Free the huge allocation that starts at ptr.  With
 * JEMALLOC_ENABLE_MEMKIND the tree key includes a partition id, so the
 * lookup probes each partition in turn until a matching node is found.
 */
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
#ifdef JEMALLOC_ENABLE_MEMKIND
	/*
	 * NOTE(review): linear probe over partition ids 0..255 (see FIXME
	 * below).  A miss in every partition leaves node NULL and trips the
	 * assert further down -- confirm that callers only pass pointers
	 * that are tracked here.
	 */
	key.partition = -1;
	do {
		key.partition++;
#endif
		node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
	} while ((node == NULL || node->partition != key.partition) &&
		 key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif

	assert(node != NULL);
	assert(node->addr == ptr);

	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	/* Delegate junk filling when the pages are about to be unmapped. */
	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap
#ifdef JEMALLOC_ENABLE_MEMKIND
, key.partition
#endif
);

	base_node_dealloc(node);
}
Example #8
0
/*
 * Satisfy a chunk request of the given size from the tree of previously
 * freed swap chunks, if possible.  Returns the recycled chunk (zeroed
 * when *zero is true), or NULL when no cached chunk is large enough.
 */
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
    extent_node_t key;
    extent_node_t *node;
    void *ret;

    key.addr = NULL;
    key.size = size;

    malloc_mutex_lock(&swap_mtx);
    node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
    if (node == NULL) {
        /* No cached chunk of sufficient size. */
        malloc_mutex_unlock(&swap_mtx);
        return (NULL);
    }

    ret = node->addr;

    /* Remove node from the tree. */
    extent_tree_szad_remove(&swap_chunks_szad, node);
    if (node->size == size) {
        /* Exact fit: the whole cached range is consumed. */
        extent_tree_ad_remove(&swap_chunks_ad, node);
        base_node_dealloc(node);
    } else {
        /*
         * Insert the remainder of node's address range as a smaller
         * chunk.  Its position within swap_chunks_ad does not change.
         */
        assert(node->size > size);
        node->addr = (void *)((uintptr_t)node->addr + size);
        node->size -= size;
        extent_tree_szad_insert(&swap_chunks_szad, node);
    }
#ifdef JEMALLOC_STATS
    swap_avail -= size;
#endif
    malloc_mutex_unlock(&swap_mtx);

    if (*zero)
        memset(ret, 0, size);
    return (ret);
}
Example #9
0
/*
 * Record a freed swap chunk in the address- and size-ordered trees,
 * coalescing with adjacent free ranges when possible.  Entered with
 * swap_mtx held; the lock is dropped temporarily around
 * base_node_alloc() to avoid self-deadlock.  Returns the node that now
 * describes the (possibly coalesced) range, or NULL if node allocation
 * failed.
 */
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
    extent_node_t *xnode, *node, *prev, key;

    xnode = NULL;
    while (true) {
        /* Look for a free range beginning exactly where chunk ends. */
        key.addr = (void *)((uintptr_t)chunk + size);
        node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
        /* Try to coalesce forward. */
        if (node != NULL && node->addr == key.addr) {
            /*
             * Coalesce chunk with the following address range.
             * This does not change the position within
             * swap_chunks_ad, so only remove/insert from/into
             * swap_chunks_szad.
             */
            extent_tree_szad_remove(&swap_chunks_szad, node);
            node->addr = chunk;
            node->size += size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        } else if (xnode == NULL) {
            /*
             * It is possible that base_node_alloc() will cause a
             * new base chunk to be allocated, so take care not to
             * deadlock on swap_mtx, and recover if another thread
             * deallocates an adjacent chunk while this one is busy
             * allocating xnode.
             */
            malloc_mutex_unlock(&swap_mtx);
            xnode = base_node_alloc();
            malloc_mutex_lock(&swap_mtx);
            if (xnode == NULL)
                return (NULL);
        } else {
            /* Coalescing forward failed, so insert a new node. */
            node = xnode;
            xnode = NULL;
            node->addr = chunk;
            node->size = size;
            extent_tree_ad_insert(&swap_chunks_ad, node);
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        }
    }
    /* Discard xnode if it ended up unused due to a race. */
    if (xnode != NULL)
        base_node_dealloc(xnode);

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(&swap_chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
            chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within swap_chunks_ad, so only
         * remove/insert node from/into swap_chunks_szad.
         */
        extent_tree_szad_remove(&swap_chunks_szad, prev);
        extent_tree_ad_remove(&swap_chunks_ad, prev);

        /* node absorbs prev's range; reinsert with the new size. */
        extent_tree_szad_remove(&swap_chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        extent_tree_szad_insert(&swap_chunks_szad, node);

        base_node_dealloc(prev);
    }

    return (node);
}
Example #10
0
/* Only handles large allocations that require more than chunk alignment. */
/*
 * Allocate a huge region whose base address is aligned to `alignment`
 * (which must be at least one chunk).  Over-allocates, then trims the
 * leading and trailing chunks that fall outside the aligned range.
 * Returns NULL on OOM.
 */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment >= chunksize);

	chunk_size = CHUNK_CEILING(size);

	/*
	 * NOTE(review): chunk_size + alignment and (alignment << 1) can wrap
	 * size_t for very large requests -- confirm callers bound size and
	 * alignment before reaching here.
	 */
	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * NOTE(review): zero is passed by address, so chunk_alloc() may set
	 * it to true for pre-zeroed memory, which also suppresses the junk
	 * fill below -- confirm this is intended.
	 */
	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* offset is the distance from ret up to the next aligned address. */
	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		/* Advance to the first aligned address in the mapping. */
		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
		    /* Trim trailing space. */
		    assert(trailsize < alloc_size);
		    chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			trailsize);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	/* Debug fill: junk (0xa5) or explicit zeroing, per opt_* flags. */
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}