Example #1
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
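
The overflow guard at the top relies on CHUNK_CEILING() returning 0 when rounding up would wrap size_t. A minimal, self-contained sketch of that behavior, assuming a hypothetical 4 MiB chunk size (the real macro and chunk size live in jemalloc's chunk headers):

#include <stdint.h>
#include <stdio.h>

#define CHUNKSIZE	((size_t)1 << 22)	/* hypothetical 4 MiB chunks */
#define CHUNKSIZE_MASK	(CHUNKSIZE - 1)
#define CHUNK_CEILING(s) (((s) + CHUNKSIZE_MASK) & ~CHUNKSIZE_MASK)

int
main(void)
{
	/* Normal case: size is rounded up to the next chunk boundary. */
	printf("%zu\n", CHUNK_CEILING((size_t)1));	/* 4194304 */

	/*
	 * Wrap-around case: (s + mask) overflows size_t and the masked
	 * result is 0, which huge_palloc() treats as "request too large".
	 */
	printf("%zu\n", CHUNK_CEILING(SIZE_MAX - 10));	/* 0 */
	return (0);
}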
Example #2
File: huge.c Project: KaiZhang666/nvml
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;
	pool_t *pool;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(arena);
	pool = arena->pool;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc(pool);
	if (node == NULL)
		return (NULL);

	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(pool, node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&pool->huge_mtx);
	extent_tree_ad_insert(&pool->huge, node);
	malloc_mutex_unlock(&pool->huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
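
In both jemalloc-derived examples, zero is copied into is_zeroed so that the fill logic at the end can tell "the caller asked for zeroed memory" apart from "the chunk happens to be zeroed already". A minimal sketch of that decision, with local stand-ins for the option flags (the real opt_junk/opt_zero are jemalloc globals):

#include <stdbool.h>
#include <string.h>

static void
fill_after_alloc(void *ret, size_t csize, bool zero, bool is_zeroed,
    bool opt_junk, bool opt_zero)
{
	if (zero)
		return;			/* caller requested zeroed memory; the chunk allocator handled it */
	if (opt_junk)
		memset(ret, 0xa5, csize);	/* junk fill to catch use of uninitialized memory */
	else if (opt_zero && !is_zeroed)
		memset(ret, 0, csize);		/* zero only if the chunk is not already zeroed */
}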
Example #3
File: huge.c Project: Abioy/windmill
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}
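
Example #1 gates the statistics update with if (config_stats), while this older version wraps it in #ifdef JEMALLOC_STATS. In later jemalloc, config_stats is a compile-time constant bool, so the branch is stripped by the optimizer while keeping a single preprocessor-free code path. A minimal sketch of the pattern, with hypothetical counters:

#include <stdbool.h>
#include <stddef.h>

#ifdef JEMALLOC_STATS
static const bool config_stats = true;
#else
static const bool config_stats = false;
#endif

static size_t huge_nmalloc;
static size_t huge_allocated;

static void
stats_update(size_t csize)
{
	/* Dead-code-eliminated entirely when config_stats is false. */
	if (config_stats) {
		huge_nmalloc++;
		huge_allocated += csize;
	}
}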
Example #4
void *huge_alloc(struct thread_cache *cache, size_t size, size_t alignment) {
    size_t real_size = CHUNK_CEILING(size);
    struct arena *arena;
    void *chunk = huge_chunk_alloc(cache, real_size, alignment, &arena);
    if (unlikely(!chunk)) {
        return NULL;
    }

    extent_tree *huge = acquire_huge(arena);

    struct extent_node *node = node_alloc(get_huge_nodes(arena));
    if (unlikely(!node)) {
        chunk_free(get_recycler(arena), chunk, real_size);
        chunk = NULL;
    } else {
        node->size = real_size;
        node->addr = chunk;
        extent_tree_ad_insert(huge, node);
    }

    release_huge(arena);
    maybe_unlock_arena(arena);
    return chunk;
}
Example #5
static extent_node_t *
chunk_dealloc_swap_record(void *chunk, size_t size)
{
    extent_node_t *xnode, *node, *prev, key;

    xnode = NULL;
    while (true) {
        key.addr = (void *)((uintptr_t)chunk + size);
        node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
        /* Try to coalesce forward. */
        if (node != NULL && node->addr == key.addr) {
            /*
             * Coalesce chunk with the following address range.
             * This does not change the position within
             * swap_chunks_ad, so only remove/insert from/into
             * swap_chunks_szad.
             */
            extent_tree_szad_remove(&swap_chunks_szad, node);
            node->addr = chunk;
            node->size += size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        } else if (xnode == NULL) {
            /*
             * It is possible that base_node_alloc() will cause a
             * new base chunk to be allocated, so take care not to
             * deadlock on swap_mtx, and recover if another thread
             * deallocates an adjacent chunk while this one is busy
             * allocating xnode.
             */
            malloc_mutex_unlock(&swap_mtx);
            xnode = base_node_alloc();
            malloc_mutex_lock(&swap_mtx);
            if (xnode == NULL)
                return (NULL);
        } else {
            /* Coalescing forward failed, so insert a new node. */
            node = xnode;
            xnode = NULL;
            node->addr = chunk;
            node->size = size;
            extent_tree_ad_insert(&swap_chunks_ad, node);
            extent_tree_szad_insert(&swap_chunks_szad, node);
            break;
        }
    }
    /* Discard xnode if it ended up unused due to a race. */
    if (xnode != NULL)
        base_node_dealloc(xnode);

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(&swap_chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
            chunk) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within swap_chunks_ad, so only
         * remove/insert node from/into swap_chunks_szad.
         */
        extent_tree_szad_remove(&swap_chunks_szad, prev);
        extent_tree_ad_remove(&swap_chunks_ad, prev);

        extent_tree_szad_remove(&swap_chunks_szad, node);
        node->addr = prev->addr;
        node->size += prev->size;
        extent_tree_szad_insert(&swap_chunks_szad, node);

        base_node_dealloc(prev);
    }

    return (node);
}
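
The forward/backward coalescing above merges the freed range with any neighbor whose address range touches it exactly. A minimal sketch of the underlying address arithmetic on plain structs, without the extent trees or locking (names here are hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct range {
    uintptr_t addr;
    size_t size;
};

/* Merge next into r if next begins exactly where r ends (forward coalesce). */
static int
coalesce_forward(struct range *r, const struct range *next)
{
    if (r->addr + r->size == next->addr) {
        r->size += next->size;
        return 1;
    }
    return 0;
}

int
main(void)
{
    struct range freed = { 0x100000, 0x40000 };
    struct range next  = { 0x140000, 0x40000 };

    assert(coalesce_forward(&freed, &next));    /* contiguous, so merged */
    assert(freed.size == 0x80000);
    return 0;
}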
Example #6
static void *huge_move_expand(struct thread_cache *cache, void *old_addr, size_t old_size, size_t new_size) {
    struct arena *arena;
    void *new_addr = huge_chunk_alloc(cache, new_size, CHUNK_SIZE, &arena);
    if (unlikely(!new_addr)) {
        return NULL;
    }

    bool gap = true;
    if (unlikely(memory_remap_fixed(old_addr, old_size, new_addr, new_size))) {
        memcpy(new_addr, old_addr, old_size);
        if (purge_ratio >= 0) {
            memory_decommit(old_addr, old_size);
        }
        gap = false;
    } else {
        // Attempt to fill the virtual memory hole. The kernel should provide a flag for preserving
        // the old mapping to avoid the possibility of this failing and creating fragmentation.
        //
        // https://lkml.org/lkml/2014/10/2/624
        void *extra = memory_map(old_addr, old_size, false);
        if (likely(extra)) {
            if (unlikely(extra != old_addr)) {
                memory_unmap(extra, old_size);
            } else {
                gap = false;
            }
        }
    }

    struct extent_node key;
    key.addr = old_addr;

    struct arena *old_arena = get_huge_arena(old_addr);

    extent_tree *huge = acquire_huge(old_arena);
    struct extent_node *node = extent_tree_ad_search(huge, &key);
    assert(node);
    extent_tree_ad_remove(huge, node);
    node->addr = new_addr;
    node->size = new_size;

    if (arena != old_arena) {
        release_huge(old_arena);
        huge = acquire_huge(arena);
    }

    extent_tree_ad_insert(huge, node);
    release_huge(arena);

    if (!gap) {
        if (arena != old_arena && old_arena) {
            mutex_lock(&old_arena->mutex);
        }
        chunk_free(get_recycler(old_arena), old_addr, old_size);
        if (arena != old_arena && old_arena) {
            mutex_unlock(&old_arena->mutex);
        }
    }

    maybe_unlock_arena(arena);
    return new_addr;
}
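
memory_remap_fixed() above is this allocator's wrapper for moving an existing mapping onto a pre-reserved destination; on Linux the underlying primitive is mremap() with MREMAP_FIXED. A minimal Linux-only sketch of that primitive, assuming the same zero-on-success convention as the wrapper (the wrapper's exact contract is an assumption):

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

/*
 * Move [old_addr, old_addr + old_size) to the reserved range at new_addr,
 * resizing it to new_size.  Returns 0 on success, nonzero on failure.
 */
static int
remap_fixed(void *old_addr, size_t old_size, void *new_addr, size_t new_size)
{
    void *res = mremap(old_addr, old_size, new_size,
        MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
    return res == MAP_FAILED ? -1 : 0;
}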
Example #7
File: huge.c Project: 08keelr/TrinityCore
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, chunk_size, offset;
	extent_node_t *node;

	/*
	 * This allocation requires alignment that is even larger than chunk
	 * alignment.  This means that huge_malloc() isn't good enough.
	 *
	 * Allocate almost twice as many chunks as are demanded by the size or
	 * alignment, in order to assure the alignment can be achieved, then
	 * unmap leading and trailing chunks.
	 */
	assert(alignment >= chunksize);

	chunk_size = CHUNK_CEILING(size);

	if (size >= alignment)
		alloc_size = chunk_size + alignment - chunksize;
	else
		alloc_size = (alignment << 1) - chunksize;

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(alloc_size, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	offset = (uintptr_t)ret & (alignment - 1);
	assert((offset & chunksize_mask) == 0);
	assert(offset < alloc_size);
	if (offset == 0) {
		/* Trim trailing space. */
		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
		    - chunk_size);
	} else {
		size_t trailsize;

		/* Trim leading space. */
		chunk_dealloc(ret, alignment - offset);

		ret = (void *)((uintptr_t)ret + (alignment - offset));

		trailsize = alloc_size - (alignment - offset) - chunk_size;
		if (trailsize != 0) {
		    /* Trim trailing space. */
		    assert(trailsize < alloc_size);
		    chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
			trailsize);
		}
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = chunk_size;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	huge_nmalloc++;
	huge_allocated += chunk_size;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, chunk_size);
		else if (opt_zero)
			memset(ret, 0, chunk_size);
	}
#endif

	return (ret);
}
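
The leading/trailing trim above is the standard "over-allocate, then carve out an aligned window" technique. A minimal sketch of the arithmetic with hypothetical 4 MiB chunks and a fabricated chunk-aligned return address, just to show the invariants the asserts rely on:

#include <assert.h>
#include <stdint.h>

#define CHUNKSIZE	((uintptr_t)1 << 22)	/* hypothetical 4 MiB chunks */

int
main(void)
{
	uintptr_t alignment  = 4 * CHUNKSIZE;	/* requested 16 MiB alignment */
	uintptr_t chunk_size = 2 * CHUNKSIZE;	/* CHUNK_CEILING(size): 8 MiB */
	/* size < alignment, so alloc_size = (alignment << 1) - chunksize. */
	uintptr_t alloc_size = (alignment << 1) - CHUNKSIZE;

	uintptr_t ret    = 21 * CHUNKSIZE;	/* chunk-aligned, not 16 MiB-aligned */
	uintptr_t offset = ret & (alignment - 1);
	assert(offset != 0);

	/* Trim the leading (alignment - offset) bytes to reach the boundary. */
	uintptr_t aligned = ret + (alignment - offset);
	assert((aligned & (alignment - 1)) == 0);

	/* The aligned window plus the trimmed tail exactly covers the allocation. */
	uintptr_t trailsize = alloc_size - (alignment - offset) - chunk_size;
	assert(aligned + chunk_size + trailsize == ret + alloc_size);
	return (0);
}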