Example #1
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  As such, repeatedly trying to extend unaligned
	 * mappings could result in an infinite loop, so if extension fails,
	 * immediately fall back to the reliable method of over-allocation
	 * followed by trimming.
	 */

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);

	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		/* Try to extend chunk boundary. */
		if (pages_map((void *)((uintptr_t)ret + size), chunksize -
		    offset) == NULL) {
			/*
			 * Extension failed.  Clean up, then fall back to the
			 * reliable-but-expensive method.
			 */
			pages_unmap(ret, size);
			return (chunk_alloc_mmap_slow(size, alignment, true,
			    zero));
		} else {
			/* Clean up unneeded leading space. */
			pages_unmap(ret, chunksize - offset);
			ret = (void *)((uintptr_t)ret + (chunksize - offset));
		}
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
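The optimistic path above relies on a bit of address arithmetic: assuming the requested alignment equals the chunk size (the common case here), if mmap() hands back an address that sits `offset` bytes past a chunk boundary, then appending `chunksize - offset` bytes at the end and unmapping the same amount at the front leaves exactly `size` usable bytes starting at a chunk boundary. A minimal standalone walk-through of that arithmetic (illustrative only; the 4 MiB chunk size and the addresses are made up, not taken from jemalloc):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void)
{
	size_t chunksize = (size_t)4 << 20;		/* assumed 4 MiB chunks */
	uintptr_t ret = (uintptr_t)0x7f0000123000;	/* unaligned mmap() result */
	size_t offset = (size_t)(ret & (chunksize - 1));	/* 0x123000 */

	/*
	 * Mapping (chunksize - offset) extra bytes after the end and unmapping
	 * the same amount at the front advances ret to the next chunk boundary
	 * while still leaving `size` usable bytes behind it.
	 */
	uintptr_t aligned = ret + (chunksize - offset);
	assert((aligned & (chunksize - 1)) == 0);	/* 0x7f0000400000 */
	return (0);
}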
Example #2
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}
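pages_trim() leans on two small wrappers, pages_map() and pages_unmap(). The #ifdef split exists because a region obtained with VirtualAlloc() on Windows cannot be partially released, so the whole allocation is released and re-mapped at the aligned address, whereas POSIX munmap() can drop the leading and trailing slices in place. A minimal POSIX-only sketch of what those wrappers might look like (the real jemalloc versions add Windows support and more careful error handling):

#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>

static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	if (ret == MAP_FAILED)
		return (NULL);
	if (addr != NULL && ret != addr) {
		/* The kernel treated addr only as a hint; give the mapping back. */
		munmap(ret, size);
		return (NULL);
	}
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

	if (munmap(addr, size) == -1)
		abort();	/* Losing track of a mapping is unrecoverable. */
}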
Example #3
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
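ALIGNMENT_ADDR2OFFSET(), and ALIGNMENT_CEILING() used in the slow-path examples below, are not shown in these excerpts. They can be pictured as the usual power-of-two alignment macros, modeled loosely on jemalloc's internal headers (treat the exact definitions here as illustrative; both require `alignment` to be a power of two):

/* How far a sits past the previous `alignment` boundary. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & ((alignment) - 1)))

/* Round s up to the next multiple of `alignment`. */
#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))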
Example #4
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	size_t alloc_size;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		void *pages;
		size_t leadsize;
		pages = pages_map(NULL, alloc_size, commit);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((vaddr_t)pages, alignment) -
		    (vaddr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size, commit);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
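The over-allocation of size + alignment - PAGE in the slow path is exactly enough: mmap() results are page-aligned, so the lead needed to reach the next `alignment` boundary is at most alignment - PAGE, which always leaves at least `size` bytes to keep after trimming. A small self-contained check of that bound (the 4 KiB PAGE and the example sizes are assumptions, not taken from the source above):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE ((size_t)4096)	/* assumed page size for the example */

int
main(void)
{
	size_t alignment = (size_t)4 << 20;	/* 4 MiB alignment */
	size_t size = (size_t)8 << 20;		/* 8 MiB request */
	size_t alloc_size = size + alignment - PAGE;
	uintptr_t base;

	/*
	 * Walk every page-aligned offset within one alignment period and
	 * confirm an aligned run of `size` bytes always fits in alloc_size.
	 */
	for (base = 0; base < alignment; base += PAGE) {
		uintptr_t aligned = (base + alignment - 1) &
		    ~((uintptr_t)alignment - 1);
		size_t leadsize = (size_t)(aligned - base);
		assert(leadsize + size <= alloc_size);
	}
	return (0);
}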
Example #5
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize, trailsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	pages = pages_map(NULL, alloc_size);
	if (pages == NULL)
		return (NULL);
	leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
	    (uintptr_t)pages;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)pages + leadsize);
	if (leadsize != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;
		pages_unmap(pages, leadsize);
	}
	if (trailsize != 0)
		pages_unmap((void *)((uintptr_t)ret + size), trailsize);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
Example #6
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero
#ifdef JEMALLOC_ENABLE_MEMKIND
, unsigned partition
#endif
)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size
#ifdef JEMALLOC_ENABLE_MEMKIND
, partition
#endif
);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size
#ifdef JEMALLOC_ENABLE_MEMKIND
, partition
#endif
);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
Example #7
static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = pages_map(NULL, size + chunksize, noreserve);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret +
		    (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    chunksize);
	}

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false)
		MMAP_UNALIGNED_SET(false);

	return (ret);
}
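CHUNK_ADDR2OFFSET() plays the same role here as ALIGNMENT_ADDR2OFFSET() in the earlier examples, only hard-wired to the chunk size. It can be sketched as something like the following (illustrative; chunksize_mask is assumed to be chunksize - 1, with chunksize a power of two):

/* Offset of a within its (power-of-two sized) chunk. */
#define CHUNK_ADDR2OFFSET(a)						\
	((size_t)((uintptr_t)(a) & chunksize_mask))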
Example #8
static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

	if (MMAP_UNALIGNED_GET() == false) {
		size_t offset;

		ret = pages_map(NULL, size, noreserve);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			MMAP_UNALIGNED_SET(true);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset, noreserve) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true,
				    noreserve);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false, noreserve);

	return (ret);
}