Example #1
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		/*
		 * Windows cannot release part of a mapping, so unmap the
		 * whole allocation and remap just the aligned subrange.
		 */
		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}
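For readability, these are the prototypes this listing assumes for the page-mapping helpers used throughout (thin wrappers over mmap()/munmap(), or VirtualAlloc()/VirtualFree() on Windows); the older variants in Examples #5 and #9 take an extra noreserve flag. These declarations are an assumption for illustration, not copied from upstream.

/* Assumed helper prototypes (illustrative only). */
static void	*pages_map(void *addr, size_t size);	/* map size bytes, hinting at addr */
static void	pages_unmap(void *addr, size_t size);	/* unmap a previously mapped range */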
Example #2
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize, trailsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	pages = pages_map(NULL, alloc_size);
	if (pages == NULL)
		return (NULL);
	leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
	    (uintptr_t)pages;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)pages + leadsize);
	if (leadsize != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;
		pages_unmap(pages, leadsize);
	}
	if (trailsize != 0)
		pages_unmap((void *)((uintptr_t)ret + size), trailsize);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
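Examples #1 and #2 lean on two alignment helpers. Below is a minimal sketch of what they compute, assuming power-of-two alignments; these are illustrative definitions, not the exact upstream macros.

#include <stddef.h>
#include <stdint.h>

/* Round s up to the next multiple of alignment (alignment must be a power of two). */
#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))
/* Distance of address a above the previous alignment boundary. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & ((alignment) - 1)))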
Example #3
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  As such, repeatedly trying to extend unaligned
	 * mappings could result in an infinite loop, so if extension fails,
	 * immediately fall back to the reliable method of over-allocation
	 * followed by trimming.
	 */

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);

	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		/* Try to extend chunk boundary. */
		if (pages_map((void *)((uintptr_t)ret + size), chunksize -
		    offset) == NULL) {
			/*
			 * Extension failed.  Clean up, then fall back to the
			 * reliable-but-expensive method.
			 */
			pages_unmap(ret, size);
			return (chunk_alloc_mmap_slow(size, alignment, true,
			    zero));
		} else {
			/* Clean up unneeded leading space. */
			pages_unmap(ret, chunksize - offset);
			ret = (void *)((uintptr_t)ret + (chunksize - offset));
		}
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
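A worked instance of the optimistic path in Example #3, assuming chunksize == alignment == 4 MiB and a 64-bit address space; the concrete addresses are invented purely for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	const size_t chunksize = 4 << 20;	/* 4 MiB chunks/alignment */
	uintptr_t ret = 0x7f0000100000;		/* what pages_map(NULL, size) happened to return */
	size_t offset = ret & (chunksize - 1);	/* ALIGNMENT_ADDR2OFFSET(ret, chunksize) */

	assert(offset == 0x100000);		/* mapping is 1 MiB past a chunk boundary */
	/*
	 * The extension maps chunksize - offset == 3 MiB at ret + size, the
	 * leading 3 MiB are unmapped, and ret advances onto the boundary.
	 */
	ret += chunksize - offset;
	assert((ret & (chunksize - 1)) == 0);	/* now chunk-aligned */
	return 0;
}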
Example #4
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
Example #5
static void *
chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
{
	void *ret;
	size_t offset;

	/* Beware size_t wrap-around. */
	if (size + chunksize <= size)
		return (NULL);

	ret = pages_map(NULL, size + chunksize, noreserve);
	if (ret == NULL)
		return (NULL);

	/* Clean up unneeded leading/trailing space. */
	offset = CHUNK_ADDR2OFFSET(ret);
	if (offset != 0) {
		/* Note that mmap() returned an unaligned mapping. */
		unaligned = true;

		/* Leading space. */
		pages_unmap(ret, chunksize - offset);

		ret = (void *)((uintptr_t)ret +
		    (chunksize - offset));

		/* Trailing space. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    offset);
	} else {
		/* Trailing space only. */
		pages_unmap((void *)((uintptr_t)ret + size),
		    chunksize);
	}

	/*
	 * If mmap() returned an aligned mapping, reset mmap_unaligned so that
	 * the next chunk_alloc_mmap() execution tries the fast allocation
	 * method.
	 */
	if (unaligned == false)
		MMAP_UNALIGNED_SET(false);

	return (ret);
}
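Examples #5 and #9 consult MMAP_UNALIGNED_GET()/MMAP_UNALIGNED_SET() to remember whether the last mmap() came back misaligned. A minimal sketch of how those macros could be backed, assuming a single-threaded build; real builds would more likely keep the flag in thread-specific data.

#include <stdbool.h>

/* Hypothetical backing store for the MMAP_UNALIGNED_* macros used above. */
static bool	mmap_unaligned;

#define MMAP_UNALIGNED_GET()	(mmap_unaligned)
#define MMAP_UNALIGNED_SET(v)	do {					\
	mmap_unaligned = (v);						\
} while (0)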
Example #6
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}
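Examples #6 and #7 return true when the pages were deliberately left mapped (munmap support compiled out), signalling that the caller still owns the range. A hypothetical caller sketch; chunk_retain_for_reuse() is an invented name standing in for whatever recycling the caller performs.

	/* Hypothetical caller: keep the chunk for reuse if it was not unmapped. */
	if (chunk_dalloc_mmap(chunk, chunksize))
		chunk_retain_for_reuse(chunk, chunksize);	/* invented helper */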
Example #7
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (config_munmap == false);
}
Example #8
void
chunk_dealloc_mmap(void *chunk, size_t size)
{

	pages_unmap(chunk, size);
}
Example #9
static void *
chunk_alloc_mmap_internal(size_t size, bool noreserve)
{
	void *ret;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim the
	 * excess.  However, that always results in at least one call to
	 * pages_unmap().
	 *
	 * A more optimistic approach is to try mapping precisely the right
	 * amount, then try to append another mapping if alignment is off.  In
	 * practice, this works out well as long as the application is not
	 * interleaving mappings via direct mmap() calls.  If we do run into a
	 * situation where there is an interleaved mapping and we are unable to
	 * extend an unaligned mapping, our best option is to switch to the
	 * slow method until mmap() returns another aligned mapping.  This will
	 * tend to leave a gap in the memory map that is too small to cause
	 * later problems for the optimistic method.
	 *
	 * Another possible confounding factor is address space layout
	 * randomization (ASLR), which causes mmap(2) to disregard the
	 * requested address.  mmap_unaligned tracks whether the previous
	 * chunk_alloc_mmap() execution received any unaligned or relocated
	 * mappings, and if so, the current execution will immediately fall
	 * back to the slow method.  However, we keep track of whether the fast
	 * method would have succeeded, and if so, we make a note to try the
	 * fast method next time.
	 */

	if (MMAP_UNALIGNED_GET() == false) {
		size_t offset;

		ret = pages_map(NULL, size, noreserve);
		if (ret == NULL)
			return (NULL);

		offset = CHUNK_ADDR2OFFSET(ret);
		if (offset != 0) {
			MMAP_UNALIGNED_SET(true);
			/* Try to extend chunk boundary. */
			if (pages_map((void *)((uintptr_t)ret + size),
			    chunksize - offset, noreserve) == NULL) {
				/*
				 * Extension failed.  Clean up, then revert to
				 * the reliable-but-expensive method.
				 */
				pages_unmap(ret, size);
				ret = chunk_alloc_mmap_slow(size, true,
				    noreserve);
			} else {
				/* Clean up unneeded leading space. */
				pages_unmap(ret, chunksize - offset);
				ret = (void *)((uintptr_t)ret + (chunksize -
				    offset));
			}
		}
	} else
		ret = chunk_alloc_mmap_slow(size, false, noreserve);

	return (ret);
}
Example #10
static long kern_unlocked_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg) {
	/* buffer descriptor copied in from user space... */
	BufferStruct b;
	/* for results from calls */
	int res;
	/* for printing buffers */
	char *cptr;
	/* for loop length */
	unsigned int sloop;
	/* for loops */
	unsigned int i;
	/* some unsigned ints for address manipulation... */
	unsigned long bpointer, offset, aligned, newsize;
	/* the newly created vma */
	struct vm_area_struct *vma;

	PR_DEBUG("start with ioctl %u", cmd);
	switch (cmd) {
	/*
	*	This is asking the kernel to map the memory to kernel space.
	*/
	case IOCTL_DEMO_MAP:
		/* get the data from the user */
		res = copy_from_user(&b, (void __user *)arg, sizeof(b));
		if (res) {
			PR_ERROR("problem with copy_from_user");
			/* copy_from_user() returns bytes not copied, not an errno */
			return -EFAULT;
		}
		PR_DEBUG("after copy");
		bpointer = (unsigned long)b.pointer;
		offset = bpointer % PAGE_SIZE;
		aligned = bpointer - offset;
		newsize = b.size + offset;
		PR_DEBUG("bpointer is %x", bpointer);
		PR_DEBUG("offset is %u", offset);
		PR_DEBUG("aligned is %x", aligned);
		PR_DEBUG("newsize is %u", newsize);

		/*
		* // make sure that the user data is page aligned...
		* if(((unsigned int)b.pointer)%PAGE_SIZE!=0) {
		*	PR_ERROR("pointer is not page aligned");
		*	return -EFAULT;
		* }
		* PR_DEBUG("after modulu check");
		*/
		/* find the number of pages */
		nr_pages = (newsize - 1) / PAGE_SIZE + 1;
		PR_DEBUG("nr_pages is %d", nr_pages);
		/* allocate page structures... */
		pages = kmalloc(nr_pages * sizeof(struct page *),
				GFP_KERNEL);
		if (pages == NULL) {
			/* kmalloc() returns NULL on failure, not an ERR_PTR */
			PR_ERROR("could not allocate page structs");
			return -ENOMEM;
		}
		PR_DEBUG("after pages allocation");
		/* get user pages and fault them in */
		down_write(&current->mm->mmap_sem);
		/* rw==READ means read from drive, write into memory area */
		res = get_user_pages(
			/* current->mm,
			current, */
			aligned,
			nr_pages,
			1,/* write */
			0,/* force */
			pages,
			NULL
		);
		vma = find_vma(current->mm, bpointer);
		if (vma)
			vma->vm_flags |= VM_DONTCOPY;
		up_write(&current->mm->mmap_sem);
		PR_DEBUG("after get_user_pages res is %d", res);
		/* Errors and no page mapped should return here */
		if (res != nr_pages) {
			PR_ERROR("could not get_user_pages. res was %d", res);
			kfree(pages);
			return -EFAULT;
		}
		/* pages_lock();
		pages_reserve();
		pages_unlock(); */
		/* map the pages to kernel space... */
		vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
		if (vptr == NULL) {
			PR_ERROR("could not get_user_pages. res was %d", res);
			kfree(pages);
			return -EFAULT;
		}
		ptr = vptr + offset;
		size = b.size;
		PR_DEBUG("after vmap - vptr is %p", vptr);
		/* free the pages */
		kfree(pages);
		pages = NULL;
		PR_DEBUG("after freeing the pages");
		/* we're done! return with success */
		PR_DEBUG("success - on the way out");
		return 0;

	/*
	*	This is asking the kernel to unmap the data
	*	No arguments are passed
	*/
	case IOCTL_DEMO_UNMAP:
		/* this function does NOT return an error code. Strange...:) */
		vunmap(vptr);
		vptr = NULL;
		ptr = NULL;
		size = 0;
		nr_pages = 0;
		pages_unmap();
		return 0;

	/*
	*	This is asking the kernel to read the data.
	*	No arguments are passed
	*/
	case IOCTL_DEMO_READ:
		cptr = (char *)ptr;
		sloop = min_t(unsigned int, size, (unsigned int)10);
		PR_DEBUG("sloop is %d", sloop);
		for (i = 0; i < sloop; i++)
			PR_INFO("value of %d is %c", i, cptr[i]);
		return 0;
	/*
	*	This is asking the kernel to write on our data
	*	argument is the constant which will be used...
	*/
	case IOCTL_DEMO_WRITE:
		memset(ptr, arg, size);
		/* pages_dirty(); */
		return 0;
	}
	return -EINVAL;
}
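A hypothetical user-space test for the ioctl handler above. Only the ioctl names and the BufferStruct fields (pointer, size) come from the driver code; the device node path "/dev/demo" and the header "demo_ioctl.h" are assumptions made for the sketch.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "demo_ioctl.h"	/* hypothetical header providing BufferStruct and IOCTL_DEMO_* */

int main(void)
{
	int fd = open("/dev/demo", O_RDWR);	/* hypothetical device node */
	char *buf;
	BufferStruct b;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	buf = malloc(4096);
	if (buf == NULL) {
		close(fd);
		return EXIT_FAILURE;
	}
	memset(buf, 'A', 4096);
	b.pointer = buf;
	b.size = 4096;
	if (ioctl(fd, IOCTL_DEMO_MAP, &b) < 0)		/* map our buffer into the kernel */
		perror("IOCTL_DEMO_MAP");
	if (ioctl(fd, IOCTL_DEMO_READ, 0) < 0)		/* kernel logs the first few bytes */
		perror("IOCTL_DEMO_READ");
	if (ioctl(fd, IOCTL_DEMO_WRITE, 'B') < 0)	/* kernel overwrites the buffer */
		perror("IOCTL_DEMO_WRITE");
	if (ioctl(fd, IOCTL_DEMO_UNMAP, 0) < 0)		/* tear the mapping down */
		perror("IOCTL_DEMO_UNMAP");
	free(buf);
	close(fd);
	return EXIT_SUCCESS;
}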