Example #1
Address virtualMemory_allocate(Size size, int type) {
#if os_MAXVE
    return (Address) maxve_virtualMemory_allocate(size, type);
#else
    return check_mmap_result(mmap(0, (size_t) size, PROT, MAP_ANON | MAP_PRIVATE, -1, (off_t) 0));
#endif
}
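Every Maxine-side example here (#1-#4 and #8-#10) funnels the mmap return value through check_mmap_result, whose definition is not part of this listing. A minimal sketch of what such a helper could look like, assuming ALLOC_FAILED is the sentinel these functions hand back on failure:

#include <sys/mman.h>

/* Hedged sketch, not the listing's actual implementation: translate
 * mmap's MAP_FAILED sentinel into the VM's ALLOC_FAILED value. */
static Address check_mmap_result(void *result) {
    return result == MAP_FAILED ? ALLOC_FAILED : (Address) result;
}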
Example #2
/* Generic virtual space allocator.
 * If the address parameter is specified, allocate at that address and fail if it cannot be allocated there.
 * Use MAP_NORESERVE if reserveSwap is false.
 * Use PROT_NONE if protNone is true, otherwise set all protections (i.e., allow any type of access).
 */
Address virtualMemory_allocatePrivateAnon(Address address, Size size, jboolean reserveSwap, jboolean protNone, int type) {
  int flags = MAP_PRIVATE | MAP_ANON;
#if os_LINUX
  /* For some reason, subsequent calls to mmap to allocate out of the space
   * reserved here only work if the reserved space is in 32-bit space. */
//  flags |= MAP_32BIT;
#endif
  int prot = protNone == JNI_TRUE ? PROT_NONE : PROT;
  if (reserveSwap == JNI_FALSE) {
     flags |= MAP_NORESERVE;
  }
  if (address != 0) {
     flags |= MAP_FIXED;
  }

  void * result = mmap((void*) address, (size_t) size, prot, flags, -1, 0);

#if log_LOADER
	log_println("virtualMemory_allocatePrivateAnon(address=%p, size=%p, swap=%s, prot=%s) allocated at %p",
					address, size,
					reserveSwap==JNI_TRUE ? "true" : "false",
					protNone==JNI_TRUE ? "none" : "all",
					result);
#endif
  return check_mmap_result(result);
}
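A hypothetical caller of the allocator above, reserving a large region of address space without swap backing and committing pages only when needed; HEAP_TYPE is a placeholder, not a constant from this listing:

#include <sys/mman.h>
#include <unistd.h>

/* Reserve 1 GB, inaccessible (PROT_NONE) and without swap reservation. */
Address reserved = virtualMemory_allocatePrivateAnon((Address) 0, (Size) (1024 * 1024 * 1024),
                                                     JNI_FALSE /* no swap */,
                                                     JNI_TRUE  /* PROT_NONE */,
                                                     HEAP_TYPE);
if (reserved != ALLOC_FAILED) {
    /* Commit the first page read/write only when it is actually needed. */
    mprotect((void *) reserved, getpagesize(), PROT_READ | PROT_WRITE);
}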
Example #3
boolean virtualMemory_allocateAtFixedAddress(Address address, Size size, int type) {
#if os_SOLARIS || os_DARWIN  || os_LINUX
    return check_mmap_result(mmap((void *) address, (size_t) size, PROT, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t) 0)) != ALLOC_FAILED;
#elif os_MAXVE
    return (Address) maxve_virtualMemory_allocateAtFixedAddress((unsigned long)address, size, type) != ALLOC_FAILED;
#else
    c_UNIMPLEMENTED();
    return false;
#endif
}
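One plausible use of the fixed-address variant (a sketch; IMAGE_BASE, imageSize, and IMAGE_TYPE are placeholders): re-mapping a saved heap image at the address it was built for. Note that on the mmap path MAP_FIXED silently replaces any existing mapping in the range, so the caller must already own that range.

if (!virtualMemory_allocateAtFixedAddress((Address) IMAGE_BASE, imageSize, IMAGE_TYPE)) {
    /* The pre-agreed address was unavailable: fall back or abort. */
}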
Example #4
Address virtualMemory_allocateIn31BitSpace(Size size, int type) {
#if os_LINUX
    return check_mmap_result(mmap(0, (size_t) size, PROT, MAP_ANON | MAP_PRIVATE | MAP_32BIT, -1, (off_t) 0));
#elif os_MAXVE
    return (Address) maxve_virtualMemory_allocateIn31BitSpace(size, type);
#else
    c_UNIMPLEMENTED();
    return 0;
#endif
}
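MAP_32BIT is Linux-specific, which is why the other branches are unimplemented. Where the flag is unavailable, a low address hint is a common but weaker substitute; the hint is only advisory without MAP_FIXED, so the result must be checked. A hedged sketch:

#include <stdint.h>
#include <sys/mman.h>

static void *mmap_low_hint(size_t size) {
    /* Hint at 1 GB; the kernel is free to place the mapping elsewhere. */
    void *result = mmap((void *) 0x40000000, size, PROT_READ | PROT_WRITE,
                        MAP_ANON | MAP_PRIVATE, -1, 0);
    if (result != MAP_FAILED && (uintptr_t) result + size > ((uintptr_t) 1 << 31)) {
        munmap(result, size);   /* landed outside the low 2 GB: give up */
        result = MAP_FAILED;
    }
    return result;
}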
Example #5
/* Maps an FBChain of length 1 into user space */
static int map_fb_to_user_contig(struct file *filep,
				struct pme_fbchain *buffers,
				unsigned long *user_addr, size_t *size)
{
	void *data;
	size_t data_size;
	int ret;
	struct vm_area_struct *vma;
	struct pme_fb_vma *mem_node;

	/* The resulting FB chain is a single buffer whose size is not a
	 * multiple of PAGE_SIZE. Map it into the user's address space. */
	data = pme_fbchain_current(buffers);
	data_size = pme_fbchain_current_bufflen(buffers);

	mem_node = fb_vma_create(buffers, fb_phys_mapped, 1, data_size, 0, 0);
	if (!mem_node)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);

	*user_addr = (unsigned long) do_mmap(filep, 0,
					     data_size + offset_in_page(data),
					     PROT_READ | PROT_WRITE,
					     MAP_PRIVATE,
					     virt_to_phys(data) & PAGE_MASK);

	ret = check_mmap_result((void *) *user_addr);
	if (ret)
		goto err;

	vma = find_vma(current->mm, (unsigned long) *user_addr);
	vma->vm_private_data = mem_node;
	up_write(&current->mm->mmap_sem);

	*user_addr += offset_in_page(data);
	*size = data_size;

	return PME_MEM_CONTIG;
err:
	up_write(&current->mm->mmap_sem);
	fb_vma_free(mem_node);
	return -EINVAL;
}
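The kernel examples (#5-#7) assume a different check_mmap_result, one that returns an errno-style int rather than an Address. A plausible sketch, assuming do_mmap follows the usual kernel convention of encoding a negative errno in the returned address on failure:

#include <linux/err.h>

/* Hedged sketch: IS_ERR_VALUE detects the negative-errno-in-an-address
 * encoding that do_mmap uses for failures. */
static int check_mmap_result(void *addr)
{
	return IS_ERR_VALUE((unsigned long) addr) ? (int) (long) addr : 0;
}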
Example #6
/* Map a non-page-aligned fbchain into user space. This
 * requires creating an iovec and populating it correctly. */
static int map_fb_to_user_sg(struct file *filep, struct pme_fbchain *buffers,
				unsigned long *user_addr, size_t *size)
{
	void *data;
	size_t data_size;
	struct iovec *vect;
	int vector_size, ret, list_count, index = 0;
	unsigned long paddr;
	struct vm_area_struct *vma;
	struct pme_fb_vma *mem_node, *iovec_mem_node;
	list_count = pme_fbchain_num(buffers);

	vector_size = sizeof(struct iovec) * list_count;
	iovec_mem_node = fb_vma_create(NULL, fb_phys_mapped, 1, list_count,
			vector_size, 0);
	if (!iovec_mem_node)
		return -ENOMEM;

	/* The space for the iovec is allocated as whole pages, and
	 * a kernel mapping needs to be created in case they were
	 * allocated from high memory. */
	vect = kmap(iovec_mem_node->iovec_pages);
	/* Create a mem node to keep track of the fbchain; otherwise,
	 * we won't know when to release the freebuff list. */
	mem_node = fb_vma_create(buffers, fb_phys_mapped, 0, 0, 0, 0);
	if (!mem_node) {
		/* Unmap the iovec pages before freeing the node that owns them. */
		kunmap(iovec_mem_node->iovec_pages);
		fb_vma_free(iovec_mem_node);
		return -ENOMEM;
	}
	/* For each freebuff, map it to user space, storing the
	 * user-space address and length in the iovec. */
	data = pme_fbchain_current(buffers);

	down_write(&current->mm->mmap_sem);

	while (data) {
		data_size = pme_fbchain_current_bufflen(buffers);
		vect[index].iov_base = (void *) do_mmap(filep, 0,
							data_size +
							offset_in_page(data),
							PROT_READ | PROT_WRITE,
							MAP_PRIVATE,
							virt_to_phys(data) &
							PAGE_MASK);
		ret = check_mmap_result(vect[index].iov_base);
		if (ret)
			/* Need to unmap any previous successes */
			goto err;

		vma = find_vma(current->mm,
				(unsigned long) vect[index].iov_base);

		vma->vm_private_data = mem_node;
		atomic_inc(&mem_node->ref_count);

		vect[index].iov_base += offset_in_page(data);
		vect[index].iov_len = data_size;
		++index;
		data = pme_fbchain_next(buffers);
	}

	/* Now map the iovec into user space */
	paddr = page_to_pfn(iovec_mem_node->iovec_pages) << PAGE_SHIFT;
	*user_addr = (unsigned long) do_mmap(filep, 0,
					     vector_size +
					     offset_in_page(paddr),
					     PROT_READ |
					     PROT_WRITE, MAP_PRIVATE,
					     paddr & PAGE_MASK);

	ret = check_mmap_result((void *) *user_addr);
	if (ret)
		goto err;

	vma = find_vma(current->mm, (unsigned long) *user_addr);

	vma->vm_private_data = iovec_mem_node;

	up_write(&current->mm->mmap_sem);
	*user_addr += offset_in_page(paddr);
	*size = list_count;
	kunmap(iovec_mem_node->iovec_pages);
	return PME_MEM_SG;
err:
	while (index--)
		do_munmap(current->mm,
			((unsigned long)vect[index].iov_base) & PAGE_MASK,
			 vect[index].iov_len +
			 offset_in_page(vect[index].iov_base));

	up_write(&current->mm->mmap_sem);
	kunmap(iovec_mem_node->iovec_pages);
	return -EINVAL;
}
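On the user-space side, a PME_MEM_SG result could be consumed roughly as follows (a hypothetical sketch; the driver's user-facing API is not shown in this listing). Here user_addr points at an array of size struct iovec entries, one per mapped fragment:

#include <sys/uio.h>

struct iovec *vect = (struct iovec *) user_addr;
size_t i, total = 0;
for (i = 0; i < size; i++)
	total += vect[i].iov_len;	/* sum the data bytes across fragments */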
Example #7
/* Map a freebuffer chain into a process's virtual address space
 * when the buffers are a multiple of PAGE_SIZE. */
static int fb_to_user_page_size(struct file *filep,
				struct pme_fbchain *buffers,
				unsigned long *user_addr,
				size_t *size)
{
	struct vm_area_struct *vma;
	int index, ret;
	void *data;
	size_t data_size;
	struct pme_fb_vma *mem_node;

	int list_count = pme_fbchain_num(buffers);
	/* These buffers are page-aligned and occupy complete pages,
	 * so we can mmap them all at once. */
	*size = list_count * pme_fbchain_max(buffers);

	/* We need to lock the mmap_sem because other threads
	 * could be modifying the address space layout */
	down_write(&current->mm->mmap_sem);

	*user_addr = do_mmap(filep, 0, *size,
			     PROT_READ | PROT_WRITE, MAP_PRIVATE, 0);
	ret = check_mmap_result((void *) *user_addr);
	if (ret)
		goto err;

	/* Look up the new VMA and stuff the fbchain into it so that
	 * when a page fault occurs we can find the proper page and
	 * return it. */
	vma = find_vma(current->mm, (unsigned long) *user_addr);

	mem_node = vma->vm_private_data = fb_vma_create(buffers,
			fb_page_mapped, 1, *size, 0,
			(*size + PAGE_SIZE - 1) / PAGE_SIZE);
	if (!mem_node) {
		ret = -ENOMEM;
		/* Make sure we clean the mapped area out of the
		 * user's process space. */
		do_munmap(current->mm, (*user_addr) & PAGE_MASK,
			*size + offset_in_page(*user_addr));
		goto err;
	}
	/* Precompute the struct page * for each page in the buffer. This
	 * makes the nopage implementation easy, as we have already
	 * determined which page to return. */
	index = 0;
	data = pme_fbchain_current(buffers);
	data_size = pme_fbchain_current_bufflen(buffers);
	while (data_size) {
		while (data_size) {
			mem_node->page_array[index] = virt_to_page(data);
			index++;
			if (data_size > PAGE_SIZE) {
				data_size -= PAGE_SIZE;
				data += PAGE_SIZE;
			} else
				data_size = 0;
		}
		data = pme_fbchain_next(buffers);
		data_size = pme_fbchain_current_bufflen(buffers);
	}
	up_write(&current->mm->mmap_sem);
	/* Re-adjust the size to be the actual data length of the buffer */
	*size = pme_fbchain_length(buffers);
	return PME_MEM_CONTIG;
err:
	up_write(&current->mm->mmap_sem);
	return ret;
}
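The precomputed page_array makes the fault handler that the comments above allude to nearly trivial. A hedged sketch against the old ->nopage VM interface this code's era suggests (the real handler is not part of this listing):

static struct page *fb_vma_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	struct pme_fb_vma *mem_node = vma->vm_private_data;
	unsigned long index = (address - vma->vm_start) >> PAGE_SHIFT;
	struct page *page = mem_node->page_array[index];

	get_page(page);			/* the VM will put_page() on unmap */
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}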
Example #8
Address virtualMemory_mapFileAtFixedAddress(Address address, Size size, jint fd, Size offset) {
    return check_mmap_result(mmap((void *) address, (size_t) size, PROT, MAP_PRIVATE | MAP_FIXED, fd, (off_t) offset));
}
Example #9
Address virtualMemory_mapFileIn31BitSpace(jint size, jint fd, Size offset) {
	return check_mmap_result(mmap(0, (size_t) size, PROT, MAP_PRIVATE | MAP_32BIT, fd, (off_t) offset));
}
Example #10
Address virtualMemory_mapFile(Size size, jint fd, Size offset) {
	return check_mmap_result(mmap(0, (size_t) size, PROT, MAP_PRIVATE, fd, (off_t) offset));
}
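An illustrative (hypothetical) caller of virtualMemory_mapFile; because the mapping is MAP_PRIVATE, writes are copy-on-write, so a read-only descriptor suffices. The file name and len are placeholders:

#include <fcntl.h>

jint fd = open("image.bin", O_RDONLY);   /* hypothetical file */
Address base = virtualMemory_mapFile((Size) len, fd, (Size) 0);
if (base != ALLOC_FAILED) {
    /* ... read the mapped bytes at 'base' ... */
}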