Example #1
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	long map_shared = (flags & MAP_SHARED);
	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;

	/* handle fixed mapping: prevent overlap with huge pages */
	if (flags & MAP_FIXED) {
		if (is_hugepage_only_range(mm, addr, len))
			return -EINVAL;
		return addr;
	}

#ifdef CONFIG_HUGETLB_PAGE
	if (REGION_NUMBER(addr) == RGN_HPAGE)
		addr = 0;
#endif
	if (!addr)
		addr = mm->free_area_cache;

	if (map_shared && (TASK_SIZE > 0xfffffffful))
		/*
		 * For 64-bit tasks, align shared segments to 1MB to avoid potential
		 * performance penalty due to virtual aliasing (see ASDM).  For 32-bit
		 * tasks, we prefer to avoid exhausting the address space too quickly by
		 * limiting alignment to a single page.
		 */
		align_mask = SHMLBA - 1;

  full_search:
	start_addr = addr = (addr + align_mask) & ~align_mask;

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				/* Start a new search --- just in case we missed some holes.  */
				addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/* Remember the address where we stopped this search:  */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = (vma->vm_end + align_mask) & ~align_mask;
	}
}
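
Both the initial search base and each retry address in Example #1 are rounded up with the same idiom, (addr + align_mask) & ~align_mask, where align_mask is a power of two minus one. A minimal user-space sketch of that rounding follows; SHMLBA_DEMO is a local stand-in for ia64's 1 MB SHMLBA, not the kernel constant:

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical demo value: ia64 defines SHMLBA as 1 MB. */
	#define SHMLBA_DEMO (1024UL * 1024UL)

	/* Round addr up to the next multiple of (mask + 1); mask must be 2^n - 1. */
	static unsigned long align_up(unsigned long addr, unsigned long mask)
	{
		return (addr + mask) & ~mask;
	}

	int main(void)
	{
		unsigned long mask = SHMLBA_DEMO - 1;

		/* An already aligned address is left untouched... */
		assert(align_up(0x200000UL, mask) == 0x200000UL);
		/* ...while anything past a boundary is pushed to the next one. */
		assert(align_up(0x200001UL, mask) == 0x300000UL);
		printf("0x200001 rounds up to %#lx\n", align_up(0x200001UL, mask));
		return 0;
	}

Adding the mask before clearing it is what makes this a round-up rather than a truncation: an aligned address gains only bits that the AND then discards, while any misaligned address is carried over the next boundary first.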
Example #2
unsigned long
arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	long map_shared = (flags & MAP_SHARED);
	unsigned long align_mask = 0;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;
	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;

	/* handle fixed mapping: prevent overlap with huge pages */
	if (flags & MAP_FIXED) {
		if (is_hugepage_only_range(mm, addr, len))
			return -EINVAL;
		return addr;
	}

#ifdef CONFIG_HUGETLB_PAGE
	if (REGION_NUMBER(addr) == RGN_HPAGE)
		addr = 0;
#endif

#ifdef CONFIG_PAX_RANDMMAP
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		addr = mm->free_area_cache;
	else
#endif

	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (map_shared && (TASK_SIZE > 0xfffffffful))
		/*
		 * For 64-bit tasks, align shared segments to 1MB to avoid potential
		 * performance penalty due to virtual aliasing (see ASDM).  For 32-bit
		 * tasks, we prefer to avoid exhausting the address space too quickly by
		 * limiting alignment to a single page.
		 */
		align_mask = PAGE_MASK & (SHMLBA - 1);

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = TASK_SIZE;
	info.align_mask = align_mask;
	info.align_offset = 0;
	info.threadstack_offset = offset;
	return vm_unmapped_area(&info);
}
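
Example #2 delegates the hole search to vm_unmapped_area(), so the full_search loop and the mm->free_area_cache bookkeeping from Example #1 disappear; the function only fills the constraints (length, low/high limits, alignment) into struct vm_unmapped_area_info. The threadstack_offset field and gr_rand_threadstack_offset() are grsecurity/PaX additions, not part of the mainline structure. Note also that the mask changed from SHMLBA - 1 to PAGE_MASK & (SHMLBA - 1), which keeps only the alignment bits above the page offset. A minimal sketch of that arithmetic, using hypothetical ia64-like demo constants (16 KB pages, 1 MB SHMLBA), makes the difference visible:

	#include <stdio.h>

	/* Hypothetical ia64-like constants for illustration only. */
	#define PAGE_SIZE_DEMO	(16UL * 1024UL)		/* 16 KB pages */
	#define PAGE_MASK_DEMO	(~(PAGE_SIZE_DEMO - 1))
	#define SHMLBA_DEMO	(1024UL * 1024UL)	/* 1 MB shared-mapping alignment */

	int main(void)
	{
		/* Example #1 masks every bit below the 1 MB boundary... */
		unsigned long mask_v1 = SHMLBA_DEMO - 1;
		/* ...Example #2 keeps only the bits between the page size and
		 * SHMLBA, since the sub-page bits of a returned mapping are
		 * zero anyway. */
		unsigned long mask_v2 = PAGE_MASK_DEMO & (SHMLBA_DEMO - 1);

		printf("mask_v1 = %#lx\n", mask_v1);	/* 0xfffff */
		printf("mask_v2 = %#lx\n", mask_v2);	/* 0xfc000 */
		return 0;
	}

Since mapped addresses are page-aligned by construction, dropping the sub-page bits from the mask does not change which addresses qualify; it only expresses the alignment constraint in the form vm_unmapped_area() expects.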