Code Example #1
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;

    if (len & ~HPAGE_MASK)
        return -EINVAL;
    if (len > TASK_SIZE)
        return -ENOMEM;

    if (addr) {
        addr = ALIGN(addr, HPAGE_SIZE);
        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
                (!vma || addr + len <= vma->vm_start))
            return addr;
    }
    if (mm->get_unmapped_area == arch_get_unmapped_area)
        return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                pgoff, flags);
    else
        return hugetlb_get_unmapped_area_topdown(file, addr, len,
                pgoff, flags);
}
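For context: the handler above is not called directly by applications; it runs when user space asks for a huge-page mapping, e.g. mmap() on a hugetlbfs file or an anonymous mapping with MAP_HUGETLB. A minimal user-space sketch (the 2 MB size is an assumption; the actual huge page size depends on the architecture and configuration, see Hugepagesize in /proc/meminfo):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;	/* assumed 2 MB huge page */

	/* MAP_HUGETLB makes the kernel route this request through the
	 * hugetlb get_unmapped_area handler shown above. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* typically ENOMEM if no huge pages are reserved */
		return EXIT_FAILURE;
	}

	memset(p, 0, len);	/* touch the mapping to fault in the huge page */
	printf("huge page mapped at %p\n", p);
	munmap(p, len);
	return EXIT_SUCCESS;
}

This only succeeds when huge pages have been reserved beforehand (e.g. via the vm.nr_hugepages sysctl).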
Code Example #2
File: hugetlbpage.c  Project: 0-T-0/ps4-linux
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
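This variant adds a MAP_FIXED branch: the caller-supplied range is only validated, not searched for. As a rough sketch of what the generic prepare_hugepage_range() check amounts to (architectures may add further restrictions), it rejects a length or fixed address that is not a multiple of the huge page size:

static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))	/* length not huge-page aligned */
		return -EINVAL;
	if (addr & ~huge_page_mask(h))	/* fixed address not huge-page aligned */
		return -EINVAL;
	return 0;
}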
Code Example #3
File: hugetlbpage.c  Project: ivucica/linux
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
Code Example #4
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
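Compared with example #3, this variant adds the MAP_FIXED branch and replaces the open-coded `(!vma || addr + len <= vma->vm_start)` test with check_heap_stack_gap(), a grsecurity/PaX helper. The following is a simplified sketch of the idea only; the helper name comes from the code above, but the body and the sysctl name are assumptions, not the actual patch:

/* Sketch: accept the candidate range only if it ends below the next VMA,
 * keeping an extra configurable gap when that VMA is a growing-down stack. */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)
		gap = sysctl_heap_stack_gap;	/* assumed name for the configured guard size */
	return addr + len + gap <= vma->vm_start;
}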
Code Example #5
File: hugetlbpage.c  Project: Endika/linux
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
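The difference from example #2 is the gap check: the candidate range is compared against vm_start_gap(vma) instead of vma->vm_start, so it also stays clear of the guard gap below a downward-growing stack VMA. A rough sketch of what that helper computes (stack_guard_gap is the kernel's configurable guard size):

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* guard against underflow */
			vm_start = 0;
	}
	return vm_start;
}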
Code Example #6
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
        unsigned long addr0, unsigned long len,
        unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma, *prev_vma;
    unsigned long base = mm->mmap_base, addr = addr0;
    unsigned long largest_hole = mm->cached_hole_size;
    int first_time = 1;

    /* don't allow allocations above current base */
    if (mm->free_area_cache > base)
        mm->free_area_cache = base;

    if (len <= largest_hole) {
        largest_hole = 0;
        mm->free_area_cache  = base;
    }
try_again:
    /* make sure it can fit in the remaining address space */
    if (mm->free_area_cache < len)
        goto fail;

    /* either no address requested or can't fit in requested address hole */
    addr = (mm->free_area_cache - len) & HPAGE_MASK;
    do {
        /*
         * Lookup failure means no vma is above this address,
         * i.e. return with success:
         */
        if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
            return addr;

        /*
         * new region fits between prev_vma->vm_end and
         * vma->vm_start, use it:
         */
        if (addr + len <= vma->vm_start &&
                (!prev_vma || (addr >= prev_vma->vm_end))) {
            /* remember the address as a hint for next time */
            mm->cached_hole_size = largest_hole;
            return (mm->free_area_cache = addr);
        } else {
            /* pull free_area_cache down to the first hole */
            if (mm->free_area_cache == vma->vm_end) {
                mm->free_area_cache = vma->vm_start;
                mm->cached_hole_size = largest_hole;
            }
        }

        /* remember the largest hole we saw so far */
        if (addr + largest_hole < vma->vm_start)
            largest_hole = vma->vm_start - addr;

        /* try just below the current vma->vm_start */
        addr = (vma->vm_start - len) & HPAGE_MASK;
    } while (len <= vma->vm_start);

fail:
    /*
     * if hint left us with no space for the requested
     * mapping then try again:
     */
    if (first_time) {
        mm->free_area_cache = base;
        largest_hole = 0;
        first_time = 0;
        goto try_again;
    }
    /*
     * A failed mmap() very likely causes application failure,
     * so fall back to the bottom-up function here. This scenario
     * can happen with large stack limits and large mmap()
     * allocations.
     */
    mm->free_area_cache = TASK_UNMAPPED_BASE;
    mm->cached_hole_size = ~0UL;
    addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
            len, pgoff, flags);

    /*
     * Restore the topdown base:
     */
    mm->free_area_cache = base;
    mm->cached_hole_size = ~0UL;

    return addr;
}
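Every variant above falls back to hugetlb_get_unmapped_area_bottomup() when the request cannot be placed top-down. A simplified sketch of such a bottom-up search using the same free_area_cache/cached_hole_size scheme (illustrative only, not the exact kernel code):

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
        unsigned long addr, unsigned long len,
        unsigned long pgoff, unsigned long flags)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long start_addr;

    /* start at the cached hint unless the cached hole is already big enough */
    if (len > mm->cached_hole_size) {
        start_addr = mm->free_area_cache;
    } else {
        start_addr = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = 0;
    }

full_search:
    addr = ALIGN(start_addr, HPAGE_SIZE);

    for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
        /* At this point: (!vma || addr < vma->vm_end). */
        if (TASK_SIZE - len < addr) {
            /* address space exhausted: retry once from the base */
            if (start_addr != TASK_UNMAPPED_BASE) {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
                goto full_search;
            }
            return -ENOMEM;
        }
        if (!vma || addr + len <= vma->vm_start) {
            /* found a hole; remember where the search stopped */
            mm->free_area_cache = addr + len;
            return addr;
        }
        /* remember the largest hole seen so far */
        if (addr + mm->cached_hole_size < vma->vm_start)
            mm->cached_hole_size = vma->vm_start - addr;
        addr = ALIGN(vma->vm_end, HPAGE_SIZE);
    }
}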
Code Example #7
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache  = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & huge_page_mask(h);
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		vma = find_vma_prev(mm, addr, &prev_vma);
		if (!vma)
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			mm->free_area_cache = addr;
			return addr;
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & huge_page_mask(h);

	} while (len <= vma->vm_start);

fail:
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
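Finally, the MAP_FIXED branch in examples #2, #4, #5 and #7 is why a fixed-address huge-page mapping must be aligned: an unaligned address or length never reaches the search at all and is rejected with -EINVAL. A small user-space illustration (the 2 MB size and the hint address are assumptions for the demo):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t huge = 2UL * 1024 * 1024;	/* assumed 2 MB huge pages */
	void *hint = (void *)0x40000000UL;	/* hypothetical, huge-page-aligned address */
	int prot = PROT_READ | PROT_WRITE;
	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_FIXED;

	/* Aligned fixed address: the kernel only validates the range. */
	void *a = mmap(hint, huge, prot, flags, -1, 0);
	if (a == MAP_FAILED)
		perror("aligned MAP_FIXED");	/* e.g. ENOMEM if no huge pages are reserved */
	else
		munmap(a, huge);

	/* Page-aligned but not huge-page-aligned: rejected with EINVAL
	 * by the MAP_FIXED validation before any search happens. */
	void *b = mmap((char *)hint + 4096, huge, prot, flags, -1, 0);
	if (b == MAP_FAILED)
		perror("unaligned MAP_FIXED");
	else
		munmap(b, huge);
	return 0;
}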