unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vma;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (flags & MAP_PRIVATE)
                addr = PAGE_ALIGN(addr);
        else
                addr = COLOUR_ALIGN(addr);

        for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vma || addr + len <= vma->vm_start)
                        return addr;
                addr = vma->vm_end;
                if (!(flags & MAP_PRIVATE))
                        addr = COLOUR_ALIGN(addr);
        }
}
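/*
 * A minimal sketch, not taken from the snippets themselves, of the
 * single-argument COLOUR_ALIGN() the sparc32-style variants rely on:
 * assuming it simply rounds an address up to the next SHMLBA boundary
 * so every shared mapping starts on the same cache colour, a definition
 * consistent with the callers would be:
 */
#define COLOUR_ALIGN(addr)      (((addr) + SHMLBA - 1) & ~(SHMLBA - 1))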
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;
        int do_color_align;

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

#ifdef CONFIG_PAX_RANDMMAP
        if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
#endif
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
                        return addr;
        }

        addr = current->mm->mmap_base;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (check_heap_stack_gap(vmm, addr, len))
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
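/*
 * A simplified sketch of the PaX check_heap_stack_gap() helper used by
 * the variant above, assuming it only has to verify that the candidate
 * range fits below the next VMA. The real PaX helper additionally
 * enforces a configurable guard gap below a grows-down stack VMA; that
 * part is omitted here, so treat this as an illustrative reconstruction.
 */
static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                        unsigned long addr, unsigned long len)
{
        /* No VMA above addr, or the new range ends before the VMA starts. */
        return !vma || addr + len <= vma->vm_start;
}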
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;
        int do_color_align;
        unsigned long task_size;

#ifdef CONFIG_MIPS32
        task_size = TASK_SIZE;
#else
        task_size = (current->thread.mflags & MF_32BIT_ADDR) ?
                TASK_SIZE32 : TASK_SIZE;
#endif

        if (flags & MAP_FIXED) {
                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) && (addr & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (len > task_size)
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vmm = find_vma(current->mm, addr);
                if (task_size - len >= addr &&
                    (!vmm || addr + len <= vmm->vm_start))
                        return addr;
        }

        addr = TASK_UNMAPPED_BASE;
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (task_size - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
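/*
 * A sketch of the two-argument COLOUR_ALIGN(addr, pgoff) used by the
 * MIPS/SH-style variants, assuming shm_align_mask is the alignment
 * granule minus one: the address is rounded up to a colour boundary,
 * then offset so the mapping gets the cache colour implied by its file
 * offset. Assumed from how the callers use it, not quoted from the
 * original files.
 */
#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))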
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        /* See asm-sparc/uaccess.h */
        if (len > TASK_SIZE - PAGE_SIZE)
                return -ENOMEM;
        if (ARCH_SUN4C_SUN4 && len > 0x20000000)
                return -ENOMEM;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (flags & MAP_SHARED)
                addr = COLOUR_ALIGN(addr);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
                    0x20000000 - len < addr) {
                        addr = PAGE_OFFSET;
                        vmm = find_vma(current->mm, PAGE_OFFSET);
                }
                if (TASK_SIZE - PAGE_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
                        addr = COLOUR_ALIGN(addr);
        }
}
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}
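/*
 * The vm_unmapped_area() call above replaces the hand-rolled VMA walks
 * of the older variants: the core walks free gaps and bumps each gap's
 * base so that (addr & align_mask) == (align_offset & align_mask),
 * where align_mask and align_offset are the struct vm_unmapped_area_info
 * fields set above. A rough, hypothetical illustration of that colour
 * adjustment, assuming 'base' is the low end of a candidate gap:
 */
static inline unsigned long align_candidate(unsigned long base,
                                            unsigned long align_mask,
                                            unsigned long align_offset)
{
        /* Step up to the next address with the requested colour. */
        return base + ((align_offset - base) & align_mask);
}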
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (TASK_SIZE - len < addr)
                        return -EINVAL;
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (dir == UP) {
                addr = mm->mmap_base;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                        if (TASK_SIZE - len < addr)
                                return -ENOMEM;
                        if (!vma || addr + len <= vma->vm_start)
                                return addr;
                        addr = vma->vm_end;
                        if (do_color_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                }
        } else {
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                addr = mm->free_area_cache;
                if (do_color_align) {
                        unsigned long base =
                                COLOUR_ALIGN_DOWN(addr - len, pgoff);
                        addr = base + len;
                }

                if (likely(addr > len)) {
                        vma = find_vma(mm, addr - len);
                        if (!vma || addr <= vma->vm_start)
                                return mm->free_area_cache = addr - len;
                }

                if (unlikely(mm->mmap_base < len))
                        goto bottomup;

                addr = mm->mmap_base - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);

                do {
                        vma = find_vma(mm, addr);
                        if (likely(!vma || addr + len <= vma->vm_start))
                                return mm->free_area_cache = addr;

                        if (addr + mm->cached_hole_size < vma->vm_start)
                                mm->cached_hole_size = vma->vm_start - addr;

                        addr = vma->vm_start - len;
                        if (do_color_align)
                                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
                } while (likely(len < vma->vm_start));

bottomup:
                mm->cached_hole_size = ~0UL;
                mm->free_area_cache = TASK_UNMAPPED_BASE;
                addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;

                return addr;
        }
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > task_size))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_color_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr - len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr - len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = mm->mmap_base - len;
        if (do_color_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
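/*
 * A sketch of COLOUR_ALIGN_DOWN(addr, pgoff) as the top-down variants
 * use it: the largest address not above addr that has the cache colour
 * implied by pgoff. This is an assumed reconstruction consistent with
 * the callers (written with shm_align_mask; the sparc64 variant uses
 * SHMLBA - 1 for the same role), not a quote from the original files:
 */
static inline unsigned long colour_align_down(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
        unsigned long base = (addr & ~shm_align_mask) + off;

        /* Moving to the right colour may overshoot; back off one granule. */
        return base <= addr ? base : base - (shm_align_mask + 1);
}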
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;
        int do_color_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
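/*
 * Note on the variant above: VA_EXCLUDE_START and VA_EXCLUDE_END bracket
 * the unusable hole in the middle of the sparc64 virtual address space.
 * When a candidate range would straddle the hole, the search jumps
 * straight to VA_EXCLUDE_END instead of probing addresses inside it.
 */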
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        int do_colour_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) && (addr & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        do_colour_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;

        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                mm->cached_hole_size = 0;
                start_addr = addr = TASK_UNMAPPED_BASE;
        }

full_search:
        if (do_colour_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (unlikely(TASK_SIZE - len < addr)) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (dir == UP) {
                addr = mm->mmap_base;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                        /* At this point: (!vma || addr < vma->vm_end). */
                        if (TASK_SIZE - len < addr)
                                return -ENOMEM;
                        if (!vma || addr + len <= vma->vm_start)
                                return addr;
                        addr = vma->vm_end;
                        if (do_color_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                }
        } else {
                /* check if free_area_cache is useful for us */
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /* either no address requested or can't fit in requested address hole */
                addr = mm->free_area_cache;
                if (do_color_align) {
                        unsigned long base =
                                COLOUR_ALIGN_DOWN(addr - len, pgoff);
                        addr = base + len;
                }

                /* make sure it can fit in the remaining address space */
                if (likely(addr > len)) {
                        vma = find_vma(mm, addr - len);
                        if (!vma || addr <= vma->vm_start) {
                                /* remember the address as a hint for next time */
                                return mm->free_area_cache = addr - len;
                        }
                }

                if (unlikely(mm->mmap_base < len))
                        goto bottomup;

                addr = mm->mmap_base - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);

                do {
                        /*
                         * Lookup failure means no vma is above this address,
                         * else if new region fits below vma->vm_start,
                         * return with success:
                         */
                        vma = find_vma(mm, addr);
                        if (likely(!vma || addr + len <= vma->vm_start)) {
                                /* remember the address as a hint for next time */
                                return mm->free_area_cache = addr;
                        }

                        /* remember the largest hole we saw so far */
                        if (addr + mm->cached_hole_size < vma->vm_start)
                                mm->cached_hole_size = vma->vm_start - addr;

                        /* try just below the current vma->vm_start */
                        addr = vma->vm_start - len;
                        if (do_color_align)
                                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
                } while (likely(len < vma->vm_start));

bottomup:
                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here. This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
                mm->cached_hole_size = ~0UL;
                mm->free_area_cache = TASK_UNMAPPED_BASE;
                addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
                /*
                 * Restore the topdown base:
                 */
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;

                return addr;
        }
}
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr = addr;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

#ifdef CONFIG_ARM_FCSE
        start_addr = addr;
#endif /* CONFIG_ARM_FCSE */

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                goto found_addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        goto found_addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        goto found_addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }

found_addr:
#ifdef CONFIG_ARM_FCSE
        {
                unsigned long new_addr =
                        fcse_check_mmap_addr(mm, start_addr, addr, len, flags);

                if (new_addr != addr) {
                        addr = new_addr;
                        if (!(addr & ~PAGE_MASK)) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                }
        }
#endif /* CONFIG_ARM_FCSE */
        return addr;
}
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches. We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = mm->mmap_base;
                mm->cached_hole_size = 0;
        }

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr - len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr - len);
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base - len;
        if (do_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start - len;
                if (do_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (len < vma->vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = FIRST_USER_ADDRESS;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;
        struct vm_unmapped_area_info info;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
#ifdef CONFIG_PAX_RANDMMAP
        if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
#endif
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
                        return addr;
        }

        info.length = len;
        info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;

        if (dir == DOWN) {
                info.flags = VM_UNMAPPED_AREA_TOPDOWN;
                info.low_limit = PAGE_SIZE;
                info.high_limit = mm->mmap_base;
                addr = vm_unmapped_area(&info);

                if (!(addr & ~PAGE_MASK))
                        return addr;

                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here. This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
        }

        info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        return vm_unmapped_area(&info);
}