unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
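/*
 * For reference, a minimal sketch of the cache-colour alignment used by
 * COLOR_ALIGN() above.  The real macro is arch-defined and may differ;
 * the idea is to round the hint up to an SHMLBA boundary, then add the
 * colour implied by pgoff, so shared mappings of the same file offset
 * end up on the same virtual-cache colour.
 */
#define COLOR_ALIGN(addr, pgoff)					\
	((((addr) + (SHMLBA - 1)) & ~(SHMLBA - 1)) +			\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))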
/*
 * place non-fixed mmaps firstly in the bottom part of memory, working up,
 * and then in the top part of memory, working down
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* handle MAP_FIXED */
	if (flags & MAP_FIXED)
		return addr;

	/* only honour a hint if we're not going to clobber something doing so */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(current->mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto success;
	}

	/* search between the bottom of user VM and the stack grow area */
	info.flags = 0;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = (current->mm->start_stack - 0x00200000);
	info.align_mask = 0;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto success;
	VM_BUG_ON(addr != -ENOMEM);

	/* search from just above the WorkRAM area to the top of memory */
	info.low_limit = PAGE_ALIGN(0x80000000);
	info.high_limit = TASK_SIZE;
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto success;
	VM_BUG_ON(addr != -ENOMEM);

#if 0
	printk("[area] l=%lx (ENOMEM) f='%s'\n",
	       len, filp ? filp->f_path.dentry->d_name.name : "");
#endif
	return -ENOMEM;

success:
#if 0
	printk("[area] l=%lx ad=%lx f='%s'\n",
	       len, addr, filp ? filp->f_path.dentry->d_name.name : "");
#endif
	return addr;
} /* end arch_get_unmapped_area() */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit();
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}
/*
 * A variant of hugetlb_get_unmapped_area() doing a topdown search.
 * FIXME!! should we do as x86 does or as the non-hugetlb area does?
 * i.e., use topdown or not based on the mmap_is_legacy() check?
 */
unsigned long radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					       unsigned long len, unsigned long pgoff,
					       unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > high_limit)
		return -ENOMEM;

	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	/*
	 * We are always doing a topdown search here. Slice code
	 * does that too.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

#ifdef CONFIG_64BIT
	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));
#endif

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}
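/*
 * The common helper above is parameterised by search direction, so the
 * per-direction entry points can be thin wrappers around it.  A minimal
 * sketch, assuming the UP/DOWN enumerators of the
 * enum mmap_allocation_direction passed to the helper:
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	/* bottom-up search */
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	/* top-down search, with the bottom-up fallback built into the helper */
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}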