/* cpd_get_private_area: This function attempts to allocate an address inside * the process 32MB relocated area of its address space. If no suitable * region is found cpd_get_alloced_area() is called. * * Notes: * - vma_p will never be NULL as a 4KB dummy VMA is mapped at 31MB. * - vma__prev_p will never be NULL as the brk vm_area is there or the loop * invarient has failed. */ unsigned long cpd_get_private_area(struct file *file_p, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned int start, end; struct vm_area_struct* vma_p; struct vm_area_struct* vma_prev_p; if (len < ARMPID_BRK_LIMIT) { /* Loop till we hit brk */ for (start = end = ARMPID_BRK_LIMIT; start >= PAGE_SIZE && len < end; end = vma_prev_p->vm_start) { start = end - len; vma_p = find_vma_prev(current->mm, end, &vma_prev_p); if (start >= vma_prev_p->vm_end) { return start; } if (!vma_prev_p) { break; } } } return cpd_get_alloced_area(file_p, addr, len, pgoff, flags); }
/* * A virtual address region being munmap()ed might share bounds table * with adjacent VMAs. We only need to free the backing physical * memory of these shared bounds tables entries covered in this virtual * address region. */ static int unmap_edge_bts(struct mm_struct *mm, unsigned long start, unsigned long end) { int ret; long __user *bde_start, *bde_end; struct vm_area_struct *prev, *next; bool prev_shared = false, next_shared = false; bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start); bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1); /* * Check whether bde_start and bde_end are shared with adjacent * VMAs. * * We already unliked the VMAs from the mm's rbtree so 'start' * is guaranteed to be in a hole. This gets us the first VMA * before the hole in to 'prev' and the next VMA after the hole * in to 'next'. */ next = find_vma_prev(mm, start, &prev); if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1)) == bde_start) prev_shared = true; if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start)) == bde_end) next_shared = true; /* * This virtual address region being munmap()ed is only * covered by one bounds table. * * In this case, if this table is also shared with adjacent * VMAs, only part of the backing physical memory of the bounds * table need be freeed. Otherwise the whole bounds table need * be unmapped. */ if (bde_start == bde_end) { return unmap_shared_bt(mm, bde_start, start, end, prev_shared, next_shared); } /* * If more than one bounds tables are covered in this virtual * address region being munmap()ed, we need to separately check * whether bde_start and bde_end are shared with adjacent VMAs. */ ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false); if (ret) return ret; ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared); if (ret) return ret; return 0; }
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; unsigned long task_size = TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; if (len > task_size) return -ENOMEM; do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; last_mmap = GET_LAST_MMAP(filp); if (flags & MAP_FIXED) { if ((flags & MAP_SHARED) && last_mmap && (addr - shared_align_offset(last_mmap, pgoff)) & (SHM_COLOUR - 1)) return -EINVAL; goto found_addr; } if (addr) { if (do_color_align && last_mmap) addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (task_size - len >= addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) goto found_addr; } info.flags = 0; info.length = len; info.low_limit = mm->mmap_legacy_base; info.high_limit = mmap_upper_limit(); info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; info.align_offset = shared_align_offset(last_mmap, pgoff); addr = vm_unmapped_area(&info); found_addr: if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK)) SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); return addr; }
/*
 * arch_get_unmapped_area_topdown - find a free region below mmap_base,
 * scanning downward from the free_area_cache hint.
 *
 * Returns the chosen address or -ENOMEM.  On cache exhaustion it first
 * retries from the base, then falls back to the bottom-up allocator.
 * NOTE(review): mm->free_area_cache / first_time retry state is
 * order-sensitive; kept byte-identical.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;
	unsigned long begin, end;

	find_start_end(flags, &begin, &end);

	/* requested length too big for entire address space */
	if (len > end)
		return -ENOMEM;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		/* hint is usable if it leaves room and hits no VMA */
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* free_area_cache is not really optimized for 32 bit apps */
	if (sysctl_legacy_va_layout &&
	    ((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)))
		goto fail;

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr && addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		else
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end)
				mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = begin;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	return addr;
}
/*
 * hugetlb_get_unmapped_area_topdown - top-down search for a huge-page
 * aligned free region, using mm->free_area_cache and cached_hole_size
 * as scan hints.  Retries once from mmap_base, then falls back to the
 * bottom-up allocator.
 * NOTE(review): the cache/largest_hole bookkeeping is order-sensitive;
 * kept byte-identical.
 */
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* the cached hole hint is useless for this request; restart */
	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
						  len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
/*
 * move_vma - move a mapping of old_len bytes at addr to new_addr,
 * growing it to new_len.  Tries to merge the moved range into an
 * adjacent VMA before allocating a fresh vm_area_struct; on success
 * the old range is unmapped and new_addr returned.
 *
 * NOTE(review): the failure path and the 'out:' label referenced below
 * are not visible in this chunk — the function text appears truncated
 * here; confirm against the full file before editing.
 */
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;

	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		/* try to extend the VMA ending exactly at new_addr */
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file &&
		    !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			/* extension may have closed the gap to 'next': merge it too */
			if (prev->vm_end == next->vm_start &&
			    can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);
				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file &&
			   !(vma->vm_flags & VM_SHARED)) {
			/* grow 'next' downward to absorb the new range */
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		/* no VMA above new_addr: look for one ending right at it */
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file &&
		    !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	/* no merge possible: allocate a fresh vm_area_struct */
	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	/* move_page_tables() returns 0 on success */
	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
		unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

		if (allocated_vma) {
			/* clone the old VMA's state onto the new range */
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}
		/* XXX: possible errors masked, mapping might remain */
		do_munmap(current->mm, addr, old_len);
		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (vm_locked) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			/* fault in the newly-grown tail of a locked mapping */
			if (new_len > old_len)
				make_pages_present(new_addr + old_len,
						   new_addr + new_len);
		}
		return new_addr;
	}
unsigned long arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_color_align, last_mmap; struct vm_unmapped_area_info info; #ifdef CONFIG_64BIT /* This should only ever run for 32-bit processes. */ BUG_ON(!test_thread_flag(TIF_32BIT)); #endif /* requested length too big for entire address space */ if (len > TASK_SIZE) return -ENOMEM; do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; last_mmap = GET_LAST_MMAP(filp); if (flags & MAP_FIXED) { if ((flags & MAP_SHARED) && last_mmap && (addr - shared_align_offset(last_mmap, pgoff)) & (SHM_COLOUR - 1)) return -EINVAL; goto found_addr; } /* requesting a specific address */ if (addr) { if (do_color_align && last_mmap) addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) goto found_addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = mm->mmap_base; info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; info.align_offset = shared_align_offset(last_mmap, pgoff); addr = vm_unmapped_area(&info); if (!(addr & ~PAGE_MASK)) goto found_addr; VM_BUG_ON(addr != -ENOMEM); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); found_addr: if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK)) SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); return addr; }
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev_vma; unsigned long base = mm->mmap_base, addr = addr0; unsigned long largest_hole = mm->cached_hole_size; int first_time = 1; if (mm->free_area_cache > base) mm->free_area_cache = base; if (len <= largest_hole) { largest_hole = 0; mm->free_area_cache = base; } try_again: if (mm->free_area_cache < len) goto fail; addr = (mm->free_area_cache - len) & huge_page_mask(h); do { vma = find_vma_prev(mm, addr, &prev_vma); if (!vma) { return addr; break; } if (addr + len <= vma->vm_start && (!prev_vma || (addr >= prev_vma->vm_end))) { mm->cached_hole_size = largest_hole; mm->free_area_cache = addr; return addr; } else { if (mm->free_area_cache == vma->vm_end) { mm->free_area_cache = vma->vm_start; mm->cached_hole_size = largest_hole; } } if (addr + largest_hole < vma->vm_start) largest_hole = vma->vm_start - addr; addr = (vma->vm_start - len) & huge_page_mask(h); } while (len <= vma->vm_start); fail: if (first_time) { mm->free_area_cache = base; largest_hole = 0; first_time = 0; goto try_again; } mm->free_area_cache = TASK_UNMAPPED_BASE; mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); mm->free_area_cache = base; mm->cached_hole_size = ~0UL; return addr; }