int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, vdso_size); addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } current->mm->context.vdso = (void *)addr; ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); if (ret) { current->mm->context.vdso = NULL; goto up_fail; } up_fail: up_write(&mm->mmap_sem); return ret; }
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { int ret; unsigned long addr; struct mm_struct *mm = current->mm; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack); addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_page); if (ret) goto up_fail; mm->context.vdso = (void *)addr; up_fail: up_write(&mm->mmap_sem); return ret; }
/* Setup a VMA at program startup for the vsyscall page. Not called for compat tasks */ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE); if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, len); addr = get_unmapped_area(NULL, addr, len, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } ret = install_special_mapping(mm, addr, len, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); if (ret) goto up_fail; current->mm->context.vdso = (void *)addr; up_fail: up_write(&mm->mmap_sem); return ret; }
/*
 * Map a vDSO image into the current process: the text pages first, then a
 * data area named "[vvar]" covering [addr + image->size,
 * addr + image->sym_end_mapping), into which the vvar page and (optionally)
 * the HPET page are remapped.
 *
 * @image:          vDSO image to map (text, sizes, symbol offsets).
 * @calculate_addr: if true, ask vdso_addr() for a preferred (stack-relative)
 *                  base; if false, let get_unmapped_area() choose freely.
 *
 * Returns 0 on success or a negative errno; on failure context.vdso is
 * reset to NULL.
 */
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr;
	int ret = 0;
	/* The vvar VMA starts with no backing pages; they are remapped below. */
	static struct page *no_pages[] = {NULL};
	static struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.pages = no_pages,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->sym_end_mapping);
	} else {
		addr = 0;
	}

	down_write(&mm->mmap_sem);

	/* Reserve one contiguous range for text + vvar together. */
	addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/*
	 * Published before the mappings exist; the failure path below clears
	 * it again under the same mmap_sem hold.
	 */
	current->mm->context.vdso = (void __user *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm, addr, image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &image->text_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* Read-only data area following the text; no MAYWRITE here. */
	vma = _install_special_mapping(mm, addr + image->size,
				       image->sym_end_mapping - image->size,
				       VM_READ,
				       &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		/*
		 * NOTE(review): the text mapping installed above is left in
		 * place on this and the later failure paths; only
		 * context.vdso is cleared.  Confirm this is intentional (a
		 * later rework of this function munmaps the text on failure).
		 */
		goto up_fail;
	}

	/* Point the vvar symbol at the kernel's __vvar_page, read-only. */
	if (image->sym_vvar_page)
		ret = remap_pfn_range(vma,
				      addr + image->sym_vvar_page,
				      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
				      PAGE_SIZE,
				      PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	/* Expose the HPET registers read-only and uncached, if present. */
	if (hpet_address && image->sym_hpet_page) {
		ret = io_remap_pfn_range(vma,
			addr + image->sym_hpet_page,
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
/*
 * Map a vDSO image plus its preceding vvar area into the current process.
 *
 * Layout: the vvar area comes *before* the text — image->sym_vvar_start is
 * presumably negative (it is negated below to form the vvar length, and the
 * total span is image->size - image->sym_vvar_start) — so the VMA placed at
 * 'addr' covers vvar, and the text starts at addr - sym_vvar_start.
 *
 * @image:        vDSO image to map.
 * @vdso_mapping: special-mapping descriptor used for the text VMA.
 *
 * Returns 0 on success or a negative errno; on failure context.vdso is
 * reset to NULL.
 */
static int map_vdso(const struct vdso_image *image,
		    struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		/* Randomize within/around the region found above. */
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	/* Second pass: confirm (or find) a free region at the chosen hint. */
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	/* Published here; cleared again on any failure path below. */
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm, text_start, image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* The vvar area: readable only, and sized by the negated offset. */
	vma = _install_special_mapping(mm, addr, -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD, &vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		/* Undo the text mapping so no partial vDSO is left behind. */
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}