int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = AARCH32_VECTORS_BASE;
        static const struct vm_special_mapping spec = {
                .name   = "[vectors]",
                .pages  = vectors_page,
        };
        void *ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
        current->mm->context.vdso = (void *)addr;

        /* Map vectors page at the high address. */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
                                       &spec);

        up_write(&mm->mmap_sem);

        return PTR_ERR_OR_ZERO(ret);
}
#endif /* CONFIG_COMPAT */

static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
        {
                .name   = "[vvar]",
        },
        {
                .name   = "[vdso]",
        },
};
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        vdso_text_len = vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }
        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                       VM_READ|VM_MAYREAD,
                                       &vdso_spec[0]);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_spec[1]);
        if (IS_ERR(ret))
                goto up_fail;

        up_write(&mm->mmap_sem);
        return 0;

up_fail:
        mm->context.vdso = NULL;
        up_write(&mm->mmap_sem);
        return PTR_ERR(ret);
}
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        struct vm_area_struct *vma;
        static struct page *no_pages[] = {NULL};

#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32))
                return x32_setup_additional_pages(bprm, uses_interp);
#endif

        if (vdso_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, 0,
                                 vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES),
                                 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        addr += VDSO_OFFSET(VDSO_PREV_PAGES);

        current->mm->context.vdso = (void *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        ret = install_special_mapping(mm, addr, vdso32_size,
                                      VM_READ|VM_EXEC|
                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                      vdso32_pages);
        if (ret)
                goto up_fail;

        vma = _install_special_mapping(mm,
                                       addr - VDSO_OFFSET(VDSO_PREV_PAGES),
                                       VDSO_OFFSET(VDSO_PREV_PAGES),
                                       VM_READ,
                                       no_pages);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        ret = remap_pfn_range(vma,
                              addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
                              __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                              PAGE_SIZE,
                              PAGE_READONLY);
        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
        if (hpet_address) {
                ret = io_remap_pfn_range(vma,
                                         addr - VDSO_OFFSET(VDSO_HPET_PAGE),
                                         hpet_address >> PAGE_SHIFT,
                                         PAGE_SIZE,
                                         pgprot_noncached(PAGE_READONLY));
                if (ret)
                        goto up_fail;
        }
#endif

        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);

        return ret;
}
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret = 0;
        static struct page *no_pages[] = {NULL};
        static struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
                .pages = no_pages,
        };

        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
                                 image->sym_end_mapping);
        } else {
                addr = 0;
        }

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        current->mm->context.vdso = (void __user *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       addr,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &image->text_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr + image->size,
                                       image->sym_end_mapping - image->size,
                                       VM_READ,
                                       &vvar_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        if (image->sym_vvar_page)
                ret = remap_pfn_range(vma,
                                      addr + image->sym_vvar_page,
                                      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                                      PAGE_SIZE,
                                      PAGE_READONLY);
        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
        if (hpet_address && image->sym_hpet_page) {
                ret = io_remap_pfn_range(vma,
                                         addr + image->sym_hpet_page,
                                         hpet_address >> PAGE_SHIFT,
                                         PAGE_SIZE,
                                         pgprot_noncached(PAGE_READONLY));
                if (ret)
                        goto up_fail;
        }
#endif

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);
        return ret;
}
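/*
 * Minimal caller sketch, not part of the excerpt above: in this era the
 * 64-bit setup path simply gates on a vdso enable flag and asks map_vdso()
 * to place the 64-bit image at a randomized address. The identifiers
 * vdso64_enabled and vdso_image_64 are assumptions for illustration.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso(&vdso_image_64, true);
}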
static int map_vdso(const struct vdso_image *image,
                    struct vm_special_mapping *vdso_mapping)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start, addr = 0;
        int ret = 0;

        down_write(&mm->mmap_sem);

        /*
         * First, get an unmapped region: then randomize it, and make sure that
         * region is free.
         */
        if (current->flags & PF_RANDOMIZE) {
                addr = get_unmapped_area(NULL, 0,
                                         image->size - image->sym_vvar_start,
                                         0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }
                addr = vdso_addr(addr, image->size - image->sym_vvar_start);
        }
        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;
        current->mm->context.vdso = (void __user *)text_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm, text_start, image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm, addr, -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD,
                                       &vvar_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size, NULL);
        }

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);
        return ret;
}
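/*
 * Sketch of the vvar_mapping referenced above; it is outside the excerpt, so
 * this declaration is an assumption about its shape. It is a plain
 * vm_special_mapping like the others in this section: depending on the port
 * it either carries a .fault handler that supplies the vvar page on demand
 * or relies on the page being remapped into the VMA after installation.
 */
static struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
};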
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mips_vdso_image *image = current->thread.abi->vdso;
        struct mm_struct *mm = current->mm;
        unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr;
        struct vm_area_struct *vma;
        struct resource gic_res;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        /*
         * Determine total area size. This includes the VDSO data itself, the
         * data page, and the GIC user page if present. Always create a mapping
         * for the GIC user area if the GIC is present regardless of whether it
         * is the current clocksource, in case it comes into use later on. We
         * only map a page even though the total area is 64K, as we only need
         * the counter registers at the start.
         */
        gic_size = gic_present ? PAGE_SIZE : 0;
        vvar_size = gic_size + PAGE_SIZE;
        size = vvar_size + image->size;

        base = get_unmapped_area(NULL, 0, size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
        }

        data_addr = base + gic_size;
        vdso_addr = data_addr + PAGE_SIZE;

        vma = _install_special_mapping(mm, base, vvar_size,
                                       VM_READ | VM_MAYREAD,
                                       &vdso_vvar_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /* Map GIC user page. */
        if (gic_size) {
                ret = gic_get_usm_range(&gic_res);
                if (ret)
                        goto out;

                ret = io_remap_pfn_range(vma, base,
                                         gic_res.start >> PAGE_SHIFT,
                                         gic_size,
                                         pgprot_noncached(PAGE_READONLY));
                if (ret)
                        goto out;
        }

        /* Map data page. */
        ret = remap_pfn_range(vma, data_addr,
                              virt_to_phys(&vdso_data) >> PAGE_SHIFT,
                              PAGE_SIZE, PAGE_READONLY);
        if (ret)
                goto out;

        /* Map VDSO image. */
        vma = _install_special_mapping(mm, vdso_addr, image->size,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                       &image->mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        mm->context.vdso = (void *)vdso_addr;
        ret = 0;

out:
        up_write(&mm->mmap_sem);
        return ret;
}