int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, vdso_size); addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } current->mm->context.vdso = (void *)addr; ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); if (ret) { current->mm->context.vdso = NULL; goto up_fail; } up_fail: up_write(&mm->mmap_sem); return ret; }
/* * Called from binfmt_elf. Create a VMA for the vDSO page. */ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { int ret; unsigned long vdso_base; struct mm_struct *mm = current->mm; if (down_write_killable(&mm->mmap_sem)) return -EINTR; /* Try to get it loaded right near ld.so/glibc. */ vdso_base = STACK_TOP; vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(vdso_base)) { ret = vdso_base; goto up_fail; } /* MAYWRITE to allow gdb to COW and set breakpoints. */ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_page); if (ret) goto up_fail; mm->context.vdso = (void *)vdso_base; up_fail: up_write(&mm->mmap_sem); return ret; }
/* Setup a VMA at program startup for the vsyscall page */ int arch_restore_additional_pages (void* addr) { struct mm_struct *mm = current->mm; int ret = 0; down_write(&mm->mmap_sem); current->mm->context.vdso = addr; ret = install_special_mapping(mm, (u_long) addr, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso32_pages); if (ret) goto up_fail; current_thread_info()->sysenter_return = VDSO32_SYMBOL((u_long) addr, SYSENTER_RETURN); up_fail: if (ret) current->mm->context.vdso = NULL; up_write(&mm->mmap_sem); return ret; }
void uml_setup_stubs(struct mm_struct *mm) { int err, ret; if (!skas_needs_stub) return; ret = init_stub_pte(mm, STUB_CODE, (unsigned long) &__syscall_stub_start); if (ret) goto out; ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); if (ret) goto out; mm->context.stub_pages[0] = virt_to_page(&__syscall_stub_start); mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); /* dup_mmap already holds mmap_sem */ err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC | VM_DONTCOPY, mm->context.stub_pages); if (err) { printk(KERN_ERR "install_special_mapping returned %d\n", err); goto out; } return; out: force_sigsegv(SIGSEGV, current); }
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { int ret; unsigned long addr; struct mm_struct *mm = current->mm; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack); addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_page); if (ret) goto up_fail; mm->context.vdso = (void *)addr; up_fail: up_write(&mm->mmap_sem); return ret; }
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long vdso_base, vdso_mapping_len; int ret; /* Be sure to map the data page */ vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT; down_write(&mm->mmap_sem); vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { ret = vdso_base; goto up_fail; } mm->context.vdso = (void *)vdso_base; ret = install_special_mapping(mm, vdso_base, vdso_mapping_len, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pagelist); if (ret) { mm->context.vdso = NULL; goto up_fail; } up_fail: up_write(&mm->mmap_sem); return ret; }
int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack) { struct mm_struct *mm = current->mm; unsigned long vdso_base; int retval = 0; if (!notify_exec()) sim_notify_exec(bprm->filename); down_write(&mm->mmap_sem); vdso_base = VDSO_BASE; retval = install_special_mapping(mm, vdso_base, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pages); #ifndef __tilegx__ if (!retval) { unsigned long addr = MEM_USER_INTRPT; addr = mmap_region(NULL, addr, INTRPT_SIZE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0); if (addr > (unsigned long) -PAGE_SIZE) retval = (int) addr; } #endif up_write(&mm->mmap_sem); return retval; }
/* Setup a VMA at program startup for the vsyscall page */ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; down_write(&mm->mmap_sem); addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_ALWAYSDUMP, syscall_pages); if (unlikely(ret)) goto up_fail; current->mm->context.vdso = (void *)addr; up_fail: up_write(&mm->mmap_sem); return ret; }
/* Setup a VMA at program startup for the vsyscall page. Not called for compat tasks */ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) { struct mm_struct *mm = current->mm; unsigned long addr; int ret; unsigned len = round_up(vdso_end - vdso_start, PAGE_SIZE); if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); addr = vdso_addr(mm->start_stack, len); addr = get_unmapped_area(NULL, addr, len, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } ret = install_special_mapping(mm, addr, len, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, vdso_pages); if (ret) goto up_fail; current->mm->context.vdso = (void *)addr; up_fail: up_write(&mm->mmap_sem); return ret; }
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	bool compat;

#ifdef CONFIG_X86_X32_ABI
	/* x32 tasks get their own vDSO; hand off entirely. */
	if (test_thread_flag(TIF_X32))
		return x32_setup_additional_pages(bprm, uses_interp);
#endif

	if (vdso_enabled == VDSO_DISABLED)
		return 0;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	/*
	 * Compat mode uses the fixed high address; otherwise take any
	 * free page-sized slot.
	 */
	if (compat)
		addr = VDSO_HIGH_BASE;
	else {
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	}

	/*
	 * Record the base before installing; the up_fail path clears it
	 * again on any error below.
	 */
	current->mm->context.vdso = (void *)addr;

	/*
	 * In compat mode a VMA is only installed when the platform needs
	 * one (compat_uses_vma); the fixmap alone suffices otherwise.
	 */
	if (compat_uses_vma || !compat) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					      vdso32_pages);
		if (ret)
			goto up_fail;
	}

	/* Where the sysenter fast path returns to in userspace. */
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	bool compat;

	if (vdso_enabled == VDSO_DISABLED)
		return 0;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	/*
	 * Compat mode uses the fixed high address; otherwise take any
	 * free page-sized slot.
	 */
	if (compat)
		addr = VDSO_HIGH_BASE;
	else {
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	}

	/*
	 * In compat mode a VMA is only installed when the platform needs
	 * one (compat_uses_vma); the fixmap alone suffices otherwise.
	 */
	if (compat_uses_vma || !compat) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 *
		 * Make sure the vDSO gets into every core dump.
		 * Dumping its contents makes post-mortem fully
		 * interpretable later without matching up the same
		 * kernel and hardware config to see what PC values
		 * meant.
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
					      VM_ALWAYSDUMP,
					      vdso32_pages);
		if (ret)
			goto up_fail;
	}

	/* Record the base only after the mapping succeeded. */
	current->mm->context.vdso = (void *)addr;

	/* Where the sysenter fast path returns to in userspace. */
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
	up_write(&mm->mmap_sem);

	return ret;
}
/*
 * Map the vDSO at its fixed base for a new process and, on non-tilegx,
 * create the user-interrupt mapping above TASK_SIZE.  Returns 0 or a
 * negative error code.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base;
	int retval = 0;

	/*
	 * Notify the simulator that an exec just occurred.
	 * If we can't find the filename of the mapping, just use
	 * whatever was passed as the linux_binprm filename.
	 */
	if (!notify_exec())
		sim_notify_exec(bprm->filename);

	down_write(&mm->mmap_sem);

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vdso_base = VDSO_BASE;
	retval = install_special_mapping(mm, vdso_base, PAGE_SIZE,
					 VM_READ|VM_EXEC|
					 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					 vdso_pages);

#ifndef __tilegx__
	/*
	 * Set up a user-interrupt mapping here; the user can't
	 * create one themselves since it is above TASK_SIZE.
	 * We make it unwritable by default, so the model for adding
	 * interrupt vectors always involves an mprotect.
	 */
	if (!retval) {
		unsigned long addr = MEM_USER_INTRPT;
		/* Return values in the top page-sized band are errors. */
		addr = mmap_region(NULL, addr, INTRPT_SIZE,
				   MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
				   VM_READ|VM_EXEC|
				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
		if (addr > (unsigned long) -PAGE_SIZE)
			retval = (int) addr;
	}
#endif

	up_write(&mm->mmap_sem);

	return retval;
}
/*
 * Map the kuser helper page at its fixed base for a new process.
 * Read/execute only — userspace may not COW or write it.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int err;

	down_write(&mm->mmap_sem);

	/* Map kuser helpers to user space address */
	err = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
				      VM_READ | VM_EXEC |
				      VM_MAYREAD | VM_MAYEXEC,
				      kuser_page);

	up_write(&mm->mmap_sem);

	return err;
}
/*
 * Map the AArch32 vectors page at its fixed high base for a compat
 * task.  Read/execute only — no MAYWRITE, so it cannot be COWed.
 */
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = AARCH32_VECTORS_BASE;
	int ret;

	down_write(&mm->mmap_sem);
	/*
	 * NOTE(review): context.vdso is set before the mapping is
	 * installed and is not reset if install_special_mapping()
	 * fails — confirm callers treat a nonzero return as fatal.
	 */
	current->mm->context.vdso = (void *)addr;

	/* Map vectors page at the high address. */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
				      vectors_page);

	up_write(&mm->mmap_sem);

	return ret;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { int err; struct mm_struct *mm = current->mm; if (!vdso_enabled) return 0; down_write(&mm->mmap_sem); err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdsop); up_write(&mm->mmap_sem); return err; }
/* Setup a VMA at program startup for the vsyscall page */ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack) { struct mm_struct *mm = current->mm; int ret; down_write(&mm->mmap_sem); /* * MAYWRITE to allow gdb to COW and set breakpoints * * Make sure the vDSO gets into every core dump. * Dumping its contents makes post-mortem fully interpretable later * without matching up the same kernel and hardware config to see * what PC values meant. */ /* Could randomize here */ ret = install_special_mapping(mm, VSYSCALL32_BASE, PAGE_SIZE, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_ALWAYSDUMP, syscall32_pages); up_write(&mm->mmap_sem); return ret; }
static void wire_weird_pages(void) { /* 0x80000000 */ if (dpages[0] == NULL) { dpages[0] = alloc_pages(GFP_KERNEL, 0); } down_write(¤t->mm->mmap_sem); int ret = install_special_mapping(current->mm, 0x80000000, PAGE_SIZE, VM_READ | VM_WRITE | VM_SHARED | VM_DONTCOPY, dpages); up_write(¤t->mm->mmap_sem); void* addr = page_address(dpages[0]); memset(addr, 'w', PAGE_SIZE); printk("wired weird page! (%p, %d, %p)\n", dpages[0], ret, addr); }
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp,
				unsigned long map_address)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = map_address;
	int ret = 0;
	bool compat;
	unsigned long flags;

	/*
	 * A nonzero map_address forces a mapping even when the vDSO is
	 * disabled; otherwise a disabled vDSO means no mapping at all.
	 */
	if (vdso_enabled == VDSO_DISABLED && map_address == 0) {
		current->mm->context.vdso = NULL;
		return 0;
	}

	flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | VM_MAYWRITE |
		mm->def_flags;

	/*
	 * Charge the page against the memory beancounter first; the
	 * charge is released below if anything after it fails.
	 * (presumably the ub_* calls are the OpenVZ user-beancounter
	 * API — verify against the surrounding tree)
	 */
	ret = -ENOMEM;
	if (ub_memory_charge(mm, PAGE_SIZE, flags, NULL, UB_SOFT))
		goto err_charge;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	/*
	 * A caller-requested address, or non-compat mode, means finding
	 * a real slot; pure compat mode uses the fixed high address.
	 */
	if (!compat || map_address) {
		addr = get_unmapped_area_prot(NULL, addr, PAGE_SIZE, 0, 0, 1);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	} else
		addr = VDSO_HIGH_BASE;

	/* Recorded before installing; cleared on any failure below. */
	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat || map_address) {
		struct page **pages = uts_prep_vdso_pages_locked(compat);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto up_fail;
		}

		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					      pages);
		if (ret)
			goto up_fail;
	}

	/* Where the sysenter fast path returns to in userspace. */
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	/* Roll back the beancounter charge on failure. */
	if (ret < 0)
		ub_memory_uncharge(mm, PAGE_SIZE, flags, NULL);
err_charge:
	return ret;
}
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	struct vm_area_struct *vma;
	/* Placeholder VMA with no backing pages; filled by remaps below. */
	static struct page *no_pages[] = {NULL};

#ifdef CONFIG_X86_X32_ABI
	/* x32 tasks get their own vDSO; hand off entirely. */
	if (test_thread_flag(TIF_X32))
		return x32_setup_additional_pages(bprm, uses_interp);
#endif

	if (vdso_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	down_write(&mm->mmap_sem);

	/*
	 * Reserve one contiguous region: VDSO_PREV_PAGES worth of
	 * preceding pages (vvar, HPET) followed by the vDSO itself.
	 */
	addr = get_unmapped_area(NULL, 0, vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES), 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* addr now points at the vDSO proper, past the prev pages. */
	addr += VDSO_OFFSET(VDSO_PREV_PAGES);

	/* Recorded before installing; cleared on any failure below. */
	current->mm->context.vdso = (void *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm, addr, vdso32_size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      vdso32_pages);
	if (ret)
		goto up_fail;

	/* Read-only VMA covering the pages preceding the vDSO. */
	vma = _install_special_mapping(mm,
			addr - VDSO_OFFSET(VDSO_PREV_PAGES),
			VDSO_OFFSET(VDSO_PREV_PAGES),
			VM_READ,
			no_pages);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* Map the vvar data page read-only into that VMA. */
	ret = remap_pfn_range(vma,
		addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
		__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
		PAGE_SIZE,
		PAGE_READONLY);
	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	/* Expose the HPET MMIO page (uncached) when an HPET exists. */
	if (hpet_address) {
		ret = io_remap_pfn_range(vma,
			addr - VDSO_OFFSET(VDSO_HPET_PAGE),
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));
		if (ret)
			goto up_fail;
	}
#endif

	/* Where the sysenter fast path returns to in userspace. */
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}
/*
 * Map the vDSO (native or compat, chosen per task) into the current
 * process.  Returns 0 on success or when the vDSO is unavailable, a
 * negative error code otherwise.
 */
int setup_vdso_pages(void)
{
	struct page **pagelist;
	unsigned long pages;
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base = 0;
	int retval = 0;

	if (!vdso_ready)
		return 0;

	mm->context.vdso_base = 0;

	/* Default to the native vDSO image... */
	pagelist = vdso_pagelist;
	pages = vdso_pages;
#ifdef CONFIG_COMPAT
	/* ...but compat tasks get the 32-bit image. */
	if (is_compat_task()) {
		pagelist = vdso32_pagelist;
		pages = vdso32_pages;
	}
#endif

	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for the
	 * process.
	 */
	if (pages == 0)
		return 0;

	/* Over-reserve so the base can be aligned up afterwards. */
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      (pages << PAGE_SHIFT) +
				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
				      0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		retval = vdso_base;
		return retval;
	}

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	mm->context.vdso_base = vdso_base;

	/*
	 * our vma flags don't have VM_WRITE so by default, the process isn't
	 * allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW on
	 * those pages but it's then your responsibility to never do that on
	 * the "data" page of the vDSO or you'll stop getting kernel updates
	 * and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though
	 */
	retval = install_special_mapping(mm, vdso_base, pages << PAGE_SHIFT,
					 VM_READ|VM_EXEC |
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
					 pagelist);
	if (retval)
		mm->context.vdso_base = 0;

	return retval;
}