/*
 * Set up a VMA at program startup for the vsyscall (vDSO) page.
 *
 * @bprm:        binary being exec'd (unused in this body).
 * @uses_interp: whether an ELF interpreter is in use (unused in this body).
 * @map_address: requested mapping address; 0 means "let the kernel choose".
 *               A non-zero value forces the real-VMA path even in compat
 *               mode (used by checkpoint/restore to restore a fixed layout —
 *               NOTE(review): inferred from the map_address tests below,
 *               confirm against callers).
 *
 * Returns 0 on success or a negative errno; on failure mm->context.vdso
 * is left NULL and the beancounter charge is rolled back.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp,
				unsigned long map_address)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = map_address;
	int ret = 0;
	bool compat;
	unsigned long flags;

	/* vDSO disabled and no explicit address requested: map nothing. */
	if (vdso_enabled == VDSO_DISABLED && map_address == 0) {
		current->mm->context.vdso = NULL;
		return 0;
	}

	flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | VM_MAYWRITE |
		mm->def_flags;

	/* Charge one page against the container's beancounter up front. */
	ret = -ENOMEM;
	if (ub_memory_charge(mm, PAGE_SIZE, flags, NULL, UB_SOFT))
		goto err_charge;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (!compat || map_address) {
		/* Normal case: place the vDSO in a free slot (or at the
		   caller-requested address). */
		addr = get_unmapped_area_prot(NULL, addr, PAGE_SIZE, 0, 0, 1);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	} else
		/* Compat mode: legacy fixed high address. */
		addr = VDSO_HIGH_BASE;

	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat || map_address) {
		struct page **pages = uts_prep_vdso_pages_locked(compat);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto up_fail;
		}

		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					      pages);

		if (ret)
			goto up_fail;
	}

	/*
	 * BUGFIX: when the install branch above is skipped (compat fixmap
	 * path with !compat_uses_vma and no map_address), ret was still the
	 * -ENOMEM preset from the charge step, so a successful setup fell
	 * through to up_fail, NULLed context.vdso and reported failure.
	 * Everything that can fail has succeeded by this point.
	 */
	ret = 0;

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	/* Roll back the beancounter charge on any failure. */
	if (ret < 0)
		ub_memory_uncharge(mm, PAGE_SIZE, flags, NULL);
err_charge:

	return ret;
}
/* Example #2 — same function, variant using VM_ALWAYSDUMP and vdso32_pages. */
/*
 * Set up a VMA at program startup for the vsyscall (vDSO) page.
 *
 * Variant that maps the static vdso32_pages and tags the mapping
 * VM_ALWAYSDUMP so the vDSO appears in every core dump.
 *
 * @bprm:        binary being exec'd (unused in this body).
 * @uses_interp: whether an ELF interpreter is in use (unused in this body).
 * @map_address: requested mapping address; 0 means "let the kernel choose".
 *               A non-zero value forces the real-VMA path even in compat
 *               mode — NOTE(review): inferred from the map_address tests
 *               below, confirm against callers.
 *
 * Returns 0 on success or a negative errno; on failure mm->context.vdso
 * is left NULL and the beancounter charge is rolled back.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp,
				unsigned long map_address)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = map_address;
	int ret = 0;
	bool compat;
	unsigned long flags;

	/* vDSO disabled and no explicit address requested: map nothing. */
	if (vdso_enabled == VDSO_DISABLED && map_address == 0) {
		current->mm->context.vdso = NULL;
		return 0;
	}

	flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | VM_MAYWRITE |
		mm->def_flags;

	/* Charge one page against the container's beancounter up front. */
	ret = -ENOMEM;
	if (ub_memory_charge(mm, PAGE_SIZE, flags, NULL, UB_SOFT))
		goto err_charge;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (!compat || map_address) {
		/* Normal case: place the vDSO in a free slot (or at the
		   caller-requested address). */
		addr = get_unmapped_area_prot(NULL, addr, PAGE_SIZE, 0, 0, 1);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	} else
		/* Compat mode: legacy fixed high address. */
		addr = VDSO_HIGH_BASE;

	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat || map_address) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 *
		 * Make sure the vDSO gets into every core dump.
		 * Dumping its contents makes post-mortem fully
		 * interpretable later without matching up the same
		 * kernel and hardware config to see what PC values
		 * meant.
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
					      VM_ALWAYSDUMP,
					      vdso32_pages);

		if (ret)
			goto up_fail;
	}

	/*
	 * BUGFIX: when the install branch above is skipped (compat fixmap
	 * path with !compat_uses_vma and no map_address), ret was still the
	 * -ENOMEM preset from the charge step, so a successful setup fell
	 * through to up_fail, NULLed context.vdso and reported failure.
	 * Everything that can fail has succeeded by this point.
	 */
	ret = 0;

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	/* Roll back the beancounter charge on any failure. */
	if (ret < 0)
		ub_memory_uncharge(mm, PAGE_SIZE, flags, NULL);
err_charge:

	return ret;
}