Example No. 1
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;
	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
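VDSO32_SYMBOL() is used throughout these examples to turn a symbol name into a pointer inside the 32-bit vDSO image. A rough sketch of how such a macro can be built (illustrative only; the in-tree definition lives in arch/x86/include/asm/vdso.h and differs between kernel versions, and the sketch assumes the vDSO linker script exports a VDSO32_<name> marker per symbol):

/*
 * Illustrative sketch, not the kernel's actual macro: the vDSO linker
 * script is assumed to export a VDSO32_<name> symbol whose link-time
 * "address" equals the symbol's offset within the image, so resolving
 * a symbol is just base-of-image plus that offset.
 */
#define EXAMPLE_VDSO32_SYMBOL(base, name)				\
({									\
	extern const char VDSO32_##name[];				\
	(void *)((unsigned long)(base) + (unsigned long)VDSO32_##name);\
})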
Example No. 2
/* Setup a VMA at program startup for the vsyscall page */
int arch_restore_additional_pages(void *addr)
{
	struct mm_struct *mm = current->mm;
	int ret = 0;

	down_write(&mm->mmap_sem);

	current->mm->context.vdso = addr;

	ret = install_special_mapping(mm, (u_long) addr, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      vdso32_pages);
	if (ret)
		goto up_fail;

	current_thread_info()->sysenter_return = VDSO32_SYMBOL((u_long) addr, SYSENTER_RETURN);

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}
Example No. 3
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	bool compat;

#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32))
		return x32_setup_additional_pages(bprm, uses_interp);
#endif

	if (vdso_enabled == VDSO_DISABLED)
		return 0;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (compat)
		addr = VDSO_HIGH_BASE;
	else {
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	}

	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					      vdso32_pages);

		if (ret)
			goto up_fail;
	}

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}
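The address recorded in mm->context.vdso is what the ELF loader later publishes to the new program through the auxiliary vector (AT_SYSINFO_EHDR, plus AT_SYSINFO for the sysenter entry point), so userspace can locate the mapping without any syscall. A minimal userspace sketch of the consumer side, assuming a glibc recent enough to provide getauxval():

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base address of the vDSO image the kernel mapped for this process. */
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (vdso_base)
		printf("vDSO mapped at %#lx\n", vdso_base);
	else
		printf("no AT_SYSINFO_EHDR entry in the auxiliary vector\n");
	return 0;
}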
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	bool compat;

	if (vdso_enabled == VDSO_DISABLED)
		return 0;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (compat)
		addr = VDSO_HIGH_BASE;
	else {
		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	}

	if (compat_uses_vma || !compat) {
		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 *
		 * Make sure the vDSO gets into every core dump.
		 * Dumping its contents makes post-mortem fully
		 * interpretable later without matching up the same
		 * kernel and hardware config to see what PC values
		 * meant.
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
					      VM_ALWAYSDUMP,
					      vdso32_pages);

		if (ret)
			goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	up_write(&mm->mmap_sem);

	return ret;
}
int import_mm_struct_end(struct mm_struct *mm, struct task_struct *task)
{
	if (vdso_enabled != VDSO_ENABLED) {
		BUG_ON(!mm->context.vdso
		       && mm->context.vdso != (void *)VDSO_HIGH_BASE);
		return 0;
	}

	task_thread_info(task)->sysenter_return =
		VDSO32_SYMBOL(mm->context.vdso, SYSENTER_RETURN);

	return 0;
}
Example No. 6
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* __copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,		/* movl $...,%eax */
		__NR_ia32_rt_sigreturn,
		0x80cd,		/* int $0x80 */
		0
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
		err |= copy_siginfo_to_user32(&frame->info, info);

		/* Create the ucontext.  */
		if (cpu_has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
		put_user_ex(sas_ss_flags(regs->sp),
			    &frame->uc.uc_stack.ss_flags);
		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
					     regs, set->sig[0]);
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

		if (ka->sa.sa_flags & SA_RESTORER)
			restorer = ka->sa.sa_restorer;
		else if (current->mm->context.vdso)
			/* Return stub is in 32bit vsyscall page */
			restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
		else
			restorer = &frame->retcode;
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * Not actually used anymore, but left because some gdb
		 * versions need it.
		 */
		put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
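The ax/dx/cx assignments follow the i386 -mregparm=3 convention, under which the first three integer arguments are passed in eax, edx and ecx; they line up with the three parameters of an SA_SIGINFO handler in userspace. A minimal sketch of the matching userspace registration (handler and signal choice are arbitrary examples, not taken from the code above):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_sig;
static volatile long sender_pid;

/*
 * Three-argument form: with -mregparm=3 on i386 these parameters are
 * exactly what ia32_setup_rt_frame loaded into eax, edx and ecx.
 */
static void demo_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)ucontext;
	got_sig = sig;
	sender_pid = info->si_pid;
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = demo_handler;	/* three-argument handler */
	sa.sa_flags = SA_SIGINFO;	/* ask for siginfo/ucontext delivery */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	printf("handled signal %d from pid %ld\n", (int)got_sig, sender_pid);
	return 0;
}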
Example No. 7
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 /* popl %eax ; movl $...,%eax */
		__NR_ia32_sigreturn,
		0x80cd,		/* int $0x80 */
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_COMPAT_NSIG_WORDS > 1) {
		if (__copy_to_user(frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (ka->sa.sa_flags & SA_RESTORER) {
		restorer = ka->sa.sa_restorer;
	} else {
		/* Return stub is in 32bit vsyscall page */
		if (current->mm->context.vdso)
			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
						 sigreturn);
		else
			restorer = &frame->retcode;
	}

	put_user_try {
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * These are actually not used anymore, but left because some
		 * gdb versions depend on them as a marker.
		 */
		put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
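Here dx and cx are zeroed because the classic (non-SA_SIGINFO) handler receives only the signal number. A minimal userspace counterpart (hypothetical handler name, for illustration only):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_sig;

/* Classic one-argument handler: only the signal number (eax) arrives. */
static void demo_handler(int sig)
{
	got_sig = sig;
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = demo_handler;	/* no SA_SIGINFO: one-argument form */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	printf("handled signal %d\n", (int)got_sig);
	return 0;
}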
Example No. 8
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	struct vm_area_struct *vma;
	static struct page *no_pages[] = {NULL};

#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32))
		return x32_setup_additional_pages(bprm, uses_interp);
#endif

	if (vdso_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	down_write(&mm->mmap_sem);

	addr = get_unmapped_area(NULL, 0, vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES), 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	addr += VDSO_OFFSET(VDSO_PREV_PAGES);

	current->mm->context.vdso = (void *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm,
			addr,
			vdso32_size,
			VM_READ|VM_EXEC|
			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			vdso32_pages);

	if (ret)
		goto up_fail;

	vma = _install_special_mapping(mm,
			addr -  VDSO_OFFSET(VDSO_PREV_PAGES),
			VDSO_OFFSET(VDSO_PREV_PAGES),
			VM_READ,
			no_pages);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	ret = remap_pfn_range(vma,
		addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
		__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
		PAGE_SIZE,
		PAGE_READONLY);

	if (ret)
		goto up_fail;

#ifdef CONFIG_HPET_TIMER
	if (hpet_address) {
		ret = io_remap_pfn_range(vma,
			addr - VDSO_OFFSET(VDSO_HPET_PAGE),
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}
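The vvar (and optional HPET) pages mapped in front of the vDSO are what the vDSO's time functions read, letting calls such as clock_gettime() complete without entering the kernel on the fast path. A small userspace sketch, assuming a libc that routes the call through the vDSO's __vdso_clock_gettime (older glibc needs -lrt at link time):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/*
	 * When the vDSO exports __vdso_clock_gettime, the C library
	 * resolves this call to the mapped vDSO code, which reads the
	 * vvar (and, if configured, HPET) pages instead of trapping
	 * into the kernel for the common clocks.
	 */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}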
Example No. 9
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 */
static void __init fiddle_vdso(void)
{
	extern const char vdso32_default_start;
	u32 *mask = VDSO32_SYMBOL(&vdso32_default_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
}
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp,
				unsigned long map_address)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = map_address;
	int ret = 0;
	bool compat;
	unsigned long flags;

	if (vdso_enabled == VDSO_DISABLED && map_address == 0) {
		current->mm->context.vdso = NULL;
		return 0;
	}

	flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC | VM_MAYWRITE |
		mm->def_flags;

	ret = -ENOMEM;
	if (ub_memory_charge(mm, PAGE_SIZE, flags, NULL, UB_SOFT))
		goto err_charge;

	down_write(&mm->mmap_sem);

	/* Test compat mode once here, in case someone
	   changes it via sysctl */
	compat = (vdso_enabled == VDSO_COMPAT);

	map_compat_vdso(compat);

	if (!compat || map_address) {
		addr = get_unmapped_area_prot(NULL, addr, PAGE_SIZE, 0, 0, 1);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
	} else
		addr = VDSO_HIGH_BASE;

	current->mm->context.vdso = (void *)addr;

	if (compat_uses_vma || !compat || map_address) {
		struct page **pages = uts_prep_vdso_pages_locked(compat);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto up_fail;
		}

		/*
		 * MAYWRITE to allow gdb to COW and set breakpoints
		 */
		ret = install_special_mapping(mm, addr, PAGE_SIZE,
					      VM_READ|VM_EXEC|
					      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
					      pages);

		if (ret)
			goto up_fail;
	}

	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	if (ret < 0)
		ub_memory_uncharge(mm, PAGE_SIZE, flags, NULL);
err_charge:

	return ret;
}
Example No. 11
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
		err |= copy_siginfo_to_user32(&frame->info, info);

		
		if (cpu_has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
		put_user_ex(sas_ss_flags(regs->sp),
			    &frame->uc.uc_stack.ss_flags);
		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
					     regs, set->sig[0]);
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

		if (ka->sa.sa_flags & SA_RESTORER)
			restorer = ka->sa.sa_restorer;
		else
			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
						 rt_sigreturn);
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example No. 12
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 
		__NR_ia32_sigreturn,
		0x80cd,		
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_COMPAT_NSIG_WORDS > 1) {
		if (__copy_to_user(frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (ka->sa.sa_flags & SA_RESTORER) {
		restorer = ka->sa.sa_restorer;
	} else {
		
		if (current->mm->context.vdso)
			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
						 sigreturn);
		else
			restorer = &frame->retcode;
	}

	put_user_try {
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example No. 13
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* __copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u16 pad;
		u8  pad2;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
		0
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= __put_user(sig, &frame->sig);
	err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
	err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
	err |= copy_siginfo_to_user32(&frame->info, info);
	if (err)
		return -EFAULT;

	/* Create the ucontext.  */
	if (cpu_has_xsave)
		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
	else
		err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				     regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;
	else
		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
					 rt_sigreturn);
	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);

	/*
	 * Not actually used anymore, but left because some gdb
	 * versions need it.
	 */
	err |= __copy_to_user(frame->retcode, &code, 8);
	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

#if DEBUG_SIG
	printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
	       current->comm, current->pid, frame, regs->ip, frame->pretcode);
#endif

	return 0;
}
Example No. 14
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	int usig;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err |= __put_user(usig, &frame->sig);
	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);
	if (err)
		goto give_sigsegv;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
				regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  */
	restorer = (void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;
	err |= __put_user(restorer, &frame->pretcode);

	/*
	 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
	err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ka->sa.sa_handler;
	regs->ax = (unsigned long)usig;
	regs->dx = (unsigned long)&frame->info;
	regs->cx = (unsigned long)&frame->uc;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
Example No. 15
static int
setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
	    struct pt_regs *regs)
{
	struct sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	int usig;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	err = __put_user(usig, &frame->sig);
	if (err)
		goto give_sigsegv;

	err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
	if (err)
		goto give_sigsegv;

	if (_NSIG_WORDS > 1) {
		err = __copy_to_user(&frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
		if (err)
			goto give_sigsegv;
	}

	if (current->mm->context.vdso)
		restorer = (void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
	else
		restorer = (void __user *)&frame->retcode;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;

	/* Set up to return from userspace.  */
	err |= __put_user(restorer, &frame->pretcode);

	/*
	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
	 *
	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
	 * reasons and because gdb uses it as a signature to notice
	 * signal handler stack frames.
	 */
	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
	err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));

	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long)frame;
	regs->ip = (unsigned long)ka->sa.sa_handler;
	regs->ax = (unsigned long)sig;
	regs->dx = 0;
	regs->cx = 0;

	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}