Example #1
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned int saved_syscallno = regs->syscallno;

	/* Do the secure computing check first; failures should be fast. */
	if (secure_computing(regs->syscallno) == -1)
		return RET_SKIP_SYSCALL_TRACE;

	if (test_thread_flag_relaxed(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (IS_SKIP_SYSCALL(regs->syscallno)) {
		/*
		 * RESTRICTION: we can't modify the return value of a
		 * user-issued syscall(-1) here. To ease this restriction,
		 * we treat whatever value is in x0 as the return value,
		 * but this might result in a bogus value being returned.
		 */
		/*
		 * NOTE: syscallno may also be set to -1 if a fatal signal is
		 * detected in tracehook_report_syscall_entry(), but since
		 * the value placed in x0 is not used in that case, we can
		 * ignore it here.
		 */
		if (!test_thread_flag_relaxed(TIF_SYSCALL_TRACE) ||
				(IS_SKIP_SYSCALL(saved_syscallno)))
			regs->regs[0] = -ENOSYS;
	}

	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
		regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
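
A user-space sketch (illustration only, not part of the kernel example above) of the case the RESTRICTION comment describes: a program deliberately issuing syscall(-1). The kernel skips the call, and the glibc wrapper normally reports it back as ENOSYS.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* Deliberately invalid syscall number: the kernel runs no syscall. */
	long ret = syscall(-1);

	if (ret == -1)
		printf("syscall(-1) -> errno=%d (%s)\n", errno, strerror(errno));
	return 0;
}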
Example #2
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
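
seccomp_send_sigsys() fills in the siginfo that a SECCOMP_RET_TRAP filter delivers. As a user-space illustration (not kernel code; exposure of si_syscall/si_errno depends on the libc), a process could catch the resulting SIGSYS roughly like this:

/*
 * Illustrative SIGSYS handler; assumes a libc that exposes si_syscall and
 * si_errno for SIGSYS. fprintf() is not async-signal-safe and is used here
 * only to keep the sketch short.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef SYS_SECCOMP
#define SYS_SECCOMP 1		/* si_code set by seccomp_send_sigsys() */
#endif

static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	if (info->si_code != SYS_SECCOMP)
		return;
	/* si_syscall and si_errno carry the values filled in by the kernel. */
	fprintf(stderr, "trapped syscall %d, filter reason %d\n",
		info->si_syscall, info->si_errno);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigsys_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSYS, &sa, NULL);
	/* ... install a SECCOMP_RET_TRAP filter, then run the workload ... */
	return 0;
}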
Example #3
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
		regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
Example #4
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
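
The endianness caveat matters because each args[i] copied above is 64 bits wide while classic BPF only loads 32-bit words, so which half sits at the lower offset depends on the architecture. A user-space sketch of how a filter author might deal with that (the ARG_LO32 macro is invented for illustration, and the byte-order check assumes GCC/Clang predefined macros):

/*
 * Illustration only: pick the low 32-bit half of a 64-bit seccomp_data
 * args[] slot, which is exactly the detail populate_seccomp_data()
 * leaves to the BPF author.
 */
#include <stddef.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/types.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define ARG_LO32(i)	(offsetof(struct seccomp_data, args[i]))
#else
# define ARG_LO32(i)	(offsetof(struct seccomp_data, args[i]) + sizeof(__u32))
#endif

/* BPF instruction loading the low 32 bits of the second syscall argument. */
static const struct sock_filter load_arg1_lo =
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, ARG_LO32(1));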
Example #5
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (tracehook_report_syscall_entry(regs))
			return -1;
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch();
		syscall_get_arguments(current, regs, 0, 6, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}
Example #6
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	/* Do the secure computing check first; failures should be fast. */
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag_relaxed(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag_relaxed(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(syscall_get_arch(), regs->syscallno,
		regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
Example #7
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch(task, regs);

	/* Unroll syscall_get_args to help gcc on arm. */
	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
	syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
	syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
	syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
	syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
	syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);

	sd->instruction_pointer = KSTK_EIP(task);
}
Example #8
/**
 * seccomp_bpf_load: checks and loads the 32-bit word at the requested offset
 * @off: offset into struct seccomp_data to load from
 *
 * Returns the requested 32 bits of data.
 * seccomp_chk_filter() should ensure that @off is 32-bit aligned
 * and not out of bounds.  Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
	struct pt_regs *regs = task_pt_regs(current);
	if (off == BPF_DATA(nr))
		return syscall_get_nr(current, regs);
	if (off == BPF_DATA(arch))
		return syscall_get_arch(current, regs);
	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
		unsigned long value;
		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
		int index = !!(off % sizeof(u64));
		syscall_get_arguments(current, regs, arg, 1, &value);
		return get_u32(value, index);
	}
	if (off == BPF_DATA(instruction_pointer))
		return get_u32(KSTK_EIP(current), 0);
	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
		return get_u32(KSTK_EIP(current), 1);
	/* seccomp_chk_filter should make this impossible. */
	BUG();
}
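
Each load that seccomp_bpf_load() services originates from a BPF_LD|BPF_W|BPF_ABS instruction in an installed filter, with the offsets matching struct seccomp_data. A user-space sketch of such a filter (install_getpid_trap_filter is a hypothetical helper name, not part of any kernel API):

/*
 * Illustration only: a classic-BPF seccomp filter that loads
 * seccomp_data->nr, traps getpid() and allows everything else.
 */
#include <stddef.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int install_getpid_trap_filter(void)
{
	struct sock_filter filter[] = {
		/* Load seccomp_data->nr (the syscall number). */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* Trap getpid(), allow everything else. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}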
Example #9
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	long ret = 0;
	user_exit();

	if (secure_computing(syscall) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall_get_arch(),
			    syscall,
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
	return syscall;
}