/*
 * AUTOFILLER
 * In simple cases in which extracting an event is just a matter of moving the
 * arguments to the buffer, this filler can be used instead of writing a
 * filler function.
 * The arguments to extract are specified in g_ppm_events.
 *
 * Returns PPM_SUCCESS (via add_sentinel()) or the first val_to_ring() error.
 */
int f_sys_autofill(struct event_filler_arguments *args, const struct ppm_event_entry *evinfo)
{
	int res;
	unsigned long val;
	u32 j;
	int64_t retval;

	ASSERT(evinfo->n_autofill_args <= PPM_MAX_AUTOFILL_ARGS);

	/* Push each configured argument to the ring, in table order. */
	for (j = 0; j < evinfo->n_autofill_args; j++) {
		if (evinfo->autofill_args[j].id >= 0) {
			/* A non-negative id is a syscall argument index. */
#ifndef __NR_socketcall
			/*
			 * Regular argument
			 */
			syscall_get_arguments(current, args->regs, evinfo->autofill_args[j].id, 1, &val);
#else
			if (evinfo->paramtype == APT_SOCK) {
				/*
				 * On architectures with socketcall(), socket
				 * arguments arrive pre-unpacked in
				 * args->socketcall_args rather than in the
				 * syscall registers.
				 */
				val = args->socketcall_args[evinfo->autofill_args[j].id];
			} else {
				/*
				 * Regular argument
				 */
				syscall_get_arguments(current, args->regs, evinfo->autofill_args[j].id, 1, &val);
			}
#endif
			res = val_to_ring(args, val, 0, true);
			if (unlikely(res != PPM_SUCCESS))
				return res;
		} else if (evinfo->autofill_args[j].id == AF_ID_RETVAL) {
			/*
			 * Return value
			 */
			retval = (int64_t)(long)syscall_get_return_value(current, args->regs);
			res = val_to_ring(args, retval, 0, false);
			if (unlikely(res != PPM_SUCCESS))
				return res;
		} else if (evinfo->autofill_args[j].id == AF_ID_USEDEFAULT) {
			/*
			 * Default Value
			 */
			res = val_to_ring(args, evinfo->autofill_args[j].default_val, 0, false);
			if (unlikely(res != PPM_SUCCESS))
				return res;
		} else {
			/* Unknown autofill id: table is corrupt. */
			ASSERT(false);
		}
	}

	return add_sentinel(args);
}
static void prof_syscall_enter(struct pt_regs *regs, long id) { struct syscall_trace_enter *rec; struct syscall_metadata *sys_data; int syscall_nr; int size; syscall_nr = syscall_get_nr(current, regs); if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; /* get the size after alignment with the u32 buffer size field */ size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); do { char raw_data[size]; /* zero the dead bytes from align to not leak stack to user */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; rec = (struct syscall_trace_enter *) raw_data; tracing_generic_entry_update(&rec->ent, 0, 0); rec->ent.type = sys_data->enter_id; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); } while(0); }
/*
 * Snapshot the syscall state of @target into @info.
 *
 * Returns 0 on success (info->data.nr == -1 means "not in a syscall"),
 * or -EAGAIN if the task's register state is unavailable.
 */
static int collect_syscall(struct task_struct *target, struct syscall_info *info)
{
	struct pt_regs *regs;

	/* Pin the task's stack so regs stay valid while we read them. */
	if (!try_get_task_stack(target)) {
		/* Task has no stack, so the task isn't in a syscall. */
		memset(info, 0, sizeof(*info));
		info->data.nr = -1;
		return 0;
	}

	regs = task_pt_regs(target);
	if (unlikely(!regs)) {
		put_task_stack(target);
		return -EAGAIN;
	}

	info->sp = user_stack_pointer(regs);
	info->data.instruction_pointer = instruction_pointer(regs);

	info->data.nr = syscall_get_nr(target, regs);
	/* Only fetch arguments when the task is actually inside a syscall. */
	if (info->data.nr != -1L)
		syscall_get_arguments(target, regs,
				      (unsigned long *)&info->data.args[0]);

	put_task_stack(target);
	return 0;
}
/*
 * Ftrace handler for syscall entry: reserves a ring-buffer event sized for
 * this syscall's argument count, fills it, and commits it unless the current
 * filter discards it.
 */
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	/* -1 means "not in a syscall"; also guards the bitmap index below. */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* Record is the fixed header plus one word per syscall argument. */
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	/* Commit only if the event survives the current trace filter. */
	if (!filter_current_check_discard(buffer, sys_data->enter_event, entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
/* Log a system call that takes one filename as its first argument
 * (or second, if is_at is set).
 *
 * The args struct mirrors the register argument layout: each member is a
 * union padded to unsigned long so that syscall_get_arguments() can write
 * raw argument words straight into it. For non-*at syscalls we skip the
 * dirfd slot by offsetting the destination pointer by one word.
 */
static void handle_name_arg(struct filemon *filemon, char op, is_at_enum is_at, struct pt_regs *regs)
{
	struct {
		union {
			int val;
			unsigned long padding;
		} dirfd;
		union {
			const char * __user val;
			unsigned long padding;
		} ufname;
	} args = { { 0 }, { 0 } };
	FILEMON_GETNAME_TYPE fname;

	syscall_get_arguments(current, regs,
			      0, /* first argument number to get */
			      is_at ? 2 : 1, /* number of args */
			      ((unsigned long *)&args) + (is_at ? 0 : 1));

	/* Only plain AT_FDCWD *at calls are logged; fd-relative ones are skipped. */
	if (is_at && args.dirfd.val != AT_FDCWD)
		return;

	fname = FILEMON_GETNAME(args.ufname.val);
	if (IS_ERR(fname)) {
#ifdef FILEMON_DEBUG
		printk(KERN_WARNING "filemon: bad but acceptable filename?(%c:errno: %li)\n", op, PTR_ERR(fname));
#endif
		return;
	}

	filemon_log(filemon, op, "%s", FILEMON_GETNAME_NAME(fname) ? FILEMON_GETNAME_NAME(fname) : "null");
	FILEMON_PUTNAME(fname);
}
/* ftrace_syscall_enter_state - build state for filter matching
 *
 * @buf: buffer to populate with current task state for matching
 * @available: size available for use in the buffer.
 * @entry: optional pointer to the trace_entry member of the state.
 *
 * Returns 0 on success and non-zero otherwise.
 * If @entry is NULL, it will be ignored.
 */
static int ftrace_syscall_enter_state(u8 *buf, size_t available, struct trace_entry **entry)
{
	struct syscall_trace_enter *sys_enter;
	struct syscall_metadata *sys_data;
	int size;
	int syscall_nr;
	struct pt_regs *regs = task_pt_regs(current);

	syscall_nr = syscall_get_nr(current, regs);
	/* -1 means the task is not currently inside a syscall. */
	if (syscall_nr < 0)
		return -EINVAL;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return -EINVAL;

	/* Determine the actual size needed. */
	size = ftrace_syscall_enter_state_size(sys_data->nb_args);
	/* Caller is required to supply a big-enough buffer. */
	BUG_ON(size > available);

	sys_enter = (struct syscall_trace_enter *)buf;

	/* Populating the struct trace_sys_enter is left to the caller, but
	 * a pointer is returned to encourage opacity.
	 */
	if (entry)
		*entry = &sys_enter->ent;

	sys_enter->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, sys_enter->args);
	return 0;
}
static void prof_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; unsigned long flags; char *raw_data; int syscall_nr; int size; int cpu; syscall_nr = syscall_get_nr(current, regs); if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; /* get the size after alignment with the u32 buffer size field */ size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, "profile buffer not large enough")) return; /* Protect the per cpu buffer, begin the rcu read side */ local_irq_save(flags); cpu = smp_processor_id(); if (in_nmi()) raw_data = rcu_dereference(trace_profile_buf_nmi); else raw_data = rcu_dereference(trace_profile_buf); if (!raw_data) goto end; raw_data = per_cpu_ptr(raw_data, cpu); /* zero the dead bytes from align to not leak stack to user */ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; rec = (struct syscall_trace_enter *) raw_data; tracing_generic_entry_update(&rec->ent, 0, 0); rec->ent.type = sys_data->enter_id; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); perf_tp_event(sys_data->enter_id, 0, 1, rec, size); end: local_irq_restore(flags); }
/*
 * Record a syscall the tracer has no dedicated probe for: grab the raw
 * argument registers and hand them to the generic "unknown" event probe,
 * choosing the compat or native variant based on the calling task.
 */
static void syscall_entry_unknown(struct lttng_event *event, struct pt_regs *regs, unsigned int id)
{
	unsigned long sc_args[UNKNOWN_SYSCALL_NRARGS];

	/* Capture all argument words; their meaning is unknown here. */
	syscall_get_arguments(current, regs, 0, UNKNOWN_SYSCALL_NRARGS, sc_args);

	if (likely(!is_compat_task()))
		__event_probe__sys_unknown(event, id, sc_args);
	else
		__event_probe__compat_sys_unknown(event, id, sc_args);
}
/* Log a system call that takes two filenames as its first two arguments
 * (or second and fourth arguments, if is_at is set).
 *
 * args[] mirrors the register layout (each slot padded to unsigned long),
 * so syscall_get_arguments() can dump up to four raw argument words into it;
 * which slots hold fds vs. filename pointers depends on is_at.
 */
static void handle_2name_arg(struct filemon *filemon, char op, is_at_enum is_at, struct pt_regs *regs)
{
	union {
		int intval;
		const char * __user charval;
		unsigned long padding;
	} args[4] = { { 0 }, { 0 }, { 0 }, { 0 } };
	FILEMON_GETNAME_TYPE fnames[2];

	syscall_get_arguments(current, regs,
			      0, /* first argument number to get */
			      is_at ? 4 : 2, /* number of args */
			      (unsigned long *)&args);

	switch (is_at) {
	case is_at_true:
		/* In NetBSD filemon, linkat doesn't get logged. In Juniper
		 * filemon, it gets logged just as link, without regard to the
		 * fd args. We adopt the Juniper behavior.
		 */
		if (args[0].intval != AT_FDCWD)
			return;
		if (args[2].intval != AT_FDCWD)
			return;
		/* FALLTHRU */
	case is_at_ignore:
		/* *at layout: (fd0, name0, fd1, name1). */
		fnames[0] = FILEMON_GETNAME(args[1].charval);
		fnames[1] = FILEMON_GETNAME(args[3].charval);
		break;
	case is_at_false:
		/* Plain layout: (name0, name1). */
		fnames[0] = FILEMON_GETNAME(args[0].charval);
		fnames[1] = FILEMON_GETNAME(args[1].charval);
		break;
	default:
		BUG();
	}

	if (IS_ERR(fnames[0]) || IS_ERR(fnames[1])) {
#ifdef FILEMON_DEBUG
		printk(KERN_WARNING "filemon: bad but acceptable filename? (%c:errno: %li/%li)\n", op, PTR_ERR(fnames[0]), PTR_ERR(fnames[1]));
#endif
		return;
	}

	filemon_log(filemon, op, "'%s' '%s'",
		    FILEMON_GETNAME_NAME(fnames[0]) ? FILEMON_GETNAME_NAME(fnames[0]) : "null",
		    FILEMON_GETNAME_NAME(fnames[1]) ? FILEMON_GETNAME_NAME(fnames[1]) : "null");
	FILEMON_PUTNAME(fnames[0]);
	FILEMON_PUTNAME(fnames[1]);
}
/* Log an open(2) system call.
 *
 * Decodes (dirfd,) filename, flags from the registers; the op parameter is
 * ignored because the logged op is derived from the access mode ('R', 'W',
 * or both lines for O_RDWR).
 */
static void handle_open(struct filemon *filemon,
			char op __maybe_unused, /* We substitute 'R', 'W', or both */
			is_at_enum is_at, struct pt_regs *regs)
{
	struct {
		union {
			int val;
			unsigned long padding;
		} dirfd;
		union {
			const char * __user val;
			unsigned long padding;
		} ufname;
		unsigned long flags;
	} args = { { 0 }, { 0 } };
	FILEMON_GETNAME_TYPE fname;
	int accmode;

	/* For plain open(), skip the dirfd slot by offsetting one word. */
	syscall_get_arguments(current, regs,
			      0, /* first argument number to get */
			      is_at ? 3 : 2, /* number of args */
			      ((unsigned long *)&args) + (is_at ? 0 : 1));

	/* Only AT_FDCWD openat() calls are logged. */
	if (is_at && args.dirfd.val != AT_FDCWD)
		return;

	fname = FILEMON_GETNAME(args.ufname.val);
	if (IS_ERR(fname)) {
#ifdef FILEMON_DEBUG
		printk(KERN_WARNING "filemon: can't happen: bad but acceptable filename at %p: (WR:errno: %li)\n", args.ufname.val, PTR_ERR(fname));
#endif
		return;
	}

	accmode = (args.flags & O_ACCMODE);

	/* Changed in filemon log format 4: a file opened read/write emits
	 * two lines.
	 */
	if (accmode == O_RDONLY || accmode == O_RDWR)
		filemon_log(filemon, 'R', "%s", FILEMON_GETNAME_NAME(fname) ? FILEMON_GETNAME_NAME(fname) : "null");
	if (accmode == O_WRONLY || accmode == O_RDWR)
		filemon_log(filemon, 'W', "%s", FILEMON_GETNAME_NAME(fname) ? FILEMON_GETNAME_NAME(fname) : "null");
	FILEMON_PUTNAME(fname);
}
/*
 * Per-trace-array ftrace handler for syscall entry. Looks up the event file
 * registered for this syscall in the trace_array, honors soft-disable and
 * triggers, then records nr + arguments into the array's ring buffer.
 */
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct ftrace_event_file *ftrace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	/* Bounds-check before using syscall_nr as an array index. */
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* Record is the fixed header plus one word per syscall argument. */
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, irq_flags, pc);
}
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long sc_args[6];
	int i;

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();

	/* Fetch all six argument words at once, then copy them over. */
	syscall_get_arguments(task, regs, 0, 6, sc_args);
	for (i = 0; i < 6; i++)
		sd->args[i] = sc_args[i];

	sd->instruction_pointer = KSTK_EIP(task);
}
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * Runs the ptrace, seccomp, tracepoint, and audit hooks in order, each of
 * which may observe or rewrite the syscall number. Returns the (possibly
 * updated) syscall number, or -1 if the tracer aborted the syscall.
 *
 * NOTE(review): the regs->regs[2..7] accesses look MIPS-specific (syscall nr
 * in $v0, args in $a0..$a3) — confirm against the target architecture.
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		/* Non-zero means the tracer wants the syscall aborted. */
		if (tracehook_report_syscall_entry(regs))
			return -1;
		/* The tracer may have rewritten the syscall number. */
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch();
		syscall_get_arguments(current, regs, 0, 6, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		/* seccomp may also have changed the syscall number. */
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}
/*
 * Snapshot @target's current syscall state: stack pointer, program counter,
 * syscall number, and (if the task is inside a syscall) up to @maxargs
 * argument words. Returns 0 on success, -EAGAIN if register state is
 * unavailable.
 */
static int collect_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc)
{
	struct pt_regs *regs = task_pt_regs(target);

	/* Without saved registers there is nothing to report. */
	if (unlikely(!regs))
		return -EAGAIN;

	*callno = syscall_get_nr(target, regs);
	*pc = instruction_pointer(regs);
	*sp = user_stack_pointer(regs);

	/* Arguments only exist while the task is inside a syscall. */
	if (maxargs > 0 && *callno != -1L)
		syscall_get_arguments(target, regs, 0, maxargs, args);

	return 0;
}
/*
 * Per-trace-array ftrace handler for syscall entry (bitmap-enabled variant):
 * records syscall nr + arguments into the array's ring buffer if the syscall
 * is enabled and the event passes the current filter.
 */
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int pc;
	int syscall_nr;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	/* Bounds-check before using syscall_nr as a bitmap index. */
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* Record is the fixed header plus one word per syscall argument. */
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	/* Commit only if the event survives the current trace filter. */
	if (!filter_current_check_discard(buffer, sys_data->enter_event, entry, event))
		trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
}
/**
 * bpf_load: checks and returns a pointer to the requested offset
 * @off: offset into struct seccomp_data to load from
 *
 * Returns the requested 32-bits of data.
 * seccomp_chk_filter() should assure that @off is 32-bit aligned
 * and not out of bounds. Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
	struct pt_regs *regs = task_pt_regs(current);

	if (off == BPF_DATA(nr))
		return syscall_get_nr(current, regs);
	if (off == BPF_DATA(arch))
		return syscall_get_arch(current, regs);
	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
		unsigned long value;
		/* Each args[] slot is a u64; arg = which slot ... */
		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
		/* ... and index = which 32-bit half of that slot. */
		int index = !!(off % sizeof(u64));

		syscall_get_arguments(current, regs, arg, 1, &value);
		return get_u32(value, index);
	}
	if (off == BPF_DATA(instruction_pointer))
		return get_u32(KSTK_EIP(current), 0);
	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
		return get_u32(KSTK_EIP(current), 1);
	/* seccomp_chk_filter should make this impossible. */
	BUG();
}
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; struct hlist_head *head; int syscall_nr; int rctx; int size; syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0) return; if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); if (!sys_data) return; /* get the size after alignment with the u32 buffer size field */ size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "perf buffer not large enough")) return; rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, sys_data->enter_event->event.type, regs, &rctx); if (!rec) return; rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); head = this_cpu_ptr(sys_data->enter_event->perf_events); perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); }
/*
 * Perf handler for syscall entry (newer buffer API): bails out early when no
 * perf event is attached on this cpu, then allocates a perf trace buffer,
 * records syscall nr + arguments, and submits it.
 */
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	/* Bounds-check before using syscall_nr as a bitmap index. */
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	/* Nothing is listening on this cpu; skip the buffer work entirely. */
	if (hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_trace_buf_submit(rec, size, rctx, sys_data->enter_event->event.type, 1, regs, head, NULL);
}
/* Log a system call that takes one integer as its first argument
 * (or second, if is_at is set).
 *
 * As with the other filemon handlers, the args struct mirrors the register
 * layout (each member padded to unsigned long) so syscall_get_arguments()
 * can write straight into it; for non-*at calls the dirfd slot is skipped
 * by offsetting the destination pointer one word.
 */
static void handle_int_arg(struct filemon *filemon, char op, is_at_enum is_at, struct pt_regs *regs)
{
	struct {
		union {
			int val;
			unsigned long padding;
		} dirfd;
		union {
			long val;
			unsigned long padding;
		} arg;
	} args = { { 0 }, { 0 } };

	syscall_get_arguments(current, regs,
			      0, /* first argument number to get */
			      is_at ? 2 : 1, /* number of args */
			      ((unsigned long *)&args) + (is_at ? 0 : 1));

	/* Only AT_FDCWD *at calls are logged. */
	if (is_at && args.dirfd.val != AT_FDCWD)
		return;

	filemon_log(filemon, op, "%li", args.arg.val);
}
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 *
 * Fills @sd with the current task's syscall number, architecture, the six
 * argument words, and the instruction pointer.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch(task, regs);

	/* Unroll syscall_get_args to help gcc on arm. */
	/* (Intentionally NOT a loop — keep the unroll.) */
	syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
	syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
	syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
	syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
	syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
	syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);

	sd->instruction_pointer = KSTK_EIP(task);
}
/* Handle symlinkat.
 *
 * This differs from handle_2name_arg in that the latter expects there
 * to be two fd arguments for both filenames, while symlinkat only has
 * one fd argument: (target, newdirfd, linkpath).
 */
static void handle_symlinkat(struct filemon *filemon, char op,
			     is_at_enum is_at __maybe_unused, struct pt_regs *regs)
{
	union {
		int intval;
		const char * __user charval;
		unsigned long padding;
	} args[3] = { { 0 }, { 0 }, { 0 } };
	FILEMON_GETNAME_TYPE fnames[2];

	syscall_get_arguments(current, regs,
			      0, /* first argument number to get */
			      3, /* number of args */
			      (unsigned long *)&args);

	/* args[1] is newdirfd; only AT_FDCWD calls are logged. */
	if (args[1].intval != AT_FDCWD)
		return;

	fnames[0] = FILEMON_GETNAME(args[0].charval);
	fnames[1] = FILEMON_GETNAME(args[2].charval);
	if (IS_ERR(fnames[0]) || IS_ERR(fnames[1])) {
#ifdef FILEMON_DEBUG
		printk(KERN_WARNING "filemon: bad but acceptable filename? (%c:errno: %li/%li)\n", op, PTR_ERR(fnames[0]), PTR_ERR(fnames[1]));
#endif
		return;
	}

	filemon_log(filemon, op, "'%s' '%s'",
		    FILEMON_GETNAME_NAME(fnames[0]) ? FILEMON_GETNAME_NAME(fnames[0]) : "null",
		    FILEMON_GETNAME_NAME(fnames[1]) ? FILEMON_GETNAME_NAME(fnames[1]) : "null");
	FILEMON_PUTNAME(fnames[0]);
	FILEMON_PUTNAME(fnames[1]);
}
/*
 * Parses the list of buffers of a xreadv or xwritev call, and pushes the size
 * (and optionally the data) to the ring.
 *
 * The compat iovec array is first copied into args->str_storage; the merged
 * buffer contents (if PRB_FLAG_PUSH_DATA) are then appended after it in the
 * same storage area.
 */
int32_t compat_parse_readv_writev_bufs(struct event_filler_arguments *args, const struct compat_iovec __user *iovsrc, unsigned long iovcnt, int64_t retval, int flags)
{
	int32_t res;
	const struct compat_iovec *iov;
	u32 copylen;
	u32 j;
	u64 size = 0;
	unsigned long bufsize;
	char *targetbuf = args->str_storage;
	u32 targetbuflen = STR_STORAGE_SIZE;
	unsigned long val;
	u32 notcopied_len;
	compat_size_t tocopy_len;

	copylen = iovcnt * sizeof(struct compat_iovec);

	if (unlikely(copylen >= STR_STORAGE_SIZE))
		return PPM_FAILURE_BUFFER_FULL;

	if (unlikely(ppm_copy_from_user(args->str_storage, iovsrc, copylen)))
		return PPM_FAILURE_INVALID_USER_MEMORY;

	iov = (const struct compat_iovec *)(args->str_storage);

	/* Data will be merged after the iovec array copy. */
	targetbuf += copylen;
	targetbuflen -= copylen;

	/*
	 * Size
	 */
	if (flags & PRB_FLAG_PUSH_SIZE) {
		for (j = 0; j < iovcnt; j++)
			size += iov[j].iov_len;

		/*
		 * Size is the total size of the buffers provided by the user. The number of
		 * received bytes can be smaller
		 *
		 * NOTE(review): 'size' (u64) vs 'retval' (int64_t) is a
		 * signed/unsigned comparison; retval is not known to be
		 * non-negative on this path — confirm callers never pass a
		 * negative retval with PRB_FLAG_PUSH_SIZE on a read.
		 */
		if ((flags & PRB_FLAG_IS_WRITE) == 0)
			if (size > retval)
				size = retval;

		res = val_to_ring(args, size, 0, false, 0);
		if (unlikely(res != PPM_SUCCESS))
			return res;
	}

	/*
	 * data
	 */
	if (flags & PRB_FLAG_PUSH_DATA) {
		if (retval > 0 && iovcnt > 0) {
			/*
			 * Retrieve the FD. It will be used for dynamic snaplen calculation.
			 */
			syscall_get_arguments(current, args->regs, 0, 1, &val);
			args->fd = (int)val;

			/*
			 * Merge the buffers
			 */
			bufsize = 0;

			for (j = 0; j < iovcnt; j++) {
				if ((flags & PRB_FLAG_IS_WRITE) == 0) {
					/* Read path: never copy past retval bytes. */
					if (bufsize >= retval) {
						ASSERT(bufsize >= retval);

						/*
						 * Copied all the data even if we haven't reached the
						 * end of the buffer.
						 * Copy must stop here.
						 */
						break;
					}

					tocopy_len = min(iov[j].iov_len, (compat_size_t)((size_t)retval - bufsize));
					tocopy_len = min(tocopy_len, (compat_size_t)(targetbuflen - bufsize - 1));
				} else {
					/* Write path: bounded only by the storage left. */
					tocopy_len = min(iov[j].iov_len, (compat_size_t)(targetbuflen - bufsize - 1));
				}

				notcopied_len = (int)ppm_copy_from_user(targetbuf + bufsize, compat_ptr(iov[j].iov_base), tocopy_len);
				if (unlikely(notcopied_len != 0)) {
					/*
					 * This means we had a page fault. Skip this event.
					 */
					return PPM_FAILURE_INVALID_USER_MEMORY;
				}

				bufsize += tocopy_len;

				if (tocopy_len != iov[j].iov_len) {
					/*
					 * No space left in the args->str_storage buffer.
					 * Copy must stop here.
					 */
					break;
				}
			}

			args->enforce_snaplen = true;

			res = val_to_ring(args, (unsigned long)targetbuf, bufsize, false, 0);
			if (unlikely(res != PPM_SUCCESS))
				return res;
		} else {
			/* Nothing transferred: push an empty data argument. */
			res = val_to_ring(args, 0, 0, false, 0);
			if (unlikely(res != PPM_SUCCESS))
				return res;
		}
	}

	return PPM_SUCCESS;
}
/*
 * LTTng syscall-entry tracepoint probe: selects the native or compat syscall
 * table, falls back to the "unknown syscall" event when the id is out of
 * range or has no registered event, and otherwise dispatches to the
 * per-syscall probe with the right number of unpacked arguments.
 */
void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
{
	struct lttng_channel *chan = __data;
	struct lttng_event *event, *unknown_event;
	const struct trace_syscall_entry *table, *entry;
	size_t table_len;

	/* Pick the table matching the calling task's ABI. */
	if (unlikely(is_compat_task())) {
		table = compat_sc_table;
		table_len = ARRAY_SIZE(compat_sc_table);
		unknown_event = chan->sc_compat_unknown;
	} else {
		table = sc_table;
		table_len = ARRAY_SIZE(sc_table);
		unknown_event = chan->sc_unknown;
	}
	if (unlikely(id >= table_len)) {
		syscall_entry_unknown(unknown_event, regs, id);
		return;
	}
	if (unlikely(is_compat_task()))
		event = chan->compat_sc_table[id];
	else
		event = chan->sc_table[id];
	if (unlikely(!event)) {
		/* No event registered for this syscall on this channel. */
		syscall_entry_unknown(unknown_event, regs, id);
		return;
	}
	entry = &table[id];
	WARN_ON_ONCE(!entry);

	/* Cast entry->func to the arity-matching signature and call it. */
	switch (entry->nrargs) {
	case 0:
	{
		void (*fptr)(void *__data) = entry->func;

		fptr(event);
		break;
	}
	case 1:
	{
		void (*fptr)(void *__data, unsigned long arg0) = entry->func;
		unsigned long args[1];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0]);
		break;
	}
	case 2:
	{
		void (*fptr)(void *__data, unsigned long arg0, unsigned long arg1) = entry->func;
		unsigned long args[2];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1]);
		break;
	}
	case 3:
	{
		void (*fptr)(void *__data, unsigned long arg0, unsigned long arg1, unsigned long arg2) = entry->func;
		unsigned long args[3];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2]);
		break;
	}
	case 4:
	{
		void (*fptr)(void *__data, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3) = entry->func;
		unsigned long args[4];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2], args[3]);
		break;
	}
	case 5:
	{
		void (*fptr)(void *__data, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) = entry->func;
		unsigned long args[5];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2], args[3], args[4]);
		break;
	}
	case 6:
	{
		void (*fptr)(void *__data, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) = entry->func;
		unsigned long args[6];

		syscall_get_arguments(current, regs, 0, entry->nrargs, args);
		fptr(event, args[0], args[1], args[2], args[3], args[4], args[5]);
		break;
	}
	default:
		/* Syscalls with more than 6 args are not dispatched. */
		break;
	}
}