/*
 * Lazily allocate the interposition shadow table and record the native
 * handler of every resident system call in it.  Entries whose syscall is
 * loadable-but-absent, or whose handler is already our DTrace trampoline,
 * are left untouched so we never capture our own interposer.
 */
static void
systrace_init(struct sysent *actual, systrace_sysent_t **interposed)
{
	systrace_sysent_t *table = *interposed;
	int sysnum;

	if (table == NULL) {
		/* First call: zero-filled so untouched slots read as NULL. */
		table = kmem_zalloc(sizeof (systrace_sysent_t) * NSYSCALL,
		    KM_SLEEP);
		*interposed = table;
	}

	for (sysnum = 0; sysnum < NSYSCALL; sysnum++) {
		struct sysent *ent = &actual[sysnum];
		systrace_sysent_t *shadow = &table[sysnum];

		/* Loadable syscall that is not currently loaded: skip. */
		if (LOADABLE_SYSCALL(ent) && !LOADED_SYSCALL(ent))
			continue;

		/* Already interposed: don't record our own trampoline. */
		if (ent->sy_callc == dtrace_systrace_syscall)
			continue;
#ifdef _SYSCALL32_IMPL
		if (ent->sy_callc == dtrace_systrace_syscall32)
			continue;
#endif

		shadow->stsy_underlying = ent->sy_callc;
	}
}
	/*
	 * "Decode" rv for use in the call to dtrace_probe(): fold the raw
	 * (rval, rv[]) pair from the syscall into the two 64-bit values
	 * reported by the return probe.
	 */
	if (rval == ERESTART) {
		/*
		 * System call will be reissued in user mode. Make DTrace
		 * report a -1 return.
		 */
		munged_rv0 = -1LL;
		munged_rv1 = -1LL;
	} else if (rval != EJUSTRETURN) {
		if (rval) {
			/* Mimic what libc will do. */
			munged_rv0 = -1LL;
			munged_rv1 = -1LL;
		} else {
			/*
			 * Success: widen rv[] per the syscall's declared
			 * return type.
			 */
			switch (sy->stsy_return_type) {
			case _SYSCALL_RET_INT_T:
				munged_rv0 = rv[0];
				munged_rv1 = rv[1];
				break;
			case _SYSCALL_RET_UINT_T:
				/* Zero-extend rather than sign-extend. */
				munged_rv0 = ((u_int)rv[0]);
				munged_rv1 = ((u_int)rv[1]);
				break;
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_UINT64_T:
				/* 64-bit result spans both rv[] slots. */
				munged_rv0 = *(u_int64_t *)rv;
				munged_rv1 = 0LL;
				break;
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
				munged_rv0 = *(user_addr_t *)rv;
				munged_rv1 = 0LL;
				break;
			case _SYSCALL_RET_NONE:
				munged_rv0 = 0LL;
				munged_rv1 = 0LL;
				break;
			default:
				munged_rv0 = 0LL;
				munged_rv1 = 0LL;
				break;
			}
		}
	} else {
		/* EJUSTRETURN: state already arranged; report zero. */
		munged_rv0 = 0LL;
		munged_rv1 = 0LL;
	}

	/*
	 * NOTE(review): munged_rv0 is intentionally passed for both the
	 * second and third probe arguments; the raw rval rides along after
	 * munged_rv1 — presumably so consumers see both decoded and raw
	 * values.  Confirm against the probe-argument convention used by
	 * the return-probe consumers.
	 */
	(*systrace_probe)(id, munged_rv0, munged_rv0, munged_rv1,
	    (uint64_t)rval, 0);
}
}
#endif /* __APPLE__ */

/*
 * Probe-id encoding: the low SYSTRACE_SHIFT bits carry the syscall
 * number; bit SYSTRACE_SHIFT distinguishes entry probes from return
 * probes (return ids are the bare syscall number).
 */
#define SYSTRACE_SHIFT 16
#define SYSTRACE_ISENTRY(x) ((int)(x) >> SYSTRACE_SHIFT)
#define SYSTRACE_SYSNUM(x) ((int)(x) & ((1 << SYSTRACE_SHIFT) - 1))
#define SYSTRACE_ENTRY(id) ((1 << SYSTRACE_SHIFT) | (id))
#define SYSTRACE_RETURN(id) (id)

/* The encoding requires every syscall number to fit below the entry bit. */
#if ((1 << SYSTRACE_SHIFT) <= NSYSCALL)
#error 1 << SYSTRACE_SHIFT must exceed number of system calls
#endif

static dev_info_t *systrace_devi;		/* attached device node */
static dtrace_provider_id_t systrace_id;	/* registered provider id */

#if !defined (__APPLE__)
/*
 * Lazily allocate the interposition shadow table and record the native
 * handler (sy_callc) of each resident syscall; skip unloaded loadable
 * syscalls and entries already pointing at the DTrace trampoline.
 */
static void systrace_init(struct sysent *actual, systrace_sysent_t **interposed)
{
	systrace_sysent_t *sysent = *interposed;
	int i;

	if (sysent == NULL) {
		*interposed = sysent = kmem_zalloc(sizeof (systrace_sysent_t) *
		    NSYSCALL, KM_SLEEP);
	}

	for (i = 0; i < NSYSCALL; i++) {
		struct sysent *a = &actual[i];
		systrace_sysent_t *s = &sysent[i];

		/* Loadable syscall not currently loaded: nothing to save. */
		if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a))
			continue;

		/* Already interposed: don't capture our own trampoline. */
		if (a->sy_callc == dtrace_systrace_syscall)
			continue;
#ifdef _SYSCALL32_IMPL
		if (a->sy_callc == dtrace_systrace_syscall32)
			continue;
#endif

		s->stsy_underlying = a->sy_callc;
	}
}
#else
#define systrace_init _systrace_init /* Avoid name clash with Darwin automagic conf symbol */

/*
 * Darwin variant of the table setup: additionally records each syscall's
 * declared return type (needed by the return-value decode above) and
 * initializes the systrace mutex.
 */
static void systrace_init(struct sysent *actual, systrace_sysent_t **interposed)
{
	systrace_sysent_t *ssysent = *interposed; /* Avoid sysent shadow warning from bsd/sys/sysent.h */
	int i;

	if (ssysent == NULL) {
		*interposed = ssysent = kmem_zalloc(sizeof (systrace_sysent_t) *
		    NSYSCALL, KM_SLEEP);
	}

	for (i = 0; i < NSYSCALL; i++) {
		struct sysent *a = &actual[i];
		systrace_sysent_t *s = &ssysent[i];

		/* Loadable syscall not currently loaded: nothing to save. */
		if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a))
			continue;

		/* Already interposed: don't capture our own trampoline. */
		if (a->sy_callc == dtrace_systrace_syscall)
			continue;
#ifdef _SYSCALL32_IMPL
		if (a->sy_callc == dtrace_systrace_syscall32)
			continue;
#endif

		s->stsy_underlying = a->sy_callc;
		/* Return type drives the rv[] widening in the return probe. */
		s->stsy_return_type = a->sy_return_type;
	}

	lck_mtx_init(&dtrace_systrace_lock, dtrace_lck_grp, dtrace_lck_attr);
}
/*
 * Mach-trap analogue of systrace_init(): lazily allocate the shadow
 * table and save the native trap handler for every resident mach trap,
 * skipping absent loadable entries and handlers that already point at
 * the DTrace mach-trap trampoline.
 */
static void
machtrace_init(const mach_trap_t *actual, machtrace_sysent_t **interposed)
{
	machtrace_sysent_t *table = *interposed;
	int trapno;

	if (table == NULL) {
		/* First call: zero-filled so untouched slots read as NULL. */
		table = kmem_zalloc(sizeof (machtrace_sysent_t) * NSYSCALL,
		    KM_SLEEP);
		*interposed = table;
	}

	for (trapno = 0; trapno < NSYSCALL; trapno++) {
		const mach_trap_t *trap = &actual[trapno];
		machtrace_sysent_t *shadow = &table[trapno];

		/* Loadable trap that is not currently loaded: skip. */
		if (LOADABLE_SYSCALL(trap) && !LOADED_SYSCALL(trap))
			continue;

		/* Already interposed: don't record our own trampoline. */
		if (trap->mach_trap_function == (mach_call_t)(dtrace_machtrace_syscall))
			continue;

		shadow->stsy_underlying = trap->mach_trap_function;
	}
}
/*
 * Save the system call arguments in a safe place.
 *
 * On the i386 kernel:
 *
 * 	Copy the users args prior to changing the stack or stack pointer.
 * 	This is so /proc will be able to get a valid copy of the
 * 	args from the user stack even after the user stack has been changed.
 * 	Note that the kernel stack copy of the args may also have been
 * 	changed by a system call handler which takes C-style arguments.
 *
 * 	Note that this may be called by stop() from trap().  In that case
 * 	t_sysnum will be zero (syscall_exit clears it), so no args will be
 * 	copied.
 *
 * On the amd64 kernel:
 *
 * 	For 64-bit applications, lwp->lwp_ap normally points to %rdi..%r9
 * 	in the reg structure. If the user is going to change the argument
 * 	registers, rax, or the stack and might want to get the args (for
 * 	/proc tracing), it must copy the args elsewhere via save_syscall_args().
 *
 * 	For 32-bit applications, lwp->lwp_ap normally points to a copy of
 * 	the system call arguments on the kernel stack made from the user
 * 	stack.  Copy the args prior to change the stack or stack pointer.
 * 	This is so /proc will be able to get a valid copy of the args
 * 	from the user stack even after that stack has been changed.
 *
 * This may be called from stop() even when we're not in a system call.
 * Since there's no easy way to tell, this must be safe (not panic).
 * If the copyins get data faults, return non-zero.
 */
int
save_syscall_args()
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	uint_t code = t->t_sysnum;
	uint_t nargs;

	if (lwp->lwp_argsaved || code == 0)
		return (0);		/* args already saved or not needed */

	if (code >= NSYSCALL) {
		nargs = 0;		/* illegal syscall */
	} else {
		struct sysent *se = LWP_GETSYSENT(lwp);
		struct sysent *callp = se + code;

		nargs = callp->sy_narg;
		if (LOADABLE_SYSCALL(callp) && nargs == 0) {
			krwlock_t *module_lock;

			/*
			 * Find out how many arguments the system
			 * call uses.
			 *
			 * We have the property that loaded syscalls
			 * never change the number of arguments they
			 * use after they've been loaded once.  This
			 * allows us to stop for /proc tracing without
			 * holding the module lock.
			 * /proc is assured that sy_narg is valid.
			 */
			module_lock = lock_syscall(se, code);
			nargs = callp->sy_narg;
			rw_exit(module_lock);
		}
	}

	/*
	 * Fetch the system call arguments.
	 */
	if (nargs == 0)
		goto out;

	ASSERT(nargs <= MAXSYSARGS);

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		struct regs *rp = lwptoregs(lwp);

		/* 64-bit: first six args arrive in the AMD64 arg registers. */
		lwp->lwp_arg[0] = rp->r_rdi;
		lwp->lwp_arg[1] = rp->r_rsi;
		lwp->lwp_arg[2] = rp->r_rdx;
		lwp->lwp_arg[3] = rp->r_rcx;
		lwp->lwp_arg[4] = rp->r_r8;
		lwp->lwp_arg[5] = rp->r_r9;
		/* Any remaining args must be copied in from the user stack. */
		if (nargs > 6 && copyin_args(rp, &lwp->lwp_arg[6], nargs - 6))
			return (-1);
	} else {
#endif
		/*
		 * 32-bit data model (or non-LP64 kernel): all args are on
		 * the user stack; copy-in may fault, hence the -1 return.
		 */
		if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_arg, nargs))
			return (-1);
	}
out:
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_argsaved = 1;
	t->t_post_sys = 1;	/* so lwp_ap will be reset */
	return (0);
}
/*
 * Save the system call arguments in a safe place.
 * lwp->lwp_ap normally points to the out regs in the reg structure.
 * If the user is going to change the out registers, g1, or the stack,
 * and might want to get the args (for /proc tracing), it must copy
 * the args elsewhere via save_syscall_args().
 *
 * This may be called from stop() even when we're not in a system call.
 * Since there's no easy way to tell, this must be safe (not panic).
 * If the copyins get data faults, return non-zero.
 */
int
save_syscall_args()
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	struct regs *rp = lwptoregs(lwp);
	uint_t code = t->t_sysnum;
	uint_t nargs;
	int i;
	caddr_t ua;
	model_t datamodel;

	if (lwp->lwp_argsaved || code == 0)
		return (0);		/* args already saved or not needed */

	if (code >= NSYSCALL) {
		nargs = 0;		/* illegal syscall */
	} else {
		struct sysent *se = LWP_GETSYSENT(lwp);
		struct sysent *callp = se + code;

		nargs = callp->sy_narg;
		if (LOADABLE_SYSCALL(callp) && nargs == 0) {
			krwlock_t *module_lock;

			/*
			 * Find out how many arguments the system
			 * call uses.
			 *
			 * We have the property that loaded syscalls
			 * never change the number of arguments they
			 * use after they've been loaded once.  This
			 * allows us to stop for /proc tracing without
			 * holding the module lock.
			 * /proc is assured that sy_narg is valid.
			 */
			module_lock = lock_syscall(se, code);
			nargs = callp->sy_narg;
			rw_exit(module_lock);
		}
	}

	/*
	 * Fetch the system call arguments.
	 */
	if (nargs == 0)
		goto out;

	ASSERT(nargs <= MAXSYSARGS);

	if ((datamodel = lwp_getdatamodel(lwp)) == DATAMODEL_ILP32) {
		if (rp->r_g1 == 0) {
			/*
			 * Indirect syscall (%g1 == 0): the real syscall
			 * number rides in %o0, so the args shift down one
			 * register — %o1..%o5 carry args 0..4, and any
			 * overflow starts at stack slot 5.
			 */
			lwp->lwp_arg[0] = (uint32_t)rp->r_o1;
			lwp->lwp_arg[1] = (uint32_t)rp->r_o2;
			lwp->lwp_arg[2] = (uint32_t)rp->r_o3;
			lwp->lwp_arg[3] = (uint32_t)rp->r_o4;
			lwp->lwp_arg[4] = (uint32_t)rp->r_o5;
			if (nargs > 5) {
				/*
				 * Overflow args live just above the 32-bit
				 * minimum frame on the user stack; the
				 * caddr32_t cast truncates %sp to a valid
				 * 32-bit user address.
				 */
				ua = (caddr_t)(uintptr_t)(caddr32_t)(uintptr_t)
				    (rp->r_sp + MINFRAME32);
				for (i = 5; i < nargs; i++) {
					uint32_t a;
					if (fuword32(ua, &a) != 0)
						return (-1);
					lwp->lwp_arg[i] = a;
					ua += sizeof (a);
				}
			}
		} else {
			/* Direct syscall: %o0..%o5 carry args 0..5. */
			lwp->lwp_arg[0] = (uint32_t)rp->r_o0;
			lwp->lwp_arg[1] = (uint32_t)rp->r_o1;
			lwp->lwp_arg[2] = (uint32_t)rp->r_o2;
			lwp->lwp_arg[3] = (uint32_t)rp->r_o3;
			lwp->lwp_arg[4] = (uint32_t)rp->r_o4;
			lwp->lwp_arg[5] = (uint32_t)rp->r_o5;
			if (nargs > 6) {
				ua = (caddr_t)(uintptr_t)(caddr32_t)(uintptr_t)
				    (rp->r_sp + MINFRAME32);
				for (i = 6; i < nargs; i++) {
					uint32_t a;
					if (fuword32(ua, &a) != 0)
						return (-1);
					lwp->lwp_arg[i] = a;
					ua += sizeof (a);
				}
			}
		}
	} else {
		ASSERT(datamodel == DATAMODEL_LP64);
		lwp->lwp_arg[0] = rp->r_o0;
		lwp->lwp_arg[1] = rp->r_o1;
		lwp->lwp_arg[2] = rp->r_o2;
		lwp->lwp_arg[3] = rp->r_o3;
		lwp->lwp_arg[4] = rp->r_o4;
		lwp->lwp_arg[5] = rp->r_o5;
		if (nargs > 6) {
			/*
			 * 64-bit user stacks are biased: the true frame
			 * address is %sp + STACK_BIAS; overflow args start
			 * just above the minimum frame.
			 */
			ua = (caddr_t)rp->r_sp + MINFRAME + STACK_BIAS;
			for (i = 6; i < nargs; i++) {
				unsigned long a;
				if (fulword(ua, &a) != 0)
					return (-1);
				lwp->lwp_arg[i] = a;
				ua += sizeof (a);
			}
		}
	}
out:
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_argsaved = 1;
	t->t_post_sys = 1;	/* so lwp_ap will be reset */
	return (0);
}