/*
 * profile provider tick handler.
 *
 * Fires the profile probe for this tick: if the CPU was interrupted while
 * in the kernel, the kernel PC goes in arg0; if it was interrupted in user
 * mode, the user PC goes in arg1 (arg0 is 0).  With no usable state, a
 * sentinel value is reported instead.
 */
static void
profile_tick(void *arg)
{
	profile_probe_t *prof = arg;

#if defined(__x86_64__)
	x86_saved_state_t *kern_regs = find_kern_regs(current_thread());

	if (NULL != kern_regs) {
		/* Kernel was interrupted. */
		dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, 0, 0, 0);
	} else {
		pal_register_cache_state(current_thread(), VALID);
		/* Possibly a user interrupt */
		x86_saved_state_t *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());

		if (NULL == tagged_regs) {
			/* Too bad, so sad, no useful interrupt state. */
			dtrace_probe(prof->prof_id, 0xcafebabe,
			    0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
		} else if (is_saved_state64(tagged_regs)) {
			/* 64-bit user thread: report the user rip as arg1. */
			x86_saved_state64_t *regs = saved_state64(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, 0, 0, 0);
		} else {
			/* 32-bit user thread: report the user eip as arg1. */
			x86_saved_state32_t *regs = saved_state32(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->eip, 0, 0, 0);
		}
	}
#else
#error Unknown architecture
#endif
}
/*
 * Count the user stack frames of the current thread (x86) by walking the
 * frame-pointer chain.  Returns 0 when there is no thread or no user
 * register state, and -1 when a DTrace fault is already pending.
 */
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	/* A pending fault means user-memory reads below would be unreliable. */
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pal_register_cache_state(thread, VALID);
	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	/* Pull pc/sp/fp from the 64- or 32-bit saved state as appropriate. */
	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
		/*
		 * we would have adjusted the stack if we had
		 * supplied one (that is what rc == 1 means).
		 * Also, as a side effect, the pc might have
		 * been fixed up, which is good for calling
		 * in to dtrace_getustack_common.
		 */
		n++;
	}

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */
	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
/*
 * Return a pointer to the syscall-argument area for the given thread:
 * the 64-bit saved register state for a 64-bit thread, or the uthread's
 * uu_arg array for a 32-bit thread.
 */
void *
get_bsduthreadarg(thread_t th)
{
	struct uthread *ut = get_bsdthread_info(th);

	if (ml_thread_is64bit(th) == TRUE) {
		/* 64-bit: arguments live in the saved register state. */
		return (void *)saved_state64(find_user_regs(th));
	}

	/* 32-bit: arguments were copied into the uthread. */
	return (void *)(ut->uu_arg);
}
/*
 * Count the user stack frames of the current thread (ppc).  Returns 0
 * when there is no thread or no user register state, and -1 when a
 * DTrace fault is already pending.
 */
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	/* A pending fault means user-memory reads below would be unreliable. */
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * At function entry the current pc counts as one frame and
		 * the caller is in the link register.
		 */
		n++;
		pc = regs->save_lr;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
/*
 * Complete a BSD system call on x86: marshal the return value (or errno)
 * into the user-mode register state, run DTrace/audit return hooks,
 * handle ERESTART and EJUSTRETURN specially, then return to user mode
 * via thread_exception_return().  Does not return to the caller.
 */
void
unix_syscall_return(int error)
{
	thread_t thread;
	struct uthread *uthread;
	struct proc *p;
	unsigned int code;
	struct sysent *callp;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	/* We are about to modify the saved register state. */
	pal_register_cache_state(thread, DIRTY);

	p = current_proc();

	if (proc_is64bit(p)) {
		x86_saved_state64_t *regs;

		regs = saved_state64(find_user_regs(thread));

		code = uthread->syscall_code;
		/* Clamp out-of-range codes to slot 63; NOTE(review): presumably an invalid-syscall entry — confirm. */
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

#if CONFIG_DTRACE
		/* Let the systrace provider fire its return probe. */
		if (callp->sy_call == dtrace_systrace_syscall)
			dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
		AUDIT_SYSCALL_EXIT(code, p, uthread, error);

		if (error == ERESTART) {
			/*
			 * repeat the syscall
			 */
			pal_syscall_restart( thread, find_user_regs(thread) );
		}
		else if (error != EJUSTRETURN) {
			if (error) {
				/* Error path: errno in rax, carry flag set. */
				regs->rax = error;
				regs->isf.rflags |= EFL_CF;	/* carry bit */
			} else { /* (not error) */
				/* Marshal uu_rval into rax/rdx per the declared return type. */
				switch (callp->sy_return_type) {
				case _SYSCALL_RET_INT_T:
					regs->rax = uthread->uu_rval[0];
					regs->rdx = uthread->uu_rval[1];
					break;
				case _SYSCALL_RET_UINT_T:
					regs->rax = ((u_int)uthread->uu_rval[0]);
					regs->rdx = ((u_int)uthread->uu_rval[1]);
					break;
				case _SYSCALL_RET_OFF_T:
				case _SYSCALL_RET_ADDR_T:
				case _SYSCALL_RET_SIZE_T:
				case _SYSCALL_RET_SSIZE_T:
				case _SYSCALL_RET_UINT64_T:
					/* 64-bit result spans both 32-bit rval slots. */
					regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
					regs->rdx = 0;
					break;
				case _SYSCALL_RET_NONE:
					break;
				default:
					panic("unix_syscall: unknown return type");
					break;
				}
				regs->isf.rflags &= ~EFL_CF;
			}
		}
		DEBUG_KPRINT_SYSCALL_UNIX(
			"unix_syscall_return: error=%d retval=(%llu,%llu)\n",
			error, regs->rax, regs->rdx);
	} else {
		x86_saved_state32_t *regs;

		regs = saved_state32(find_user_regs(thread));

		/* Clear carry up front; error path below re-sets it. */
		regs->efl &= ~(EFL_CF);

		code = uthread->syscall_code;
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

#if CONFIG_DTRACE
		if (callp->sy_call == dtrace_systrace_syscall)
			dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
		AUDIT_SYSCALL_EXIT(code, p, uthread, error);

		if (error == ERESTART) {
			pal_syscall_restart( thread, find_user_regs(thread) );
		}
		else if (error != EJUSTRETURN) {
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;	/* carry bit */
			} else { /* (not error) */
				/* 32-bit returns always use the eax/edx pair. */
				regs->eax = uthread->uu_rval[0];
				regs->edx = uthread->uu_rval[1];
			}
		}
		DEBUG_KPRINT_SYSCALL_UNIX(
			"unix_syscall_return: error=%d retval=(%u,%u)\n",
			error, regs->eax, regs->edx);
	}

	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if (uthread->uu_lowpri_window) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	/* NOTE(review): syscall 180 appears to be deliberately excluded from kdebug tracing — confirm which call this is. */
	if (code != 180)
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

	thread_exception_return();
	/* NOTREACHED */
}
/*
 * Capture the current thread's user call stack into parallel pc/fp
 * arrays (for ustack()-style consumers).  The first pc entry is the
 * pid; remaining slots are zero-filled if the walk ends early.
 */
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	/* Bail if a fault is already pending on this CPU. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	/* First entry is the pid. */
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	/*
	 * NOTE(review): pc/sp are always taken from the 32-bit saved state
	 * here, even though is64Bit is consulted for the frame walk below —
	 * confirm this is intended.
	 */
	pc = regs->ss_32.eip;
	sp = regs->ss_32.ebp;

#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if(dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
		/*
		 * we made a change.
		 */
		*fpstack++ = 0;
		if (pcstack_limit <= 0)
			return;
	}

	/* Walk the frame-pointer chain until pc or sp hits 0, or we run out of room. */
	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif
		{
			/* Read the saved return pc, then follow the saved frame pointer. */
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

zero:
	/* Zero-fill whatever room remains. */
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
/*
 * Capture the current thread's user call stack (x86) into pcstack.
 * First entry is the pid; the frame walk is delegated to
 * dtrace_getustack_common(); remaining slots are zero-filled.
 */
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	/* Bail if a fault is already pending on this CPU. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	/* First entry is the pid. */
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	/* Pull pc/sp/fp from the 64- or 32-bit saved state as appropriate. */
	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	/*
	 * The return value indicates if we've modified the stack.
	 * Since there is nothing else to fix up in either case,
	 * we can safely ignore it here.
	 */
	(void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

	if(pcstack_limit <= 0)
		return;

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */
	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	/* Zero-fill whatever room remains. */
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
/*
 * Capture the current thread's user call stack (ppc) into parallel
 * pc/fp arrays.  First pc entry is the pid; slots left over after the
 * walk are zero-filled.
 */
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	/* Bail if a fault is already pending on this CPU. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	/* First entry is the pid. */
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

#if 0 /* XXX signal stack crawl*/
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/* At function entry, record the current pc with a 0 fp. */
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		/*
		 * XXX This is wrong, but we do not yet support stack helpers.
		 */
		if (is64Bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	/* Walk the frame chain until pc or sp hits 0, or we run out of room. */
	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl*/
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif
		{
			/* Read the saved return pc, then follow the saved frame pointer. */
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

zero:
	/* Zero-fill whatever room remains. */
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
/*
 * Capture the current thread's user call stack (ppc) into pcstack.
 * First entry is the pid; the frame walk is delegated to
 * dtrace_getustack_common(); remaining slots are zero-filled.
 */
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	/* Bail if a fault is already pending on this CPU. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;

	/* First entry is the pid. */
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * At function entry, record the current pc and continue the
		 * walk from the caller, which is in the link register.
		 */
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = regs->save_lr;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	/* Zero-fill whatever room remains. */
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
/*
 * systrace shim for Mach traps: fire the entry probe (if enabled),
 * invoke the underlying mach call, then fire the return probe with the
 * call's result.
 */
static kern_return_t
dtrace_machtrace_syscall(struct mach_call_args *args)
{
	int code;	/* The mach call number */

	machtrace_sysent_t *sy;
	dtrace_id_t id;
	kern_return_t rval;
#if 0 /* XXX */
	proc_t *p;
#endif
	syscall_arg_t *ip = (syscall_arg_t *)args;
	mach_call_t mach_call;

#if defined (__x86_64__)
	{
		pal_register_cache_state(current_thread(), VALID);
		x86_saved_state_t *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());

		if (is_saved_state64(tagged_regs)) {
			code = saved_state64(tagged_regs)->rax & SYSCALL_NUMBER_MASK;
		}
		else {
			/* 32-bit: eax is negated to recover the trap number. */
			code = -saved_state32(tagged_regs)->eax;
		}
	}
#else
#error Unknown Architecture
#endif

	/*
	 * NOTE(review): unlike dtrace_systrace_syscall, code is not bounds
	 * checked before indexing machtrace_sysent — confirm callers
	 * guarantee it is in range.
	 */
	sy = &machtrace_sysent[code];

	if ((id = sy->stsy_entry) != DTRACE_IDNONE) {
		/* Stash the argument pointer so probe context can copy in args. */
		uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

		if (uthread)
			uthread->t_dtrace_syscall_args = (void *)ip;

		(*machtrace_probe)(id, *ip, *(ip+1), *(ip+2), *(ip+3), *(ip+4));

		if (uthread)
			uthread->t_dtrace_syscall_args = (void *)0;
	}

#if 0 /* XXX */
	/*
	 * We want to explicitly allow DTrace consumers to stop a process
	 * before it actually executes the meat of the syscall.
	 */
	p = ttoproc(curthread);
	mutex_enter(&p->p_lock);
	if (curthread->t_dtrace_stop && !curthread->t_lwp->lwp_nostop) {
		curthread->t_dtrace_stop = 0;
		stop(PR_REQUESTED, 0);
	}
	mutex_exit(&p->p_lock);
#endif

	mach_call = (mach_call_t)(*sy->stsy_underlying);
	rval = mach_call(args);

	if ((id = sy->stsy_return) != DTRACE_IDNONE)
		(*machtrace_probe)(id, (uint64_t)rval, 0, 0, 0, 0);

	return (rval);
}
/*
 * systrace shim for BSD system calls: determine the syscall number from
 * the saved user registers (handling the indirect-syscall convention),
 * fire the entry probe, invoke the real handler, then fire the return
 * probe with "munged" return values that mimic what libc reports.
 */
int32_t
dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv)
{
	unsigned short code;	/* The system call number */

	systrace_sysent_t *sy;
	dtrace_id_t id;
	int32_t rval;
#if 0 /* XXX */
	proc_t *p;
#endif
	syscall_arg_t *ip = (syscall_arg_t *)uap;

#if defined (__x86_64__)
	{
		pal_register_cache_state(current_thread(), VALID);
		x86_saved_state_t *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());

		if (is_saved_state64(tagged_regs)) {
			x86_saved_state64_t *regs = saved_state64(tagged_regs);

			code = regs->rax & SYSCALL_NUMBER_MASK;
			/*
			 * Check for indirect system call... system call number
			 * passed as 'arg0'
			 */
			if (code == 0) {
				code = regs->rdi;
			}
		} else {
			code = saved_state32(tagged_regs)->eax & I386_SYSCALL_NUMBER_MASK;

			if (code == 0) {
				/* Indirect call on 32-bit: real number is the first word on the user stack. */
				vm_offset_t params = (vm_offset_t) (saved_state32(tagged_regs)->uesp + sizeof (int));
				code = fuword(params);
			}
		}
	}
#else
#error Unknown Architecture
#endif

	// Bounds "check" the value of code a la unix_syscall
	sy = (code >= NUM_SYSENT) ? &systrace_sysent[63] : &systrace_sysent[code];

	if ((id = sy->stsy_entry) != DTRACE_IDNONE) {
		/* Stash the argument pointer so probe context can copy in args. */
		uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

		if (uthread)
			uthread->t_dtrace_syscall_args = (void *)ip;

		if (ip)
			(*systrace_probe)(id, *ip, *(ip+1), *(ip+2), *(ip+3), *(ip+4));
		else
			(*systrace_probe)(id, 0, 0, 0, 0, 0);

		if (uthread)
			uthread->t_dtrace_syscall_args = (void *)0;
	}

#if 0 /* XXX */
	/*
	 * We want to explicitly allow DTrace consumers to stop a process
	 * before it actually executes the meat of the syscall.
	 */
	p = ttoproc(curthread);
	mutex_enter(&p->p_lock);
	if (curthread->t_dtrace_stop && !curthread->t_lwp->lwp_nostop) {
		curthread->t_dtrace_stop = 0;
		stop(PR_REQUESTED, 0);
	}
	mutex_exit(&p->p_lock);
#endif

	rval = (*sy->stsy_underlying)(pp, uap, rv);

	if ((id = sy->stsy_return) != DTRACE_IDNONE) {
		uint64_t munged_rv0, munged_rv1;
		uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

		if (uthread)
			uthread->t_dtrace_errno = rval; /* Establish t_dtrace_errno now in case this enabling refers to it. */

		/*
		 * "Decode" rv for use in the call to dtrace_probe()
		 */
		if (rval == ERESTART) {
			munged_rv0 = -1LL; /* System call will be reissued in user mode. Make DTrace report a -1 return. */
			munged_rv1 = -1LL;
		} else if (rval != EJUSTRETURN) {
			if (rval) {
				munged_rv0 = -1LL; /* Mimic what libc will do. */
				munged_rv1 = -1LL;
			} else {
				/* Widen the rv slots per the declared return type. */
				switch (sy->stsy_return_type) {
				case _SYSCALL_RET_INT_T:
					munged_rv0 = rv[0];
					munged_rv1 = rv[1];
					break;
				case _SYSCALL_RET_UINT_T:
					munged_rv0 = ((u_int)rv[0]);
					munged_rv1 = ((u_int)rv[1]);
					break;
				case _SYSCALL_RET_OFF_T:
				case _SYSCALL_RET_UINT64_T:
					munged_rv0 = *(u_int64_t *)rv;
					munged_rv1 = 0LL;
					break;
				case _SYSCALL_RET_ADDR_T:
				case _SYSCALL_RET_SIZE_T:
				case _SYSCALL_RET_SSIZE_T:
					munged_rv0 = *(user_addr_t *)rv;
					munged_rv1 = 0LL;
					break;
				case _SYSCALL_RET_NONE:
					munged_rv0 = 0LL;
					munged_rv1 = 0LL;
					break;
				default:
					munged_rv0 = 0LL;
					munged_rv1 = 0LL;
					break;
				}
			}
		} else {
			munged_rv0 = 0LL;
			munged_rv1 = 0LL;
		}

		/*
		 * <http://mail.opensolaris.org/pipermail/dtrace-discuss/2007-January/003276.html> says:
		 *
		 * "This is a bit of an historical artifact. At first, the syscall provider just
		 * had its return value in arg0, and the fbt and pid providers had their return
		 * values in arg1 (so that we could use arg0 for the offset of the return site).
		 *
		 * We inevitably started writing scripts where we wanted to see the return
		 * values from probes in all three providers, and we made this script easier
		 * to write by replicating the syscall return values in arg1 to match fbt and
		 * pid. We debated briefly about removing the return value from arg0, but
		 * decided that it would be less confusing to have the same data in two places
		 * than to have some non-helpful, non-intuitive value in arg0.
		 *
		 * This change was made 4/23/2003 according to the DTrace project's putback log."
		 */
		(*systrace_probe)(id, munged_rv0, munged_rv0, munged_rv1, (uint64_t)rval, 0);
	}

	return (rval);
}
/*
 * Return the current activation's topmost user-mode register savearea.
 */
static savearea *
chudxnu_private_get_user_regs(void)
{
	return find_user_regs(current_act());	// take the top user savearea (skip any kernel saveareas)
}