/*
 *	Routine:	semaphore_signal_thread_trap
 *
 *	Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
	struct semaphore_signal_thread_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	mach_port_name_t thread_name = args->thread_name;
	semaphore_t	semaphore;
	thread_t	thread;
	kern_return_t	kr;

	/*
	 * MACH_PORT_NULL is not an error. It means that we want to
	 * select any one thread that is already waiting, but not to
	 * pre-post the semaphore.
	 */
	if (thread_name != MACH_PORT_NULL) {
		thread = port_name_to_thread(thread_name);
		if (thread == THREAD_NULL)
			return KERN_INVALID_ARGUMENT;
	} else
		thread = THREAD_NULL;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
					       thread,
					       SEMAPHORE_OPTION_NONE);
		semaphore_dereference(semaphore);
	}

	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}
	return kr;
}
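/*
 * A minimal user-space sketch of the path that reaches this trap: the
 * semaphore_signal_thread() call in the Mach API traps into
 * semaphore_signal_thread_trap() with the semaphore and thread port names.
 * Passing MACH_PORT_NULL as the thread wakes any one waiter without
 * pre-posting, exactly as the comment above describes.  Sketch only; error
 * handling is reduced to asserts and KERN_NOT_WAITING is treated as benign.
 */
#include <assert.h>
#include <mach/mach.h>

static void
signal_any_waiter(semaphore_t sem)
{
	/* MACH_PORT_NULL: wake any one waiting thread, never pre-post. */
	kern_return_t kr = semaphore_signal_thread(sem, MACH_PORT_NULL);

	/* KERN_NOT_WAITING simply means nobody was blocked on the semaphore. */
	assert(kr == KERN_SUCCESS || kr == KERN_NOT_WAITING);
}

int
main(void)
{
	semaphore_t sem;

	assert(semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0)
	    == KERN_SUCCESS);
	signal_any_waiter(sem);		/* no waiters: expect KERN_NOT_WAITING */
	semaphore_destroy(mach_task_self(), sem);
	return 0;
}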
int
perfmon_control(struct savearea *ssp)
{
	mach_port_name_t thr_port = CAST_DOWN(mach_port_name_t, ssp->save_r3);
	int action = (int)ssp->save_r4;
	int pmc = (int)ssp->save_r5;
	int val = (int)ssp->save_r6;
	uint64_t *usr_pmcs_p = CAST_DOWN(uint64_t *, ssp->save_r7);
	thread_t thread = THREAD_NULL;
	uint64_t kern_pmcs[MAX_CPUPMC_COUNT];
	kern_return_t retval = KERN_SUCCESS;
	int error;
	boolean_t oldlevel;

	/* convert user space thread port name to a thread_t */
	thread = port_name_to_thread(thr_port);
	if (!thread) {
		ssp->save_r3 = KERN_INVALID_ARGUMENT;
		return 1;	/* Return and check for ASTs... */
	}

	if (thread != current_thread()) {
		thread_suspend(thread);
	}

#ifdef HWPERFMON_DEBUG
	// kprintf("perfmon_control: action=0x%x pmc=%d val=%d pmcs=0x%x\n", action, pmc, val, usr_pmcs_p);
#endif

	oldlevel = ml_set_interrupts_enabled(FALSE);

	/* individual actions which do not require the perfmon facility to be enabled */
	if (action == PPC_PERFMON_DISABLE) {
		retval = perfmon_disable(thread);
	} else if (action == PPC_PERFMON_ENABLE) {
		retval = perfmon_enable(thread);
	} else {
		/* individual actions which do require the perfmon facility to be enabled */
		if (!(thread->machine.specFlags & perfMonitor)) { /* perfmon not enabled */
#ifdef HWPERFMON_DEBUG
			kprintf("perfmon_control: ERROR - perfmon not enabled for this thread\n");
#endif
			retval = KERN_NO_ACCESS;
			goto perfmon_return;
		}

		if (action == PPC_PERFMON_SET_EVENT) {
			retval = perfmon_set_event(thread, pmc, val);
		} else if (action == PPC_PERFMON_SET_THRESHOLD) {
			retval = perfmon_set_threshold(thread, val);
		} else if (action == PPC_PERFMON_SET_TBSEL) {
			retval = perfmon_set_tbsel(thread, val);
		} else if (action == PPC_PERFMON_SET_EVENT_FUNC) {
			retval = perfmon_set_event_func(thread, val);
		} else if (action == PPC_PERFMON_ENABLE_PMI_BRKPT) {
			if (val) {
				thread->machine.perfmonFlags |= PERFMONFLAG_BREAKPOINT_FOR_PMI;
			} else {
				thread->machine.perfmonFlags &= ~PERFMONFLAG_BREAKPOINT_FOR_PMI;
			}
			retval = KERN_SUCCESS;
		} else {
			/* combinable actions */
			if (action & PPC_PERFMON_STOP_COUNTERS) {
				error = perfmon_stop_counters(thread);
				if (error != KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if (action & PPC_PERFMON_CLEAR_COUNTERS) {
				error = perfmon_clear_counters(thread);
				if (error != KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if (action & PPC_PERFMON_WRITE_COUNTERS) {
				if ((error = copyin(CAST_USER_ADDR_T(usr_pmcs_p), (void *)kern_pmcs,
				    MAX_CPUPMC_COUNT * sizeof(uint64_t)))) {
					retval = error;
					goto perfmon_return;
				}
				error = perfmon_write_counters(thread, kern_pmcs);
				if (error != KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
			if (action & PPC_PERFMON_READ_COUNTERS) {
				error = perfmon_read_counters(thread, kern_pmcs);
				if (error != KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
				if ((error = copyout((void *)kern_pmcs, CAST_USER_ADDR_T(usr_pmcs_p),
				    MAX_CPUPMC_COUNT * sizeof(uint64_t)))) {
					retval = error;
					goto perfmon_return;
				}
			}
			if (action & PPC_PERFMON_START_COUNTERS) {
				error = perfmon_start_counters(thread);
				if (error != KERN_SUCCESS) {
					retval = error;
					goto perfmon_return;
				}
			}
		}
	}

perfmon_return:
	ml_set_interrupts_enabled(oldlevel);

#ifdef HWPERFMON_DEBUG
	kprintf("perfmon_control (CPU%d): mmcr0 = %016llX, pmc1=%X pmc2=%X pmc3=%X pmc4=%X pmc5=%X pmc6=%X pmc7=%X pmc8=%X\n",
		cpu_number(), ssp->save_mmcr0,
		ssp->save_pmc[PMC_1], ssp->save_pmc[PMC_2],
		ssp->save_pmc[PMC_3], ssp->save_pmc[PMC_4],
		ssp->save_pmc[PMC_5], ssp->save_pmc[PMC_6],
		ssp->save_pmc[PMC_7], ssp->save_pmc[PMC_8]);
#endif

	if (thread != current_thread()) {
		thread_resume(thread);
	}

#ifdef HWPERFMON_DEBUG
	if (retval != KERN_SUCCESS) {
		kprintf("perfmon_control - ERROR: retval=%d\n", retval);
	}
#endif /* HWPERFMON_DEBUG */

	ssp->save_r3 = retval;
	return 1;	/* Return and check for ASTs... */
}
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with kernel or launchd
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int		err;

		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
			t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc)
				SET(t->p_lflag, P_LSIGEXC);

			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);

			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);
			error = 0;
			goto out;
		} else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;

	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define	ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				return (ERESTART);
#undef	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
			/*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
			/*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);

		break;

	case PT_THUPDATE: {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL)
			return (ESRCH);
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;

	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
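/*
 * User-space view of the attach path above, as a minimal sketch with
 * trimmed error handling: PT_ATTACH makes the caller the tracer and
 * delivers SIGSTOP to the target; once waitpid() reports the stop,
 * PT_DETACH clears P_LTRACED/P_LSIGEXC and falls through to the resume:
 * label, so the target is set runnable again.  (PT_ATTACH is the legacy
 * request used by this revision of the function.)
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {			/* child: something to trace */
		pause();
		_exit(0);
	}

	/* PT_ATTACH marks the child traced and sends it SIGSTOP. */
	if (ptrace(PT_ATTACH, pid, 0, 0) == -1) {
		perror("PT_ATTACH");
		kill(pid, SIGKILL);
		return 1;
	}
	if (waitpid(pid, &status, WUNTRACED) == -1 || !WIFSTOPPED(status)) {
		fprintf(stderr, "target never stopped\n");
		return 1;
	}

	/* Detach; the kernel resumes the (stopped) target for us. */
	if (ptrace(PT_DETACH, pid, 0, 0) == -1)
		perror("PT_DETACH");

	kill(pid, SIGTERM);
	waitpid(pid, &status, 0);
	return 0;
}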
int
ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value32, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (kauth_cred_issuser(kauth_cred_get())) {
			OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */
	if (uap->req == PT_TRACE_ME) {
retry_trace_me:;
		proc_t pproc = proc_parent(p);
		if (pproc == NULL)
			return (EINVAL);
#if CONFIG_MACF
		/*
		 * NB: Cannot call kauth_authorize_process(..., KAUTH_PROCESS_CANTRACE, ...)
		 *     since that assumes the process being checked is the current process
		 *     when, in this case, it is the current process's parent.
		 *     Most of the other checks in cantrace() don't apply either.
		 */
		if ((error = mac_proc_check_debug(pproc, p)) == 0) {
#endif
			proc_lock(p);
			/* Make sure the process wasn't re-parented. */
			if (p->p_ppid != pproc->p_pid) {
				proc_unlock(p);
				proc_rele(pproc);
				goto retry_trace_me;
			}
			SET(p->p_lflag, P_LTRACED);
			/* Non-attached case, our tracer is our parent. */
			p->p_oppid = p->p_ppid;
			proc_unlock(p);
			/* Child and parent will have to be able to run modified code. */
			cs_allow_invalid(p);
			cs_allow_invalid(pproc);
#if CONFIG_MACF
		}
#endif
		proc_rele(pproc);

		return (error);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with kernel or launchd
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
#pragma clang diagnostic pop
		int		err;

		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
			t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc)
				SET(t->p_lflag, P_LSIGEXC);

			t->p_oppid = t->p_ppid;
			/* Check whether child and parent are allowed to run modified
			 * code (they'll have to) */
			proc_unlock(t);
			cs_allow_invalid(t);
			cs_allow_invalid(p);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);

			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);
			error = 0;
			goto out;
		} else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			if (pp != PROC_NULL) {
				proc_reparentlocked(t, pp, 1, 0);
				proc_rele(pp);
			} else {
				/* original parent exited while traced */
				proc_list_lock();
				t->p_listflag |= P_LIST_DEADPARENT;
				proc_list_unlock();
				proc_reparentlocked(t, initproc, 1, 0);
			}
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;

	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
#if CONFIG_MACF
		error = mac_proc_check_signal(p, t, SIGKILL);
		if (0 != error)
			goto resume;
#endif
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		/* force use of Mach SPIs (and task_for_pid security checks) to adjust PC */
		if (uap->addr != (user_addr_t)1) {
			error = ENOTSUP;
			goto out;
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, uap->data);
			if (0 != error)
				goto out;
#endif
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
			/*
			 * set trace bit
			 * we use sending SIGSTOP as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGSTOP);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
			/*
			 * clear trace bit if on
			 * we use sending SIGCONT as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGCONT);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);

		break;

	case PT_THUPDATE: {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_MACH_PORT_TO_NAME(uap->addr));
		if (th_act == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;

	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
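/*
 * A user-space sketch of the PT_DENY_ATTACH branch handled at the top of
 * this newer revision: when the caller is not already traced, the kernel
 * just sets P_LNOATTACH and returns 0; if the caller is already being
 * traced, it is force-exited with ENOTSUP instead of returning.  Once the
 * flag is set, a later PT_ATTACH against this process fails and the
 * would-be tracer is sent SIGSEGV (see the "not allowed to attach" branch
 * above).  Minimal sketch; real callers rarely need the error check.
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <unistd.h>

int
main(void)
{
	if (ptrace(PT_DENY_ATTACH, 0, 0, 0) == -1) {
		perror("PT_DENY_ATTACH");
		return 1;
	}
	/* From here on, debuggers cannot attach to this process. */
	printf("attach denied for pid %d\n", (int)getpid());
	pause();
	return 0;
}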