/*
 * Spawn the initial user-space process (pid 1) during BSD-side boot.
 * Clones it from the kernel process and manually releases the
 * fork-transition state that the normal fork return path would drop.
 */
void bsd_utaskbootstrap(void) {
    thread_t thread;
    struct uthread *ut;
    /*
     * Clone the bootstrap process from the kernel process, without
     * inheriting either task characteristics or memory from the kernel;
     */
    thread = cloneproc(TASK_NULL, COALITION_NULL, kernproc, FALSE, TRUE);
    /* Hold the reference as it will be dropped during shutdown */
    initproc = proc_find(1);
#if __PROC_INTERNAL_DEBUG
    if (initproc == PROC_NULL)
        panic("bsd_utaskbootstrap: initproc not set\n");
#endif
    /*
     * Since we aren't going back out the normal way to our parent,
     * we have to drop the transition locks explicitly.
     */
    proc_signalend(initproc, 0);
    proc_transend(initproc, 0);
    /* New thread starts with an empty signal mask and a BSD AST pending. */
    ut = (struct uthread *)get_bsdthread_info(thread);
    ut->uu_sigmask = 0;
    act_set_astbsd(thread);
    proc_clear_return_wait(initproc, thread);
}
/*
 * If an owning process has exited, reset the ownership.
 * Caller must hold ktrace_lock.
 */
static void ktrace_ownership_maintenance(void)
{
    lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

    /* nothing to maintain while ktrace is unowned */
    if (ktrace_owning_unique_id == 0) {
        return;
    }

    proc_t owner = proc_find(ktrace_owning_pid);
    if (owner == NULL) {
        /* the owning process cannot be found; drop ownership */
        ktrace_release_ownership();
        return;
    }

    /* the pid may have been recycled -- compare unique ids */
    if (proc_uniqueid(owner) != ktrace_owning_unique_id) {
        ktrace_release_ownership();
    }
    proc_rele(owner);
}
/* Validate whether the current process has privileges to access
 * kperf (and by extension, trace).  Returns 0 if access is granted.
 *
 * Access is granted when the caller is root and no live blessed pid
 * exists, or when the caller IS the blessed pid.
 */
int kperf_access_check(void)
{
    proc_t p = current_proc();
    proc_t blessed_p;
    int ret = 0;
    boolean_t pid_gone = FALSE;

    /* check if the pid that held the lock is gone */
    blessed_p = proc_find(blessed_pid);

    if ( blessed_p != NULL )
        proc_rele(blessed_p);
    else
        pid_gone = TRUE;

    if ( blessed_pid == -1 || pid_gone ) {
        /* check for root */
        ret = suser(kauth_cred_get(), &p->p_acflag);
        if( !ret )
            return ret;  /* suser() == 0 means root: access granted */
    }

    /* check against blessed pid */
    if( p->p_pid != blessed_pid )
        return EACCES;

    /* access granted. */
    return 0;
}
/*
 * Stop PQoS CMT tracking for the task described by "task".
 * pid == 0 selects every tracked process; otherwise a single process,
 * with lwpid (when non-zero) narrowing to one thread in that process.
 * Returns 0 on success, -1 when the process/thread cannot be found.
 */
int os_pqos_proc_stop(perf_ctl_t *ctl, perf_task_t *task)
{
    task_pqos_cmt_t *t = (task_pqos_cmt_t *)task;
    track_proc_t *proc;
    track_lwp_t *lwp = NULL;
    boolean_t end;

    if (t->pid == 0) {
        /* no specific pid: free PQoS state of all tracked processes */
        proc_pqos_func(NULL, os_pqos_cmt_proc_free);
        return (0);
    }

    proc = proc_find(t->pid);
    if (proc == NULL)
        return (-1);

    if (t->lwpid != 0) {
        lwp = proc_lwp_find(proc, t->lwpid);
        if (lwp == NULL) {
            proc_refcount_dec(proc);
            return (-1);
        }
        os_pqos_cmt_lwp_free(lwp, NULL, &end);
        lwp_refcount_dec(lwp);
    } else {
        os_pqos_cmt_proc_free(proc, NULL, &end);
    }

    proc_refcount_dec(proc);
    return (0);
}
int ktrace_set_owning_pid(int pid) { lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED); /* allow user space to successfully unset owning pid */ if (pid == -1) { ktrace_set_invalid_owning_pid(); return 0; } /* use ktrace_reset or ktrace_release_ownership, not this */ if (pid == 0) { ktrace_set_invalid_owning_pid(); return EINVAL; } proc_t p = proc_find(pid); if (!p) { ktrace_set_invalid_owning_pid(); return ESRCH; } ktrace_keep_ownership_on_reset = TRUE; ktrace_set_owning_proc(p); proc_rele(p); return 0; }
/*
 * ddb "kill" command: send a signal (default 15/SIGTERM) to a pid.
 * Syntax: kill pid[,sig].  Only available in-kernel.
 */
void db_kill_proc(db_expr_t addr, bool haddr, db_expr_t count, const char *modif)
{
#ifdef _KERNEL /* XXX CRASH(8) */
    struct proc *p;
    ksiginfo_t ksi;
    db_expr_t pid, sig;
    int t;

    /* What pid? */
    if (!db_expression(&pid)) {
        db_error("pid?\n");
        /*NOTREACHED*/
    }
    /* What sig? */
    t = db_read_token();
    if (t == tCOMMA) {
        if (!db_expression(&sig)) {
            db_error("sig?\n");
            /*NOTREACHED*/
        }
    } else {
        db_unread_token(t);
        sig = 15;  /* default to SIGTERM */
    }
    if (db_read_token() != tEOL) {
        db_error("?\n");
        /*NOTREACHED*/
    }
    /* We might stop when the mutex is held or when not */
    t = mutex_tryenter(proc_lock);
#ifdef DIAGNOSTIC
    if (!t) {
        db_error("could not acquire proc_lock mutex\n");
        /*NOTREACHED*/
    }
#endif
    p = proc_find((pid_t)pid);
    if (p == NULL) {
        /* only release proc_lock if tryenter actually took it */
        if (t)
            mutex_exit(proc_lock);
        db_error("no such proc\n");
        /*NOTREACHED*/
    }
    /* build a kernel-originated (SI_USER, uid 0) siginfo */
    KSI_INIT(&ksi);
    ksi.ksi_signo = sig;
    ksi.ksi_code = SI_USER;
    ksi.ksi_pid = 0;
    ksi.ksi_uid = 0;
    mutex_enter(p->p_lock);
    kpsignal2(p, &ksi);
    mutex_exit(p->p_lock);
    if (t)
        mutex_exit(proc_lock);
#else
    db_printf("This command is not currently supported.\n");
#endif
}
/*
 * Start PQoS CMT monitoring for a process (lwpid == 0) or a single
 * thread within it.  "flags" selects which resources to monitor
 * (LLC occupancy, total bandwidth, local bandwidth).
 * Returns 0 on success, -1 on failure (resources are freed on failure).
 */
static int pqos_cmt_start(perf_ctl_t *ctl, int pid, int lwpid, int flags)
{
    track_proc_t *proc;
    track_lwp_t *lwp = NULL;
    perf_pqos_t *pqos;
    int ret = -1;

    if ((proc = proc_find(pid)) == NULL)
        return -1;

    if (lwpid == 0) {
        /* per-process monitoring uses the proc-embedded pqos state */
        pqos = &proc->pqos;
    } else {
        if ((lwp = proc_lwp_find(proc, lwpid)) == NULL) {
            proc_refcount_dec(proc);
            return -1;
        }
        pqos = &lwp->pqos;
        proc->lwp_pqosed = B_TRUE;
    }

    memset(pqos, 0, sizeof(perf_pqos_t));
    os_pqos_cmt_init(pqos);

    /* set up each requested resource; any failure aborts via cleanup */
    if (flags & PERF_PQOS_FLAG_LLC) {
        if (pf_pqos_occupancy_setup(pqos, pid, lwpid) != 0)
            goto L_EXIT;
    }
    if (flags & PERF_PQOS_FLAG_TOTAL_BW) {
        if (pf_pqos_totalbw_setup(pqos, pid, lwpid) != 0)
            goto L_EXIT;
    }
    if (flags & PERF_PQOS_FLAG_LOCAL_BW) {
        if (pf_pqos_localbw_setup(pqos, pid, lwpid) != 0)
            goto L_EXIT;
    }

    /* ctl->last_ms_pqos = current_ms(); */
    if (pf_pqos_start(pqos) == 0)
        ret = 0;

L_EXIT:
    /* on any failure, release whatever perf resources were set up */
    if (ret != 0)
        pf_pqos_resource_free(pqos);

    if (lwp != NULL)
        lwp_refcount_dec(lwp);

    proc_refcount_dec(proc);
    return ret;
}
/*
 * Filter attach method for EVFILT_PROC.
 *
 * Looks up the target process, performs an access check against the
 * caller's credentials, and links the knote onto the process's klist.
 * Returns 0 on success, ESRCH / EACCES on failure.
 */
static int filt_procattach(struct knote *kn)
{
    struct proc *p;
    struct lwp *curl;

    curl = curlwp;

    mutex_enter(proc_lock);
    if (kn->kn_flags & EV_FLAG1) {
        /*
         * NOTE_TRACK attaches to the child process too early
         * for proc_find, so do a raw look up and check the state
         * explicitly.
         */
        p = proc_find_raw(kn->kn_id);
        if (p != NULL && p->p_stat != SIDL)
            p = NULL;
    } else {
        p = proc_find(kn->kn_id);
    }
    if (p == NULL) {
        mutex_exit(proc_lock);
        return ESRCH;
    }

    /*
     * Fail if it's not owned by you, or the last exec gave us
     * setuid/setgid privs (unless you're root).
     */
    /* hand-over-hand: take p_lock before dropping proc_lock */
    mutex_enter(p->p_lock);
    mutex_exit(proc_lock);
    if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
        p, NULL, NULL, NULL) != 0) {
        mutex_exit(p->p_lock);
        return EACCES;
    }

    kn->kn_obj = p;
    kn->kn_flags |= EV_CLEAR;   /* automatically set */

    /*
     * internal flag indicating registration done by kernel
     */
    if (kn->kn_flags & EV_FLAG1) {
        kn->kn_data = kn->kn_sdata; /* ppid */
        kn->kn_fflags = NOTE_CHILD;
        kn->kn_flags &= ~EV_FLAG1;
    }
    SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
    mutex_exit(p->p_lock);

    return 0;
}
/*
 * Send a signal to one process.
 * Returns ESRCH when the pid does not exist, otherwise the
 * result of send_sig().
 */
static int kill_one(pid_t pid, int sig)
{
    struct proc *p;

    DPRINTF(("proc: killone pid=%d sig=%d\n", pid, sig));

    p = proc_find(pid);
    if (p == NULL) {
        return ESRCH;
    }

    return send_sig(p, sig);
}
/*
 * Soft interrupt handler: deliver SIGIO to the process (if any) that
 * registered for async I/O notification on this midi device.
 */
static void midi_softint(void *cookie)
{
    struct midi_softc *sc = cookie;
    pid_t pid;
    proc_t *p;

    mutex_enter(proc_lock);
    /* sc->async holds the pid registered for async notification */
    pid = sc->async;
    if (pid != 0) {
        p = proc_find(pid);
        if (p != NULL)
            psignal(p, SIGIO);
    }
    mutex_exit(proc_lock);
}
/* specify a pid as being able to access kperf/trace, despite not
 * being root
 *
 * newpid == -1 clears the blessing.  Fails with EACCES when another
 * live process already holds the blessing (and preemption is not
 * allowed), or EINVAL when newpid does not exist.
 */
int kperf_bless_pid(pid_t newpid)
{
    proc_t p = NULL;
    pid_t current_pid;

    p = current_proc();
    current_pid = p->p_pid;

    /* are we allowed to preempt? */
    if ( (newpid != -1) && (blessed_pid != -1) &&
         (blessed_pid != current_pid) && !blessed_preempt ) {
        /* check if the pid that held the lock is gone */
        p = proc_find(blessed_pid);

        if ( p != NULL ) {
            /* still alive: cannot take over the blessing */
            proc_rele(p);
            return EACCES;
        }
    }

    /* validate new pid */
    if ( newpid != -1 ) {
        p = proc_find(newpid);

        if ( p == NULL )
            return EINVAL;

        proc_rele(p);
    }

    blessed_pid = newpid;
    blessed_preempt = FALSE;

    return 0;
}
//used only if exit extension bool SocketCookie::IsValid() { //check socket //check last pid used proc_t p; p = proc_find(this->application->pid); if (p) { proc_rele(p); return true; } return false; }
/*
 * Per-CPU load-latency sample collector: pull the pending records for
 * "cpu" and attribute each one to its process/thread record groups,
 * filtered by the pid/lwpid in "arg" (0 matches everything).
 */
static int cpu_ll_smpl(perf_cpu_t *cpu, void *arg)
{
    task_ll_t *task = (task_ll_t *)arg;
    pf_ll_rec_t *record;
    track_proc_t *proc;
    track_lwp_t *lwp;
    int record_num, i;

    pf_ll_record(cpu, s_ll_recbuf, &record_num);
    if (record_num == 0) {
        return (0);
    }

    for (i = 0; i < record_num; i++) {
        record = &s_ll_recbuf[i];
        /* skip records that do not match the requested pid/lwpid */
        if ((task->pid != 0) && (task->pid != record->pid)) {
            continue;
        }

        if ((task->pid != 0) && (task->lwpid != 0) &&
            (task->lwpid != record->tid)) {
            continue;
        }

        /*
         * NOTE(review): a record whose process/thread has exited stops
         * processing of ALL remaining records (early return) rather
         * than skipping just that record -- confirm `continue` was not
         * intended here.
         */
        if ((proc = proc_find(record->pid)) == NULL) {
            return (0);
        }

        if ((lwp = proc_lwp_find(proc, record->tid)) == NULL) {
            proc_refcount_dec(proc);
            return (0);
        }

        /* proc->mutex guards both record groups while we append */
        pthread_mutex_lock(&proc->mutex);
        llrec_add(&proc->llrec_grp, record);
        llrec_add(&lwp->llrec_grp, record);
        pthread_mutex_unlock(&proc->mutex);

        lwp_refcount_dec(lwp);
        proc_refcount_dec(proc);
    }

    return (0);
}
/* Not called from probe context */
/*
 * Look up a process by pid, suspend its task, and lock it for dtrace
 * manipulation.  Returns the locked proc (caller must unlock/resume)
 * or PROC_NULL if the pid does not exist.
 */
proc_t * sprlock(pid_t pid)
{
    proc_t* p;

    if ((p = proc_find(pid)) == PROC_NULL) {
        return PROC_NULL;
    }

    /* stop the task before taking the dtrace sprlock and proc lock */
    task_suspend_internal(p->task);

    dtrace_sprlock(p);

    proc_lock(p);

    return p;
}
/* Not called from probe context */
/*
 * Older variant of sprlock(): find the process, suspend its task, and
 * take the proc lock followed by the per-proc dtrace sprlock mutex.
 * Returns the locked proc or PROC_NULL if the pid does not exist.
 */
proc_t * sprlock(pid_t pid)
{
    proc_t* p;

    if ((p = proc_find(pid)) == PROC_NULL) {
        return PROC_NULL;
    }

    task_suspend(p->task);

    proc_lock(p);

    lck_mtx_lock(&p->p_dtrace_sprlock);

    return p;
}
/*
 * Linux sched_getaffinity(2) emulation.
 *
 * Validates the target pid, then reports a mask with every CPU set
 * (affinity is not actually tracked): the first CPU is the least
 * significant bit.  Returns the mask size in bytes via *retval.
 * Errors: EINVAL if the user buffer is too small, ESRCH for a bad pid.
 */
int linux_sys_sched_getaffinity(struct lwp *l, const struct linux_sys_sched_getaffinity_args *uap, register_t *retval)
{
    /* {
        syscallarg(linux_pid_t) pid;
        syscallarg(unsigned int) len;
        syscallarg(unsigned long *) mask;
    } */
    proc_t *p;
    unsigned long *lp, *data;
    int error, size, nb = ncpu;

    /* Unlike Linux, dynamically calculate cpu mask size */
    size = sizeof(long) * ((ncpu + LONG_BIT - 1) / LONG_BIT);
    if (SCARG(uap, len) < size)
        return EINVAL;

    /* XXX: Pointless check. TODO: Actually implement this. */
    mutex_enter(proc_lock);
    p = proc_find(SCARG(uap, pid));
    mutex_exit(proc_lock);
    if (p == NULL) {
        return ESRCH;
    }

    /*
     * return the actual number of CPU, tag all of them as available
     * The result is a mask, the first CPU being in the least significant
     * bit.
     */
    data = kmem_zalloc(size, KM_SLEEP);
    lp = data;
    /*
     * Fill whole words, then the residual partial word.  The residual
     * shift uses the remaining bit count `nb` (not `ncpu`) and an
     * unsigned long constant: `(1 << ncpu) - 1` was both the wrong
     * operand and undefined for ncpu >= the width of int.
     */
    while (nb >= LONG_BIT) {
        *lp++ = ~0UL;
        nb -= LONG_BIT;
    }
    if (nb)
        *lp = (1UL << nb) - 1;
    error = copyout(data, SCARG(uap, mask), size);
    kmem_free(data, size);
    *retval = size;
    return error;
}
/*
 * Sample PQoS CMT data for the task described by "task" (pid == 0
 * means all tracked processes; lwpid narrows to one thread), then
 * publish the data with the elapsed interval in *intval_ms.
 * Returns 0 on success, -1 when the process/thread cannot be found
 * (in which case data-ready is still signalled with interval 0).
 */
int os_pqos_cmt_smpl(perf_ctl_t *ctl, perf_task_t *task, int *intval_ms)
{
    task_pqos_cmt_t *t = (task_pqos_cmt_t *)task;
    track_proc_t *proc;
    track_lwp_t *lwp = NULL;
    boolean_t end;

    /* refresh the set of tracked processes before sampling */
    proc_enum_update(0);

    if (t->pid == 0)
        proc_pqos_func(NULL, os_pqos_cmt_proc_smpl);
    else {
        if ((proc = proc_find(t->pid)) == NULL) {
            disp_pqos_cmt_data_ready(0);
            return -1;
        }

        if (t->lwpid == 0)
            os_pqos_cmt_proc_smpl(proc, NULL, &end);
        else {
            if ((lwp = proc_lwp_find(proc, t->lwpid)) == NULL) {
                proc_refcount_dec(proc);
                disp_pqos_cmt_data_ready(0);
                return -1;
            }

            os_pqos_cmt_lwp_smpl(lwp, NULL, &end);
        }

        if (lwp != NULL)
            lwp_refcount_dec(lwp);

        proc_refcount_dec(proc);
    }

    /* interval since the previous sample, then restart the clock */
    *intval_ms = current_ms() - ctl->last_ms_pqos;
    ctl->last_ms_pqos = current_ms();

    disp_pqos_cmt_data_ready(*intval_ms);

    return (0);
}
/*
 * kill(2) syscall handler: deliver a signal to a process.
 * Currently only the single-process case (pid > 0) is implemented.
 */
errno_t sc_kill(thread_t *p, syscall_result_t *r, kill_args_t *args)
{
    /* TODO:
     *  1) process groups == -pid
     *  2) broadcast - pid == 0
     *  3) broadcast excluding init - pid == -1
     */
    proc_t *dest_proc = NULL;

    /* validate the signal number */
    if ( args->sig < 0 || args->sig > _NSIG ) {
        r->result = -1;
        return EINVAL;
    }

    /* look up the process the signal should be delivered to */
    dest_proc = proc_find(args->pid);
    if ( dest_proc == NULL && args->pid != 0 ) {
        r->result = -1;
        return ESRCH;
    }

    /* check whether we are allowed to deliver the signal */
    /* ... */

    /* determine who receives the signal */
    /* single process */
    if ( args->pid > 0 ) {
        signal_send(dest_proc, args->sig);
        r->result = 0;
        return EOK;
    }

    /* remaining cases: to be completed per the TODO list above */
    r->result = -1;
    return -ENOSTR;
}
/*
 * Resolve the proc for a task.
 *
 * Tasks don't really hold a reference on a proc unless the calling
 * thread belongs to the task in question, so for a foreign task the
 * lookup goes pid -> proc and is validated against the task pointer
 * (guarding against pid reuse).  Returns a referenced proc the caller
 * must release, or NULL when no matching proc exists.
 */
static struct proc * mac_task_get_proc(struct task *task)
{
    if (task == current_task()) {
        return proc_self();
    }

    struct proc *p = proc_find(task_pid(task));
    if (p == NULL) {
        return NULL;
    }

    /* the pid may have been recycled: the proc must still map to task */
    if (proc_task(p) != task) {
        proc_rele(p);
        return NULL;
    }

    return p;
}
/*
 * Common implementation for Linux tkill(2)/tgkill(2): deliver a signal
 * to one thread (tid) in thread group tgid.  tgid == -1 means "any
 * group", in which case the tid is used as the process id.
 * signum == 0 performs only the permission/existence checks.
 */
static int linux_do_tkill(struct lwp *l, int tgid, int tid, int signum)
{
    struct proc *p;
    struct lwp *t;
    ksiginfo_t ksi;
    int error;

    if (signum < 0 || signum >= LINUX__NSIG)
        return EINVAL;
    /* translate from Linux to native signal numbering */
    signum = linux_to_native_signo[signum];

    if (tgid == -1) {
        /* Linux tkill(2) - no thread group id given, use tid as pid */
        tgid = tid;
    }

    /* attribute the signal to the calling lwp's credentials */
    KSI_INIT(&ksi);
    ksi.ksi_signo = signum;
    ksi.ksi_code = SI_LWP;
    ksi.ksi_pid = l->l_proc->p_pid;
    ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
    ksi.ksi_lid = tid;

    mutex_enter(proc_lock);
    p = proc_find(tgid);
    if (p == NULL) {
        mutex_exit(proc_lock);
        return ESRCH;
    }
    mutex_enter(p->p_lock);
    error = kauth_authorize_process(l->l_cred,
        KAUTH_PROCESS_SIGNAL, p, KAUTH_ARG(signum), NULL, NULL);
    /* the target lwp must exist in the target process */
    if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
        error = ESRCH;
    else if (signum != 0)
        kpsignal2(p, &ksi);
    mutex_exit(p->p_lock);
    mutex_exit(proc_lock);

    return error;
}
/*
 * getpgid(2): return the process-group id of the given pid, or of the
 * calling process when pid == 0.  Returns ESRCH for an unknown pid.
 */
int sys_getpgid(struct lwp *l, const struct sys_getpgid_args *uap, register_t *retval)
{
    /* {
        syscallarg(pid_t) pid;
    } */
    const pid_t pid = SCARG(uap, pid);
    struct proc *target;
    int error;

    mutex_enter(proc_lock);
    if (pid == 0) {
        /* pid 0 means "the calling process" */
        *retval = l->l_proc->p_pgid;
        error = 0;
    } else {
        target = proc_find(pid);
        if (target != NULL) {
            *retval = target->p_pgid;
            error = 0;
        } else {
            error = ESRCH;
        }
    }
    mutex_exit(proc_lock);
    return error;
}
/*
 * KPI to determine if a pid is currently backgrounded.
 * Returns ESRCH if pid cannot be found or has started exiting.
 * Returns EINVAL if state is NULL.
 * Sets *state to 1 if pid is backgrounded, and 0 otherwise.
 */
int proc_pidbackgrounded(pid_t pid, uint32_t* state)
{
    if (state == NULL)
        return(EINVAL);

    proc_t target = proc_find(pid);
    if (target == PROC_NULL)
        return(ESRCH);

    /* the DARWIN_BG task policy flag reflects backgrounded state */
    *state = proc_get_effective_task_policy(target->task,
        TASK_POLICY_DARWIN_BG) ? 1 : 0;

    proc_rele(target);
    return (0);
}
/* * get the proc_t structure corresponding to a given process name */ proc_t find_proc_by_name(char *name) { // get pointer to kernel process proc_t all_proc = proc_find(0); // don't forget to drop reference proc_rele(all_proc); if (all_proc == PROC_NULL) { #if DEBUG printf("[ERROR] Couldn't find all_proc!\n"); #endif return PROC_NULL; } // we need to lock before searching - proc_list_lock() and proc_list_unlock() aren't exported if (_proc_list_lock == NULL) _proc_list_lock = (void*)solve_kernel_symbol(&g_kernel_info, "_proc_list_lock"); if (_proc_list_unlock == NULL) _proc_list_unlock = (void*)solve_kernel_symbol(&g_kernel_info, "_proc_list_unlock"); _proc_list_lock(); for (proc_t tmp = all_proc ; tmp != PROC_NULL; tmp = (proc_t)(tmp->p_list.le_prev)) { char processname[MAXCOMLEN+1] = { 0 }; strlcpy(processname, tmp->p_comm, MAXCOMLEN+1); if (strncmp(tmp->p_comm, name, sizeof(tmp->p_comm)) == 0) { _proc_list_unlock(); #if DEBUG // printf("[INFO] Found proc_t of %s\n", name); #endif return tmp; } } _proc_list_unlock(); #if DEBUG printf("[ERROR] Couldn't find target proc %s\n", name); #endif return PROC_NULL; }
/*
 * Linux sched_setaffinity(2) emulation.  The requested mask is
 * ignored; the call only verifies that the target pid exists.
 */
int linux_sys_sched_setaffinity(struct lwp *l, const struct linux_sys_sched_setaffinity_args *uap, register_t *retval)
{
    /* {
        syscallarg(linux_pid_t) pid;
        syscallarg(unsigned int) len;
        syscallarg(unsigned long *) mask;
    } */
    proc_t *target;

    /* XXX: Pointless check. TODO: Actually implement this. */
    mutex_enter(proc_lock);
    target = proc_find(SCARG(uap, pid));
    mutex_exit(proc_lock);

    if (target == NULL)
        return ESRCH;

    /* Let's ignore it */
    DPRINTF(("%s\n", __func__));
    return 0;
}
/*
 * The implementation of displaying window on screen for
 * window type "WIN_TYPE_LATNODE_PROC" and "WIN_TYPE_LATNODE_LWP"
 *
 * Resolves the target process and mapping, maps the region to NUMA
 * nodes, and renders the per-node latency view.  Returns B_TRUE on
 * success; on any lookup failure a warning/note is shown instead.
 */
boolean_t os_latnode_win_draw(dyn_win_t *win)
{
    dyn_latnode_t *dyn = (dyn_latnode_t *)(win->dyn);
    track_proc_t *proc;
    map_entry_t *entry;
    boolean_t note_out, ret;

    if ((proc = proc_find(dyn->pid)) == NULL) {
        win_warn_msg(WARN_INVALID_PID);
        win_note_show(NOTE_INVALID_PID);
        return (B_FALSE);
    }

    /* the address range being examined must map to a known entry */
    if ((entry = map_entry_find(proc, dyn->addr, dyn->size)) == NULL) {
        proc_refcount_dec(proc);
        win_warn_msg(WARN_INVALID_MAP);
        win_note_show(NOTE_INVALID_MAP);
        return (B_FALSE);
    }

    if (map_map2numa(proc, entry) != 0) {
        proc_refcount_dec(proc);
        win_warn_msg(WARN_INVALID_NUMAMAP);
        win_note_show(NOTE_INVALID_NUMAMAP);
        return (B_FALSE);
    }

    win_title_show();
    ret = latnode_data_show(proc, dyn, entry, &note_out);
    /* only show the default note if the renderer didn't emit its own */
    if (!note_out) {
        win_note_show(NOTE_LATNODE);
    }

    proc_refcount_dec(proc);
    reg_update_all();
    return (ret);
}
/*
 * The implementation of displaying window on screen for
 * window type "WIN_TYPE_LAT_PROC" and "WIN_TYPE_LAT_LWP"
 *
 * Requires load-latency sampling support; resolves the process and its
 * mappings, then renders the latency view.  Returns B_TRUE on success.
 */
boolean_t os_lat_win_draw(dyn_win_t *win)
{
    dyn_lat_t *dyn = (dyn_lat_t *)(win->dyn);
    track_proc_t *proc = NULL;
    boolean_t note_out, ret;

    /* latency view needs load-latency sampling to be running */
    if (!perf_ll_started()) {
        win_warn_msg(WARN_LL_NOT_SUPPORT);
        win_note_show(NOTE_LAT);
        return (B_FALSE);
    }

    proc = proc_find(dyn->pid);
    if (proc == NULL) {
        win_warn_msg(WARN_INVALID_PID);
        win_note_show(NOTE_INVALID_PID);
        return (B_FALSE);
    }

    if (map_proc_load(proc) != 0) {
        proc_refcount_dec(proc);
        win_warn_msg(WARN_INVALID_MAP);
        win_note_show(NOTE_INVALID_MAP);
        return (B_FALSE);
    }

    win_title_show();
    ret = win_lat_data_show(proc, dyn, &note_out);
    /* only show the default note if the renderer didn't emit its own */
    if (!note_out)
        win_note_show(NOTE_LAT);

    proc_refcount_dec(proc);
    reg_update_all();
    return (ret);
}
/* system call implementation */
/*
 * process_policy(2): apply a policy action (background, resource
 * usage, app type, boost, ...) to a target process or thread.
 * Permission checks differ between embedded and non-embedded configs;
 * most policies then dispatch to a dedicated handler.
 */
int process_policy(__unused struct proc *p, struct process_policy_args * uap, __unused int32_t *retval)
{
    int error = 0;
    int scope = uap->scope;
    int policy = uap->policy;
    int action = uap->action;
    int policy_subtype = uap->policy_subtype;
    user_addr_t attrp = uap->attrp;
    pid_t target_pid = uap->target_pid;
    uint64_t target_threadid = uap->target_threadid;
    proc_t target_proc = PROC_NULL;
#if CONFIG_MACF || !CONFIG_EMBEDDED
    proc_t curp = current_proc();
#endif
    kauth_cred_t my_cred;
#if CONFIG_EMBEDDED
    kauth_cred_t target_cred;
#endif

    if ((scope != PROC_POLICY_SCOPE_PROCESS) && (scope != PROC_POLICY_SCOPE_THREAD)) {
        return(EINVAL);
    }

    /* self-targeting avoids the pid hash lookup */
    if (target_pid == 0 || target_pid == proc_selfpid())
        target_proc = proc_self();
    else
        target_proc = proc_find(target_pid);

    if (target_proc == PROC_NULL)
        return(ESRCH);

    my_cred = kauth_cred_get();

#if CONFIG_EMBEDDED
    /* embedded: allow if root, or caller's uid/ruid matches target's uid */
    target_cred = kauth_cred_proc_ref(target_proc);

    if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
        kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred))
#else
    /*
     * Resource starvation control can be used by unpriv resource owner but priv at the time of ownership claim. This is
     * checked in low resource handle routine. So bypass the checks here.
     */
    if ((policy != PROC_POLICY_RESOURCE_STARVATION) &&
        (policy != PROC_POLICY_APPTYPE) &&
        (!kauth_cred_issuser(my_cred) && curp != p))
#endif
    {
        error = EPERM;
        goto out;
    }

#if CONFIG_MACF
    switch (policy) {
        case PROC_POLICY_BOOST:
        case PROC_POLICY_RESOURCE_USAGE:
#if CONFIG_EMBEDDED
        case PROC_POLICY_APPTYPE:
        case PROC_POLICY_APP_LIFECYCLE:
#endif
            /* These policies do their own appropriate mac checks */
            break;
        default:
            error = mac_proc_check_sched(curp, target_proc);
            if (error)
                goto out;
            break;
    }
#endif /* CONFIG_MACF */

    /* dispatch to the per-policy handler */
    switch(policy) {
        case PROC_POLICY_BACKGROUND:
            error = ENOTSUP;
            break;
        case PROC_POLICY_HARDWARE_ACCESS:
            error = ENOTSUP;
            break;
        case PROC_POLICY_RESOURCE_STARVATION:
            error = handle_lowresource(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
        case PROC_POLICY_RESOURCE_USAGE:
            /* only the CPU subtype is implemented */
            switch(policy_subtype) {
                case PROC_POLICY_RUSAGE_NONE:
                case PROC_POLICY_RUSAGE_WIREDMEM:
                case PROC_POLICY_RUSAGE_VIRTMEM:
                case PROC_POLICY_RUSAGE_DISK:
                case PROC_POLICY_RUSAGE_NETWORK:
                case PROC_POLICY_RUSAGE_POWER:
                    error = ENOTSUP;
                    goto out;
                default:
                    error = EINVAL;
                    goto out;
                case PROC_POLICY_RUSAGE_CPU:
                    break;
            }
            error = handle_cpuuse(action, attrp, target_proc, target_threadid);
            break;
#if CONFIG_EMBEDDED
        case PROC_POLICY_APP_LIFECYCLE:
            error = handle_applifecycle(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
#endif /* CONFIG_EMBEDDED */
        case PROC_POLICY_APPTYPE:
            error = handle_apptype(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
        case PROC_POLICY_BOOST:
            error = handle_boost(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
        default:
            error = EINVAL;
            break;
    }

out:
    proc_rele(target_proc);
#if CONFIG_EMBEDDED
    kauth_cred_unref(&target_cred);
#endif
    return(error);
}
/*
 * Syscall-level signal delivery: send `sig` to the process with the
 * given pid, provided the caller shares the target's uid or gid, or
 * is root (by uid or gid).  Unknown pids are silently ignored.
 */
void signal_send_syscall(pid_t pid, int sig)
{
    proc_t *target = proc_find(pid);

    if (target == NULL)
        return;

    int allowed = (target->uid == proc_current->uid) ||
        (target->gid == proc_current->gid) ||
        (proc_current->uid == PERM_ROOTUID) ||
        (proc_current->gid == PERM_ROOTGID);

    if (allowed)
        signal_send(target, sig);
}
/*
 * ptrace(2) implementation.
 *
 * Handles the self-targeting requests (PT_DENY_ATTACH, PT_FORCEQUOTA,
 * PT_TRACE_ME, PT_SIGEXC) on the calling process `p`, then looks up
 * the victim `t` for the remaining requests.  The Mach version of
 * ptrace executes requests directly, simplifying the interaction of
 * ptrace and signals.
 */
int ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
    struct proc *t = current_proc();    /* target process */
    task_t task;
    thread_t th_act;
    struct uthread *ut;
    int tr_sigexc = 0;
    int error = 0;
    int stopped = 0;

    AUDIT_ARG(cmd, uap->req);
    AUDIT_ARG(pid, uap->pid);
    AUDIT_ARG(addr, uap->addr);
    AUDIT_ARG(value, uap->data);

    if (uap->req == PT_DENY_ATTACH) {
        proc_lock(p);
        if (ISSET(p->p_lflag, P_LTRACED)) {
            /* already traced: the process exits instead */
            proc_unlock(p);
            exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
            /* drop funnel before we return */
            thread_exception_return();
            /* NOTREACHED */
        }
        SET(p->p_lflag, P_LNOATTACH);
        proc_unlock(p);
        return(0);
    }

    if (uap->req == PT_FORCEQUOTA) {
        if (is_suser()) {
            OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
            return (0);
        } else
            return (EPERM);
    }

    /*
     * Intercept and deal with "please trace me" request.
     */
    if (uap->req == PT_TRACE_ME) {
        proc_lock(p);
        SET(p->p_lflag, P_LTRACED);
        /* Non-attached case, our tracer is our parent. */
        p->p_oppid = p->p_ppid;
        proc_unlock(p);
        return(0);
    }
    if (uap->req == PT_SIGEXC) {
        proc_lock(p);
        if (ISSET(p->p_lflag, P_LTRACED)) {
            SET(p->p_lflag, P_LSIGEXC);
            proc_unlock(p);
            return(0);
        } else {
            proc_unlock(p);
            return(EINVAL);
        }
    }

    /*
     * We do not want ptrace to do anything with kernel or launchd
     */
    if (uap->pid < 2) {
        return(EPERM);
    }

    /*
     * Locate victim, and make sure it is traceable.
     */
    if ((t = proc_find(uap->pid)) == NULL)
        return (ESRCH);

    AUDIT_ARG(process, t);

    task = t->task;
    if (uap->req == PT_ATTACHEXC) {
        /* PT_ATTACHEXC is PT_ATTACH plus exception-based delivery */
        uap->req = PT_ATTACH;
        tr_sigexc = 1;
    }
    if (uap->req == PT_ATTACH) {
        int err;

        if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
                                     t, (uintptr_t)&err, 0, 0) == 0 ) {
            /* it's OK to attach */
            proc_lock(t);
            SET(t->p_lflag, P_LTRACED);
            if (tr_sigexc)
                SET(t->p_lflag, P_LSIGEXC);

            /* remember the original parent so detach can restore it */
            t->p_oppid = t->p_ppid;
            proc_unlock(t);
            if (t->p_pptr != p)
                proc_reparentlocked(t, p, 1, 0);

            proc_lock(t);
            if (get_task_userstop(task) > 0 ) {
                stopped = 1;
            }
            t->p_xstat = 0;
            proc_unlock(t);
            psignal(t, SIGSTOP);
            /*
             * If the process was stopped, wake up and run through
             * issignal() again to properly connect to the tracing
             * process.
             */
            if (stopped)
                task_resume(task);
            error = 0;
            goto out;
        }
        else {
            /* not allowed to attach, proper error code returned by kauth_authorize_process */
            if (ISSET(t->p_lflag, P_LNOATTACH)) {
                psignal(p, SIGSEGV);
            }

            error = err;
            goto out;
        }
    }

    /*
     * You can't do what you want to the process if:
     *  (1) It's not being traced at all,
     */
    proc_lock(t);
    if (!ISSET(t->p_lflag, P_LTRACED)) {
        proc_unlock(t);
        error = EPERM;
        goto out;
    }

    /*
     *  (2) it's not being traced by _you_, or
     */
    if (t->p_pptr != p) {
        proc_unlock(t);
        error = EBUSY;
        goto out;
    }

    /*
     *  (3) it's not currently stopped.
     */
    if (t->p_stat != SSTOP) {
        proc_unlock(t);
        error = EBUSY;
        goto out;
    }

    /*
     * Mach version of ptrace executes request directly here,
     * thus simplifying the interaction of ptrace and signals.
     */
    /* proc lock is held here */
    switch (uap->req) {

    case PT_DETACH:
        if (t->p_oppid != t->p_ppid) {
            struct proc *pp;

            /* restore the original parent (fall back to initproc) */
            proc_unlock(t);
            pp = proc_find(t->p_oppid);
            proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
            if (pp != PROC_NULL)
                proc_rele(pp);
            proc_lock(t);
        }

        t->p_oppid = 0;
        CLR(t->p_lflag, P_LTRACED);
        CLR(t->p_lflag, P_LSIGEXC);
        proc_unlock(t);
        goto resume;

    case PT_KILL:
        /*
         * Tell child process to kill itself after it
         * is resumed by adding NSIG to p_cursig. [see issig]
         */
        proc_unlock(t);
        psignal(t, SIGKILL);
        goto resume;

    case PT_STEP:           /* single step the child */
    case PT_CONTINUE:       /* continue the child */
        proc_unlock(t);
        th_act = (thread_t)get_firstthread(task);
        if (th_act == THREAD_NULL) {
            error = EINVAL;
            goto out;
        }

        /* addr == 1 means "resume at the current PC" */
        if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)  (((unsigned)(addr)&((size)-1))==0)
            if (!ALIGNED((int)uap->addr, sizeof(int)))
                return (ERESTART);
#undef ALIGNED
#endif
            thread_setentrypoint(th_act, uap->addr);
        }

        if ((unsigned)uap->data >= NSIG) {
            error = EINVAL;
            goto out;
        }

        if (uap->data != 0) {
            psignal(t, uap->data);
        }

        if (uap->req == PT_STEP) {
            /*
             * set trace bit
             */
            if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
                error = ENOTSUP;
                goto out;
            }
        } else {
            /*
             * clear trace bit if on
             */
            if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
                error = ENOTSUP;
                goto out;
            }
        }
    resume:
        proc_lock(t);
        t->p_xstat = uap->data;
        t->p_stat = SRUN;
        if (t->sigwait) {
            /* target is blocked in sigwait: wake it explicitly */
            wakeup((caddr_t)&(t->sigwait));
            proc_unlock(t);
            if ((t->p_lflag & P_LSIGEXC) == 0) {
                task_resume(task);
            }
        } else
            proc_unlock(t);

        break;

    case PT_THUPDATE: {
        proc_unlock(t);
        if ((unsigned)uap->data >= NSIG) {
            error = EINVAL;
            goto out;
        }
        /* addr carries a mach port naming the thread to update */
        th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
        if (th_act == THREAD_NULL)
            return (ESRCH);
        ut = (uthread_t)get_bsdthread_info(th_act);
        if (uap->data)
            ut->uu_siglist |= sigmask(uap->data);
        proc_lock(t);
        t->p_xstat = uap->data;
        t->p_stat = SRUN;
        proc_unlock(t);
        thread_deallocate(th_act);
        error = 0;
        }
        break;
    default:
        proc_unlock(t);
        error = EINVAL;
        goto out;
    }

    error = 0;
out:
    proc_rele(t);
    return(error);
}
/*
 * Switch the Raspberry Pi display to the requested resolution.
 *
 * Powers the TV output on in the matching HDMI or SDTV mode (waiting
 * for the TV-service callback to signal completion), then rebuilds the
 * dispmanx window/element that backs the EGL native window.
 * Returns true on success, false when the host libs are unavailable.
 */
bool CEGLNativeTypeRaspberryPI::SetNativeResolution(const RESOLUTION_INFO &res)
{
#if defined(TARGET_RASPBERRY_PI)
  if(!m_DllBcmHost || !m_nativeWindow)
    return false;

  /* wait for any external hello_video.bin player to finish first */
  while (proc_find("hello_video.bin") >= 0)
    Sleep(100);

  DestroyDispmaxWindow();

  RENDER_STEREO_MODE stereo_mode = g_graphicsContext.GetStereoMode();

  if(GETFLAGS_GROUP(res.dwFlags) && GETFLAGS_MODE(res.dwFlags))
  {
    /* HDMI path: group and mode both set */
    uint32_t mode3d = HDMI_3D_FORMAT_NONE;
    sem_init(&m_tv_synced, 0, 0);
    m_DllBcmHost->vc_tv_register_callback(CallbackTvServiceCallback, this);

    if (stereo_mode == RENDER_STEREO_MODE_SPLIT_HORIZONTAL || stereo_mode == RENDER_STEREO_MODE_SPLIT_VERTICAL)
    {
      /* inform TV of any 3D settings. Note this property just applies to next hdmi mode change, so no need to call for 2D modes */
      HDMI_PROPERTY_PARAM_T property;
      property.property = HDMI_PROPERTY_3D_STRUCTURE;
      if (CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOSCREEN_FRAMEPACKING) && CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_SUPPORTMVC) && res.fRefreshRate <= 30.0f)
        property.param1 = HDMI_3D_FORMAT_FRAME_PACKING;
      else if (stereo_mode == RENDER_STEREO_MODE_SPLIT_VERTICAL)
        property.param1 = HDMI_3D_FORMAT_SBS_HALF;
      else if (stereo_mode == RENDER_STEREO_MODE_SPLIT_HORIZONTAL)
        property.param1 = HDMI_3D_FORMAT_TB_HALF;
      else
        property.param1 = HDMI_3D_FORMAT_NONE;
      property.param2 = 0;
      mode3d = property.param1;
      vc_tv_hdmi_set_property(&property);
    }

    HDMI_PROPERTY_PARAM_T property;
    property.property = HDMI_PROPERTY_PIXEL_CLOCK_TYPE;
    // if we are closer to ntsc version of framerate, let gpu know
    int iFrameRate = (int)(res.fRefreshRate + 0.5f);
    if (fabsf(res.fRefreshRate * (1001.0f / 1000.0f) - iFrameRate) < fabsf(res.fRefreshRate - iFrameRate))
      property.param1 = HDMI_PIXEL_CLOCK_TYPE_NTSC;
    else
      property.param1 = HDMI_PIXEL_CLOCK_TYPE_PAL;
    property.param2 = 0;
    vc_tv_hdmi_set_property(&property);

    int success = m_DllBcmHost->vc_tv_hdmi_power_on_explicit_new(HDMI_MODE_HDMI, GETFLAGS_GROUP(res.dwFlags), GETFLAGS_MODE(res.dwFlags));

    if (success == 0)
    {
      CLog::Log(LOGDEBUG, "EGL set HDMI mode (%d,%d)=%d %s%s\n", GETFLAGS_GROUP(res.dwFlags), GETFLAGS_MODE(res.dwFlags), success, CStereoscopicsManager::GetInstance().ConvertGuiStereoModeToString(stereo_mode), mode3d==HDMI_3D_FORMAT_FRAME_PACKING ? " FP" : mode3d==HDMI_3D_FORMAT_SBS_HALF ? " SBS" : mode3d==HDMI_3D_FORMAT_TB_HALF ? " TB" : "");
      /* block until the TV-service callback reports the mode change */
      sem_wait(&m_tv_synced);
    }
    else
    {
      CLog::Log(LOGERROR, "EGL failed to set HDMI mode (%d,%d)=%d %s%s\n", GETFLAGS_GROUP(res.dwFlags), GETFLAGS_MODE(res.dwFlags), success, CStereoscopicsManager::GetInstance().ConvertGuiStereoModeToString(stereo_mode), mode3d==HDMI_3D_FORMAT_FRAME_PACKING ? " FP" : mode3d==HDMI_3D_FORMAT_SBS_HALF ? " SBS" : mode3d==HDMI_3D_FORMAT_TB_HALF ? " TB" : "");
    }
    m_DllBcmHost->vc_tv_unregister_callback(CallbackTvServiceCallback);
    sem_destroy(&m_tv_synced);
    m_desktopRes = res;
  }
  else if(!GETFLAGS_GROUP(res.dwFlags) && GETFLAGS_MODE(res.dwFlags))
  {
    /* SDTV path: mode set but no group */
    sem_init(&m_tv_synced, 0, 0);
    m_DllBcmHost->vc_tv_register_callback(CallbackTvServiceCallback, this);
    SDTV_OPTIONS_T options;
    options.aspect = get_sdtv_aspect_from_display_aspect((float)res.iScreenWidth / (float)res.iScreenHeight);
    int success = m_DllBcmHost->vc_tv_sdtv_power_on((SDTV_MODE_T)GETFLAGS_MODE(res.dwFlags), &options);
    if (success == 0)
    {
      CLog::Log(LOGDEBUG, "EGL set SDTV mode (%d,%d)=%d\n", GETFLAGS_GROUP(res.dwFlags), GETFLAGS_MODE(res.dwFlags), success);
      sem_wait(&m_tv_synced);
    }
    else
    {
      CLog::Log(LOGERROR, "EGL failed to set SDTV mode (%d,%d)=%d\n", GETFLAGS_GROUP(res.dwFlags), GETFLAGS_MODE(res.dwFlags), success);
    }
    m_DllBcmHost->vc_tv_unregister_callback(CallbackTvServiceCallback);
    sem_destroy(&m_tv_synced);
    m_desktopRes = res;
  }

  m_dispman_display = g_RBP.OpenDisplay(0);
  m_width = res.iWidth;
  m_height = res.iHeight;

  VC_RECT_T dst_rect;
  VC_RECT_T src_rect;

  dst_rect.x = 0;
  dst_rect.y = 0;
  dst_rect.width = res.iScreenWidth;
  dst_rect.height = res.iScreenHeight;

  /* dispmanx source rect uses 16.16 fixed point */
  src_rect.x = 0;
  src_rect.y = 0;
  src_rect.width = m_width << 16;
  src_rect.height = m_height << 16;

  VC_DISPMANX_ALPHA_T alpha;
  memset(&alpha, 0x0, sizeof(VC_DISPMANX_ALPHA_T));
  alpha.flags = DISPMANX_FLAGS_ALPHA_FROM_SOURCE;

  DISPMANX_CLAMP_T clamp;
  memset(&clamp, 0x0, sizeof(DISPMANX_CLAMP_T));

  DISPMANX_TRANSFORM_T transform = DISPMANX_NO_ROTATE;
  DISPMANX_UPDATE_HANDLE_T dispman_update = m_DllBcmHost->vc_dispmanx_update_start(0);

  if (stereo_mode == RENDER_STEREO_MODE_SPLIT_VERTICAL)
    transform = DISPMANX_STEREOSCOPIC_SBS;
  else if (stereo_mode == RENDER_STEREO_MODE_SPLIT_HORIZONTAL)
    transform = DISPMANX_STEREOSCOPIC_TB;
  else
    transform = DISPMANX_STEREOSCOPIC_MONO;

  CLog::Log(LOGDEBUG, "EGL set resolution %dx%d -> %dx%d @ %.2f fps (%d,%d) flags:%x aspect:%.2f\n", m_width, m_height, dst_rect.width, dst_rect.height, res.fRefreshRate, GETFLAGS_GROUP(res.dwFlags), GETFLAGS_MODE(res.dwFlags), (int)res.dwFlags, res.fPixelRatio);

  m_dispman_element = m_DllBcmHost->vc_dispmanx_element_add(dispman_update,
    m_dispman_display,
    1,                              // layer
    &dst_rect,
    (DISPMANX_RESOURCE_HANDLE_T)0,  // src
    &src_rect,
    DISPMANX_PROTECTION_NONE,
    &alpha,                         //alphe
    &clamp,                         //clamp
    transform);                     // transform
  assert(m_dispman_element != DISPMANX_NO_HANDLE);
  assert(m_dispman_element != (unsigned)DISPMANX_INVALID);

  memset(m_nativeWindow, 0, sizeof(EGL_DISPMANX_WINDOW_T));

  EGL_DISPMANX_WINDOW_T *nativeWindow = (EGL_DISPMANX_WINDOW_T *)m_nativeWindow;

  nativeWindow->element = m_dispman_element;
  nativeWindow->width = m_width;
  nativeWindow->height = m_height;

  m_DllBcmHost->vc_dispmanx_display_set_background(dispman_update, m_dispman_display, 0x00, 0x00, 0x00);
  m_DllBcmHost->vc_dispmanx_update_submit_sync(dispman_update);

  DLOG("CEGLNativeTypeRaspberryPI::SetNativeResolution\n");

  return true;
#else
  return false;
#endif
}